kernel-2.6.18-238.el5.src.rpm

From: Andy Gospodarek <gospo@redhat.com>
Date: Tue, 1 Dec 2009 17:55:29 -0500
Subject: [net] ixgbe: update to upstream version 2.0.44-k2
Message-id: <20091201175527.GG3259@gospo.rdu.redhat.com>
Patchwork-id: 21577
O-Subject: [RHEL5.5 PATCH 1/2] ixgbe: update to upstream version 2.0.44-k2
Bugzilla: 513707 514306 516699
RH-Acked-by: Stefan Assmann <sassmann@redhat.com>
RH-Acked-by: David S. Miller <davem@redhat.com>
RH-Acked-by: Ivan Vecera <ivecera@redhat.com>

This is a backport of the latest ixgbe driver from upstream
(net-next-2.6 in this case) to RHEL.  The last patch included from
upstream was:

    commit f9254edaabfc48f5a28bb5a88c6db48704cc058d
    Author: Ali Gholami Rudi <ali@rudi.ir>
    Date:   Tue Nov 10 06:40:06 2009 +0000

        ixgbe: r_idx not used in ixgbe_msix_clean_rx()

Some additional code that predates our last update was also brought in,
since DCA is now supported on RHEL; integrating and testing it required
some significant cleanup.

This resolves the requests in RHBZ 516699 and 513707 to move to the
latest upstream driver, as well as RHBZ 514306 for DCA support.

I have verified that this patch works well on 82599 hardware, and Intel
has reported that it performs well in initial traffic tests on several
devices.
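
For reviewers who have not followed the DCA work, the pattern this
backport adopts is the usual kernel one: gate the hooks on
CONFIG_IXGBE_DCA, register the device as a DCA requester, and cache per
ring the CPU that last serviced the queue so the hardware tag is only
reprogrammed when the queue migrates.  The sketch below illustrates that
pattern only; my_ring and my_update_dca are hypothetical names, not code
from this patch, and the actual tag register write is device-specific.

    #include <linux/dca.h>          /* dca_add_requester(), dca3_get_tag() */
    #include <linux/pci.h>
    #include <linux/smp.h>          /* get_cpu()/put_cpu() */
    #include <linux/types.h>

    struct my_ring {
            /* ... descriptor ring fields ... */
    #ifdef CONFIG_IXGBE_DCA
            int cpu;                /* CPU the tag was last written for */
    #endif
    };

    #ifdef CONFIG_IXGBE_DCA
    static void my_update_dca(struct my_ring *ring, struct pci_dev *pdev)
    {
            int cpu = get_cpu();    /* no preemption while programming */

            if (ring->cpu != cpu) {
                    u8 tag = dca3_get_tag(&pdev->dev, cpu);

                    /* device-specific: write 'tag' into this queue's
                     * DCA control register */
                    (void)tag;
                    ring->cpu = cpu;
            }
            put_cpu();
    }
    #endif

At probe time such a driver would also register with
dca_add_requester(&pdev->dev) and unregister with dca_remove_requester()
on teardown; the hunks below wire the equivalent per-ring state into
struct ixgbe_ring behind CONFIG_IXGBE_DCA.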

diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index fbeefda..74548c2 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -31,11 +31,16 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#include <linux/aer.h>
 
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
+#include "ixgbe_dcb.h"
 #include "ixgbe_compat.h"
 
+#ifdef CONFIG_IXGBE_DCA
+#include <linux/dca.h>
+#endif
 
 #define PFX "ixgbe: "
 #define DPRINTK(nlevel, klevel, fmt, args...) \
@@ -68,6 +73,8 @@
 #define IXGBE_RXBUFFER_128   128    /* Used for packet split */
 #define IXGBE_RXBUFFER_256   256    /* Used for packet split */
 #define IXGBE_RXBUFFER_2048  2048
+#define IXGBE_RXBUFFER_4096  4096
+#define IXGBE_RXBUFFER_8192  8192
 #define IXGBE_MAX_RXBUFFER   16384  /* largest size for a single descriptor */
 
 #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
@@ -81,9 +88,14 @@
 #define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
 #define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 5)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK   0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
+#define IXGBE_MAX_RSC_INT_RATE          162760
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
@@ -108,41 +120,46 @@ struct ixgbe_queue_stats {
 };
 
 struct ixgbe_ring {
-	struct napi_struct napi;
 	void *desc;			/* descriptor ring memory */
-	dma_addr_t dma;			/* phys. address of descriptor ring */
-	unsigned int size;		/* length in bytes */
-	unsigned int count;		/* amount of descriptors */
-	unsigned int next_to_use;
-	unsigned int next_to_clean;
-
-	int queue_index; /* needed for multiqueue queue management */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
+	u8 atr_sample_rate;
+	u8 atr_count;
+	u16 count;			/* amount of descriptors */
+	u16 rx_buf_len;
+	u16 next_to_use;
+	u16 next_to_clean;
 
+	u8 queue_index; /* needed for multiqueue queue management */
+
+#define IXGBE_RING_RX_PS_ENABLED                (u8)(1)
+	u8 flags;			/* per ring feature flags */
 	u16 head;
 	u16 tail;
 
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
-	u16 reg_idx; /* holds the special value that gets the hardware register
-		      * offset associated with this ring, which is different
-		      * for DCE and RSS modes */
-
+#ifdef CONFIG_IXGBE_DCA
 	/* cpu for tx queue */
 	int cpu;
+#endif
 
-	struct ixgbe_queue_stats stats;
-	u64 v_idx; /* maps directly to the index for this ring in the hardware
-	            * vector array, can also be used for finding the bit in EICR
-	            * and friends that represents the vector for this ring */
+	u16 work_limit;			/* max work per interrupt */
+	u16 reg_idx;			/* holds the special value that gets
+					 * the hardware register offset
+					 * associated with this ring, which is
+					 * different for DCB and RSS modes
+					 */
 
+	struct ixgbe_queue_stats stats;
+	unsigned long reinit_state;
+	u64 rsc_count;			/* stat for coalesced packets */
 
-	u16 work_limit;                /* max work per interrupt */
-	u16 rx_buf_len;
+	unsigned int size;		/* length in bytes */
+	dma_addr_t dma;			/* phys. address of descriptor ring */
 };
 
 enum ixgbe_ring_f_enum {
@@ -150,12 +167,21 @@ enum ixgbe_ring_f_enum {
 	RING_F_DCB,
 	RING_F_VMDQ,
 	RING_F_RSS,
+	RING_F_FDIR,
+#ifdef IXGBE_FCOE
+	RING_F_FCOE,
+#endif /* IXGBE_FCOE */
 
 	RING_F_ARRAY_SIZE      /* must be last in enum set */
 };
 
+#define IXGBE_MAX_DCB_INDICES   8
 #define IXGBE_MAX_RSS_INDICES  16
 #define IXGBE_MAX_VMDQ_INDICES 16
+#define IXGBE_MAX_FDIR_INDICES 64
+#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FCOE_INDICES  8
+#endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
 	int indices;
 	int mask;
@@ -164,11 +190,19 @@ struct ixgbe_ring_feature {
 #define MAX_RX_QUEUES 128
 #define MAX_TX_QUEUES 128
 
+#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+                              ? 8 : 1)
+#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
+
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
 struct ixgbe_q_vector {
 	struct ixgbe_adapter *adapter;
+	unsigned int v_idx; /* index of q_vector within array, also used for
+	                     * finding the bit in EICR and friends that
+	                     * represents the vector for this ring */
+	struct napi_struct napi;
 	struct net_device *dummy_netdev;
 	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
 	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -199,6 +233,10 @@ struct ixgbe_q_vector {
 	(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
 
 #define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
+#ifdef IXGBE_FCOE
+/* Use 3K as the baby jumbo frame size for FCoE */
+#define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072
+#endif /* IXGBE_FCOE */
 
 #define OTHER_VECTOR 1
 #define NON_Q_VECTORS (OTHER_VECTOR)
@@ -220,11 +258,16 @@ struct ixgbe_adapter {
 	struct vlan_group *vlgrp;
 	u16 bd_number;
 	struct work_struct reset_task;
-	struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
+	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
 	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+	struct ixgbe_dcb_config dcb_cfg;
+	struct ixgbe_dcb_config temp_dcb_cfg;
+	u8 dcb_set_bitmap;
+	enum ixgbe_fc_mode last_lfc_mode;
 
 	/* Interrupt Throttle Rate */
-	u32 itr_setting;
+	u32 rx_itr_setting;
+	u32 tx_itr_setting;
 	u16 eitr_low;
 	u16 eitr_high;
 
@@ -259,41 +302,60 @@ struct ixgbe_adapter {
 	 * thus the additional *_CAPABLE flags.
 	 */
 	u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1 << 0)
-#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 2)
-#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
-#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 4)
-#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
+#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
+#define IXGBE_FLAG_MSI_CAPABLE                  (u32)(1 << 1)
+#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_CAPABLE                 (u32)(1 << 3)
+#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 4)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 6)
+#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 7)
+#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 8)
+#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 9)
+#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 10)
+#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 11)
+#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 12)
+#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 13)
 #define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 14)
-#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 6)
-#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 7)
+#define IXGBE_FLAG_RSS_ENABLED                  (u32)(1 << 16)
+#define IXGBE_FLAG_RSS_CAPABLE                  (u32)(1 << 17)
+#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 18)
+#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
 #define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
 #define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 23)
 #define IXGBE_FLAG_IN_SFP_LINK_TASK             (u32)(1 << 24)
 #define IXGBE_FLAG_IN_SFP_MOD_TASK              (u32)(1 << 25)
-
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 26)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 27)
+#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 28)
+#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 29)
+
+	u32 flags2;
+#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1)
+#define IXGBE_FLAG2_RSC_ENABLED                 (u32)(1 << 1)
 /* default to trying for four seconds */
 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
 
-
 	/* OS defined structs */
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 	struct net_device_stats net_stats;
 
+	u32 test_icr;
+	struct ixgbe_ring test_tx_ring;
+	struct ixgbe_ring test_rx_ring;
+
 	/* structs defined in ixgbe_hw.h */
 	struct ixgbe_hw hw;
 	u16 msg_enable;
 	struct ixgbe_hw_stats stats;
 
 	/* Interrupt Throttle Rate */
-	u32 eitr_param;
+	u32 rx_eitr_param;
+	u32 tx_eitr_param;
 
 	unsigned long state;
 	u64 tx_busy;
-
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
 
@@ -306,6 +368,11 @@ struct ixgbe_adapter {
 	struct timer_list sfp_timer;
 	struct work_struct multispeed_fiber_task;
 	struct work_struct sfp_config_module_task;
+	u32 fdir_pballoc;
+	u32 atr_sample_rate;
+	spinlock_t fdir_perfect_lock;
+	struct work_struct fdir_reinit_task;
+	u64 rsc_count;
 	u32 wol;
 	u16 eeprom_version;
 };
@@ -314,6 +381,7 @@ enum ixbge_state_t {
 	__IXGBE_TESTING,
 	__IXGBE_RESETTING,
 	__IXGBE_DOWN,
+	__IXGBE_FDIR_INIT_DONE,
 	__IXGBE_SFP_MODULE_NOT_FOUND
 };
 
@@ -324,6 +392,13 @@ enum ixgbe_boards {
 
 extern struct ixgbe_info ixgbe_82598_info;
 extern struct ixgbe_info ixgbe_82599_info;
+#ifdef CONFIG_IXGBE_DCB
+extern struct dcbnl_rtnl_ops dcbnl_ops;
+extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
+                              struct ixgbe_dcb_config *dst_dcb_cfg,
+                              int tc_max);
+#endif
+
 extern char ixgbe_driver_name[];
 extern const char ixgbe_driver_version[];
 
@@ -339,11 +414,43 @@ extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_open(struct net_device *netdev);
 extern int ixgbe_close(struct net_device *netdev);
-extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
+extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter);
-extern int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern int ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
-extern void ixgbe_write_eitr(struct ixgbe_adapter *, int, u32);
+extern void __devinit ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
+extern int ethtool_ioctl(struct ifreq *ifr);
+extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
+extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
+extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+                                                 struct ixgbe_atr_input *input,
+                                                 u8 queue);
+extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
+                                       u16 vlan_id);
+extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
+                                        u32 src_addr);
+extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
+                                        u32 dst_addr);
+extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
+                                        u16 src_port);
+extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
+                                        u16 dst_port);
+extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
+                                         u16 flex_byte);
+extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
+                                      u8 l4type);
+#ifdef IXGBE_FCOE
+extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fso(struct ixgbe_adapter *adapter,
+                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+                     u32 tx_flags, u8 *hdr_len);
+extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
+extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+                          union ixgbe_adv_rx_desc *rx_desc,
+                          struct sk_buff *skb);
+extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+                              struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+#endif /* IXGBE_FCOE */
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 425dedb..c1af581 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -41,8 +41,7 @@
 static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
                                              ixgbe_link_speed *speed,
                                              bool *autoneg);
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                                ixgbe_link_speed speed,
                                                bool autoneg,
                                                bool autoneg_wait_to_complete);
@@ -50,6 +49,51 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                        u8 *eeprom_data);
 
 /**
+ *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ *  @hw: pointer to the HW structure
+ *
+ *  The defaults for 82598 should be in the range of 50us to 50ms;
+ *  however, the hardware default for these parts is 500us to 1ms, which is
+ *  less than the 10ms recommended by the PCIe spec.  To address this we need
+ *  to increase the value to either 10ms to 250ms for a capability version 1
+ *  config, or 16ms to 55ms for version 2.
+ **/
+static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+	u16 pcie_devctl2;
+
+	/* only take action if timeout value is defaulted to 0 */
+	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+		goto out;
+
+	/*
+	 * if the capabilities version is type 1 we can write the
+	 * timeout of 10ms to 250ms through the GCR register
+	 */
+	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+		goto out;
+	}
+
+	/*
+	 * for version 2 capabilities we need to write the config space
+	 * directly in order to set the completion timeout value for
+	 * 16ms to 55ms
+	 */
+	pci_read_config_word(adapter->pdev,
+	                     IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+	pci_write_config_word(adapter->pdev,
+	                      IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+	/* disable completion timeout resend */
+	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
  *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
  *  @hw: pointer to hardware structure
  *
@@ -75,18 +119,47 @@ static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
 static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
+
+	/* Call PHY identify routine to get the phy type */
+	ixgbe_identify_phy_generic(hw);
+
+	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
+	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
+	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
+	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
+	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during get_invariants because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ *
+ **/
+static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
 	struct ixgbe_phy_info *phy = &hw->phy;
 	s32 ret_val = 0;
 	u16 list_offset, data_offset;
 
-	/* Set the bus information prior to PHY identification */
-	mac->ops.get_bus_info(hw);
+	/* Identify the PHY */
+	phy->ops.identify(hw);
 
-	/* Call PHY identify routine to get the phy type */
-	ixgbe_identify_phy_generic(hw);
+	/* Overwrite the link function pointers if copper PHY */
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+		mac->ops.get_link_capabilities =
+		                  &ixgbe_get_copper_link_capabilities_82598;
+	}
 
-	/* PHY Init */
-	switch (phy->type) {
+	switch (hw->phy.type) {
 	case ixgbe_phy_tn:
 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
 		phy->ops.get_firmware_version =
@@ -106,8 +179,8 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
 
 		/* Check to see if SFP+ module is supported */
 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
-		                                              &list_offset,
-		                                              &data_offset);
+		                                            &list_offset,
+		                                            &data_offset);
 		if (ret_val != 0) {
 			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
 			goto out;
@@ -117,22 +190,27 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
 		break;
 	}
 
-	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
-		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
-		mac->ops.setup_link_speed =
-		                     &ixgbe_setup_copper_link_speed_82598;
-		mac->ops.get_link_capabilities =
-		                     &ixgbe_get_copper_link_capabilities_82598;
-	}
+out:
+	return ret_val;
+}
 
-	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
-	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
-	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
-	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
-	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
-	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+/**
+ *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware using the generic start_hw function.
+ *  Then sets the PCIe completion timeout.
+ **/
+static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+	s32 ret_val = 0;
+
+	ret_val = ixgbe_start_hw_generic(hw);
+
+	/* set the completion timeout for interface */
+	if (ret_val == 0)
+		ixgbe_set_pcie_completion_timeout(hw);
 
-out:
 	return ret_val;
 }
 
@@ -149,12 +227,19 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                              bool *autoneg)
 {
 	s32 status = 0;
+	u32 autoc = 0;
 
 	/*
 	 * Determine link capabilities based on the stored value of AUTOC,
-	 * which represents EEPROM defaults.
+	 * which represents EEPROM defaults.  If AUTOC value has not been
+	 * stored, use the current register value.
 	 */
-	switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) {
+	if (hw->mac.orig_link_settings_stored)
+		autoc = hw->mac.orig_autoc;
+	else
+		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 		*autoneg = false;
@@ -173,9 +258,9 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
 	case IXGBE_AUTOC_LMS_KX4_AN:
 	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
-		if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
 		*autoneg = true;
 		break;
@@ -238,15 +323,18 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 		break;
 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-	case IXGBE_DEV_ID_82598EB_CX4:
-	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
 	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
 	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
 	case IXGBE_DEV_ID_82598EB_XF_LR:
 	case IXGBE_DEV_ID_82598EB_SFP_LOM:
 		media_type = ixgbe_media_type_fiber;
 		break;
+	case IXGBE_DEV_ID_82598EB_CX4:
+	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+		media_type = ixgbe_media_type_cx4;
+		break;
 	case IXGBE_DEV_ID_82598AT:
+	case IXGBE_DEV_ID_82598AT2:
 		media_type = ixgbe_media_type_copper;
 		break;
 	default:
@@ -271,6 +359,17 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 	u32 rmcs_reg;
 	u32 reg;
 
+#ifdef CONFIG_DCB
+	if (hw->fc.requested_mode == ixgbe_fc_pfc)
+		goto out;
+
+#endif /* CONFIG_DCB */
+	/* Negotiate the fc mode to use */
+	ret_val = ixgbe_fc_autoneg(hw);
+	if (ret_val)
+		goto out;
+
+	/* Disable any previous flow control settings */
 	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
 
@@ -282,14 +381,20 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 	 * 0: Flow control is completely disabled
 	 * 1: Rx flow control is enabled (we can receive pause frames,
 	 *    but not send pause frames).
-	 * 2:  Tx flow control is enabled (we can send pause frames but
+	 * 2: Tx flow control is enabled (we can send pause frames but
 	 *     we do not support receiving pause frames).
 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
 	 * other: Invalid.
+#ifdef CONFIG_DCB
+	 * 4: Priority Flow Control is enabled.
+#endif
 	 */
 	switch (hw->fc.current_mode) {
 	case ixgbe_fc_none:
-		/* Flow control completely disabled by software override. */
+		/*
+		 * Flow control is disabled by software override or autoneg.
+		 * The code below will actually disable it in the HW.
+		 */
 		break;
 	case ixgbe_fc_rx_pause:
 		/*
@@ -314,14 +419,20 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 		fctrl_reg |= IXGBE_FCTRL_RFCE;
 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
 		break;
+#ifdef CONFIG_DCB
+	case ixgbe_fc_pfc:
+		goto out;
+		break;
+#endif /* CONFIG_DCB */
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = -IXGBE_ERR_CONFIG;
+		ret_val = IXGBE_ERR_CONFIG;
 		goto out;
 		break;
 	}
 
-	/* Enable 802.3x based flow control settings. */
+	/* Set 802.3x based flow control settings. */
+	fctrl_reg |= IXGBE_FCTRL_DPF;
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
 
@@ -340,7 +451,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 	}
 
 	/* Configure pause time (2 TCs per register) */
-	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
+	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
 	if ((packetbuf_num & 1) == 0)
 		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
 	else
@@ -354,83 +465,14 @@ out:
 }
 
 /**
- *  ixgbe_setup_fc_82598 - Configure flow control settings
- *  @hw: pointer to hardware structure
- *  @packetbuf_num: packet buffer number (0-7)
- *
- *  Configures the flow control settings based on SW configuration.  This
- *  function is used for 802.3x flow control configuration only.
- **/
-static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
-{
-	s32 ret_val = 0;
-	ixgbe_link_speed speed;
-	bool link_up;
-
-	/* Validate the packetbuf configuration */
-	if (packetbuf_num < 0 || packetbuf_num > 7) {
-		hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
-		          " 0-7\n", packetbuf_num);
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
-
-	/*
-	 * Validate the water mark configuration.  Zero water marks are invalid
-	 * because it causes the controller to just blast out fc packets.
-	 */
-	if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
-		hw_dbg(hw, "Invalid water mark configuration\n");
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
-
-	/*
-	 * Validate the requested mode.  Strict IEEE mode does not allow
-	 * ixgbe_fc_rx_pause because it will cause testing anomalies.
-	 */
-	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
-		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
-
-	/*
-	 * 10gig parts do not have a word in the EEPROM to determine the
-	 * default flow control setting, so we explicitly set it to full.
-	 */
-	if (hw->fc.requested_mode == ixgbe_fc_default)
-		hw->fc.requested_mode = ixgbe_fc_full;
-
-	/*
-	 * Save off the requested flow control mode for use later.  Depending
-	 * on the link partner's capabilities, we may or may not use this mode.
-	 */
-
-	hw->fc.current_mode = hw->fc.requested_mode;
-
-	/* Decide whether to use autoneg or not. */
-	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL))
-		ret_val = ixgbe_fc_autoneg(hw);
-
-	if (ret_val)
-		goto out;
-
-	ret_val = ixgbe_fc_enable_82598(hw, packetbuf_num);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_setup_mac_link_82598 - Configures MAC link settings
+ *  ixgbe_start_mac_link_82598 - Configures MAC link settings
  *  @hw: pointer to hardware structure
  *
  *  Configures link settings based on values in the ixgbe_hw struct.
  *  Restarts the link.  Performs autonegotiation if needed.
  **/
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
+static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+                                      bool autoneg_wait_to_complete)
 {
 	u32 autoc_reg;
 	u32 links_reg;
@@ -443,7 +485,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 
 	/* Only poll for autoneg to complete if specified to do so */
-	if (hw->phy.autoneg_wait_to_complete) {
+	if (autoneg_wait_to_complete) {
 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 		     IXGBE_AUTOC_LMS_KX4_AN ||
 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -462,13 +504,6 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
 		}
 	}
 
-	/*
-	 * We want to save off the original Flow Control configuration just in
-	 * case we get disconnected and then reconnected into a different hub
-	 * or switch with different Flow Control capabilities.
-	 */
-	ixgbe_setup_fc_82598(hw, 0);
-
 	/* Add delay to filter out noises during initial link setup */
 	msleep(50);
 
@@ -555,13 +590,18 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
 	else
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 
+	/* if link is down, zero out the current_mode */
+	if (*link_up == false) {
+		hw->fc.current_mode = ixgbe_fc_none;
+		hw->fc.fc_was_autonegged = false;
+	}
 out:
 	return 0;
 }
 
 
 /**
- *  ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
+ *  ixgbe_setup_mac_link_82598 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
  *  @autoneg: true if auto-negotiation enabled
@@ -569,7 +609,7 @@ out:
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
-static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                            ixgbe_link_speed speed, bool autoneg,
                                            bool autoneg_wait_to_complete)
 {
@@ -599,14 +639,12 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
 	}
 
 	if (status == 0) {
-		hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete;
-
 		/*
 		 * Setup and restart the link based on the new values in
 		 * ixgbe_hw This will write the AUTOC register based on the new
 		 * stored values
 		 */
-		status = ixgbe_setup_mac_link_82598(hw);
+		status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 	}
 
 	return status;
@@ -614,29 +652,7 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
 
 
 /**
- *  ixgbe_setup_copper_link_82598 - Setup copper link settings
- *  @hw: pointer to hardware structure
- *
- *  Configures link settings based on values in the ixgbe_hw struct.
- *  Restarts the link.  Performs autonegotiation if needed.  Restart
- *  phy and wait for autonegotiate to finish.  Then synchronize the
- *  MAC and PHY.
- **/
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
-{
-	s32 status;
-
-	/* Restart autonegotiation on PHY */
-	status = hw->phy.ops.setup_link(hw);
-
-	/* Set up MAC */
-	ixgbe_setup_mac_link_82598(hw);
-
-	return status;
-}
-
-/**
- *  ixgbe_setup_copper_link_speed_82598 - Set the PHY autoneg advertised field
+ *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
  *  @autoneg: true if autonegotiation enabled
@@ -644,7 +660,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
  *
  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
  **/
-static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                                ixgbe_link_speed speed,
                                                bool autoneg,
                                                bool autoneg_wait_to_complete)
@@ -656,7 +672,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
 	                                      autoneg_wait_to_complete);
 
 	/* Set up MAC */
-	ixgbe_setup_mac_link_82598(hw);
+	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 
 	return status;
 }
@@ -672,6 +688,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 {
 	s32 status = 0;
+	s32 phy_status = 0;
 	u32 ctrl;
 	u32 gheccr;
 	u32 i;
@@ -715,14 +732,27 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 	}
 
 	/* Reset PHY */
-	if (hw->phy.reset_disable == false)
+	if (hw->phy.reset_disable == false) {
+		/* PHY ops must be identified and initialized prior to reset */
+
+		/* Init PHY and function pointers, perform SFP setup */
+		phy_status = hw->phy.ops.init(hw);
+		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+			goto reset_hw_out;
+		else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+			goto no_phy_reset;
+
+
 		hw->phy.ops.reset(hw);
+	}
 
+no_phy_reset:
 	/*
 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
 	 * access and verify no pending requests before reset
 	 */
-	if (ixgbe_disable_pcie_master(hw) != 0) {
+	status = ixgbe_disable_pcie_master(hw);
+	if (status != 0) {
 		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
 		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
 	}
@@ -766,9 +796,19 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
 	}
 
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table
+	 */
+	hw->mac.ops.init_rx_addrs(hw);
+
 	/* Store the permanent mac address */
 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
 
+reset_hw_out:
+	if (phy_status)
+		status = phy_status;
+
 	return status;
 }
 
@@ -879,62 +919,6 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
 		for (offset = 0; offset < hw->mac.vft_size; offset++)
 			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
 			                0);
-
-	return 0;
-}
-
-/**
- *  ixgbe_blink_led_start_82598 - Blink LED based on index.
- *  @hw: pointer to hardware structure
- *  @index: led number to blink
- **/
-static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
-{
-	ixgbe_link_speed speed = 0;
-	bool link_up = 0;
-	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
-	/*
-	 * Link must be up to auto-blink the LEDs on the 82598EB MAC;
-	 * force it if link is down.
-	 */
-	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-
-	if (!link_up) {
-		autoc_reg |= IXGBE_AUTOC_FLU;
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-		msleep(10);
-	}
-
-	led_reg &= ~IXGBE_LED_MODE_MASK(index);
-	led_reg |= IXGBE_LED_BLINK(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
-	IXGBE_WRITE_FLUSH(hw);
-
-	return 0;
-}
-
-/**
- *  ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
- *  @hw: pointer to hardware structure
- *  @index: led number to stop blinking
- **/
-static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
-{
-	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-
-	autoc_reg &= ~IXGBE_AUTOC_FLU;
-	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
-	led_reg &= ~IXGBE_LED_MODE_MASK(index);
-	led_reg &= ~IXGBE_LED_BLINK(index);
-	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
-	IXGBE_WRITE_FLUSH(hw);
-
 	return 0;
 }
 
@@ -1052,35 +1036,56 @@ out:
 static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
 {
 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+	u16 ext_ability = 0;
+
+	hw->phy.ops.identify(hw);
+
+	/* Copper PHY must be checked before AUTOC LMS to determine correct
+	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
+	if (hw->phy.type == ixgbe_phy_tn ||
+	    hw->phy.type == ixgbe_phy_cu_unknown) {
+		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+		goto out;
+	}
 
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_82598:
-		/* Default device ID is mezzanine card KX/KX4 */
-		physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
-				  IXGBE_PHYSICAL_LAYER_1000BASE_KX);
-		break;
-	case IXGBE_DEV_ID_82598_BX:
-		physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
-	case IXGBE_DEV_ID_82598EB_CX4:
-	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
-		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
-		break;
-	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+	case IXGBE_AUTOC_LMS_1G_AN:
+	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
+			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		else
+			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
 		break;
-	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+		else /* XAUI */
+			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
 		break;
-	case IXGBE_DEV_ID_82598EB_XF_LR:
-		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+	case IXGBE_AUTOC_LMS_KX4_AN:
+	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
 		break;
-	case IXGBE_DEV_ID_82598AT:
-		physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T |
-		                  IXGBE_PHYSICAL_LAYER_1000BASE_T);
+	default:
 		break;
-	case IXGBE_DEV_ID_82598EB_SFP_LOM:
+	}
+
+	if (hw->phy.type == ixgbe_phy_nl) {
 		hw->phy.ops.identify_sfp(hw);
 
 		switch (hw->phy.sfp_type) {
@@ -1097,20 +1102,32 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
 			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
 			break;
 		}
-		break;
+	}
 
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+		break;
+	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+		break;
+	case IXGBE_DEV_ID_82598EB_XF_LR:
+		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+		break;
 	default:
-		physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
 		break;
 	}
 
+out:
 	return physical_layer;
 }
 
 static struct ixgbe_mac_operations mac_ops_82598 = {
 	.init_hw		= &ixgbe_init_hw_generic,
 	.reset_hw		= &ixgbe_reset_hw_82598,
-	.start_hw		= &ixgbe_start_hw_generic,
+	.start_hw		= &ixgbe_start_hw_82598,
 	.clear_hw_cntrs		= &ixgbe_clear_hw_cntrs_generic,
 	.get_media_type		= &ixgbe_get_media_type_82598,
 	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
@@ -1122,13 +1139,12 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
 	.read_analog_reg8	= &ixgbe_read_analog_reg8_82598,
 	.write_analog_reg8	= &ixgbe_write_analog_reg8_82598,
 	.setup_link		= &ixgbe_setup_mac_link_82598,
-	.setup_link_speed	= &ixgbe_setup_mac_link_speed_82598,
 	.check_link		= &ixgbe_check_mac_link_82598,
 	.get_link_capabilities	= &ixgbe_get_link_capabilities_82598,
 	.led_on			= &ixgbe_led_on_generic,
 	.led_off		= &ixgbe_led_off_generic,
-	.blink_led_start	= &ixgbe_blink_led_start_82598,
-	.blink_led_stop		= &ixgbe_blink_led_stop_82598,
+	.blink_led_start	= &ixgbe_blink_led_start_generic,
+	.blink_led_stop		= &ixgbe_blink_led_stop_generic,
 	.set_rar		= &ixgbe_set_rar_generic,
 	.clear_rar		= &ixgbe_clear_rar_generic,
 	.set_vmdq		= &ixgbe_set_vmdq_82598,
@@ -1142,7 +1158,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
 	.disable_mc		= &ixgbe_disable_mc_generic,
 	.clear_vfta		= &ixgbe_clear_vfta_82598,
 	.set_vfta		= &ixgbe_set_vfta_82598,
-	.setup_fc		= &ixgbe_setup_fc_82598,
+	.fc_enable		= &ixgbe_fc_enable_82598,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
@@ -1155,6 +1171,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
 static struct ixgbe_phy_operations phy_ops_82598 = {
 	.identify		= &ixgbe_identify_phy_generic,
 	.identify_sfp		= &ixgbe_identify_sfp_module_generic,
+	.init			= &ixgbe_init_phy_ops_82598,
 	.reset			= &ixgbe_reset_phy_generic,
 	.read_reg		= &ixgbe_read_phy_reg_generic,
 	.write_reg		= &ixgbe_write_phy_reg_generic,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index a3792bf..2725710 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -38,70 +38,56 @@
 #define IXGBE_82599_MC_TBL_SIZE   128
 #define IXGBE_82599_VFT_TBL_SIZE  128
 
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed,
-                                      bool *autoneg);
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
-                                     ixgbe_link_speed speed, bool autoneg,
-                                     bool autoneg_wait_to_complete);
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw);
-s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
-                               ixgbe_link_speed *speed,
-                               bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
-                                     ixgbe_link_speed speed,
-                                     bool autoneg,
-                                     bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+                                          ixgbe_link_speed speed,
+                                          bool autoneg,
+                                          bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+                                           ixgbe_link_speed speed,
+                                           bool autoneg,
+                                           bool autoneg_wait_to_complete);
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+                               bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+                               ixgbe_link_speed speed,
+                               bool autoneg,
+                               bool autoneg_wait_to_complete);
 static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
                                              ixgbe_link_speed *speed,
                                              bool *autoneg);
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed speed,
-                                               bool autoneg,
-                                               bool autoneg_wait_to_complete);
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
-s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan,
-                         u32 vind, bool vlan_on);
-s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw);
-s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_start_hw_rev_0_82599(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
-
-void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+                                         ixgbe_link_speed speed,
+                                         bool autoneg,
+                                         bool autoneg_wait_to_complete);
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+
+static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
 	if (hw->phy.multispeed_fiber) {
 		/* Set up dual speed SFP+ support */
-		mac->ops.setup_link =
-		          &ixgbe_setup_mac_link_multispeed_fiber;
-		mac->ops.setup_link_speed =
-		          &ixgbe_setup_mac_link_speed_multispeed_fiber;
+		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
 	} else {
-		mac->ops.setup_link =
-		          &ixgbe_setup_mac_link_82599;
-		mac->ops.setup_link_speed =
-		          &ixgbe_setup_mac_link_speed_82599;
+		if ((mac->ops.get_media_type(hw) ==
+		     ixgbe_media_type_backplane) &&
+		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+		     hw->phy.smart_speed == ixgbe_smart_speed_on))
+			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+		else
+			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
 	}
 }
 
-s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
 	s32 ret_val = 0;
 	u16 list_offset, data_offset, data_value;
 
 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
 		ixgbe_init_mac_link_ops_82599(hw);
+
+		hw->phy.ops.reset = NULL;
+
 		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
 		                                              &data_offset);
 
@@ -142,7 +128,7 @@ setup_sfp_out:
  *  Read PCIe configuration space, and get the MSI-X vector count from
  *  the capabilities table.
  **/
-u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
+static u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
 {
 	struct ixgbe_adapter *adapter = hw->back;
 	u16 msix_count;
@@ -159,51 +145,58 @@ u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
 static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
-	struct ixgbe_phy_info *phy = &hw->phy;
-	s32 ret_val;
 
-	/* Set the bus information prior to PHY identification */
-	mac->ops.get_bus_info(hw);
+	ixgbe_init_mac_link_ops_82599(hw);
 
-	/* Call PHY identify routine to get the Cu or SFI phy type */
-	ret_val = phy->ops.identify(hw);
+	mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
+	mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
+	mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+	mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
+	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
+	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
 
-	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
-		goto get_invariants_out;
+	return 0;
+}
 
-	ixgbe_init_mac_link_ops_82599(hw);
+/**
+ *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during get_invariants because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ *
+ **/
+static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
 
-	/* Setup SFP module if there is one present. */
-	ret_val = mac->ops.setup_sfp(hw);
+	/* Identify the PHY or SFP module */
+	ret_val = phy->ops.identify(hw);
+
+	/* Setup function pointers based on detected SFP module and speeds */
+	ixgbe_init_mac_link_ops_82599(hw);
 
 	/* If copper media, overwrite with copper function pointers */
 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
-		mac->ops.setup_link_speed =
-		                  &ixgbe_setup_copper_link_speed_82599;
 		mac->ops.get_link_capabilities =
 		                  &ixgbe_get_copper_link_capabilities_82599;
 	}
 
-	/* PHY Init */
+	/* Set necessary function pointers based on phy type */
 	switch (hw->phy.type) {
 	case ixgbe_phy_tn:
 		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
 		phy->ops.get_firmware_version =
-		                  &ixgbe_get_phy_firmware_version_tnx;
+		             &ixgbe_get_phy_firmware_version_tnx;
 		break;
 	default:
 		break;
 	}
 
-	mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
-	mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
-	mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
-	mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
-	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
-	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
-
-get_invariants_out:
 	return ret_val;
 }
 
@@ -215,13 +208,24 @@ get_invariants_out:
  *
  *  Determines the link capabilities by reading the AUTOC register.
  **/
-s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed,
-                                      bool *negotiation)
+static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *negotiation)
 {
 	s32 status = 0;
+	u32 autoc = 0;
+
+	/*
+	 * Determine link capabilities based on the stored value of AUTOC,
+	 * which represents EEPROM defaults.  If AUTOC value has not been
+	 * stored, use the current register value.
+	 */
+	if (hw->mac.orig_link_settings_stored)
+		autoc = hw->mac.orig_autoc;
+	else
+		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 
-	switch (hw->mac.orig_autoc & IXGBE_AUTOC_LMS_MASK) {
+	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 		*negotiation = false;
@@ -245,22 +249,22 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
 	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
-		if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP)
+		if (autoc & IXGBE_AUTOC_KR_SUPP)
 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
 		*negotiation = true;
 		break;
 
 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
 		*speed = IXGBE_LINK_SPEED_100_FULL;
-		if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP)
+		if (autoc & IXGBE_AUTOC_KR_SUPP)
 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (hw->mac.orig_autoc & IXGBE_AUTOC_KX_SUPP)
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
 		*negotiation = true;
 		break;
@@ -324,7 +328,7 @@ static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
  *
  *  Returns the media type (fiber, copper, backplane)
  **/
-enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 {
 	enum ixgbe_media_type media_type;
 
@@ -336,14 +340,20 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 	}
 
 	switch (hw->device_id) {
-	case IXGBE_DEV_ID_82599:
 	case IXGBE_DEV_ID_82599_KX4:
+	case IXGBE_DEV_ID_82599_KX4_MEZZ:
+	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+	case IXGBE_DEV_ID_82599_XAUI_LOM:
 		/* Default device ID is mezzanine card KX/KX4 */
 		media_type = ixgbe_media_type_backplane;
 		break;
 	case IXGBE_DEV_ID_82599_SFP:
+	case IXGBE_DEV_ID_82599_SFP_EM:
 		media_type = ixgbe_media_type_fiber;
 		break;
+	case IXGBE_DEV_ID_82599_CX4:
+		media_type = ixgbe_media_type_cx4;
+		break;
 	default:
 		media_type = ixgbe_media_type_unknown;
 		break;
@@ -353,13 +363,15 @@ out:
 }
 
 /**
- *  ixgbe_setup_mac_link_82599 - Setup MAC link settings
+ *  ixgbe_start_mac_link_82599 - Setup MAC link settings
  *  @hw: pointer to hardware structure
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
  *
  *  Configures link settings based on values in the ixgbe_hw struct.
  *  Restarts the link.  Performs autonegotiation if needed.
  **/
-s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+                               bool autoneg_wait_to_complete)
 {
 	u32 autoc_reg;
 	u32 links_reg;
@@ -372,7 +384,7 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 
 	/* Only poll for autoneg to complete if specified to do so */
-	if (hw->phy.autoneg_wait_to_complete) {
+	if (autoneg_wait_to_complete) {
 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -393,9 +405,6 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
 		}
 	}
 
-	/* Set up flow control */
-	status = ixgbe_setup_fc_generic(hw, 0);
-
 	/* Add delay to filter out noises during initial link setup */
 	msleep(50);
 
@@ -403,25 +412,7 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw)
 }
 
 /**
- *  ixgbe_setup_mac_link_multispeed_fiber - Setup MAC link settings
- *  @hw: pointer to hardware structure
- *
- *  Configures link settings based on values in the ixgbe_hw struct.
- *  Restarts the link for multi-speed fiber at 1G speed, if link
- *  fails at 10G.
- *  Performs autonegotiation if needed.
- **/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw)
-{
-	s32 status = 0;
-	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_82599_AUTONEG;
-	status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw, link_speed,
-	                                                     true, true);
-	return status;
-}
-
-/**
- *  ixgbe_setup_mac_link_speed_multispeed_fiber - Set MAC link speed
+ *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
  *  @autoneg: true if autonegotiation enabled
@@ -429,10 +420,10 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw)
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
-s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
-                                                ixgbe_link_speed speed,
-                                                bool autoneg,
-                                                bool autoneg_wait_to_complete)
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+                                          ixgbe_link_speed speed,
+                                          bool autoneg,
+                                          bool autoneg_wait_to_complete)
 {
 	s32 status = 0;
 	ixgbe_link_speed phy_link_speed;
@@ -441,12 +432,23 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 	bool link_up = false;
 	bool negotiation;
+	int i;
 
 	/* Mask off requested but non-supported speeds */
 	hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation);
 	speed &= phy_link_speed;
 
 	/*
+	 * When the driver changes the link speeds that it can support,
+	 * it sets autotry_restart to true to indicate that we need to
+	 * initiate a new autotry session with the link partner.  To do
+	 * so, we set the speed then disable and re-enable the tx laser, to
+	 * alert the link partner that it also needs to restart autotry on its
+	 * end.  This is consistent with true clause 37 autoneg, which also
+	 * involves a loss of signal.
+	 */
+
+	/*
 	 * Try each speed one by one, highest priority first.  We do this in
 	 * software because 10gb fiber doesn't support speed autonegotiation.
 	 */
@@ -454,21 +456,57 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
 		speedcnt++;
 		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 
-		/* Set hardware SDP's */
+		/* If we already have link at this speed, just jump out */
+		hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+
+		if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+			goto out;
+
+		/* Set the module link speed */
 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 
-		ixgbe_setup_mac_link_speed_82599(hw,
-		                                 IXGBE_LINK_SPEED_10GB_FULL,
-		                                 autoneg,
-		                                 autoneg_wait_to_complete);
+		/* Allow module to change analog characteristics (1G->10G) */
+		msleep(40);
+
+		status = ixgbe_setup_mac_link_82599(hw,
+		                               IXGBE_LINK_SPEED_10GB_FULL,
+		                               autoneg,
+		                               autoneg_wait_to_complete);
+		if (status != 0)
+			return status;
+
+		/* Flap the tx laser if it has not already been done */
+		if (hw->mac.autotry_restart) {
+			/* Disable tx laser; allow 100us to go dark per spec */
+			esdp_reg |= IXGBE_ESDP_SDP3;
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+			udelay(100);
+
+			/* Enable tx laser; allow 2ms to light up per spec */
+			esdp_reg &= ~IXGBE_ESDP_SDP3;
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+			msleep(2);
+
+			hw->mac.autotry_restart = false;
+		}
 
-		msleep(50);
+		/*
+		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
+		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+		 * attempted.  82599 uses the same timing for 10g SFI.
+		 */
 
-		/* If we have link, just jump out */
-		hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
-		if (link_up)
-			goto out;
+		for (i = 0; i < 5; i++) {
+			/* Wait for the link partner to also set speed */
+			msleep(100);
+
+			/* If we have link, just jump out */
+			hw->mac.ops.check_link(hw, &phy_link_speed,
+			                       &link_up, false);
+			if (link_up)
+				goto out;
+		}
 	}
 
 	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
@@ -476,16 +514,44 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
 		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
 			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
 
-		/* Set hardware SDP's */
+		/* If we already have link at this speed, just jump out */
+		hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+
+		if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+			goto out;
+
+		/* Set the module link speed */
 		esdp_reg &= ~IXGBE_ESDP_SDP5;
 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 
-		ixgbe_setup_mac_link_speed_82599(
-			hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
-			autoneg_wait_to_complete);
+		/* Allow module to change analog characteristics (10G->1G) */
+		msleep(40);
+
+		status = ixgbe_setup_mac_link_82599(hw,
+		                                      IXGBE_LINK_SPEED_1GB_FULL,
+		                                      autoneg,
+		                                      autoneg_wait_to_complete);
+		if (status != 0)
+			return status;
+
+		/* Flap the tx laser if it has not already been done */
+		if (hw->mac.autotry_restart) {
+			/* Disable tx laser; allow 100us to go dark per spec */
+			esdp_reg |= IXGBE_ESDP_SDP3;
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+			udelay(100);
+
+			/* Enable tx laser; allow 2ms to light up per spec */
+			esdp_reg &= ~IXGBE_ESDP_SDP3;
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+			msleep(2);
+
+			hw->mac.autotry_restart = false;
+		}
 
-		msleep(50);
+		/* Wait for the link partner to also set speed */
+		msleep(100);
 
 		/* If we have link, just jump out */
 		hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
@@ -499,10 +565,124 @@ s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
 	 * single highest speed that the user requested.
 	 */
 	if (speedcnt > 1)
-		status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw,
-		                                     highest_link_speed,
-		                                     autoneg,
-		                                     autoneg_wait_to_complete);
+		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+		                                               highest_link_speed,
+		                                               autoneg,
+		                                               autoneg_wait_to_complete);
+
+out:
+	/* Set autoneg_advertised value based on input link speed */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+	return status;
+}
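For reference, a minimal sketch of how a caller can request both fiber speeds at once, equivalent to what the removed one-argument wrapper did via IXGBE_LINK_SPEED_82599_AUTONEG (the wrapper function name below is hypothetical, for illustration only):

static s32 example_multispeed_autoneg(struct ixgbe_hw *hw)
{
	/* Walks speeds highest-first: tries 10G, then falls back to 1G */
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_10GB_FULL |
	                         IXGBE_LINK_SPEED_1GB_FULL;

	return ixgbe_setup_mac_link_multispeed_fiber(hw, speed, true, true);
}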
+
+/**
+ *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: true if autonegotiation enabled
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Implements the Intel SmartSpeed algorithm.
+ **/
+static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+				     ixgbe_link_speed speed, bool autoneg,
+				     bool autoneg_wait_to_complete)
+{
+	s32 status = 0;
+	ixgbe_link_speed link_speed;
+	s32 i, j;
+	bool link_up = false;
+	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
+
+	/* Set autoneg_advertised value based on input link speed */
+	hw->phy.autoneg_advertised = 0;
+
+	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_100_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+	/*
+	 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
+	 * autoneg advertisement if link is unable to be established at the
+	 * highest negotiated rate.  This can sometimes happen due to integrity
+	 * issues with the physical media connection.
+	 */
+
+	/* First, try to get link with full advertisement */
+	hw->phy.smart_speed_active = false;
+	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+						    autoneg_wait_to_complete);
+		if (status)
+			goto out;
+
+		/*
+		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
+		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+		 * Table 9 in the AN MAS.
+		 */
+		for (i = 0; i < 5; i++) {
+			mdelay(100);
+
+			/* If we have link, just jump out */
+			hw->mac.ops.check_link(hw, &link_speed,
+			                       &link_up, false);
+			if (link_up)
+				goto out;
+		}
+	}
+
+	/*
+	 * We didn't get link.  If we advertised KR plus one of KX4/KX
+	 * (or BX4/BX), then disable KR and try again.
+	 */
+	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+		goto out;
+
+	/* Turn SmartSpeed on to disable KR support */
+	hw->phy.smart_speed_active = true;
+	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+					    autoneg_wait_to_complete);
+	if (status)
+		goto out;
+
+	/*
+	 * Wait for the controller to acquire link.  600ms will allow for
+	 * the AN link_fail_inhibit_timer as well as for multiple cycles of
+	 * parallel detect, both 10g and 1g. This allows for the maximum
+	 * connect attempts as defined in the AN MAS table 73-7.
+	 */
+	for (i = 0; i < 6; i++) {
+		mdelay(100);
+
+		/* If we have link, just jump out */
+		hw->mac.ops.check_link(hw, &link_speed,
+		                       &link_up, false);
+		if (link_up)
+			goto out;
+	}
+
+	/* We didn't get link.  Turn SmartSpeed back off. */
+	hw->phy.smart_speed_active = false;
+	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+					    autoneg_wait_to_complete);
 
 out:
 	return status;
@@ -517,8 +697,10 @@ out:
  *
  *  Reads the links register to determine if link is up and the current speed
  **/
-s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
-                               bool *link_up, bool link_up_wait_to_complete)
+static s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
+                                      ixgbe_link_speed *speed,
+                                      bool *link_up,
+                                      bool link_up_wait_to_complete)
 {
 	u32 links_reg;
 	u32 i;
@@ -551,12 +733,17 @@ s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 	else
 		*speed = IXGBE_LINK_SPEED_100_FULL;
 
+	/* if link is down, zero out the current_mode */
+	if (*link_up == false) {
+		hw->fc.current_mode = ixgbe_fc_none;
+		hw->fc.fc_was_autonegged = false;
+	}
 
 	return 0;
 }
 
 /**
- *  ixgbe_setup_mac_link_speed_82599 - Set MAC link speed
+ *  ixgbe_setup_mac_link_82599 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
  *  @autoneg: true if autonegotiation enabled
@@ -564,13 +751,15 @@ s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
-s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
-                                     ixgbe_link_speed speed, bool autoneg,
-                                     bool autoneg_wait_to_complete)
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+                               ixgbe_link_speed speed, bool autoneg,
+                               bool autoneg_wait_to_complete)
 {
 	s32 status = 0;
 	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	u32 start_autoc = autoc;
+	u32 orig_autoc = 0;
 	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
 	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
@@ -584,15 +773,26 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
 
 	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
 		status = IXGBE_ERR_LINK_SETUP;
-	} else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
-	           link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
-	           link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+		goto out;
+	}
+
+	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support */
+	if (hw->mac.orig_link_settings_stored)
+		orig_autoc = hw->mac.orig_autoc;
+	else
+		orig_autoc = autoc;
+
+	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 		/* Set KX4/KX/KR support according to speed requested */
 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
 		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-			if (hw->mac.orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
 				autoc |= IXGBE_AUTOC_KX4_SUPP;
-			if (hw->mac.orig_autoc & IXGBE_AUTOC_KR_SUPP)
+			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+			    (hw->phy.smart_speed_active == false))
 				autoc |= IXGBE_AUTOC_KR_SUPP;
 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 			autoc |= IXGBE_AUTOC_KX_SUPP;
@@ -618,7 +818,7 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
 		}
 	}
 
-	if (status == 0) {
+	if (autoc != start_autoc) {
 		/* Restart link */
 		autoc |= IXGBE_AUTOC_AN_RESTART;
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
@@ -645,37 +845,16 @@ s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
 			}
 		}
 
-		/* Set up flow control */
-		status = ixgbe_setup_fc_generic(hw, 0);
-
 		/* Add delay to filter out noises during initial link setup */
 		msleep(50);
 	}
 
+out:
 	return status;
 }
 
 /**
- *  ixgbe_setup_copper_link_82599 - Setup copper link settings
- *  @hw: pointer to hardware structure
- *
- *  Restarts the link on PHY and then MAC. Performs autonegotiation if needed.
- **/
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw)
-{
-	s32 status;
-
-	/* Restart autonegotiation on PHY */
-	status = hw->phy.ops.setup_link(hw);
-
-	/* Set up MAC */
-	ixgbe_setup_mac_link_82599(hw);
-
-	return status;
-}
-
-/**
- *  ixgbe_setup_copper_link_speed_82599 - Set the PHY autoneg advertised field
+ *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
  *  @autoneg: true if autonegotiation enabled
@@ -683,10 +862,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw)
  *
  *  Restarts link on PHY and MAC based on settings passed in.
  **/
-static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
-                                               ixgbe_link_speed speed,
-                                               bool autoneg,
-                                               bool autoneg_wait_to_complete)
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+                                         ixgbe_link_speed speed,
+                                         bool autoneg,
+                                         bool autoneg_wait_to_complete)
 {
 	s32 status;
 
@@ -694,7 +873,7 @@ static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
 	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
 	                                      autoneg_wait_to_complete);
 	/* Set up MAC */
-	ixgbe_setup_mac_link_82599(hw);
+	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
 
 	return status;
 }
@@ -707,7 +886,7 @@ static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
  *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
  *  reset.
  **/
-s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 {
 	s32 status = 0;
 	u32 ctrl, ctrl_ext;
@@ -718,14 +897,30 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 	/* Call adapter stop to disable tx/rx and clear interrupts */
 	hw->mac.ops.stop_adapter(hw);
 
+	/* PHY ops must be identified and initialized prior to reset */
+
+	/* Init PHY and function pointers, perform SFP setup */
+	status = hw->phy.ops.init(hw);
+
+	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		goto reset_hw_out;
+
+	/* Setup SFP module if there is one present. */
+	if (hw->phy.sfp_setup_needed) {
+		status = hw->mac.ops.setup_sfp(hw);
+		hw->phy.sfp_setup_needed = false;
+	}
+
 	/* Reset PHY */
-	hw->phy.ops.reset(hw);
+	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
+		hw->phy.ops.reset(hw);
 
 	/*
 	 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
 	 * access and verify no pending requests before reset
 	 */
-	if (ixgbe_disable_pcie_master(hw) != 0) {
+	status = ixgbe_disable_pcie_master(hw);
+	if (status != 0) {
 		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
 		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
 	}
@@ -783,9 +978,34 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 		}
 	}
 
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table.  Also reset num_rar_entries to 128,
+	 * since we modify this value when programming the SAN MAC address.
+	 */
+	hw->mac.num_rar_entries = 128;
+	hw->mac.ops.init_rx_addrs(hw);
+
 	/* Store the permanent mac address */
 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
 
+	/* Store the permanent SAN mac address */
+	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+	/* Add the SAN MAC address to the RAR only if it's a valid address */
+	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+		                    hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+		/* Reserve the last RAR for the SAN MAC address */
+		hw->mac.num_rar_entries--;
+	}
+
+	/* Store the alternative WWNN/WWPN prefix */
+	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+	                               &hw->mac.wwpn_prefix);
+
+reset_hw_out:
 	return status;
 }
 
@@ -795,7 +1015,7 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
  *  @rar: receive address register index to disassociate
  *  @vmdq: VMDq pool index to remove from the rar
  **/
-s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 {
 	u32 mpsar_lo, mpsar_hi;
 	u32 rar_entries = hw->mac.num_rar_entries;
@@ -841,7 +1061,7 @@ done:
  *  @rar: receive address register index to associate with a VMDq index
  *  @vmdq: VMDq pool index
  **/
-s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 {
 	u32 mpsar;
 	u32 rar_entries = hw->mac.num_rar_entries;
@@ -871,8 +1091,8 @@ s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
  *
  *  Turn on/off specified VLAN in the VLAN filter table.
  **/
-s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-                         bool vlan_on)
+static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                                bool vlan_on)
 {
 	u32 regindex;
 	u32 bitindex;
@@ -985,7 +1205,7 @@ out:
  *
  *  Clears the VLAN filer table, and the VMDq index associated with the filter
  **/
-s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
 {
 	u32 offset;
 
@@ -1002,55 +1222,956 @@ s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
 }
 
 /**
- *  ixgbe_blink_led_start_82599 - Blink LED based on index.
+ *  ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
  *  @hw: pointer to hardware structure
- *  @index: led number to blink
  **/
-s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index)
+static s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
 {
-	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	int i;
+	hw_dbg(hw, " Clearing UTA\n");
+
+	for (i = 0; i < 128; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+{
+	int i;
+	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
+
+	/*
+	 * Before starting the reinitialization process,
+	 * FDIRCMD.CMD must be zero.
+	 */
+	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+		      IXGBE_FDIRCMD_CMD_MASK))
+			break;
+		udelay(10);
+	}
+	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
+		hw_dbg(hw, "Flow Director previous command isn't complete, "
+		       "aborting table re-initialization.\n");
+		return IXGBE_ERR_FDIR_REINIT_FAILED;
+	}
 
-	led_reg &= ~IXGBE_LED_MODE_MASK(index);
-	led_reg |= IXGBE_LED_BLINK(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
+	IXGBE_WRITE_FLUSH(hw);
+	/*
+	 * The 82599 flow director init flow cannot be restarted.  Work
+	 * around the 82599 silicon errata by performing the following steps
+	 * before re-writing the FDIRCTRL control register with the same
+	 * value:
+	 * - write 1 to bit 8 of the FDIRCMD register, then
+	 * - write 0 to bit 8 of the FDIRCMD register.
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+	                 IXGBE_FDIRCMD_CLEARHT));
+	IXGBE_WRITE_FLUSH(hw);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+	                (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+	                 ~IXGBE_FDIRCMD_CLEARHT));
+	IXGBE_WRITE_FLUSH(hw);
+	/*
+	 * Clear FDIR Hash register to clear any leftover hashes
+	 * waiting to be programmed.
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
 	IXGBE_WRITE_FLUSH(hw);
 
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll init-done after we write FDIRCTRL register */
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+		                   IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		udelay(10);
+	}
+	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
+		return IXGBE_ERR_FDIR_REINIT_FAILED;
+	}
+
+	/* Clear FDIR statistics registers (read to clear) */
+	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+
 	return 0;
 }
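As a hedged usage sketch, a path that needs to flush and re-arm the filter tables (for example after a wholesale filter change) can call the helper above and check for the dedicated error code (the wrapper name is illustrative):

static void example_fdir_reinit(struct ixgbe_hw *hw)
{
	/* Flushes the hash table and re-polls FDIRCTRL init-done */
	if (ixgbe_reinit_fdir_tables_82599(hw) ==
	    IXGBE_ERR_FDIR_REINIT_FAILED)
		hw_dbg(hw, "FDIR table re-initialization failed\n");
}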
 
 /**
- *  ixgbe_blink_led_stop_82599 - Stop blinking LED based on index.
+ *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
  *  @hw: pointer to hardware structure
- *  @index: led number to stop blinking
+ *  @pballoc: which mode to allocate filters with
  **/
-s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index)
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
 {
-	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	u32 fdirctrl = 0;
+	u32 pbsize;
+	int i;
 
-	led_reg &= ~IXGBE_LED_MODE_MASK(index);
-	led_reg &= ~IXGBE_LED_BLINK(index);
-	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	/*
+	 * Before enabling Flow Director, the Rx Packet Buffer size
+	 * must be reduced.  The new value is the current size minus
+	 * the flow director memory usage.
+	 */
+	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
+	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
+
+	/*
+	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
+	 * initialized to zero for non-DCB mode; otherwise the actual total RX PB
+	 * would be bigger than programmed and filter space would run into
+	 * the PB 0 region.
+	 */
+	for (i = 1; i < 8; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+
+	/* Send interrupt when 64 filters are left */
+	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+
+	/* Set the maximum length per hash bucket to 0xA filters */
+	fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
+
+	switch (pballoc) {
+	case IXGBE_FDIR_PBALLOC_64K:
+		/* 8k - 1 signature filters */
+		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+		break;
+	case IXGBE_FDIR_PBALLOC_128K:
+		/* 16k - 1 signature filters */
+		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+		break;
+	case IXGBE_FDIR_PBALLOC_256K:
+		/* 32k - 1 signature filters */
+		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+		break;
+	default:
+		/* bad value */
+		return IXGBE_ERR_CONFIG;
+	}
+
+	/* Move the flexible bytes to use the ethertype - shift 6 words */
+	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+
+	/* Prime the keys for hashing */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
+	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
+	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+
+	/*
+	 * Poll init-done after we write the register.  Estimated times:
+	 *      10G: PBALLOC = 11b, timing is 60us
+	 *       1G: PBALLOC = 11b, timing is 600us
+	 *     100M: PBALLOC = 11b, timing is 6ms
+	 *
+	 *     Multiply these timings by 4 if under full Rx load
+	 *
+	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
+	 * this might not finish in our poll time, but we can live with that
+	 * for now.
+	 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
 	IXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+		                   IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		msleep(1);
+	}
+	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
 
 	return 0;
 }
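A sketch of enabling signature-mode Flow Director with the smallest packet-buffer allocation; the switch above lists the resulting filter capacity per mode (the wrapper name is illustrative):

static void example_fdir_signature_init(struct ixgbe_hw *hw)
{
	/* 64KB allocation => up to 8k - 1 signature filters */
	if (ixgbe_init_fdir_signature_82599(hw, IXGBE_FDIR_PBALLOC_64K))
		hw_dbg(hw, "FDIR signature init failed\n");
}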
 
 /**
- *  ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
+ *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
  *  @hw: pointer to hardware structure
+ *  @pballoc: which mode to allocate filters with
  **/
-s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
 {
+	u32 fdirctrl = 0;
+	u32 pbsize;
 	int i;
-	hw_dbg(hw, " Clearing UTA\n");
 
-	for (i = 0; i < 128; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+	/*
+	 * Before enabling Flow Director, the Rx Packet Buffer size
+	 * must be reduced.  The new value is the current size minus
+	 * the flow director memory usage.
+	 */
+	pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
+	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+	    (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
+
+	/*
+	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
+	 * initialized to zero for non-DCB mode; otherwise the actual total RX PB
+	 * would be bigger than programmed and filter space would run into
+	 * the PB 0 region.
+	 */
+	for (i = 1; i < 8; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+
+	/* Send interrupt when 64 filters are left */
+	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+
+	switch (pballoc) {
+	case IXGBE_FDIR_PBALLOC_64K:
+		/* 2k - 1 perfect filters */
+		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+		break;
+	case IXGBE_FDIR_PBALLOC_128K:
+		/* 4k - 1 perfect filters */
+		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+		break;
+	case IXGBE_FDIR_PBALLOC_256K:
+		/* 8k - 1 perfect filters */
+		fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+		break;
+	default:
+		/* bad value */
+		return IXGBE_ERR_CONFIG;
+	}
+
+	/* Turn perfect match filtering on */
+	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
+	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+
+	/* Move the flexible bytes to use the ethertype - shift 6 words */
+	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+	/* Prime the keys for hashing */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
+	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
+	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+
+	/*
+	 * Poll init-done after we write the register.  Estimated times:
+	 *      10G: PBALLOC = 11b, timing is 60us
+	 *       1G: PBALLOC = 11b, timing is 600us
+	 *     100M: PBALLOC = 11b, timing is 6ms
+	 *
+	 *     Multiply these timings by 4 if under full Rx load
+	 *
+	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
+	 * this might not finish in our poll time, but we can live with that
+	 * for now.
+	 */
+
+	/* Set the maximum length per hash bucket to 0xA filters */
+	fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+		                   IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		msleep(1);
+	}
+	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+		hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
+ *  @atr_input: input bitstream to compute the hash on
+ *  @key: 32-bit hash key
+ **/
+static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
+                                        u32 key)
+{
+	/*
+	 * The algorithm is as follows:
+	 *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+	 *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+	 *    and A[n] x B[n] is bitwise AND between same length strings
+	 *
+	 *    K[n] is 16 bits, defined as:
+	 *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+	 *       for n modulo 32 < 15, K[n] =
+	 *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+	 *
+	 *    S[n] is 16 bits, defined as:
+	 *       for n >= 15, S[n] = S[n:n - 15]
+	 *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+	 *
+	 *    To simplify for programming, the algorithm is implemented
+	 *    in software this way:
+	 *
+	 *    Key[31:0], Stream[335:0]
+	 *
+	 *    tmp_key[11 * 32 - 1:0] = 11{Key[31:0]}, i.e. key concatenated 11 times
+	 *    int_key[350:0] = tmp_key[351:1]
+	 *    int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+	 *
+	 *    hash[15:0] = 0;
+	 *    for (i = 0; i < 351; i++) {
+	 *        if (int_key[i])
+	 *            hash ^= int_stream[(i + 15):i];
+	 *    }
+	 */
+
+	union {
+		u64    fill[6];
+		u32    key[11];
+		u8     key_stream[44];
+	} tmp_key;
+
+	u8   *stream = (u8 *)atr_input;
+	u8   int_key[44];      /* upper-most bit unused */
+	u8   hash_str[46];     /* upper-most 2 bits unused */
+	u16  hash_result = 0;
+	int  i, j, k, h;
+
+	/*
+	 * Initialize the fill member to prevent warnings
+	 * on some compilers
+	 */
+	tmp_key.fill[0] = 0;
+
+	/* First load the temporary key stream */
+	for (i = 0; i < 6; i++) {
+		u64 fillkey = ((u64)key << 32) | key;
+		tmp_key.fill[i] = fillkey;
+	}
+
+	/*
+	 * Set the interim key for the hashing.  Bit 352 is unused, so we must
+	 * shift and compensate when building the key.
+	 */
+
+	int_key[0] = tmp_key.key_stream[0] >> 1;
+	for (i = 1, j = 0; i < 44; i++) {
+		unsigned int this_key = tmp_key.key_stream[j] << 7;
+		j++;
+		int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
+	}
+
+	/*
+	 * Set the interim bit string for the hashing.  Bits 368 and 367 are
+	 * unused, so shift and compensate when building the string.
+	 */
+	hash_str[0] = (stream[40] & 0x7f) >> 1;
+	for (i = 1, j = 40; i < 46; i++) {
+		unsigned int this_str = stream[j] << 7;
+		j++;
+		if (j > 41)
+			j = 0;
+		hash_str[i] = (u8)(this_str | (stream[j] >> 1));
+	}
+
+	/*
+	 * Now compute the hash.  i is the index into hash_str, j is into our
+	 * key stream, k is counting the number of bits, and h iterates within
+	 * each byte.
+	 */
+	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
+		for (h = 0; h < 8 && k < 351; h++, k++) {
+			if (int_key[j] & (1 << h)) {
+				/*
+				 * Key bit is set, XOR in the current 16-bit
+				 * string.  Example of processing:
+				 *    h = 0,
+				 *      tmp = (hash_str[i - 2] & 0 << 16) |
+				 *            (hash_str[i - 1] & 0xff << 8) |
+				 *            (hash_str[i] & 0xff >> 0)
+				 *      So tmp = hash_str[15 + k:k], since the
+				 *      i + 2 clause rolls off the 16-bit value
+				 *    h = 7,
+				 *      tmp = (hash_str[i - 2] & 0x7f << 9) |
+				 *            (hash_str[i - 1] & 0xff << 1) |
+				 *            (hash_str[i] & 0x80 >> 7)
+				 */
+				int tmp = (hash_str[i] >> h);
+				tmp |= (hash_str[i - 1] << (8 - h));
+				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
+				             << (16 - h);
+				hash_result ^= (u16)tmp;
+			}
+		}
+	}
+
+	return hash_result;
+}
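For reference, this is how the filter-programming paths later in this file consume the hash; a minimal sketch, assuming input points at a populated struct ixgbe_atr_input (the helper name is illustrative):

static u32 example_atr_hashes(struct ixgbe_atr_input *input)
{
	u16 bucket_hash, sig_hash;

	bucket_hash = ixgbe_atr_compute_hash_82599(input,
	                                           IXGBE_ATR_BUCKET_HASH_KEY);
	bucket_hash &= IXGBE_ATR_HASH_MASK;	/* only 15 bits are used */
	sig_hash = ixgbe_atr_compute_hash_82599(input,
	                                        IXGBE_ATR_SIGNATURE_HASH_KEY);

	/* FDIRHASH layout: signature hash above the bucket hash */
	return sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
}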
+
+/**
+ *  ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
+ *  @input: input stream to modify
+ *  @vlan: the VLAN id to load
+ **/
+s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
+{
+	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
+	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
+ *  @input: input stream to modify
+ *  @src_addr: the IP address to load
+ **/
+s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
+{
+	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
+	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
+	                                               (src_addr >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
+	                                                (src_addr >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
+ *  @input: input stream to modify
+ *  @dst_addr: the IP address to load
+ **/
+s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
+{
+	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
+	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
+	                                               (dst_addr >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
+	                                                (dst_addr >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
+ *  @input: input stream to modify
+ *  @src_addr_1: the first 4 bytes of the IP address to load
+ *  @src_addr_2: the second 4 bytes of the IP address to load
+ *  @src_addr_3: the third 4 bytes of the IP address to load
+ *  @src_addr_4: the fourth 4 bytes of the IP address to load
+ **/
+s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
+                                        u32 src_addr_1, u32 src_addr_2,
+                                        u32 src_addr_3, u32 src_addr_4)
+{
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
+	                                               (src_addr_4 >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
+	                                              (src_addr_4 >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
+
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
+	                                               (src_addr_3 >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
+	                                              (src_addr_3 >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
+
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
+	                                               (src_addr_2 >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
+	                                              (src_addr_2 >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
+
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
+	                                               (src_addr_1 >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
+	                                              (src_addr_1 >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
+ *  @input: input stream to modify
+ *  @dst_addr_1: the first 4 bytes of the IP address to load
+ *  @dst_addr_2: the second 4 bytes of the IP address to load
+ *  @dst_addr_3: the third 4 bytes of the IP address to load
+ *  @dst_addr_4: the fourth 4 bytes of the IP address to load
+ **/
+s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
+                                        u32 dst_addr_1, u32 dst_addr_2,
+                                        u32 dst_addr_3, u32 dst_addr_4)
+{
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
+	                                               (dst_addr_4 >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
+	                                              (dst_addr_4 >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
+
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
+	                                               (dst_addr_3 >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
+	                                              (dst_addr_3 >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
+
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
+	                                               (dst_addr_2 >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
+	                                              (dst_addr_2 >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
+
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
+	                                               (dst_addr_1 >> 8) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
+	                                              (dst_addr_1 >> 16) & 0xff;
+	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_set_src_port_82599 - Sets the source port
+ *  @input: input stream to modify
+ *  @src_port: the source port to load
+ **/
+s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
+{
+	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
+	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_set_dst_port_82599 - Sets the destination port
+ *  @input: input stream to modify
+ *  @dst_port: the destination port to load
+ **/
+s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
+{
+	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
+	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
+ *  @input: input stream to modify
+ *  @flex_byte: the flexible bytes to load
+ **/
+s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
+{
+	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
+	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
+ *  @input: input stream to modify
+ *  @vm_pool: the Virtual Machine pool to load
+ **/
+s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
+                                       u8 vm_pool)
+{
+	input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
+ *  @input: input stream to modify
+ *  @l4type: the layer 4 type value to load
+ **/
+s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
+{
+	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
+ *  @input: input stream to search
+ *  @vlan: the VLAN id to load
+ **/
+static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
+                                       u16 *vlan)
+{
+	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
+	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
+ *  @input: input stream to search
+ *  @src_addr: the IP address to load
+ **/
+static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
+                                        u32 *src_addr)
+{
+	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
+	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
+	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
+	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
+ *  @input: input stream to search
+ *  @dst_addr: the IP address to load
+ **/
+static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
+                                        u32 *dst_addr)
+{
+	*dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
+	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
+	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
+	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
+ *  @input: input stream to search
+ *  @src_addr_1: the first 4 bytes of the IP address to load
+ *  @src_addr_2: the second 4 bytes of the IP address to load
+ *  @src_addr_3: the third 4 bytes of the IP address to load
+ *  @src_addr_4: the fourth 4 bytes of the IP address to load
+ **/
+static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
+                                        u32 *src_addr_1, u32 *src_addr_2,
+                                        u32 *src_addr_3, u32 *src_addr_4)
+{
+	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
+	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
+	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
+	*src_addr_1 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
+
+	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
+	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
+	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
+	*src_addr_2 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
+
+	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
+	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
+	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
+	*src_addr_3 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
+
+	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
+	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
+	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
+	*src_addr_4 |= input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
+ *  @input: input stream to search
+ *  @dst_addr_1: the first 4 bytes of the IP address to load
+ *  @dst_addr_2: the second 4 bytes of the IP address to load
+ *  @dst_addr_3: the third 4 bytes of the IP address to load
+ *  @dst_addr_4: the fourth 4 bytes of the IP address to load
+ **/
+s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
+                                        u32 *dst_addr_1, u32 *dst_addr_2,
+                                        u32 *dst_addr_3, u32 *dst_addr_4)
+{
+	*dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
+	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
+	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
+	*dst_addr_1 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
+
+	*dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
+	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
+	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
+	*dst_addr_2 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
+
+	*dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
+	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
+	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
+	*dst_addr_3 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
+
+	*dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
+	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
+	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
+	*dst_addr_4 |= input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_get_src_port_82599 - Gets the source port
+ *  @input: input stream to search
+ *  @src_port: the source port to load
+ *
+ *  Even though the input is given in big-endian, the FDIRPORT registers
+ *  expect the ports to be programmed in little-endian.  Hence the need to swap
+ *  endianness when retrieving the data.  This can be confusing since the
+ *  internal hash engine expects it to be big-endian.
+ **/
+static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
+                                        u16 *src_port)
+{
+	*src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
+	*src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_get_dst_port_82599 - Gets the destination port
+ *  @input: input stream to search
+ *  @dst_port: the destination port to load
+ *
+ *  Even though the input is given in big-endian, the FDIRPORT registers
+ *  expect the ports to be programmed in little-endian.  Hence the need to swap
+ *  endianness when retrieving the data.  This can be confusing since the
+ *  internal hash engine expects it to be big-endian.
+ **/
+static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
+                                        u16 *dst_port)
+{
+	*dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
+	*dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
+ *  @input: input stream to search
+ *  @flex_byte: the flexible bytes to load
+ **/
+static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
+                                         u16 *flex_byte)
+{
+	*flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
+	*flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+
+	return 0;
+}
+
+/**
+ *  ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
+ *  @input: input stream to search
+ *  @vm_pool: the Virtual Machine pool to load
+ **/
+s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input,
+                                       u8 *vm_pool)
+{
+	*vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
 
 	return 0;
 }
 
 /**
+ *  ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
+ *  @input: input stream to search
+ *  @l4type: the layer 4 type value to load
+ **/
+static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
+                                      u8 *l4type)
+{
+	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+
+	return 0;
+}
+
+/**
+ *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
+ *  @hw: pointer to hardware structure
+ *  @input: input bitstream
+ *  @queue: queue index to direct traffic to
+ **/
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+                                          struct ixgbe_atr_input *input,
+                                          u8 queue)
+{
+	u64  fdirhashcmd;
+	u64  fdircmd;
+	u32  fdirhash;
+	u16  bucket_hash, sig_hash;
+	u8   l4type;
+
+	bucket_hash = ixgbe_atr_compute_hash_82599(input,
+	                                           IXGBE_ATR_BUCKET_HASH_KEY);
+
+	/* bucket_hash is only 15 bits */
+	bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+	sig_hash = ixgbe_atr_compute_hash_82599(input,
+	                                        IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+	/* Get the l4type in order to program FDIRCMD properly */
+	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
+	ixgbe_atr_get_l4type_82599(input, &l4type);
+
+	/*
+	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
+	 */
+	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
+
+	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+	           IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
+
+	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+	case IXGBE_ATR_L4TYPE_TCP:
+		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
+		break;
+	case IXGBE_ATR_L4TYPE_UDP:
+		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
+		break;
+	case IXGBE_ATR_L4TYPE_SCTP:
+		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+		break;
+	default:
+		hw_dbg(hw, "Error on l4type input\n");
+		return IXGBE_ERR_CONFIG;
+	}
+
+	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
+		fdircmd |= IXGBE_FDIRCMD_IPV6;
+
+	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
+	fdirhashcmd = ((fdircmd << 32) | fdirhash);
+
+	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+	return 0;
+}
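A hedged end-to-end sketch: building an ATR input for a TCP/IPv4 flow with the set_* helpers above and programming it as a signature filter (the addresses, ports, queue index, and function name are placeholders):

static s32 example_add_tcp_filter(struct ixgbe_hw *hw)
{
	struct ixgbe_atr_input atr_input;

	memset(&atr_input, 0, sizeof(atr_input));
	ixgbe_atr_set_src_ipv4_82599(&atr_input, 0x0a000001);	/* 10.0.0.1 */
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, 0x0a000002);	/* 10.0.0.2 */
	ixgbe_atr_set_src_port_82599(&atr_input, 1024);
	ixgbe_atr_set_dst_port_82599(&atr_input, 80);
	ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);

	/* Steer matching receive traffic to Rx queue 0 */
	return ixgbe_fdir_add_signature_filter_82599(hw, &atr_input, 0);
}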
+
+/**
+ *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
+ *  @hw: pointer to hardware structure
+ *  @input: input bitstream
+ *  @soft_id: software index used by the caller to identify this filter
+ *  @queue: queue index to direct traffic to
+ *
+ *  Note that the caller must hold a lock when calling this function, since
+ *  the hardware writes must be protected from one another.
+ **/
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+                                               struct ixgbe_atr_input *input,
+                                               u16 soft_id,
+                                               u8 queue)
+{
+	u32 fdircmd = 0;
+	u32 fdirhash;
+	u32 src_ipv4, dst_ipv4;
+	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
+	u16 src_port, dst_port, vlan_id, flex_bytes;
+	u16 bucket_hash;
+	u8  l4type;
+
+	/* Get our input values */
+	ixgbe_atr_get_l4type_82599(input, &l4type);
+
+	/*
+	 * Check l4type formatting, and bail out before we touch the hardware
+	 * if there's a configuration issue
+	 */
+	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+	case IXGBE_ATR_L4TYPE_TCP:
+		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
+		break;
+	case IXGBE_ATR_L4TYPE_UDP:
+		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
+		break;
+	case IXGBE_ATR_L4TYPE_SCTP:
+		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+		break;
+	default:
+		hw_dbg(hw, "Error on l4type input\n");
+		return IXGBE_ERR_CONFIG;
+	}
+
+	bucket_hash = ixgbe_atr_compute_hash_82599(input,
+	                                           IXGBE_ATR_BUCKET_HASH_KEY);
+
+	/* bucket_hash is only 15 bits */
+	bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+	ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
+	ixgbe_atr_get_src_port_82599(input, &src_port);
+	ixgbe_atr_get_dst_port_82599(input, &dst_port);
+	ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
+
+	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
+
+	/* Now figure out if we're IPv4 or IPv6 */
+	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+		/* IPv6 */
+		ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
+	                                     &src_ipv6_3, &src_ipv6_4);
+
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
+		/* The last 4 bytes use the same register as IPv4 */
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
+
+		fdircmd |= IXGBE_FDIRCMD_IPV6;
+		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
+	} else {
+		/* IPv4 */
+		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
+	}
+
+	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
+	                            (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
+	                       (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+
+	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
+	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
+	fdircmd |= IXGBE_FDIRCMD_LAST;
+	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+	return 0;
+}
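Perfect filters follow the same pattern but additionally carry a caller-chosen soft_id and, per the kernel-doc above, require the caller to serialize the hardware writes; a sketch under those assumptions (names are illustrative):

static s32 example_add_perfect_filter(struct ixgbe_hw *hw,
                                      struct ixgbe_atr_input *input)
{
	/* Caller is assumed to hold a lock covering FDIR programming */
	return ixgbe_fdir_add_perfect_filter_82599(hw, input,
	                                           1 /* soft_id */, 0);
}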
+/**
  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
  *  @hw: pointer to hardware structure
  *  @reg: analog register to read
@@ -1058,7 +2179,7 @@ s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
  *
  *  Performs read operation to Omer analog register specified.
  **/
-s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
 {
 	u32  core_ctl;
 
@@ -1080,7 +2201,7 @@ s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
  *
  *  Performs write operation to Omer analog register specified.
  **/
-s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
 {
 	u32  core_ctl;
 
@@ -1100,11 +2221,12 @@ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
  *  Then performs device-specific:
  *  Clears the rate limiter registers.
  **/
-s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
 {
 	u32 q_num;
+	s32 ret_val;
 
-	ixgbe_start_hw_generic(hw);
+	ret_val = ixgbe_start_hw_generic(hw);
 
 	/* Clear the rate limiters */
 	for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) {
@@ -1113,7 +2235,13 @@ s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
 	}
 	IXGBE_WRITE_FLUSH(hw);
 
-	return 0;
+	/* We need to run link autotry after the driver loads */
+	hw->mac.autotry_restart = true;
+
+	if (ret_val == 0)
+		ret_val = ixgbe_verify_fw_version_82599(hw);
+
+	return ret_val;
 }
 
 /**
@@ -1122,7 +2250,7 @@ s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
  *
  *  Determines the physical layer module found on the current adapter.
  **/
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
 	status = ixgbe_identify_phy_generic(hw);
@@ -1137,56 +2265,103 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
  *
  *  Determines physical layer capabilities of the current configuration.
  **/
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
 {
 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+	u16 ext_ability = 0;
 	u8 comp_codes_10g = 0;
 
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_82599:
-	case IXGBE_DEV_ID_82599_KX4:
-		/* Default device ID is mezzanine card KX/KX4 */
-		physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
-		                  IXGBE_PHYSICAL_LAYER_1000BASE_KX);
+	hw->phy.ops.identify(hw);
+
+	if (hw->phy.type == ixgbe_phy_tn ||
+	    hw->phy.type == ixgbe_phy_cu_unknown) {
+		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+		IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+		goto out;
+	}
+
+	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+	case IXGBE_AUTOC_LMS_1G_AN:
+	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
+			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+			goto out;
+		} else
+			/* SFI mode so read SFP module */
+			goto sfp_check;
 		break;
-	case IXGBE_DEV_ID_82599_SFP:
-		hw->phy.ops.identify_sfp(hw);
+	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
+		goto out;
+		break;
+	case IXGBE_AUTOC_LMS_10G_SERIAL:
+		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+			goto out;
+		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
+			goto sfp_check;
+		break;
+	case IXGBE_AUTOC_LMS_KX4_KX_KR:
+	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+		if (autoc & IXGBE_AUTOC_KX_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+		if (autoc & IXGBE_AUTOC_KX4_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+		if (autoc & IXGBE_AUTOC_KR_SUPP)
+			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+		goto out;
+		break;
+	default:
+		goto out;
+		break;
+	}
 
-		switch (hw->phy.sfp_type) {
-		case ixgbe_sfp_type_da_cu:
-		case ixgbe_sfp_type_da_cu_core0:
-		case ixgbe_sfp_type_da_cu_core1:
-			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-			break;
-		case ixgbe_sfp_type_sr:
+sfp_check:
+	/* SFP check must be done last since DA modules are sometimes used to
+	 * test KR mode - we need to identify KR mode correctly before the
+	 * SFP module check.  Call identify_sfp because the pluggable module
+	 * may have changed. */
+	hw->phy.ops.identify_sfp(hw);
+	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+		goto out;
+
+	switch (hw->phy.type) {
+	case ixgbe_phy_tw_tyco:
+	case ixgbe_phy_tw_unknown:
+		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+		break;
+	case ixgbe_phy_sfp_avago:
+	case ixgbe_phy_sfp_ftl:
+	case ixgbe_phy_sfp_intel:
+	case ixgbe_phy_sfp_unknown:
+		hw->phy.ops.read_i2c_eeprom(hw,
+		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-			break;
-		case ixgbe_sfp_type_lr:
+		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
 			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-			break;
-		case ixgbe_sfp_type_srlr_core0:
-		case ixgbe_sfp_type_srlr_core1:
-			hw->phy.ops.read_i2c_eeprom(hw,
-			                            IXGBE_SFF_10GBE_COMP_CODES,
-			                            &comp_codes_10g);
-			if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-				physical_layer =
-				                IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-			else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-				physical_layer =
-				                IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-			else
-				physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-		default:
-			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-			break;
-		}
 		break;
 	default:
-		physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
 		break;
 	}
 
+out:
 	return physical_layer;
 }
 
@@ -1197,7 +2372,7 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
  *
  *  Enables the Rx DMA unit for 82599
  **/
-s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
 {
 #define IXGBE_MAX_SECRX_POLL 30
 	int i;
@@ -1234,6 +2409,183 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
 	return 0;
 }
 
+/**
+ *  ixgbe_get_device_caps_82599 - Get additional device capabilities
+ *  @hw: pointer to hardware structure
+ *  @device_caps: the EEPROM word with the extra device capabilities
+ *
+ *  This function will read the EEPROM location for the device capabilities,
+ *  and return the word through device_caps.
+ **/
+static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
+{
+	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
+ *  @hw: pointer to hardware structure
+ *  @san_mac_offset: SAN MAC address offset
+ *
+ *  This function will read the EEPROM location for the SAN MAC address
+ *  pointer, and returns the value at that location.  This is used in both
+ *  get and set mac_addr routines.
+ **/
+static s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
+                                               u16 *san_mac_offset)
+{
+	/*
+	 * First read the EEPROM pointer to see if the MAC addresses are
+	 * available.
+	 */
+	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
+ *  per-port, so set_lan_id() must be called before reading the addresses.
+ *  set_lan_id() is called by identify_sfp(), but this cannot be relied
+ *  upon for non-SFP connections, so we must call it here.
+ **/
+static s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+	u16 san_mac_data, san_mac_offset;
+	u8 i;
+
+	/*
+	 * First read the EEPROM pointer to see if the MAC addresses are
+	 * available.  If they're not, no point in calling set_lan_id() here.
+	 */
+	ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
+
+	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+		/*
+		 * No addresses available in this EEPROM.  It's not an
+		 * error though, so just wipe the local address and return.
+		 */
+		for (i = 0; i < 6; i++)
+			san_mac_addr[i] = 0xFF;
+
+		goto san_mac_addr_out;
+	}
+
+	/* make sure we know which port we need to program */
+	hw->mac.ops.set_lan_id(hw);
+	/* apply the port offset to the address offset */
+	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+	                 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
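+	/* each 16-bit EEPROM word holds two address bytes, low byte first */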
+	for (i = 0; i < 3; i++) {
+		hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+		san_mac_addr[i * 2] = (u8)(san_mac_data);
+		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+		san_mac_offset++;
+	}
+
+san_mac_addr_out:
+	return 0;
+}
+
+/**
+ *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
+ *  @hw: pointer to hardware structure
+ *
+ *  Verifies that the installed firmware version is 0.6 or higher
+ *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ *  if the FW version is not supported.
+ **/
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_EEPROM_VERSION;
+	u16 fw_offset, fw_ptp_cfg_offset;
+	u16 fw_version = 0;
+
+	/* firmware check is only necessary for SFI devices */
+	if (hw->phy.media_type != ixgbe_media_type_fiber) {
+		status = 0;
+		goto fw_version_out;
+	}
+
+	/* get the offset to the Firmware Module block */
+	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+		goto fw_version_out;
+
+	/* get the offset to the Pass Through Patch Configuration block */
+	hw->eeprom.ops.read(hw, (fw_offset +
+	                         IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+	                         &fw_ptp_cfg_offset);
+
+	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+		goto fw_version_out;
+
+	/* get the firmware version */
+	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+	                         IXGBE_FW_PATCH_VERSION_4),
+	                         &fw_version);
+
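+	/* a patch version above 0x5 corresponds to firmware 0.6 or newer */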
+	if (fw_version > 0x5)
+		status = 0;
+
+fw_version_out:
+	return status;
+}
+
+/**
+ *  ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
+ *  the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function will read the EEPROM from the alternative SAN MAC address
+ *  block to check for support of the alternative WWNN/WWPN prefixes.
+ **/
+static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                      u16 *wwpn_prefix)
+{
+	u16 offset, caps;
+	u16 alt_san_mac_blk_offset;
+
+	/* clear output first */
+	*wwnn_prefix = 0xFFFF;
+	*wwpn_prefix = 0xFFFF;
+
+	/* check if alternative SAN MAC is supported */
+	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+	                    &alt_san_mac_blk_offset);
+
+	if ((alt_san_mac_blk_offset == 0) ||
+	    (alt_san_mac_blk_offset == 0xFFFF))
+		goto wwn_prefix_out;
+
+	/* check capability in alternative san mac address block */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+	hw->eeprom.ops.read(hw, offset, &caps);
+	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+		goto wwn_prefix_out;
+
+	/* get the corresponding prefix for WWNN/WWPN */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+	return 0;
+}
+
 static struct ixgbe_mac_operations mac_ops_82599 = {
 	.init_hw                = &ixgbe_init_hw_generic,
 	.reset_hw               = &ixgbe_reset_hw_82599,
@@ -1243,19 +2595,21 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
 	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
 	.enable_rx_dma          = &ixgbe_enable_rx_dma_82599,
 	.get_mac_addr           = &ixgbe_get_mac_addr_generic,
+	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_82599,
+	.get_device_caps        = &ixgbe_get_device_caps_82599,
+	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_82599,
 	.stop_adapter           = &ixgbe_stop_adapter_generic,
 	.get_bus_info           = &ixgbe_get_bus_info_generic,
 	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
 	.read_analog_reg8       = &ixgbe_read_analog_reg8_82599,
 	.write_analog_reg8      = &ixgbe_write_analog_reg8_82599,
 	.setup_link             = &ixgbe_setup_mac_link_82599,
-	.setup_link_speed       = &ixgbe_setup_mac_link_speed_82599,
 	.check_link             = &ixgbe_check_mac_link_82599,
 	.get_link_capabilities  = &ixgbe_get_link_capabilities_82599,
 	.led_on                 = &ixgbe_led_on_generic,
 	.led_off                = &ixgbe_led_off_generic,
-	.blink_led_start        = &ixgbe_blink_led_start_82599,
-	.blink_led_stop         = &ixgbe_blink_led_stop_82599,
+	.blink_led_start        = &ixgbe_blink_led_start_generic,
+	.blink_led_stop         = &ixgbe_blink_led_stop_generic,
 	.set_rar                = &ixgbe_set_rar_generic,
 	.clear_rar              = &ixgbe_clear_rar_generic,
 	.set_vmdq               = &ixgbe_set_vmdq_82599,
@@ -1269,7 +2623,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
 	.disable_mc             = &ixgbe_disable_mc_generic,
 	.clear_vfta             = &ixgbe_clear_vfta_82599,
 	.set_vfta               = &ixgbe_set_vfta_82599,
-	.setup_fc               = &ixgbe_setup_fc_generic,
+	.fc_enable              = &ixgbe_fc_enable_generic,
 	.init_uta_tables        = &ixgbe_init_uta_tables_82599,
 	.setup_sfp              = &ixgbe_setup_sfp_modules_82599,
 };
@@ -1285,6 +2639,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
 static struct ixgbe_phy_operations phy_ops_82599 = {
 	.identify               = &ixgbe_identify_phy_82599,
 	.identify_sfp           = &ixgbe_identify_sfp_module_generic,
+	.init			= &ixgbe_init_phy_ops_82599,
 	.reset                  = &ixgbe_reset_phy_generic,
 	.read_reg               = &ixgbe_read_phy_reg_generic,
 	.write_reg              = &ixgbe_write_phy_reg_generic,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 9be19a7..5e6b7fa 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -50,7 +50,7 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
 static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
 static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
 
 /**
  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -71,12 +71,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 	/* Identify the PHY */
 	hw->phy.ops.identify(hw);
 
-	/*
-	 * Store MAC address from RAR0, clear receive address registers, and
-	 * clear the multicast table
-	 */
-	hw->mac.ops.init_rx_addrs(hw);
-
 	/* Clear the VLAN filter table */
 	hw->mac.ops.clear_vfta(hw);
 
@@ -89,6 +83,9 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
 	IXGBE_WRITE_FLUSH(hw);
 
+	/* Setup flow control */
+	ixgbe_setup_fc(hw, 0);
+
 	/* Clear adapter stopped flag */
 	hw->adapter_stopped = false;
 
@@ -107,13 +104,17 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
 {
+	s32 status;
+
 	/* Reset the hardware */
-	hw->mac.ops.reset_hw(hw);
+	status = hw->mac.ops.reset_hw(hw);
 
-	/* Start the HW */
-	hw->mac.ops.start_hw(hw);
+	if (status == 0) {
+		/* Start the HW */
+		status = hw->mac.ops.start_hw(hw);
+	}
 
-	return 0;
+	return status;
 }
 
 /**
@@ -1373,8 +1374,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
 	 * Clear accounting of old secondary address list,
 	 * don't count RAR[0]
 	 */
-	uc_addr_in_use = hw->addr_ctrl.rar_used_count -
-	                 hw->addr_ctrl.mc_addr_in_rar_count - 1;
+	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
 	hw->addr_ctrl.overflow_promisc = 0;
 
@@ -1489,40 +1489,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
 }
 
 /**
- *  ixgbe_add_mc_addr - Adds a multicast address.
- *  @hw: pointer to hardware structure
- *  @mc_addr: new multicast address
- *
- *  Adds it to unused receive address register or to the multicast table.
- **/
-static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
-{
-	u32 rar_entries = hw->mac.num_rar_entries;
-	u32 rar;
-
-	hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
-	       mc_addr[0], mc_addr[1], mc_addr[2],
-	       mc_addr[3], mc_addr[4], mc_addr[5]);
-
-	/*
-	 * Place this multicast address in the RAR if there is room,
-	 * else put it in the MTA
-	 */
-	if (hw->addr_ctrl.rar_used_count < rar_entries) {
-		rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
-		/* use RAR from the end up for multicast */
-		hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
-		hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
-		hw->addr_ctrl.rar_used_count++;
-		hw->addr_ctrl.mc_addr_in_rar_count++;
-	} else {
-		ixgbe_set_mta(hw, mc_addr);
-	}
-
-	hw_dbg(hw, "ixgbe_add_mc_addr Complete\n");
-}
-
-/**
  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
  *  @hw: pointer to hardware structure
  *  @mc_addr_list: the list of new multicast addresses
@@ -1538,7 +1504,6 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
                                       u32 mc_addr_count, ixgbe_mc_addr_itr next)
 {
 	u32 i;
-	u32 rar_entries = hw->mac.num_rar_entries;
 	u32 vmdq;
 
 	/*
@@ -1546,18 +1511,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
 	 * use.
 	 */
 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
-	hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count;
-	hw->addr_ctrl.mc_addr_in_rar_count = 0;
 	hw->addr_ctrl.mta_in_use = 0;
 
-	/* Zero out the other receive addresses. */
-	hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
-	          rar_entries - 1);
-	for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
-		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
-	}
-
 	/* Clear the MTA */
 	hw_dbg(hw, " Clearing MTA\n");
 	for (i = 0; i < hw->mac.mcft_size; i++)
@@ -1566,7 +1521,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
 	/* Add the new addresses */
 	for (i = 0; i < mc_addr_count; i++) {
 		hw_dbg(hw, " Adding the multicast addresses:\n");
-		ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
+		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
 	}
 
 	/* Enable mta */
@@ -1626,19 +1581,30 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
 }
 
 /**
- *  ixgbe_fc_enable - Enable flow control
+ *  ixgbe_fc_enable_generic - Enable flow control
  *  @hw: pointer to hardware structure
  *  @packetbuf_num: packet buffer number (0-7)
  *
  *  Enable flow control according to the current settings.
  **/
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 {
 	s32 ret_val = 0;
-	u32 mflcn_reg;
-	u32 fccfg_reg;
+	u32 mflcn_reg, fccfg_reg;
 	u32 reg;
+	u32 rx_pba_size;
 
+#ifdef CONFIG_DCB
+	if (hw->fc.requested_mode == ixgbe_fc_pfc)
+		goto out;
+
+#endif /* CONFIG_DCB */
+	/* Negotiate the fc mode to use */
+	ret_val = ixgbe_fc_autoneg(hw);
+	if (ret_val)
+		goto out;
+
+	/* Disable any previous flow control settings */
 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
 	mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
 
@@ -1658,7 +1624,10 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
 	 */
 	switch (hw->fc.current_mode) {
 	case ixgbe_fc_none:
-		/* Flow control completely disabled by software override. */
+		/*
+		 * Flow control is disabled by software override or autoneg.
+		 * The code below will actually disable it in the HW.
+		 */
 		break;
 	case ixgbe_fc_rx_pause:
 		/*
@@ -1687,33 +1656,56 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
 	case ixgbe_fc_pfc:
 		goto out;
 		break;
-#endif
+#endif /* CONFIG_DCB */
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = -IXGBE_ERR_CONFIG;
+		ret_val = IXGBE_ERR_CONFIG;
 		goto out;
 		break;
 	}
 
-	/* Enable 802.3x based flow control settings. */
+	/* Set 802.3x based flow control settings. */
+	mflcn_reg |= IXGBE_MFLCN_DPF;
 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
-	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-		if (hw->fc.send_xon)
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num),
-			                (hw->fc.low_water | IXGBE_FCRTL_XONE));
-		else
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num),
-			                hw->fc.low_water);
+	reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
+	/* Thresholds are different for link flow control when in DCB mode */
+	if (reg & IXGBE_MTQC_RT_ENA) {
+		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
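+		/*
+		 * The water marks below are fixed fractions of the packet
+		 * buffer size: low = size/32, high = size/4.
+		 */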
+
+		/* Always disable XON for LFC when in DCB mode */
+		reg = (rx_pba_size >> 5) & 0xFFE0;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
+
+		reg = (rx_pba_size >> 2) & 0xFFE0;
+		if (hw->fc.current_mode & ixgbe_fc_tx_pause)
+			reg |= IXGBE_FCRTH_FCEN;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
+	} else {
+		/*
+		 * Set up and enable Rx high/low water mark thresholds,
+		 * enable XON.
+		 */
+		if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+			if (hw->fc.send_xon) {
+				IXGBE_WRITE_REG(hw,
+				              IXGBE_FCRTL_82599(packetbuf_num),
+			                      (hw->fc.low_water |
+				              IXGBE_FCRTL_XONE));
+			} else {
+				IXGBE_WRITE_REG(hw,
+				              IXGBE_FCRTL_82599(packetbuf_num),
+				              hw->fc.low_water);
+			}
 
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
-		                (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
+			               (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+		}
 	}
 
 	/* Configure pause time (2 TCs per register) */
-	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num));
+	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
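+	/* even-numbered buffers use the low 16 bits, odd ones the high 16 */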
 	if ((packetbuf_num & 1) == 0)
 		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
 	else
@@ -1730,157 +1722,162 @@ out:
  *  ixgbe_fc_autoneg - Configure flow control
  *  @hw: pointer to hardware structure
  *
- *  Negotiates flow control capabilities with link partner using autoneg and
- *  applies the results.
+ *  Compares our advertised flow control capabilities to those advertised by
+ *  our link partner, and determines the proper flow control mode to use.
  **/
 s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 {
 	s32 ret_val = 0;
-	u32 i, reg, pcs_anadv_reg, pcs_lpab_reg;
-
-	reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+	ixgbe_link_speed speed;
+	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+	u32 links2, anlp1_reg, autoc_reg, links;
+	bool link_up;
 
 	/*
-	 * The possible values of fc.current_mode are:
-	 * 0:  Flow control is completely disabled
-	 * 1:  Rx flow control is enabled (we can receive pause frames,
-	 *     but not send pause frames).
-	 * 2:  Tx flow control is enabled (we can send pause frames but
-	 *     we do not support receiving pause frames).
-	 * 3:  Both Rx and Tx flow control (symmetric) are enabled.
-	 * 4:  Priority Flow Control is enabled.
-	 * other: Invalid.
+	 * AN should have completed when the cable was plugged in.
+	 * Look for reasons to bail out.  Bail out if:
+	 * - FC autoneg is disabled, or if
+	 * - link is not up.
+	 *
+	 * Since we're being called from an LSC, link is already known to be up.
+	 * So use link_up_wait_to_complete=false.
 	 */
-	switch (hw->fc.current_mode) {
-	case ixgbe_fc_none:
-		/* Flow control completely disabled by software override. */
-		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		break;
-	case ixgbe_fc_rx_pause:
-		/*
-		 * Rx Flow control is enabled and Tx Flow control is
-		 * disabled by software override. Since there really
-		 * isn't a way to advertise that we are capable of RX
-		 * Pause ONLY, we will advertise that we support both
-		 * symmetric and asymmetric Rx PAUSE.  Later, we will
-		 * disable the adapter's ability to send PAUSE frames.
-		 */
-		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		break;
-	case ixgbe_fc_tx_pause:
-		/*
-		 * Tx Flow control is enabled, and Rx Flow control is
-		 * disabled by software override.
-		 */
-		reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
-		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
-		break;
-	case ixgbe_fc_full:
-		/* Flow control (both Rx and Tx) is enabled by SW override. */
-		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		break;
-#ifdef CONFIG_DCB
-	case ixgbe_fc_pfc:
-		goto out;
-		break;
-#endif
-	default:
-		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = -IXGBE_ERR_CONFIG;
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+	if (hw->fc.disable_fc_autoneg || (!link_up)) {
+		hw->fc.fc_was_autonegged = false;
+		hw->fc.current_mode = hw->fc.requested_mode;
 		goto out;
-		break;
 	}
 
-	IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
-	reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
-	/* Set PCS register for autoneg */
-	/* Enable and restart autoneg */
-	reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
-
-	/* Disable AN timeout */
-	if (hw->fc.strict_ieee)
-		reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
-
-	hw_dbg(hw, "Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
-	IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-
-	/* See if autonegotiation has succeeded */
-	hw->mac.autoneg_succeeded = 0;
-	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
-		msleep(10);
-		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-		if ((reg & (IXGBE_PCS1GLSTA_LINK_OK |
-		     IXGBE_PCS1GLSTA_AN_COMPLETE)) ==
-		    (IXGBE_PCS1GLSTA_LINK_OK |
-		     IXGBE_PCS1GLSTA_AN_COMPLETE)) {
-			if (!(reg & IXGBE_PCS1GLSTA_AN_TIMED_OUT))
-				hw->mac.autoneg_succeeded = 1;
-			break;
+	/*
+	 * On backplane, bail out if
+	 * - backplane autoneg was not completed, or if
+	 * - link partner is not AN enabled
+	 */
+	if (hw->phy.media_type == ixgbe_media_type_backplane) {
+		links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+		if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
+		    ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
+			hw->fc.fc_was_autonegged = false;
+			hw->fc.current_mode = hw->fc.requested_mode;
+			goto out;
 		}
 	}
 
-	if (!hw->mac.autoneg_succeeded) {
-		/* Autoneg failed to achieve a link, so we turn fc off */
-		hw->fc.current_mode = ixgbe_fc_none;
-		hw_dbg(hw, "Flow Control = NONE.\n");
-		goto out;
+	/*
+	 * On multispeed fiber at 1g, bail out if
+	 * - link is up but AN did not complete, or if
+	 * - link is up and AN completed but timed out
+	 */
+	if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
+		linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+		if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+		    ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+			hw->fc.fc_was_autonegged = false;
+			hw->fc.current_mode = hw->fc.requested_mode;
+			goto out;
+		}
 	}
 
 	/*
 	 * Read the AN advertisement and LP ability registers and resolve
 	 * local flow control settings accordingly
 	 */
-	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-	if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-		(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+	if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+	    (hw->phy.media_type != ixgbe_media_type_backplane)) {
+		pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+		pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+		if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+		    (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+			/*
+			 * Now we need to check if the user selected Rx ONLY
+			 * pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise RX
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == ixgbe_fc_full) {
+				hw->fc.current_mode = ixgbe_fc_full;
+				hw_dbg(hw, "Flow Control = FULL.\n");
+			} else {
+				hw->fc.current_mode = ixgbe_fc_rx_pause;
+				hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+			}
+		} else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+			   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+			   (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+			   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+			hw->fc.current_mode = ixgbe_fc_tx_pause;
+			hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+		} else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+			   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+			   !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+			   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+			hw->fc.current_mode = ixgbe_fc_rx_pause;
+			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+		} else {
+			hw->fc.current_mode = ixgbe_fc_none;
+			hw_dbg(hw, "Flow Control = NONE.\n");
+		}
+	}
+
+	if (hw->phy.media_type == ixgbe_media_type_backplane) {
 		/*
-		 * Now we need to check if the user selected Rx ONLY
-		 * of pause frames.  In this case, we had to advertise
-		 * FULL flow control because we could not advertise RX
-		 * ONLY. Hence, we must now check to see if we need to
-		 * turn OFF the TRANSMISSION of PAUSE frames.
+		 * Read the 10g AN autoc and LP ability registers and resolve
+		 * local flow control settings accordingly
 		 */
-		if (hw->fc.requested_mode == ixgbe_fc_full) {
-			hw->fc.current_mode = ixgbe_fc_full;
-			hw_dbg(hw, "Flow Control = FULL.\n");
-		} else {
+		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+		if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+		    (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
+			/*
+			 * Now we need to check if the user selected Rx ONLY
+			 * pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise RX
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == ixgbe_fc_full) {
+				hw->fc.current_mode = ixgbe_fc_full;
+				hw_dbg(hw, "Flow Control = FULL.\n");
+			} else {
+				hw->fc.current_mode = ixgbe_fc_rx_pause;
+				hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+			}
+		} else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+			   (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+			   (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+			   (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
+			hw->fc.current_mode = ixgbe_fc_tx_pause;
+			hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+		} else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+			   (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+			   !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+			   (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
 			hw->fc.current_mode = ixgbe_fc_rx_pause;
 			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+		} else {
+			hw->fc.current_mode = ixgbe_fc_none;
+			hw_dbg(hw, "Flow Control = NONE.\n");
 		}
-	} else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-		   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-		   (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-		   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-		hw->fc.current_mode = ixgbe_fc_tx_pause;
-		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-	} else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-		   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-		   !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-		   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-		hw->fc.current_mode = ixgbe_fc_rx_pause;
-		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-	} else {
-		hw->fc.current_mode = ixgbe_fc_none;
-		hw_dbg(hw, "Flow Control = NONE.\n");
 	}
-
 out:
 	return ret_val;
 }
 
 /**
- *  ixgbe_setup_fc_generic - Set up flow control
+ *  ixgbe_setup_fc - Set up flow control
  *  @hw: pointer to hardware structure
  *
- *  Sets up flow control.
+ *  Called at init time to set up flow control.
  **/
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
 {
 	s32 ret_val = 0;
-	ixgbe_link_speed speed;
-	bool link_up;
+	u32 reg;
 
 #ifdef CONFIG_DCB
 	if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1902,14 +1899,16 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 	 * because it causes the controller to just blast out fc packets.
 	 */
 	if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
-		hw_dbg(hw, "Invalid water mark configuration\n");
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
+		if (hw->fc.requested_mode != ixgbe_fc_none) {
+			hw_dbg(hw, "Invalid water mark configuration\n");
+			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+			goto out;
+		}
 	}
 
 	/*
 	 * Validate the requested mode.  Strict IEEE mode does not allow
-	 * ixgbe_fc_rx_pause because it will cause testing anomalies.
+	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
 	 */
 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
 		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
@@ -1925,21 +1924,139 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 	if (hw->fc.requested_mode == ixgbe_fc_default)
 		hw->fc.requested_mode = ixgbe_fc_full;
 
+	/*
+	 * Set up the 1G flow control advertisement registers so the HW will be
+	 * able to do fc autoneg once the cable is plugged in.  If we end up
+	 * using 10g instead, this is harmless.
+	 */
+	reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
 	/*
-	 * Save off the requested flow control mode for use later.  Depending
-	 * on the link partner's capabilities, we may or may not use this mode.
+	 * The possible values of fc.requested_mode are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames,
+	 *    but not send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but
+	 *    we do not support receiving pause frames).
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
+	 * 4: Priority Flow Control is enabled.
+#endif
+	 * other: Invalid.
 	 */
-	hw->fc.current_mode = hw->fc.requested_mode;
+	switch (hw->fc.requested_mode) {
+	case ixgbe_fc_none:
+		/* Flow control completely disabled by software override. */
+		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
+		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+		break;
+	case ixgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+		break;
+#ifdef CONFIG_DCB
+	case ixgbe_fc_pfc:
+		goto out;
+		break;
+#endif /* CONFIG_DCB */
+	default:
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+		break;
+	}
 
-	/* Decide whether to use autoneg or not. */
-	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL))
-		ret_val = ixgbe_fc_autoneg(hw);
+	IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+	reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+	/* Disable AN timeout */
+	if (hw->fc.strict_ieee)
+		reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
 
-	if (ret_val)
-		goto out;
+	IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+	hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
 
-	ret_val = ixgbe_fc_enable(hw, packetbuf_num);
+	/*
+	 * Set up the 10G flow control advertisement registers so the HW
+	 * can do fc autoneg once the cable is plugged in.  If we end up
+	 * using 1g instead, this is harmless.
+	 */
+	reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	/*
+	 * The possible values of fc.requested_mode are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames,
+	 *    but not send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but
+	 *    we do not support receiving pause frames).
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.requested_mode) {
+	case ixgbe_fc_none:
+		/* Flow control completely disabled by software override. */
+		reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		reg |= (IXGBE_AUTOC_ASM_PAUSE);
+		reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
+		break;
+	case ixgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+		break;
+#ifdef CONFIG_DCB
+	case ixgbe_fc_pfc:
+		goto out;
+		break;
+#endif /* CONFIG_DCB */
+	default:
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+		break;
+	}
+	/*
+	 * AUTOC restart handles negotiation of 1G and 10G. There is
+	 * no need to set the PCS1GCTL register.
+	 */
+	reg |= IXGBE_AUTOC_AN_RESTART;
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
+	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 
 out:
 	return ret_val;
@@ -2004,7 +2121,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 
 	while (timeout) {
 		if (ixgbe_get_eeprom_semaphore(hw))
-			return -IXGBE_ERR_SWFW_SYNC;
+			return IXGBE_ERR_SWFW_SYNC;
 
 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
 		if (!(gssr & (fwmask | swmask)))
@@ -2021,7 +2138,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 
 	if (!timeout) {
 		hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
-		return -IXGBE_ERR_SWFW_SYNC;
+		return IXGBE_ERR_SWFW_SYNC;
 	}
 
 	gssr |= swmask;
@@ -2066,3 +2183,59 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
 
 	return 0;
 }
+
+/**
+ *  ixgbe_blink_led_start_generic - Blink LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to blink
+ **/
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+{
+	ixgbe_link_speed speed = 0;
+	bool link_up = 0;
+	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	/*
+	 * Link must be up to auto-blink the LEDs;
+	 * Force it if link is down.
+	 */
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+
+	if (!link_up) {
+		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+		autoc_reg |= IXGBE_AUTOC_FLU;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+		msleep(10);
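+		/* give the forced link ~10ms to come up before blinking */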
+	}
+
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg |= IXGBE_LED_BLINK(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to stop blinking
+ **/
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+{
+	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+	autoc_reg &= ~IXGBE_AUTOC_FLU;
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+	led_reg &= ~IXGBE_LED_MODE_MASK(index);
+	led_reg &= ~IXGBE_LED_BLINK(index);
+	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index b98bf37..4f15f04 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -68,8 +68,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
-s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packtetbuf_num);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
 s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
@@ -80,6 +79,9 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
 s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
 s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
 
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+
 #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
 
 #ifndef writeq
@@ -100,14 +102,11 @@ s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 
 #ifdef DEBUG
+extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
-printk(KERN_DEBUG, "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg);
+	printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg)
 #else
-static inline int __attribute__ ((format (printf, 2, 3)))
-hw_dbg(struct ixgbe_hw *hw, const char *format, ...)
-{
-	return 0;
-}
+#define hw_dbg(hw, format, arg...) do {} while (0)
 #endif
 
 #endif /* IXGBE_COMMON */
diff --git a/drivers/net/ixgbe/ixgbe_compat.h b/drivers/net/ixgbe/ixgbe_compat.h
index b488c10..61f3a26 100644
--- a/drivers/net/ixgbe/ixgbe_compat.h
+++ b/drivers/net/ixgbe/ixgbe_compat.h
@@ -17,4 +17,15 @@ static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
 	vg->vlan_devices[vlan_id] = NULL;
 }
 
+/*
+ * FCoE CRC & EOF - 8 bytes.
+ */
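+/* defined locally because the RHEL5 kernel headers do not provide it */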
+struct fcoe_crc_eof {
+	__le32          fcoe_crc32;     /* CRC for FC packet */
+	__u8            fcoe_eof;       /* EOF from RFC 3643 */
+	__u8            fcoe_resvd[3];  /* reserved - send zero and ignore */
+} __attribute__((packed));
+
+#define IXGBE_RTTDCS_ARBDIS     0x00000040 /* DCB arbiter disable */
+
 #endif 
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
new file mode 100644
index 0000000..62dfd24
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -0,0 +1,157 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2007 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _DCB_CONFIG_H_
+#define _DCB_CONFIG_H_
+
+#include "ixgbe_type.h"
+
+/* DCB data structures */
+
+#define IXGBE_MAX_PACKET_BUFFERS 8
+#define MAX_USER_PRIORITY        8
+#define MAX_TRAFFIC_CLASS        8
+#define MAX_BW_GROUP             8
+#define BW_PERCENT               100
+
+#define DCB_TX_CONFIG            0
+#define DCB_RX_CONFIG            1
+
+/* DCB error Codes */
+#define DCB_SUCCESS              0
+#define DCB_ERR_CONFIG           -1
+#define DCB_ERR_PARAM            -2
+
+/* Transmit and receive Errors */
+/* Error in bandwidth group allocation */
+#define DCB_ERR_BW_GROUP        -3
+/* Error in traffic class bandwidth allocation */
+#define DCB_ERR_TC_BW           -4
+/* Traffic class has both link strict and group strict enabled */
+#define DCB_ERR_LS_GS           -5
+/* Link strict traffic class has non zero bandwidth */
+#define DCB_ERR_LS_BW_NONZERO   -6
+/* Link strict bandwidth group has non zero bandwidth */
+#define DCB_ERR_LS_BWG_NONZERO  -7
+/*  Traffic class has zero bandwidth */
+#define DCB_ERR_TC_BW_ZERO      -8
+
+#define DCB_NOT_IMPLEMENTED      0x7FFFFFFF
+
+struct dcb_pfc_tc_debug {
+	u8  tc;
+	u8  pause_status;
+	u64 pause_quanta;
+};
+
+enum strict_prio_type {
+	prio_none = 0,
+	prio_group,
+	prio_link
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct tc_bw_alloc {
+	u8 bwg_id;		  /* Bandwidth Group (BWG) ID */
+	u8 bwg_percent;		  /* % of BWG's bandwidth */
+	u8 link_percent;	  /* % of link bandwidth */
+	u8 up_to_tc_bitmap;	  /* User Priority to Traffic Class mapping */
+	u16 data_credits_refill;  /* Credit refill amount in 64B granularity */
+	u16 data_credits_max;	  /* Max credits for a configured packet buffer
+				   * in 64B granularity.*/
+	enum strict_prio_type prio_type; /* Link or Group Strict Priority */
+};
+
+enum dcb_pfc_type {
+	pfc_disabled = 0,
+	pfc_enabled_full,
+	pfc_enabled_tx,
+	pfc_enabled_rx
+};
+
+/* Traffic class configuration */
+struct tc_configuration {
+	struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
+	enum dcb_pfc_type  dcb_pfc; /* Class based flow control setting */
+
+	u16 desc_credits_max; /* For Tx Descriptor arbitration */
+	u8 tc; /* Traffic class (TC) */
+};
+
+enum dcb_rx_pba_cfg {
+	pba_equal,     /* PBA[0-7] each use 64KB FIFO */
+	pba_80_48      /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+};
+
+struct ixgbe_dcb_config {
+	struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
+	u8     bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
+
+	bool  round_robin_enable;
+
+	enum dcb_rx_pba_cfg rx_pba_cfg;
+
+	u32  dcb_cfg_version; /* Not used...OS-specific? */
+	u32  link_speed; /* For bandwidth allocation validation purpose */
+};
+
+/* DCB driver APIs */
+
+/* DCB rule checking function.*/
+s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config);
+
+/* DCB credits calculation */
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
+
+/* DCB PFC functions */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, struct ixgbe_dcb_config *g);
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+
+/* DCB traffic class stats */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *,
+                                     struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *,
+                                     struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+
+/* DCB hw initialization */
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+
+/* DCB definitions for credit calculation */
+#define MAX_CREDIT_REFILL       511  /* 0x1FF * 64B = 32704B */
+#define MINIMUM_CREDIT_REFILL   5    /* 5*64B = 320B */
+#define MINIMUM_CREDIT_FOR_JUMBO 145  /* 145 = UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
+#define DCB_MAX_TSO_SIZE        (32*1024) /* MAX TSO packet size supported in DCB mode */
+#define MINIMUM_CREDIT_FOR_TSO  (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
+#define MAX_CREDIT              4095 /* Maximum credit supported: 256KB / 64B = 4096, capped at 4095 */
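+/*
+ * Illustrative arithmetic for the credit bounds above (64B granularity):
+ *   9KB jumbo frame: UpperBound((9*1024 + 54)/64) = UpperBound(144.8) = 145
+ *   32KB TSO packet: 32768/64 + 1 = 513
+ */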
+
+#endif /* _DCB_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index c84bfad..14ab654 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -54,6 +54,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
 	{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
 	{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
+	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
+	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
+	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
+	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
 	{"lsc_int", IXGBE_STAT(lsc_int)},
 	{"tx_busy", IXGBE_STAT(tx_busy)},
 	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
@@ -68,6 +72,9 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
 	{"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
 	{"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
+	{"hw_rsc_count", IXGBE_STAT(rsc_count)},
+	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
+	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
 	{"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
 	{"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
 	{"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
@@ -91,15 +98,39 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
 	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
 	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
+#ifdef IXGBE_FCOE
+	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
+	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
+	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
+	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
+	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
+	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
+#endif /* IXGBE_FCOE */
 };
 
 #define IXGBE_QUEUE_STATS_LEN \
                 ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
                  ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
                  (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
-#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
-#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBE_PB_STATS_LEN ( \
+                 (((struct ixgbe_adapter *)netdev->priv)->flags & \
+                 IXGBE_FLAG_DCB_ENABLED) ? \
+                 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+                  sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+                  sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+                  sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+                  / sizeof(u64) : 0)
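+/* the per-priority XON/XOFF counters are reported only when DCB is enabled */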
+#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
+                         IXGBE_PB_STATS_LEN + \
+                         IXGBE_QUEUE_STATS_LEN)
+
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Eeprom test    (offline)",
+	"Interrupt test (offline)", "Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
 
 static int ixgbe_get_settings(struct net_device *netdev,
                               struct ethtool_cmd *ecmd)
@@ -112,36 +143,51 @@ static int ixgbe_get_settings(struct net_device *netdev,
 	ecmd->supported = SUPPORTED_10000baseT_Full;
 	ecmd->autoneg = AUTONEG_ENABLE;
 	ecmd->transceiver = XCVR_EXTERNAL;
-	if (hw->phy.media_type == ixgbe_media_type_copper) {
+	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+	    (hw->phy.multispeed_fiber)) {
 		ecmd->supported |= (SUPPORTED_1000baseT_Full |
-		                    SUPPORTED_TP | SUPPORTED_Autoneg);
+		                    SUPPORTED_Autoneg);
 
-		ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
+		ecmd->advertising = ADVERTISED_Autoneg;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
-
-		ecmd->port = PORT_TP;
+		/*
+		 * It's possible that phy.autoneg_advertised may not be
+		 * set yet.  If so display what the default would be -
+		 * both 1G and 10G supported.
+		 */
+		if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
+					   ADVERTISED_10000baseT_Full)))
+			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
+					      ADVERTISED_1000baseT_Full);
+
+		if (hw->phy.media_type == ixgbe_media_type_copper) {
+			ecmd->supported |= SUPPORTED_TP;
+			ecmd->advertising |= ADVERTISED_TP;
+			ecmd->port = PORT_TP;
+		} else {
+			ecmd->supported |= SUPPORTED_FIBRE;
+			ecmd->advertising |= ADVERTISED_FIBRE;
+			ecmd->port = PORT_FIBRE;
+		}
 	} else if (hw->phy.media_type == ixgbe_media_type_backplane) {
 		/* Set as FIBRE until SERDES defined in kernel */
-		switch (hw->device_id) {
-		case IXGBE_DEV_ID_82598:
-			ecmd->supported |= (SUPPORTED_1000baseT_Full |
-				SUPPORTED_FIBRE);
-			ecmd->advertising = (ADVERTISED_10000baseT_Full |
-				ADVERTISED_1000baseT_Full |
-				ADVERTISED_FIBRE);
-			ecmd->port = PORT_FIBRE;
-			break;
-		case IXGBE_DEV_ID_82598_BX:
+		if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
 			ecmd->supported = (SUPPORTED_1000baseT_Full |
 					   SUPPORTED_FIBRE);
 			ecmd->advertising = (ADVERTISED_1000baseT_Full |
 					     ADVERTISED_FIBRE);
 			ecmd->port = PORT_FIBRE;
 			ecmd->autoneg = AUTONEG_DISABLE;
-			break;
+		} else {
+			ecmd->supported |= (SUPPORTED_1000baseT_Full |
+					    SUPPORTED_FIBRE);
+			ecmd->advertising = (ADVERTISED_10000baseT_Full |
+					     ADVERTISED_1000baseT_Full |
+					     ADVERTISED_FIBRE);
+			ecmd->port = PORT_FIBRE;
 		}
 	} else {
 		ecmd->supported |= SUPPORTED_FIBRE;
@@ -170,16 +216,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 advertised, old;
-	s32 err;
+	s32 err = 0;
 
-	switch (hw->phy.media_type) {
-	case ixgbe_media_type_fiber:
-		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
-		    (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
-			return -EINVAL;
-		/* in this case we currently only support 10Gb/FULL */
-		break;
-	case ixgbe_media_type_copper:
+	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
+	    (hw->phy.multispeed_fiber)) {
 		/* 10000/copper and 1000/copper must autoneg
 		 * this function does not support any duplex forcing, but can
 		 * limit the advertising of the adapter to only 10000 or 1000 */
@@ -195,20 +235,24 @@ static int ixgbe_set_settings(struct net_device *netdev,
 			advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
 		if (old == advertised)
-			break;
+			return err;
 		/* this sets the link speed and restarts auto-neg */
-		err = hw->mac.ops.setup_link_speed(hw, advertised, true, true);
+		hw->mac.autotry_restart = true;
+		err = hw->mac.ops.setup_link(hw, advertised, true, true);
 		if (err) {
 			DPRINTK(PROBE, INFO,
 			        "setup link failed with code %d\n", err);
-			hw->mac.ops.setup_link_speed(hw, old, true, true);
+			hw->mac.ops.setup_link(hw, old, true, true);
 		}
-		break;
-	default:
-		break;
+	} else {
+		/* in this case we currently only support 10Gb/FULL */
+		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
+		    (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+			return -EINVAL;
 	}
 
-	return 0;
+	return err;
 }
 
 static void ixgbe_get_pauseparam(struct net_device *netdev,
@@ -217,8 +261,24 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	pause->autoneg = (hw->fc.current_mode == ixgbe_fc_full ? 1 : 0);
+	/*
+	 * Flow Control Autoneg isn't on if
+	 *  - we didn't ask for it, OR
+	 *  - it failed (we know this because both tx and rx pause are off)
+	 */
+	if (hw->fc.disable_fc_autoneg ||
+	    (hw->fc.current_mode == ixgbe_fc_none))
+		pause->autoneg = 0;
+	else
+		pause->autoneg = 1;
 
+#ifdef CONFIG_DCB
+	if (hw->fc.current_mode == ixgbe_fc_pfc) {
+		pause->rx_pause = 0;
+		pause->tx_pause = 0;
+	}
+
+#endif
 	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
 		pause->rx_pause = 1;
 	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -234,20 +294,46 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_fc_info fc;
+
+#ifdef CONFIG_DCB
+	if (adapter->dcb_cfg.pfc_mode_enable ||
+		((hw->mac.type == ixgbe_mac_82598EB) &&
+		(adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
+		return -EINVAL;
 
-	if ((pause->autoneg == AUTONEG_ENABLE) ||
-	    (pause->rx_pause && pause->tx_pause))
-		hw->fc.requested_mode = ixgbe_fc_full;
+#endif
+
+	fc = hw->fc;
+
+	if (pause->autoneg != AUTONEG_ENABLE)
+		fc.disable_fc_autoneg = true;
+	else
+		fc.disable_fc_autoneg = false;
+
+	if (pause->rx_pause && pause->tx_pause)
+		fc.requested_mode = ixgbe_fc_full;
 	else if (pause->rx_pause && !pause->tx_pause)
-		hw->fc.requested_mode = ixgbe_fc_rx_pause;
+		fc.requested_mode = ixgbe_fc_rx_pause;
 	else if (!pause->rx_pause && pause->tx_pause)
-		hw->fc.requested_mode = ixgbe_fc_tx_pause;
+		fc.requested_mode = ixgbe_fc_tx_pause;
 	else if (!pause->rx_pause && !pause->tx_pause)
-		hw->fc.requested_mode = ixgbe_fc_none;
+		fc.requested_mode = ixgbe_fc_none;
 	else
 		return -EINVAL;
 
-	hw->mac.ops.setup_fc(hw, 0);
+#ifdef CONFIG_DCB
+	adapter->last_lfc_mode = fc.requested_mode;
+#endif
+
+	/* if anything changed, update hw->fc and apply the new settings */
+	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
+		hw->fc = fc;
+		if (netif_running(netdev))
+			ixgbe_reinit_locked(adapter);
+		else
+			ixgbe_reset(adapter);
+	}
 
 	return 0;
 }
@@ -262,9 +348,9 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	if (data)
-		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+		adapter->flags2 |= IXGBE_FLAG_RX_CSUM_ENABLED;
 	else
-		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+		adapter->flags2 &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 
 	if (netif_running(netdev))
 		ixgbe_reinit_locked(adapter);
@@ -681,6 +767,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 	strncpy(drvinfo->fw_version, firmware_version, 32);
 	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
 	drvinfo->n_stats = IXGBE_STATS_LEN;
+	drvinfo->testinfo_len = IXGBE_TEST_LEN;
 	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
 }
 
@@ -706,9 +793,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                                struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *temp_ring;
-	int i, err;
+	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+	int i, err = 0;
 	u32 new_rx_count, new_tx_count;
+	bool need_update = false;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
@@ -727,78 +815,96 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		return 0;
 	}
 
-	temp_ring = kcalloc(adapter->num_tx_queues,
-	                    sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!temp_ring)
-		return -ENOMEM;
-
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 		msleep(1);
 
-	if (new_tx_count != adapter->tx_ring->count) {
+	if (!netif_running(adapter->netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].count = new_tx_count;
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].count = new_rx_count;
+		adapter->tx_ring_count = new_tx_count;
+		adapter->rx_ring_count = new_rx_count;
+		goto err_setup;
+	}
+
+	temp_tx_ring = kcalloc(adapter->num_tx_queues,
+	                       sizeof(struct ixgbe_ring), GFP_KERNEL);
+	if (!temp_tx_ring) {
+		err = -ENOMEM;
+		goto err_setup;
+	}
+
+	if (new_tx_count != adapter->tx_ring_count) {
+		memcpy(temp_tx_ring, adapter->tx_ring,
+		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			temp_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
+			temp_tx_ring[i].count = new_tx_count;
+			err = ixgbe_setup_tx_resources(adapter,
+			                               &temp_tx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
 					ixgbe_free_tx_resources(adapter,
-					                        &temp_ring[i]);
+					                        &temp_tx_ring[i]);
 				}
 				goto err_setup;
 			}
-			temp_ring[i].v_idx = adapter->tx_ring[i].v_idx;
 		}
-		if (netif_running(netdev))
-			ixgbe_close(netdev);
-		ixgbe_reset_interrupt_capability(adapter);
-		ixgbe_napi_del_all(adapter);
-		kfree(adapter->tx_ring);
-		adapter->tx_ring = temp_ring;
-		temp_ring = NULL;
-		adapter->tx_ring_count = new_tx_count;
+		need_update = true;
 	}
 
-	temp_ring = kcalloc(adapter->num_rx_queues,
-	                    sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!temp_ring) {
-		if (netif_running(netdev))
-			ixgbe_open(netdev);
-		return -ENOMEM;
+	temp_rx_ring = kcalloc(adapter->num_rx_queues,
+	                       sizeof(struct ixgbe_ring), GFP_KERNEL);
+	if ((!temp_rx_ring) && (need_update)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
+		kfree(temp_tx_ring);
+		err = -ENOMEM;
+		goto err_setup;
 	}
 
-	if (new_rx_count != adapter->rx_ring->count) {
+	if (new_rx_count != adapter->rx_ring_count) {
+		memcpy(temp_rx_ring, adapter->rx_ring,
+		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			temp_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
+			temp_rx_ring[i].count = new_rx_count;
+			err = ixgbe_setup_rx_resources(adapter,
+			                               &temp_rx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
 					ixgbe_free_rx_resources(adapter,
-					                        &temp_ring[i]);
+					                      &temp_rx_ring[i]);
 				}
 				goto err_setup;
 			}
-			temp_ring[i].v_idx = adapter->rx_ring[i].v_idx;
 		}
-		if (netif_running(netdev))
-			ixgbe_close(netdev);
-		ixgbe_reset_interrupt_capability(adapter);
-		ixgbe_napi_del_all(adapter);
-		kfree(adapter->rx_ring);
-		adapter->rx_ring = temp_ring;
-		temp_ring = NULL;
-		adapter->rx_ring_count = new_rx_count;
+		need_update = true;
 	}
 
-	/* success! */
-	err = 0;
-err_setup:
-	ixgbe_init_interrupt_scheme(adapter);
+	/* if rings need to be updated, here's the place to do it in one shot */
+	if (need_update) {
+		ixgbe_down(adapter);
 
-	if (netif_running(netdev))
-		ixgbe_open(netdev);
+		/* tx */
+		if (new_tx_count != adapter->tx_ring_count) {
+			kfree(adapter->tx_ring);
+			adapter->tx_ring = temp_tx_ring;
+			temp_tx_ring = NULL;
+			adapter->tx_ring_count = new_tx_count;
+		}
 
+		/* rx */
+		if (new_rx_count != adapter->rx_ring_count) {
+			kfree(adapter->rx_ring);
+			adapter->rx_ring = temp_rx_ring;
+			temp_rx_ring = NULL;
+			adapter->rx_ring_count = new_rx_count;
+		}
+		ixgbe_up(adapter);
+	}
+err_setup:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;
 }
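
Note the ordering in the running case: the replacement rings are fully
allocated with the new descriptor counts before ixgbe_down() is called,
so a failed allocation leaves the active rings untouched; when the
interface is down, only the counts are recorded and the rings are built
with the new sizes on the next open. A typical resize:

    ethtool -G eth0 rx 4096 tx 4096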
@@ -835,6 +941,16 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			data[i + k] = queue_stat[k];
 		i += k;
 	}
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
+			data[i++] = adapter->stats.pxontxc[j];
+			data[i++] = adapter->stats.pxofftxc[j];
+		}
+		for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
+			data[i++] = adapter->stats.pxonrxc[j];
+			data[i++] = adapter->stats.pxoffrxc[j];
+		}
+	}
 }
 
 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
@@ -845,6 +961,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 	int i;
 
 	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *ixgbe_gstrings_test,
+		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+		break;
 	case ETH_SS_STATS:
 		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
 			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
@@ -863,11 +983,849 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
+		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+			for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+				sprintf(p, "tx_pb_%u_pxon", i);
+				p += ETH_GSTRING_LEN;
+				sprintf(p, "tx_pb_%u_pxoff", i);
+				p += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
+				sprintf(p, "rx_pb_%u_pxon", i);
+				p += ETH_GSTRING_LEN;
+				sprintf(p, "rx_pb_%u_pxoff", i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
 		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
 	}
 }
 
+static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	bool link_up;
+	u32 link_speed = 0;
+	*data = 0;
+
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+	if (link_up)
+		return *data;
+	else
+		*data = 1;
+	return *data;
+}
+
+/* ethtool register test data */
+struct ixgbe_reg_test {
+	u16 reg;
+	u8  array_len;
+	u8  test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables.  We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST	1
+#define SET_READ_TEST	2
+#define WRITE_NO_TEST	3
+#define TABLE32_TEST	4
+#define TABLE64_TEST_LO	5
+#define TABLE64_TEST_HI	6
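+
+/* ixgbe_reg_test() below walks each entry with the stride implied by
+ * the layout: 0x40 bytes per register-array element, 4 bytes per
+ * 32-bit table entry and 8 bytes per 64-bit table entry. */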
+
+/* default 82599 register test */
+static struct ixgbe_reg_test reg_test_82599[] = {
+	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
+	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ 0, 0, 0, 0 }
+};
+
+/* default 82598 register test */
+static struct ixgbe_reg_test reg_test_82598[] = {
+	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	/* Enable all four RX queues before testing. */
+	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+	/* RDH is read-only for 82598, only test RDT. */
+	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
+	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
+	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
+	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
+	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ 0, 0, 0, 0 }
+};
+
+#define REG_PATTERN_TEST(R, M, W)                                             \
+{                                                                             \
+	u32 pat, val, before;                                                 \
+	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {                       \
+		before = readl(adapter->hw.hw_addr + R);                      \
+		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
+		val = readl(adapter->hw.hw_addr + R);                         \
+		if (val != (_test[pat] & W & M)) {                            \
+			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
+					  "0x%08X expected 0x%08X\n",         \
+				R, val, (_test[pat] & W & M));                \
+			*data = R;                                            \
+			writel(before, adapter->hw.hw_addr + R);              \
+			return 1;                                             \
+		}                                                             \
+		writel(before, adapter->hw.hw_addr + R);                      \
+	}                                                                     \
+}
+
+#define REG_SET_AND_CHECK(R, M, W)                                            \
+{                                                                             \
+	u32 val, before;                                                      \
+	before = readl(adapter->hw.hw_addr + R);                              \
+	writel((W & M), (adapter->hw.hw_addr + R));                           \
+	val = readl(adapter->hw.hw_addr + R);                                 \
+	if ((W & M) != (val & M)) {                                           \
+		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
+				 "expected 0x%08X\n", R, (val & M), (W & M)); \
+		*data = R;                                                    \
+		writel(before, (adapter->hw.hw_addr + R));                    \
+		return 1;                                                     \
+	}                                                                     \
+	writel(before, (adapter->hw.hw_addr + R));                            \
+}
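+
+/* Example: IXGBE_PFCTOP (M = W = 0xFFFFFFFF) must read back every
+ * pattern exactly, while IXGBE_RDBAL (M = W = 0xFFFFFF80) compares
+ * only the bits above the 128-byte alignment the hardware enforces. */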
+
+static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+	struct ixgbe_reg_test *test;
+	u32 value, before, after;
+	u32 i, toggle;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+		toggle = 0x7FFFF30F;
+		test = reg_test_82599;
+	} else {
+		toggle = 0x7FFFF3FF;
+		test = reg_test_82598;
+	}
+
+	/*
+	 * Because the status register is such a special case,
+	 * we handle it separately from the rest of the register
+	 * tests.  Some bits are read-only, some toggle, and some
+	 * are writeable on newer MACs.
+	 */
+	before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
+	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
+	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+	if (value != after) {
+		DPRINTK(DRV, ERR, "failed STATUS register test got: "
+		        "0x%08X expected: 0x%08X\n", after, value);
+		*data = 1;
+		return 1;
+	}
+	/* restore previous status */
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+
+	/*
+	 * Perform the remainder of the register test, looping through
+	 * the test table until we either fail or reach the null entry.
+	 */
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				REG_PATTERN_TEST(test->reg + (i * 0x40),
+						test->mask,
+						test->write);
+				break;
+			case SET_READ_TEST:
+				REG_SET_AND_CHECK(test->reg + (i * 0x40),
+						test->mask,
+						test->write);
+				break;
+			case WRITE_NO_TEST:
+				writel(test->write,
+				       (adapter->hw.hw_addr + test->reg)
+				       + (i * 0x40));
+				break;
+			case TABLE32_TEST:
+				REG_PATTERN_TEST(test->reg + (i * 4),
+						test->mask,
+						test->write);
+				break;
+			case TABLE64_TEST_LO:
+				REG_PATTERN_TEST(test->reg + (i * 8),
+						test->mask,
+						test->write);
+				break;
+			case TABLE64_TEST_HI:
+				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+						test->mask,
+						test->write);
+				break;
+			}
+		}
+		test++;
+	}
+
+	*data = 0;
+	return 0;
+}
+
+static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	if (hw->eeprom.ops.validate_checksum(hw, NULL))
+		*data = 1;
+	else
+		*data = 0;
+	return *data;
+}
+
+static irqreturn_t ixgbe_test_intr(int irq, void *data, struct pt_regs *regs)
+{
+	struct net_device *netdev = (struct net_device *) data;
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
+
+	return IRQ_HANDLED;
+}
+
+static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 mask, i = 0, shared_int = true;
+	u32 irq = adapter->pdev->irq;
+
+	*data = 0;
+
+	/* Hook up test interrupt handler just for this test */
+	if (adapter->msix_entries) {
+		/* NOTE: we don't test MSI-X interrupts here, yet */
+		return 0;
+	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+		shared_int = false;
+		if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
+				netdev)) {
+			*data = 1;
+			return -1;
+		}
+	} else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
+	                        netdev->name, netdev)) {
+		shared_int = false;
+	} else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
+	                       netdev->name, netdev)) {
+		*data = 1;
+		return -1;
+	}
+	DPRINTK(HW, INFO, "testing %s interrupt\n",
+		(shared_int ? "shared" : "unshared"));
+
+	/* Disable all the interrupts */
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Test each interrupt */
+	for (; i < 10; i++) {
+		/* Interrupt to test */
+		mask = 1 << i;
+
+		if (!shared_int) {
+			/*
+			 * Disable the interrupts to be reported in
+			 * the cause register and then force the same
+			 * interrupt and see if one gets posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+			                ~mask & 0x00007FFF);
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+			                ~mask & 0x00007FFF);
+			msleep(10);
+
+			if (adapter->test_icr & mask) {
+				*data = 3;
+				break;
+			}
+		}
+
+		/*
+		 * Enable the interrupt to be reported in the cause
+		 * register and then force the same interrupt and see
+		 * if one gets posted.  If an interrupt was not posted
+		 * to the bus, the test failed.
+		 */
+		adapter->test_icr = 0;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+		msleep(10);
+
+		if (!(adapter->test_icr & mask)) {
+			*data = 4;
+			break;
+		}
+
+		if (!shared_int) {
+			/*
+			 * Disable the other interrupts to be reported in
+			 * the cause register and then force the other
+			 * interrupts and see if any get posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+			                ~mask & 0x00007FFF);
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
+			                ~mask & 0x00007FFF);
+			msleep(10);
+
+			if (adapter->test_icr) {
+				*data = 5;
+				break;
+			}
+		}
+	}
+
+	/* Disable all the interrupts */
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Unhook test interrupt handler */
+	free_irq(irq, netdev);
+
+	return *data;
+}
+
+static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	u32 reg_ctl;
+	int i;
+
+	/* shut down the DMA engines now so they can be reinitialized later */
+
+	/* first Rx */
+	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
+	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
+	reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
+
+	/* now Tx */
+	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
+	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+		reg_ctl &= ~IXGBE_DMATXCTL_TE;
+		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
+	}
+
+	ixgbe_reset(adapter);
+
+	if (tx_ring->desc && tx_ring->tx_buffer_info) {
+		for (i = 0; i < tx_ring->count; i++) {
+			struct ixgbe_tx_buffer *buf =
+					&(tx_ring->tx_buffer_info[i]);
+			if (buf->dma)
+				pci_unmap_single(pdev, buf->dma, buf->length,
+				                 PCI_DMA_TODEVICE);
+			if (buf->skb)
+				dev_kfree_skb(buf->skb);
+		}
+	}
+
+	if (rx_ring->desc && rx_ring->rx_buffer_info) {
+		for (i = 0; i < rx_ring->count; i++) {
+			struct ixgbe_rx_buffer *buf =
+					&(rx_ring->rx_buffer_info[i]);
+			if (buf->dma)
+				pci_unmap_single(pdev, buf->dma,
+						 IXGBE_RXBUFFER_2048,
+						 PCI_DMA_FROMDEVICE);
+			if (buf->skb)
+				dev_kfree_skb(buf->skb);
+		}
+	}
+
+	if (tx_ring->desc) {
+		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
+		                    tx_ring->dma);
+		tx_ring->desc = NULL;
+	}
+	if (rx_ring->desc) {
+		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
+		                    rx_ring->dma);
+		rx_ring->desc = NULL;
+	}
+
+	kfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	kfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	return;
+}
+
+static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	u32 rctl, reg_data;
+	int i, ret_val;
+
+	/* Setup Tx descriptor ring and Tx buffers */
+
+	if (!tx_ring->count)
+		tx_ring->count = IXGBE_DEFAULT_TXD;
+
+	tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
+	                                  sizeof(struct ixgbe_tx_buffer),
+	                                  GFP_KERNEL);
+	if (!(tx_ring->tx_buffer_info)) {
+		ret_val = 1;
+		goto err_nomem;
+	}
+
+	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+	if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+						   &tx_ring->dma))) {
+		ret_val = 2;
+		goto err_nomem;
+	}
+	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
+
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
+			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
+			((u64) tx_ring->dma >> 32));
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
+			tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
+
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+	reg_data |= IXGBE_HLREG0_TXPADEN;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
+		reg_data |= IXGBE_DMATXCTL_TE;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
+	}
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
+	reg_data |= IXGBE_TXDCTL_ENABLE;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
+
+	for (i = 0; i < tx_ring->count; i++) {
+		union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+		struct sk_buff *skb;
+		unsigned int size = 1024;
+
+		skb = alloc_skb(size, GFP_KERNEL);
+		if (!skb) {
+			ret_val = 3;
+			goto err_nomem;
+		}
+		skb_put(skb, size);
+		tx_ring->tx_buffer_info[i].skb = skb;
+		tx_ring->tx_buffer_info[i].length = skb->len;
+		tx_ring->tx_buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, skb->len,
+			               PCI_DMA_TODEVICE);
+		desc->read.buffer_addr =
+		                    cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
+		desc->read.cmd_type_len = cpu_to_le32(skb->len);
+		desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
+		                                       IXGBE_TXD_CMD_IFCS |
+		                                       IXGBE_TXD_CMD_RS);
+		desc->read.olinfo_status = 0;
+		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+			desc->read.olinfo_status |=
+			                (skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	}
+
+	/* Setup Rx Descriptor ring and Rx buffers */
+
+	if (!rx_ring->count)
+		rx_ring->count = IXGBE_DEFAULT_RXD;
+
+	rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
+	                                  sizeof(struct ixgbe_rx_buffer),
+	                                  GFP_KERNEL);
+	if (!(rx_ring->rx_buffer_info)) {
+		ret_val = 4;
+		goto err_nomem;
+	}
+
+	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+	if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+						   &rx_ring->dma))) {
+		ret_val = 5;
+		goto err_nomem;
+	}
+	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+
+	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
+			((u64)rx_ring->dma & 0xFFFFFFFF));
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
+			((u64) rx_ring->dma >> 32));
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
+
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+	reg_data &= ~IXGBE_HLREG0_LPBK;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
+#define IXGBE_RDRXCTL_RDMTS_MASK    0x00000003 /* Receive Descriptor Minimum
+                                                  Threshold Size mask */
+	reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
+
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
+#define IXGBE_MCSTCTRL_MO_MASK      0x00000003 /* Multicast Offset mask */
+	reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
+	reg_data |= adapter->hw.mac.mc_filter_type;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
+
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
+	reg_data |= IXGBE_RXDCTL_ENABLE;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+		int j = adapter->rx_ring[0].reg_idx;
+		u32 k;
+		for (k = 0; k < 10; k++) {
+			if (IXGBE_READ_REG(&adapter->hw,
+			                   IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+				break;
+			else
+				msleep(1);
+		}
+	}
+
+	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
+
+	for (i = 0; i < rx_ring->count; i++) {
+		union ixgbe_adv_rx_desc *rx_desc =
+		                                 IXGBE_RX_DESC_ADV(*rx_ring, i);
+		struct sk_buff *skb;
+
+		skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
+		if (!skb) {
+			ret_val = 6;
+			goto err_nomem;
+		}
+		skb_reserve(skb, NET_IP_ALIGN);
+		rx_ring->rx_buffer_info[i].skb = skb;
+		rx_ring->rx_buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
+			               PCI_DMA_FROMDEVICE);
+		rx_desc->read.pkt_addr =
+				cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
+		memset(skb->data, 0x00, skb->len);
+	}
+
+	return 0;
+
+err_nomem:
+	ixgbe_free_desc_rings(adapter);
+	return ret_val;
+}
+
+static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 reg_data;
+
+	/* right now we only support MAC loopback in the driver */
+
+	/* Setup MAC loopback */
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+	reg_data |= IXGBE_HLREG0_LPBK;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
+	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
+	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
+
+	/* Disable Atlas Tx lanes; re-enabled in reset path */
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		u8 atlas;
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
+		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
+		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
+		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
+
+		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
+		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
+	}
+
+	return 0;
+}
+
+static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
+{
+	u32 reg_data;
+
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+	reg_data &= ~IXGBE_HLREG0_LPBK;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
+}
+
+static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
+                                      unsigned int frame_size)
+{
+	memset(skb->data, 0xFF, frame_size);
+	frame_size &= ~1;
+	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
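+
+/* For a 1024-byte frame the layout is: bytes 0-511 0xFF, bytes
+ * 512-1022 0xAA, marker 0xBE at offset 522 and 0xAF at offset 524;
+ * the check below samples only offsets 3, 522 and 524. */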
+
+static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
+                                    unsigned int frame_size)
+{
+	frame_size &= ~1;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+		    (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+			return 0;
+		}
+	}
+	return 13;
+}
+
+static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
+	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i, j, k, l, lc, good_cnt, ret_val = 0;
+	unsigned long time;
+
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
+
+	/*
+	 * Calculate the loop count based on the largest descriptor ring
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop
+	 */
+
+	if (rx_ring->count <= tx_ring->count)
+		lc = ((tx_ring->count / 64) * 2) + 1;
+	else
+		lc = ((rx_ring->count / 64) * 2) + 1;
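+	/* e.g. a pair of 512-entry rings gives lc = (512 / 64) * 2 + 1 = 17
+	 * passes of 64 frames each, enough to wrap each ring about twice */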
+
+	k = l = 0;
+	for (j = 0; j <= lc; j++) {
+		for (i = 0; i < 64; i++) {
+			ixgbe_create_lbtest_frame(
+					tx_ring->tx_buffer_info[k].skb,
+					1024);
+			pci_dma_sync_single_for_device(pdev,
+				tx_ring->tx_buffer_info[k].dma,
+				tx_ring->tx_buffer_info[k].length,
+				PCI_DMA_TODEVICE);
+			if (unlikely(++k == tx_ring->count))
+				k = 0;
+		}
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
+		msleep(200);
+		/* set the start time for the receive */
+		time = jiffies;
+		good_cnt = 0;
+		do {
+			/* receive the sent packets */
+			pci_dma_sync_single_for_cpu(pdev,
+					rx_ring->rx_buffer_info[l].dma,
+					IXGBE_RXBUFFER_2048,
+					PCI_DMA_FROMDEVICE);
+			ret_val = ixgbe_check_lbtest_frame(
+					rx_ring->rx_buffer_info[l].skb, 1024);
+			if (!ret_val)
+				good_cnt++;
+			if (++l == rx_ring->count)
+				l = 0;
+			/*
+			 * time + 20 msecs (200 msecs on 2.4 kernels) is
+			 * more than enough to complete the receives; if it
+			 * is exceeded, break out and flag an error
+			 */
+		} while (good_cnt < 64 && jiffies < (time + 20));
+		if (good_cnt != 64) {
+			/* reuse the mis-compare error code */
+			ret_val = 13;
+			break;
+		}
+		if (jiffies >= (time + 20)) {
+			/* Error code for time out error */
+			ret_val = 14;
+			break;
+		}
+	}
+
+	return ret_val;
+}
+
+static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
+{
+	*data = ixgbe_setup_desc_rings(adapter);
+	if (*data)
+		goto out;
+	*data = ixgbe_setup_loopback_test(adapter);
+	if (*data)
+		goto err_loopback;
+	*data = ixgbe_run_loopback_test(adapter);
+	ixgbe_loopback_cleanup(adapter);
+
+err_loopback:
+	ixgbe_free_desc_rings(adapter);
+out:
+	return *data;
+}
+
+static void ixgbe_diag_test(struct net_device *netdev,
+                            struct ethtool_test *eth_test, u64 *data)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	bool if_running = netif_running(netdev);
+
+	set_bit(__IXGBE_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		/* Offline tests */
+
+		DPRINTK(HW, INFO, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result */
+		if (ixgbe_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			dev_close(netdev);
+		else
+			ixgbe_reset(adapter);
+
+		DPRINTK(HW, INFO, "register testing starting\n");
+		if (ixgbe_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		ixgbe_reset(adapter);
+		DPRINTK(HW, INFO, "eeprom testing starting\n");
+		if (ixgbe_eeprom_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		ixgbe_reset(adapter);
+		DPRINTK(HW, INFO, "interrupt testing starting\n");
+		if (ixgbe_intr_test(adapter, &data[2]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		ixgbe_reset(adapter);
+		DPRINTK(HW, INFO, "loopback testing starting\n");
+		if (ixgbe_loopback_test(adapter, &data[3]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		ixgbe_reset(adapter);
+
+		clear_bit(__IXGBE_TESTING, &adapter->state);
+		if (if_running)
+			dev_open(netdev);
+	} else {
+		DPRINTK(HW, INFO, "online testing starting\n");
+		/* Online tests */
+		if (ixgbe_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Online tests aren't run; pass by default */
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0;
+		data[3] = 0;
+
+		clear_bit(__IXGBE_TESTING, &adapter->state);
+	}
+	msleep_interruptible(4 * 1000);
+}
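
These hooks back the ethtool self-test interface: ethtool -t eth0
offline runs all five tests, with data[0] holding the register test
result, data[1] the EEPROM checksum test, data[2] the interrupt test,
data[3] the MAC loopback test and data[4] the link test. An online test
(ethtool -t eth0 online) runs only the link check and reports the other
four as passing.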
+
+static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
+                               struct ethtool_wolinfo *wol)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int retval = 1;
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82599_KX4:
+		retval = 0;
+		break;
+	default:
+		wol->supported = 0;
+	}
+
+	return retval;
+}
 
 static void ixgbe_get_wol(struct net_device *netdev,
                           struct ethtool_wolinfo *wol)
@@ -878,7 +1836,8 @@ static void ixgbe_get_wol(struct net_device *netdev,
 	                 WAKE_BCAST | WAKE_MAGIC;
 	wol->wolopts = 0;
 
-	if (!device_can_wakeup(&adapter->pdev->dev))
+	if (ixgbe_wol_exclusion(adapter, wol) ||
+	    !device_can_wakeup(&adapter->pdev->dev))
 		return;
 
 	if (adapter->wol & IXGBE_WUFC_EX)
@@ -900,6 +1859,9 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
 		return -EOPNOTSUPP;
 
+	if (ixgbe_wol_exclusion(adapter, wol))
+		return wol->wolopts ? -EOPNOTSUPP : 0;
+
 	adapter->wol = 0;
 
 	if (wol->wolopts & WAKE_UCAST)
@@ -957,7 +1919,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
 
 	/* only valid if in constant ITR mode */
-	switch (adapter->itr_setting) {
+	switch (adapter->rx_itr_setting) {
 	case 0:
 		/* throttling disabled */
 		ec->rx_coalesce_usecs = 0;
@@ -968,9 +1930,25 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 		break;
 	default:
 		/* fixed interrupt rate mode */
-		ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
+		ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
+		break;
+	}
+
+	/* only valid if in constant ITR mode */
+	switch (adapter->tx_itr_setting) {
+	case 0:
+		/* throttling disabled */
+		ec->tx_coalesce_usecs = 0;
+		break;
+	case 1:
+		/* dynamic ITR mode */
+		ec->tx_coalesce_usecs = 1;
+		break;
+	default:
+		ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
 		break;
 	}
+
 	return 0;
 }
 
@@ -978,8 +1956,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
                               struct ethtool_coalesce *ec)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_q_vector *q_vector;
 	int i;
 
+	/*
+	 * don't accept tx-specific changes if we've got mixed RxTx vectors;
+	 * test and bail out here before any rx values are changed
+	 */
+	if ((1000000/ec->tx_coalesce_usecs) != adapter->tx_eitr_param &&
+	    adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+		return -EINVAL;
+
 	if (ec->tx_max_coalesced_frames_irq)
 		adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
 
@@ -990,41 +1977,75 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 			return -EINVAL;
 
 		/* store the value in ints/second */
-		adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
+		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
 
 		/* static value of interrupt rate */
-		adapter->itr_setting = adapter->eitr_param;
+		adapter->rx_itr_setting = adapter->rx_eitr_param;
 		/* clear the lower bit as its used for dynamic state */
-		adapter->itr_setting &= ~1;
+		adapter->rx_itr_setting &= ~1;
 	} else if (ec->rx_coalesce_usecs == 1) {
 		/* 1 means dynamic mode */
-		adapter->eitr_param = 20000;
-		adapter->itr_setting = 1;
+		adapter->rx_eitr_param = 20000;
+		adapter->rx_itr_setting = 1;
 	} else {
 		/*
 		 * any other value means disable eitr, which is best
 		 * served by setting the interrupt rate very high
 		 */
-		adapter->eitr_param = IXGBE_MAX_INT_RATE;
-		adapter->itr_setting = 0;
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+			adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
+		else
+			adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
+		adapter->rx_itr_setting = 0;
 	}
 
-	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
-		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
-		if (q_vector->txr_count && !q_vector->rxr_count)
-			/* tx vector gets half the rate */
-			q_vector->eitr = (adapter->eitr_param >> 1);
-		else
-			/* rx only or mixed */
-			q_vector->eitr = adapter->eitr_param;
-		ixgbe_write_eitr(adapter, i,
-		                 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+	if (ec->tx_coalesce_usecs > 1) {
+		/* check the limits */
+		if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
+		    (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
+			return -EINVAL;
+
+		/* store the value in ints/second */
+		adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;
+
+		/* static value of interrupt rate */
+		adapter->tx_itr_setting = adapter->tx_eitr_param;
+
+		/* clear the lower bit as it's used for dynamic state */
+		adapter->tx_itr_setting &= ~1;
+	} else if (ec->tx_coalesce_usecs == 1) {
+		/* 1 means dynamic mode */
+		adapter->tx_eitr_param = 10000;
+		adapter->tx_itr_setting = 1;
+	} else {
+		adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
+		adapter->tx_itr_setting = 0;
+	}
+
+	/* MSI/MSIx Interrupt Mode */
+	if (adapter->flags &
+	    (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
+		int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+		for (i = 0; i < num_vectors; i++) {
+			q_vector = adapter->q_vector[i];
+			if (q_vector->txr_count && !q_vector->rxr_count)
+				/* tx only */
+				q_vector->eitr = adapter->tx_eitr_param;
+			else
+				/* rx only or mixed */
+				q_vector->eitr = adapter->rx_eitr_param;
+			ixgbe_write_eitr(q_vector);
+		}
+	/* Legacy Interrupt Mode */
+	} else {
+		q_vector = adapter->q_vector[0];
+		q_vector->eitr = adapter->rx_eitr_param;
+		ixgbe_write_eitr(q_vector);
 	}
 
 	return 0;
 }
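
The usecs arguments convert directly to interrupt rates: rx-usecs 50
becomes 1000000/50 = 20000 ints/sec, while the special values 1
(dynamic ITR) and 0 (throttling disabled) select modes rather than
rates. For example:

    ethtool -C eth0 rx-usecs 50 tx-usecs 100

Also note the guard at the top of the function: when rx and tx share a
queue vector, any tx-usecs value that would change the current tx rate
is rejected with -EINVAL, since the vector only has one EITR setting.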
 
-
 static struct ethtool_ops ixgbe_ethtool_ops = {
 	.get_settings           = ixgbe_get_settings,
 	.set_settings           = ixgbe_set_settings,
@@ -1051,6 +2072,7 @@ static struct ethtool_ops ixgbe_ethtool_ops = {
 	.set_msglevel           = ixgbe_set_msglevel,
 	.get_tso                = ethtool_op_get_tso,
 	.set_tso                = ixgbe_set_tso,
+	.self_test              = ixgbe_diag_test,
 	.get_strings            = ixgbe_get_strings,
 	.phys_id                = ixgbe_phys_id,
 	.get_stats_count        = ixgbe_get_stats_count,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
new file mode 100644
index 0000000..84b0f62
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -0,0 +1,67 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_FCOE_H
+#define _IXGBE_FCOE_H
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fcoe.h>
+
+/* shift bits within STAT for FCSTAT */
+#define IXGBE_RXDADV_FCSTAT_SHIFT	4
+
+/* ddp user buffer */
+#define IXGBE_BUFFCNT_MAX	256	/* 8 bits bufcnt */
+#define IXGBE_FCPTR_ALIGN	16
+#define IXGBE_FCPTR_MAX	(IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
+#define IXGBE_FCBUFF_4KB	0x0
+#define IXGBE_FCBUFF_8KB	0x1
+#define IXGBE_FCBUFF_16KB	0x2
+#define IXGBE_FCBUFF_64KB	0x3
+#define IXGBE_FCBUFF_MAX	65536	/* 64KB max */
+#define IXGBE_FCBUFF_MIN	4096	/* 4KB min */
+#define IXGBE_FCOE_DDP_MAX	512	/* 9 bits xid */
+
+/* fcerr */
+#define IXGBE_FCERR_BADCRC       0x00100000
+
+struct ixgbe_fcoe_ddp {
+	int len;
+	u32 err;
+	unsigned int sgc;
+	struct scatterlist *sgl;
+	dma_addr_t udp;
+	unsigned long *udl;
+};
+
+struct ixgbe_fcoe {
+	spinlock_t lock;
+	struct pci_pool *pool;
+	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+};
+
+#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 01aa91a..08e7743 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -34,6 +34,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
@@ -48,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                               "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.8-k3"
+#define DRV_VERSION "2.0.44-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
 
@@ -74,6 +75,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
 	 board_82598 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
 	 board_82598 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
+	 board_82598 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
 	 board_82598 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
@@ -90,14 +93,34 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
 	 board_82598 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
 	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
+	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
 	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
+	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
+	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
+	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
+	 board_82599 },
 
 	/* required last entry */
 	{0, }
 };
 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
+#ifdef CONFIG_IXGBE_DCA
+static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
+                            void *p);
+static struct notifier_block dca_notifier = {
+	.notifier_call = ixgbe_notify_dca,
+	.next          = NULL,
+	.priority      = 0
+};
+#endif
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
@@ -174,6 +197,22 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
 	}
 }
 
+static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+                                          u64 qmask)
+{
+	u32 mask;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+	} else {
+		mask = (qmask & 0xFFFFFFFF);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+		mask = (qmask >> 32);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+	}
+}
+
 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              struct ixgbe_tx_buffer
                                              *tx_buffer_info)
@@ -190,6 +229,56 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 	/* tx_buffer_info must be completely set up in the transmit path */
 }
 
+/**
+ * ixgbe_tx_is_paused - check if the tx ring is paused
+ * @adapter: the ixgbe adapter
+ * @tx_ring: the corresponding tx_ring
+ *
+ * When not in DCB mode this simply checks TFCS.TXOFF; in DCB mode it
+ * checks the TXOFF bit for the traffic class (TC) this tx_ring
+ * belongs to.
+ *
+ * Returns: true if paused
+ */
+static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
+                                      struct ixgbe_ring *tx_ring)
+{
+	u32 txoff = IXGBE_TFCS_TXOFF;
+#ifdef CONFIG_IXGBE_DCB
+	int tc;
+
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		int reg_idx = tx_ring->reg_idx;
+		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+
+		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+			tc = reg_idx >> 2;
+			txoff = IXGBE_TFCS_TXOFF0;
+		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+			tc = 0;
+			txoff = IXGBE_TFCS_TXOFF;
+			if (dcb_i == 8) {
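+				/*
+				 * with 8 TCs the 128 Tx queues split as:
+				 * reg_idx 0-63 -> TC0/TC1 (32 each),
+				 * 64-95 -> TC2/TC3 (16 each) and
+				 * 96-127 -> TC4-TC7 (8 each)
+				 */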
+				/* TC0, TC1 */
+				tc = reg_idx >> 5;
+				if (tc == 2) /* TC2, TC3 */
+					tc += (reg_idx - 64) >> 4;
+				else if (tc == 3) /* TC4, TC5, TC6, TC7 */
+					tc += 1 + ((reg_idx - 96) >> 3);
+			} else if (dcb_i == 4) {
+				/* TC0, TC1 */
+				tc = reg_idx >> 6;
+				if (tc == 1) {
+					tc += (reg_idx - 64) >> 5;
+					if (tc == 2) /* TC2, TC3 */
+						tc += (reg_idx - 96) >> 4;
+				}
+			}
+		}
+		txoff <<= tc;
+	}
+#endif
+	return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
+}
+
 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                        struct ixgbe_ring *tx_ring,
                                        unsigned int eop)
@@ -205,7 +294,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 	if ((head != tail) &&
 	    tx_ring->tx_buffer_info[eop].time_stamp &&
 	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
-	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
+	    !ixgbe_tx_is_paused(adapter, tx_ring)) {
 		/* detected Tx unit hang */
 		union ixgbe_adv_tx_desc *tx_desc;
 		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -237,16 +326,16 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
 
 static void ixgbe_tx_timeout(struct net_device *netdev);
+
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
- *
- * returns true if transmit work is done
  **/
-static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
+static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                                struct ixgbe_ring *tx_ring)
 {
+	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct net_device *netdev = adapter->netdev;
 	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
 	struct ixgbe_tx_buffer *tx_buffer_info;
@@ -269,12 +358,24 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 
 			if (cleaned && skb) {
 				unsigned int segs, bytecount;
+				unsigned int hlen = skb_headlen(skb);
 
 				/* gso_segs is currently only valid for tcp */
 				segs = skb_shinfo(skb)->gso_segs ?: 1;
+#ifdef IXGBE_FCOE
+				/* adjust for FCoE Sequence Offload */
+				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+				    && (skb->protocol == htons(ETH_P_FCOE)) &&
+				    skb_is_gso(skb)) {
+					hlen = skb_transport_offset(skb) +
+						sizeof(struct fc_frame_header) +
+						sizeof(struct fcoe_crc_eof);
+					segs = DIV_ROUND_UP(skb->len - hlen,
+						skb_shinfo(skb)->gso_size);
+				}
+#endif /* IXGBE_FCOE */
 				/* multiply data chunks by size of headers */
-				bytecount = ((segs - 1) * skb_headlen(skb)) +
-				            skb->len;
+				bytecount = ((segs - 1) * hlen) + skb->len;
 				total_packets += segs;
 				total_bytes += bytecount;
 			}
@@ -321,7 +422,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 
 	/* re-arm the interrupt */
 	if (count >= tx_ring->work_limit)
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
+		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
 
 	tx_ring->total_bytes += total_bytes;
 	tx_ring->total_packets += total_packets;
@@ -332,6 +433,113 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 	return (count < tx_ring->work_limit);
 }
 
+#ifdef CONFIG_IXGBE_DCA
+static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
+                                struct ixgbe_ring *rx_ring)
+{
+	u32 rxctrl;
+	int cpu = get_cpu();
+	int q = rx_ring - adapter->rx_ring;
+
+	if (rx_ring->cpu != cpu) {
+		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
+		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+			           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+		}
+		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
+		rx_ring->cpu = cpu;
+	}
+	put_cpu();
+}
+
+static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
+                                struct ixgbe_ring *tx_ring)
+{
+	u32 txctrl;
+	int cpu = get_cpu();
+	int q = tx_ring - adapter->tx_ring;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (tx_ring->cpu != cpu) {
+		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
+			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
+		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
+			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+			          IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
+		}
+		tx_ring->cpu = cpu;
+	}
+	put_cpu();
+}
+
+static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
+{
+	int i;
+
+	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+		return;
+
+	/* always use CB2 mode, difference is masked in the CB driver */
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		adapter->tx_ring[i].cpu = -1;
+		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+	}
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->rx_ring[i].cpu = -1;
+		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+	}
+}
+
+static int __ixgbe_notify_dca(struct device *dev, void *data)
+{
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	unsigned long event = *(unsigned long *)data;
+
+	switch (event) {
+	case DCA_PROVIDER_ADD:
+		/* if we're already enabled, don't do it again */
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+			break;
+		if (dca_add_requester(dev) == 0) {
+			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+			ixgbe_setup_dca(adapter);
+			break;
+		}
+		/* Fall Through since DCA is disabled. */
+	case DCA_PROVIDER_REMOVE:
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+			dca_remove_requester(dev);
+			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#endif /* CONFIG_IXGBE_DCA */
 /**
  * ixgbe_receive_skb - Send a completed packet up the stack
  * @adapter: board private structure
@@ -340,32 +548,26 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
  * @rx_ring: rx descriptor ring (for a specific queue) to setup
  * @rx_desc: rx descriptor
  **/
-static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
+static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                               struct sk_buff *skb, u8 status,
                               struct ixgbe_ring *ring,
                               union ixgbe_adv_rx_desc *rx_desc)
 {
-	struct napi_struct *napi = &ring->napi;
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct napi_struct *napi = &q_vector->napi;
 	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
 	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-		if (adapter->vlgrp && is_vlan)
+	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
 			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
 		else
 			napi_gro_receive(napi, skb);
 	} else {
-		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
-			if (adapter->vlgrp && is_vlan)
-				vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
-			else
-				netif_receive_skb(skb);
-		} else {
-			if (adapter->vlgrp && is_vlan)
-				vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
-			else
-				netif_rx(skb);
-		}
+		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
+			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+		else
+			netif_rx(skb);
 	}
 }
 
@@ -376,8 +578,11 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
  * @skb: skb currently being received and modified
  **/
 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
-                                     u32 status_err, struct sk_buff *skb)
+				     union ixgbe_adv_rx_desc *rx_desc,
+				     struct sk_buff *skb)
 {
+	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
+
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Rx csum disabled */
@@ -395,6 +600,16 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 		return;
 
 	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+		/*
+		 * 82599 errata, UDP frames with a 0 checksum can be marked as
+		 * checksum errors.
+		 */
+		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
+		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
+			return;
+
 		adapter->hw_csum_rx_error++;
 		return;
 	}
@@ -429,7 +644,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
 	unsigned int i;
-	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
 
 	i = rx_ring->next_to_use;
 	bi = &rx_ring->rx_buffer_info[i];
@@ -438,7 +652,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
 
 		if (!bi->page_dma &&
-		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
 			if (!bi->page) {
 				bi->page = alloc_page(GFP_ATOMIC);
 				if (!bi->page) {
@@ -459,7 +673,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 
 		if (!bi->skb) {
 			struct sk_buff *skb;
-			skb = netdev_alloc_skb(adapter->netdev, bufsz);
+			skb = netdev_alloc_skb(adapter->netdev,
+			                       (rx_ring->rx_buf_len +
+			                        NET_IP_ALIGN));
 
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
@@ -474,12 +690,13 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			skb_reserve(skb, NET_IP_ALIGN);
 
 			bi->skb = skb;
-			bi->dma = pci_map_single(pdev, skb->data, bufsz,
+			bi->dma = pci_map_single(pdev, skb->data,
+			                         rx_ring->rx_buf_len,
 			                         PCI_DMA_FROMDEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
-		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
 			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
 			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 		} else {
@@ -512,20 +729,58 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
 	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 }
 
-static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
+static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+	        IXGBE_RXDADV_RSCCNT_MASK) >>
+	        IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+/**
+ * ixgbe_transform_rsc_queue - change rsc queue into a full packet
+ * @skb: pointer to the last skb in the rsc queue
+ *
+ * This function changes a queue full of hw rsc buffers into a completed
+ * packet.  It uses the ->prev pointers to find the first packet and then
+ * turns it into the frag list owner.
+ **/
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+{
+	unsigned int frag_list_size = 0;
+
+	while (skb->prev) {
+		struct sk_buff *prev = skb->prev;
+		frag_list_size += skb->len;
+		skb->prev = NULL;
+		skb = prev;
+	}
+
+	skb_shinfo(skb)->frag_list = skb->next;
+	skb->next = NULL;
+	skb->len += frag_list_size;
+	skb->data_len += frag_list_size;
+	skb->truesize += frag_list_size;
+	return skb;
+}
+
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                struct ixgbe_ring *rx_ring,
                                int *work_done, int work_to_do)
 {
+	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
 	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
 	struct sk_buff *skb;
-	unsigned int i;
+	unsigned int i, rsc_count = 0;
 	u32 len, staterr;
 	u16 hdr_info;
 	bool cleaned = false;
 	int cleaned_count = 0;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+#ifdef IXGBE_FCOE
+	int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
 
 	i = rx_ring->next_to_clean;
 	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
@@ -538,7 +793,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 			break;
 		(*work_done)++;
 
-		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
 			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
 			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
@@ -556,10 +811,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 		prefetch(skb->data - NET_IP_ALIGN);
 		rx_buffer_info->skb = NULL;
 
-		if (len && !skb_shinfo(skb)->nr_frags) {
+		if (rx_buffer_info->dma) {
 			pci_unmap_single(pdev, rx_buffer_info->dma,
 			                 rx_ring->rx_buf_len,
 			                 PCI_DMA_FROMDEVICE);
+			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
 		}
 
@@ -586,20 +842,38 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
-		next_buffer = &rx_ring->rx_buffer_info[i];
 
 		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
 		prefetch(next_rxd);
-
 		cleaned_count++;
+
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+			rsc_count = ixgbe_get_rsc_count(rx_desc);
+
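+		/* for RSC, NEXTP in the status word indexes the next chained buffer */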
+		if (rsc_count) {
+			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+				     IXGBE_RXDADV_NEXTP_SHIFT;
+			next_buffer = &rx_ring->rx_buffer_info[nextp];
+			rx_ring->rsc_count += (rsc_count - 1);
+		} else {
+			next_buffer = &rx_ring->rx_buffer_info[i];
+		}
+
 		if (staterr & IXGBE_RXD_STAT_EOP) {
+			if (skb->prev)
+				skb = ixgbe_transform_rsc_queue(skb);
 			rx_ring->stats.packets++;
 			rx_ring->stats.bytes += skb->len;
 		} else {
-			rx_buffer_info->skb = next_buffer->skb;
-			rx_buffer_info->dma = next_buffer->dma;
-			next_buffer->skb = skb;
-			next_buffer->dma = 0;
+			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+				rx_buffer_info->skb = next_buffer->skb;
+				rx_buffer_info->dma = next_buffer->dma;
+				next_buffer->skb = skb;
+				next_buffer->dma = 0;
+			} else {
+				skb->next = next_buffer->skb;
+				skb->next->prev = skb;
+			}
 			adapter->non_eop_descs++;
 			goto next_desc;
 		}
@@ -609,15 +883,22 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 			goto next_desc;
 		}
 
-		ixgbe_rx_checksum(adapter, staterr, skb);
+		ixgbe_rx_checksum(adapter, rx_desc, skb);
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
 
 		skb->protocol = eth_type_trans(skb, adapter->netdev);
-		ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
-		adapter->netdev->last_rx = jiffies;
+#ifdef IXGBE_FCOE
+		/* if ddp, not passing to ULD unless for FCP_RSP or error */
+		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+			if (!ddp_bytes)
+				goto next_desc;
+		}
+#endif /* IXGBE_FCOE */
+		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -630,12 +911,12 @@ next_desc:
 
 		/* use prefetched values */
 		rx_desc = next_rxd;
-		rx_buffer_info = next_buffer;
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 
-	napi_gro_flush(&rx_ring->napi);
+	napi_gro_flush(&q_vector->napi);
 
 	rx_ring->next_to_clean = i;
 	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
@@ -643,6 +924,21 @@ next_desc:
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 
+#ifdef IXGBE_FCOE
+	/* include DDPed FCoE data */
+	if (ddp_bytes > 0) {
+		unsigned int mss;
+
+		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+			sizeof(struct fc_frame_header) -
+			sizeof(struct fcoe_crc_eof);
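+		/* approximate the frame count: round mss down to a 512-byte multiple */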
+		if (mss > 512)
+			mss &= ~511;
+		total_rx_bytes += ddp_bytes;
+		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
+	}
+#endif /* IXGBE_FCOE */
+
 	rx_ring->total_packets += total_rx_packets;
 	rx_ring->total_bytes += total_rx_bytes;
 	adapter->net_stats.rx_bytes += total_rx_bytes;
@@ -672,7 +968,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 	 * corresponding register.
 	 */
 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
-		q_vector = &adapter->q_vector[v_idx];
+		q_vector = adapter->q_vector[v_idx];
 		/* XXX for_each_bit(...) */
 		r_idx = find_first_bit(q_vector->rxr_idx,
 		                       adapter->num_rx_queues);
@@ -695,19 +991,14 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 			                      r_idx + 1);
 		}
 
-		/* if this is a tx only vector halve the interrupt rate */
 		if (q_vector->txr_count && !q_vector->rxr_count)
-			q_vector->eitr = (adapter->eitr_param >> 1);
+			/* tx only */
+			q_vector->eitr = adapter->tx_eitr_param;
 		else if (q_vector->rxr_count)
-			/* rx only */
-			q_vector->eitr = adapter->eitr_param;
+			/* rx or mixed */
+			q_vector->eitr = adapter->rx_eitr_param;
 
-		/*
-		 * since this is initial set up don't need to call
-		 * ixgbe_write_eitr helper
-		 */
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
-		                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+		ixgbe_write_eitr(q_vector);
 	}
 
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -792,17 +1083,19 @@ update_itr_done:
 
 /**
  * ixgbe_write_eitr - write EITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
+ * @q_vector: structure containing interrupt and ring information
  *
  * This function is made to be called by ethtool and by the driver
  * when it needs to update EITR registers at runtime.  Hardware
  * specific quirks/differences are taken care of here.
  */
-void ixgbe_write_eitr(struct ixgbe_adapter *adapter, int v_idx, u32 itr_reg)
+void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 {
+	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_hw *hw = &adapter->hw;
+	int v_idx = q_vector->v_idx;
+	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
+
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 		/* must write high and low 16 bits to reset counter */
 		itr_reg |= (itr_reg << 16);
@@ -821,8 +1114,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	u32 new_itr;
 	u8 current_itr, ret_itr;
-	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
-	                       sizeof(struct ixgbe_q_vector);
+	int i, r_idx;
 	struct ixgbe_ring *rx_ring, *tx_ring;
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
@@ -872,14 +1164,13 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 	}
 
 	if (new_itr != q_vector->eitr) {
-		u32 itr_reg;
+		/* do an exponential smoothing */
+		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 
 		/* save the algorithm value here, not the smoothed one */
 		q_vector->eitr = new_itr;
-		/* do an exponential smoothing */
-		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-		ixgbe_write_eitr(adapter, v_idx, itr_reg);
+
+		ixgbe_write_eitr(q_vector);
 	}
 
 	return;
@@ -950,14 +1241,64 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data, struct pt_regs *regs)
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		ixgbe_check_fan_failure(adapter, eicr);
 
-	if (hw->mac.type == ixgbe_mac_82599EB)
+	if (hw->mac.type == ixgbe_mac_82599EB) {
 		ixgbe_check_sfp_event(adapter, eicr);
+
+		/* Handle Flow Director Full threshold interrupt */
+		if (eicr & IXGBE_EICR_FLOW_DIR) {
+			int i;
+			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
+			/* Disable transmits before FDIR Re-initialization */
+			netif_stop_queue(netdev);
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				struct ixgbe_ring *tx_ring =
+				                           &adapter->tx_ring[i];
+				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
+				                       &tx_ring->reinit_state))
+					schedule_work(&adapter->fdir_reinit_task);
+			}
+		}
+	}
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
 	return IRQ_HANDLED;
 }
 
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+					   u64 qmask)
+{
+	u32 mask;
+
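+	/* 82598 has a single EIMS; 82599 splits 64 queue bits across EIMS_EX[0..1] */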
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+	} else {
+		mask = (qmask & 0xFFFFFFFF);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+		mask = (qmask >> 32);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+	}
+	/* skip the flush */
+}
+
+static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
+                                            u64 qmask)
+{
+	u32 mask;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+	} else {
+		mask = (qmask & 0xFFFFFFFF);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+		mask = (qmask >> 32);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+	}
+	/* skip the flush */
+}
+
 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data, struct pt_regs *regs)
 {
 	struct ixgbe_q_vector *q_vector = data;
@@ -973,11 +1314,14 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data, struct pt_regs *regs
 		tx_ring = &(adapter->tx_ring[r_idx]);
 		tx_ring->total_bytes = 0;
 		tx_ring->total_packets = 0;
-		ixgbe_clean_tx_irq(adapter, tx_ring);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 		                      r_idx + 1);
 	}
 
+	/* disable interrupts on this vector only */
+	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	netif_rx_schedule(q_vector->dummy_netdev);
+
 	return IRQ_HANDLED;
 }
 
@@ -1006,10 +1350,8 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data, struct pt_regs *regs
 	if (!q_vector->rxr_count)
 		return IRQ_HANDLED;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
 	/* disable interrupts on this vector only */
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
 	netif_rx_schedule(q_vector->dummy_netdev);
 
 	return IRQ_HANDLED;
@@ -1017,8 +1359,36 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data, struct pt_regs *regs
 
 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data, struct pt_regs *regs)
 {
-	ixgbe_msix_clean_rx(irq, data, regs);
-	ixgbe_msix_clean_tx(irq, data, regs);
+	struct ixgbe_q_vector *q_vector = data;
+	struct ixgbe_adapter  *adapter = q_vector->adapter;
+	struct ixgbe_ring  *ring;
+	int r_idx;
+	int i;
+
+	if (!q_vector->txr_count && !q_vector->rxr_count)
+		return IRQ_HANDLED;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		ring = &(adapter->tx_ring[r_idx]);
+		ring->total_bytes = 0;
+		ring->total_packets = 0;
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+		                      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		ring = &(adapter->rx_ring[r_idx]);
+		ring->total_bytes = 0;
+		ring->total_packets = 0;
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+		                      r_idx + 1);
+	}
+
+	/* disable interrupts on this vector only */
+	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	netif_rx_schedule(q_vector->dummy_netdev);
 
 	return IRQ_HANDLED;
 }
@@ -1042,12 +1412,16 @@ static int ixgbe_clean_rxonly(struct net_device *netdev, int *budget)
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	rx_ring = &(adapter->rx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		ixgbe_update_rx_dca(adapter, rx_ring);
+#endif
 
 	/* Keep link state information with original netdev */
 	if (!netif_carrier_ok(adapter->netdev))
 		goto quit_polling;
 
-	ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, work_to_do);
+	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, work_to_do);
 
 	*budget -= work_done;
 	netdev->quota -= work_done;
@@ -1056,10 +1430,11 @@ static int ixgbe_clean_rxonly(struct net_device *netdev, int *budget)
 	if ((work_done < work_to_do) || !netif_running(adapter->netdev)) {
 quit_polling:
 		netif_rx_complete(netdev);
-		if (adapter->itr_setting & 1)
+		if (adapter->rx_itr_setting & 1)
 			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
+			ixgbe_irq_enable_queues(adapter,
+			                        ((u64)1 << q_vector->v_idx));
 		return 0;
 	}
 
@@ -1067,35 +1442,52 @@ quit_polling:
 }
 
 /**
- * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
  *
  * This function will clean more than one rx queue associated with a
  * q_vector.
  **/
-static int ixgbe_clean_rxonly_many(struct net_device *netdev, int *budget)
+static int ixgbe_clean_rxtx_many(struct net_device *netdev, int *budget)
 {
 	struct ixgbe_q_vector *q_vector = netdev->priv;
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	struct ixgbe_ring *rx_ring = NULL;
+	struct ixgbe_ring *ring = NULL;
 	int work_to_do = min(*budget, netdev->quota);
 	int work_done = 0, i;
 	long r_idx;
-	u16 enable_mask = 0;
+	bool tx_clean_complete = true;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+			ixgbe_update_tx_dca(adapter, ring);
+#endif
+		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+		                      r_idx + 1);
+	}
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
 	work_to_do /= (q_vector->rxr_count ?: 1);
-	work_to_do = max(work_to_do, 1);
+	work_to_do = max(work_to_do, 1);
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 
+	/* Keep link state information with original netdev */
+	if (!netif_carrier_ok(adapter->netdev))
+		goto quit_polling;
+
 	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
-		/* only clean ring if netdev is linked up */
-		if (netif_carrier_ok(adapter->netdev))
-			ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, work_to_do);
-		enable_mask |= rx_ring->v_idx;
+		ring = &(adapter->rx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+			ixgbe_update_rx_dca(adapter, ring);
+#endif
+		ixgbe_clean_rx_irq(q_vector, ring, &work_done, work_to_do);
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
 		                      r_idx + 1);
 	}
@@ -1104,35 +1496,80 @@ static int ixgbe_clean_rxonly_many(struct net_device *netdev, int *budget)
 	netdev->quota -= work_done;
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
+	ring = &(adapter->rx_ring[r_idx]);
 	/* If all Rx work done, exit the polling mode */
 	if ((work_done < work_to_do) || !netif_running(adapter->netdev)) {
+quit_polling:
 		netif_rx_complete(netdev);
-		if (adapter->itr_setting & 1)
+		if (adapter->rx_itr_setting & 1)
+			ixgbe_set_itr_msix(q_vector);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			ixgbe_irq_enable_queues(adapter,
+			                        ((u64)1 << q_vector->v_idx));
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
+ * @netdev: dummy netdev holding this q_vector in its ->priv
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbe_clean_txonly(struct net_device *netdev, int *budget)
+{
+	struct ixgbe_q_vector *q_vector = netdev->priv;
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *tx_ring = NULL;
+	int work_to_do = min(*budget, netdev->quota);
+	int work_done = 0;
+	long r_idx;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	tx_ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		ixgbe_update_tx_dca(adapter, tx_ring);
+#endif
+	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
+		work_done = work_to_do;
+
+	*budget -= work_done;
+	netdev->quota -= work_done;
+
+	/* If all Tx work done, exit the polling mode */
+	if (work_done < work_to_do) {
+		netif_rx_complete(q_vector->dummy_netdev);
+		if (adapter->tx_itr_setting & 1)
 			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
 		return 0;
 	}
 
 	return 1;
 }
+
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                      int r_idx)
 {
-	a->q_vector[v_idx].adapter = a;
-	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
-	a->q_vector[v_idx].rxr_count++;
-	a->rx_ring[r_idx].v_idx = 1 << v_idx;
+	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+	set_bit(r_idx, q_vector->rxr_idx);
+	q_vector->rxr_count++;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
-                                     int r_idx)
+                                     int t_idx)
 {
-	a->q_vector[v_idx].adapter = a;
-	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
-	a->q_vector[v_idx].txr_count++;
-	a->tx_ring[r_idx].v_idx = 1 << v_idx;
+	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+	set_bit(t_idx, q_vector->txr_idx);
+	q_vector->txr_count++;
 }
 
 /**
@@ -1228,7 +1665,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                          (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                          &ixgbe_msix_clean_many)
 	for (vector = 0; vector < q_vectors; vector++) {
-		handler = SET_HANDLER(&adapter->q_vector[vector]);
+		handler = SET_HANDLER(adapter->q_vector[vector]);
 
 		if(handler == &ixgbe_msix_clean_rx) {
 			sprintf(adapter->name[vector], "%s-%s-%d",
@@ -1244,7 +1681,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
 		err = request_irq(adapter->msix_entries[vector].vector,
 		                  handler, 0, adapter->name[vector],
-		                  &(adapter->q_vector[vector]));
+		                  adapter->q_vector[vector]);
 		if (err) {
 			DPRINTK(PROBE, ERR,
 			        "request_irq failed for MSIX interrupt "
@@ -1269,7 +1706,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
 	for (i = vector - 1; i >= 0; i--)
 		free_irq(adapter->msix_entries[--vector].vector,
-		         &(adapter->q_vector[i]));
+		         adapter->q_vector[i]);
 	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
@@ -1280,7 +1717,7 @@ out:
 
 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 {
-	struct ixgbe_q_vector *q_vector = adapter->q_vector;
+	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 	u8 current_itr;
 	u32 new_itr = q_vector->eitr;
 	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
@@ -1313,14 +1750,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 	}
 
 	if (new_itr != q_vector->eitr) {
-		u32 itr_reg;
+		/* do an exponential smoothing */
+		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 
 		/* save the algorithm value here, not the smoothed one */
 		q_vector->eitr = new_itr;
-		/* do an exponential smoothing */
-		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-		ixgbe_write_eitr(adapter, 0, itr_reg);
+
+		ixgbe_write_eitr(q_vector);
 	}
 
 	return;
@@ -1333,7 +1769,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 {
 	u32 mask;
-	mask = IXGBE_EIMS_ENABLE_MASK;
+
+	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
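+	/* queue bits are enabled separately below via ixgbe_irq_enable_queues() */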
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
 		mask |= IXGBE_EIMS_GPI_SDP1;
 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -1341,16 +1778,12 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 		mask |= IXGBE_EIMS_GPI_SDP1;
 		mask |= IXGBE_EIMS_GPI_SDP2;
 	}
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		mask |= IXGBE_EIMS_FLOW_DIR;
 
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		/* enable the rest of the queue vectors */
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1),
-		                (IXGBE_EIMS_RTX_QUEUE << 16));
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
-		                ((IXGBE_EIMS_RTX_QUEUE << 16) |
-		                  IXGBE_EIMS_RTX_QUEUE));
-	}
+	ixgbe_irq_enable_queues(adapter, ~0);
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
@@ -1365,6 +1798,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data, struct pt_regs *regs)
 	struct net_device *netdev = data;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 	u32 eicr;
 
 	/*
@@ -1392,13 +1826,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data, struct pt_regs *regs)
 
 	ixgbe_check_fan_failure(adapter, eicr);
 
-	if (netif_rx_schedule_prep(adapter->q_vector[0].dummy_netdev)) {
+	if (netif_rx_schedule_prep((q_vector->dummy_netdev))) {
 		adapter->tx_ring[0].total_packets = 0;
 		adapter->tx_ring[0].total_bytes = 0;
 		adapter->rx_ring[0].total_packets = 0;
 		adapter->rx_ring[0].total_bytes = 0;
 		/* would disable interrupts here but EIAM disabled it */
-		__netif_rx_schedule(adapter->q_vector[0].dummy_netdev);
+		__netif_rx_schedule((q_vector->dummy_netdev));
 	}
 
 	return IRQ_HANDLED;
@@ -1409,7 +1843,7 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
 	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
 	for (i = 0; i < q_vectors; i++) {
-		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
 		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
 		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
 		q_vector->rxr_count = 0;
@@ -1460,7 +1894,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 		i--;
 		for (; i >= 0; i--) {
 			free_irq(adapter->msix_entries[i].vector,
-			         &(adapter->q_vector[i]));
+			         adapter->q_vector[i]);
 		}
 
 		ixgbe_reset_q_vectors(adapter);
@@ -1475,10 +1909,12 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+	} else {
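+		/* 82599: mask the non-queue causes, then all 64 queues via EIMC_EX */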
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(2), ~0);
 	}
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1490,18 +1926,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 	}
 }
 
-static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter)
-{
-	u32 mask = IXGBE_EIMS_RTX_QUEUE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask << 16);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
-		                (mask << 16 | mask));
-	}
-	/* skip the flush */
-}
-
 /**
  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
  *
@@ -1511,7 +1935,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
-	                EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
+	                EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
 
 	ixgbe_set_ivar(adapter, 0, 0, 0);
 	ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -1541,58 +1965,74 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 		tdba = ring->dma;
 		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
-		                (tdba & DMA_32BIT_MASK));
+		                (tdba & DMA_BIT_MASK(32)));
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
 		adapter->tx_ring[i].head = IXGBE_TDH(j);
 		adapter->tx_ring[i].tail = IXGBE_TDT(j);
-		/* Disable Tx Head Writeback RO bit, since this hoses
+		/*
+		 * Disable Tx Head Writeback RO bit, since this hoses
 		 * bookkeeping if things aren't delivered in order.
 		 */
-		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+			break;
+		case ixgbe_mac_82599EB:
+		default:
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
+			break;
+		}
 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+			break;
+		case ixgbe_mac_82599EB:
+		default:
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
+			break;
+		}
 	}
+
 	if (hw->mac.type == ixgbe_mac_82599EB) {
+		u32 rttdcs;
+
+		/* disable the arbiter while setting MTQC */
+		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+		rttdcs |= IXGBE_RTTDCS_ARBDIS;
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
 		/* We enable 8 traffic classes, DCB only */
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
 			IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
 			                IXGBE_MTQC_8TC_8TQ));
+		else
+			IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+
+	/* re-enable the arbiter */
+		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 	}
 }
 
-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
 
-static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
+static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
+                                   struct ixgbe_ring *rx_ring)
 {
-	struct ixgbe_ring *rx_ring;
 	u32 srrctl;
-	int queue0 = 0;
-	unsigned long mask;
+	int index;
+	struct ixgbe_ring_feature *feature = adapter->ring_feature;
 
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
-			if (dcb_i == 8)
-				queue0 = index >> 4;
-			else if (dcb_i == 4)
-				queue0 = index >> 5;
-			else
-				dev_err(&adapter->pdev->dev, "Invalid DCB "
-				        "configuration\n");
-		} else {
-			queue0 = index;
-		}
-	} else {
-		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
-		queue0 = index & mask;
+	index = rx_ring->reg_idx;
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		unsigned long mask;
+		mask = (unsigned long) feature[RING_F_RSS].mask;
 		index = index & mask;
 	}
-
-	rx_ring = &adapter->rx_ring[queue0];
-
 	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
 
 	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -1601,7 +2041,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
 	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 		  IXGBE_SRRCTL_BSIZEHDR_MASK;
 
-	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
 		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
@@ -1617,8 +2057,79 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
 }
 
-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
-			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+{
+	u32 mrqc = 0;
+	int mask;
+
+	if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
+		return mrqc;
+
+	mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+#ifdef CONFIG_IXGBE_DCB
+				 | IXGBE_FLAG_DCB_ENABLED
+#endif
+				);
+
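+	/* map the enabled feature combination onto an MRQC queue-assignment mode */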
+	switch (mask) {
+	case (IXGBE_FLAG_RSS_ENABLED):
+		mrqc = IXGBE_MRQC_RSSEN;
+		break;
+#ifdef CONFIG_IXGBE_DCB
+	case (IXGBE_FLAG_DCB_ENABLED):
+		mrqc = IXGBE_MRQC_RT8TCEN;
+		break;
+#endif /* CONFIG_IXGBE_DCB */
+	default:
+		break;
+	}
+
+	return mrqc;
+}
+
+/**
+ * ixgbe_configure_rscctl - enable RSC for the indicated ring
+ * @adapter:    address of board private structure
+ * @index:      index of ring to set
+ * @rx_buf_len: rx buffer length
+ **/
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
+                                   int rx_buf_len)
+{
+	struct ixgbe_ring *rx_ring;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int j;
+	u32 rscctrl;
+
+	rx_ring = &adapter->rx_ring[index];
+	j = rx_ring->reg_idx;
+	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+	rscctrl |= IXGBE_RSCCTL_RSCEN;
+	/*
+	 * we must limit the number of descriptors so that the
+	 * total size of max desc * buf_len is not greater
+	 * than 65535
+	 */
+	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+#if (MAX_SKB_FRAGS > 16)
+		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS > 8)
+		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS > 4)
+		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+	} else {
+		if (rx_buf_len < IXGBE_RXBUFFER_4096)
+			rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+		else if (rx_buf_len < IXGBE_RXBUFFER_8192)
+			rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+		else
+			rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+}
 
 /**
  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
@@ -1630,6 +2141,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 {
 	u64 rdba;
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_ring *rx_ring;
 	struct net_device *netdev = adapter->netdev;
 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	int i, j;
@@ -1653,11 +2165,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 			u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
 			              IXGBE_PSRTYPE_UDPHDR |
 			              IXGBE_PSRTYPE_IPV4HDR |
-			              IXGBE_PSRTYPE_IPV6HDR;
+			              IXGBE_PSRTYPE_IPV6HDR |
+			              IXGBE_PSRTYPE_L2HDR;
 			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
 		}
 	} else {
-		if (netdev->mtu <= ETH_DATA_LEN)
+		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+		    (netdev->mtu <= ETH_DATA_LEN))
 			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 		else
 			rx_buf_len = ALIGN(max_frame, 1024);
@@ -1674,6 +2188,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	else
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#endif
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
 	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
@@ -1681,21 +2199,42 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-	/* Setup the HW Rx Head and Tail Descriptor Pointers and
-	 * the Base and Length of the Rx Descriptor Ring */
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rdba = adapter->rx_ring[i].dma;
-		j = adapter->rx_ring[i].reg_idx;
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
+		rx_ring = &adapter->rx_ring[i];
+		rdba = rx_ring->dma;
+		j = rx_ring->reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
 		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
-		adapter->rx_ring[i].head = IXGBE_RDH(j);
-		adapter->rx_ring[i].tail = IXGBE_RDT(j);
-		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+		rx_ring->head = IXGBE_RDH(j);
+		rx_ring->tail = IXGBE_RDT(j);
+		rx_ring->rx_buf_len = rx_buf_len;
 
-		ixgbe_configure_srrctl(adapter, j);
+		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
+			rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+		else
+			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+
+#ifdef IXGBE_FCOE
+		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+			struct ixgbe_ring_feature *f;
+			f = &adapter->ring_feature[RING_F_FCOE];
+			if ((i >= f->mask) && (i < f->mask + f->indices)) {
+				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+					rx_ring->rx_buf_len =
+					        IXGBE_FCOE_JUMBO_FRAME_SIZE;
+			}
+		}
+
+#endif /* IXGBE_FCOE */
+		ixgbe_configure_srrctl(adapter, rx_ring);
 	}
 
 	if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1709,29 +2248,14 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		 * effects of setting this bit are only that SRRCTL must be
 		 * fully programmed [0..15]
 		 */
-		if (adapter->flags &
-		    (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
-			rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-			rdrxctl |= IXGBE_RDRXCTL_MVMEN;
-			IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
-		}
+		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
+		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
 	}
 
 	/* Program MRQC for the distribution of queues */
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-		int mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
+	mrqc = ixgbe_setup_mrqc(adapter);
 
-		switch (mask) {
-		case (IXGBE_FLAG_RSS_ENABLED):
-			mrqc = IXGBE_MRQC_RSSEN;
-			break;
-		case (IXGBE_FLAG_DCB_ENABLED):
-			mrqc = IXGBE_MRQC_RT8TCEN;
-			break;
-		default:
-			break;
-		}
-	}
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		/* Fill out redirection table */
 		for (i = 0, j = 0; i < 128; i++, j++) {
@@ -1779,8 +2303,19 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
 		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
 	}
+
+	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+		/* Enable 82599 HW-RSC */
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			ixgbe_configure_rscctl(adapter, i, rx_buf_len);
+
+		/* Disable RSC for ACK packets */
+		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+	}
 }
 
 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -1813,12 +2348,19 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
                                    struct vlan_group *grp)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 ctrl = 0;
+	u32 ctrl;
 	int i, j;
 
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		ixgbe_irq_disable(adapter);
 	adapter->vlgrp = grp;
+
+	/*
+	 * For a DCB driver, always enable VLAN tag stripping so we can
+	 * still receive traffic from a DCB-enabled host even if we're
+	 * not in DCB mode.
+	 */
+	ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 		ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
@@ -1906,26 +2448,28 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
 		}
 		vlnctrl |= IXGBE_VLNCTRL_VFE;
 		hw->addr_ctrl.user_set_promisc = 0;
-		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-#if 0
+#if 0 /* not in RHEL */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
 	/* reprogram secondary unicast list */
 	addr_count = netdev->uc_count;
 	if (addr_count)
 		addr_list = netdev->uc_list->dmi_addr;
-	ixgbe_update_uc_addr_list(hw, addr_list, addr_count,
-	                          ixgbe_addr_list_itr);
+	hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
+	                                  ixgbe_addr_list_itr);
 
 #endif
 	/* reprogram multicast list */
 	addr_count = netdev->mc_count;
+
 	if (addr_count)
 		addr_list = netdev->mc_list->dmi_addr;
 	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
-	               			ixgbe_addr_list_itr);
+	                                ixgbe_addr_list_itr);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -1940,14 +2484,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		struct net_device *netdev;
-		q_vector = &adapter->q_vector[q_idx];
-		if (!q_vector->rxr_count)
-			continue;
-
+		q_vector = adapter->q_vector[q_idx];
 		netdev = q_vector->dummy_netdev;
-		if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
-		    (q_vector->rxr_count > 1))
-			netdev->poll = &ixgbe_clean_rxonly_many;
+		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+			if (!q_vector->rxr_count || !q_vector->txr_count) {
+				if (q_vector->txr_count == 1)
+					netdev->poll = &ixgbe_clean_txonly;
+				else if (q_vector->rxr_count == 1)
+					netdev->poll = &ixgbe_clean_rxonly;
+			}
+		}
 
 		netif_poll_enable(q_vector->dummy_netdev);
 	}
@@ -1964,21 +2510,89 @@ void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 		q_vectors = 1;
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		q_vector = &adapter->q_vector[q_idx];
-		if (!q_vector->rxr_count)
-			continue;
+		q_vector = adapter->q_vector[q_idx];
 		netif_poll_disable(q_vector->dummy_netdev);
 	}
 }
 
+#ifdef CONFIG_IXGBE_DCB
+/*
+ * ixgbe_configure_dcb - Configure DCB hardware
+ * @adapter: ixgbe adapter struct
+ *
+ * This is called by the driver on open to configure the DCB hardware.
+ * This is also called by the gennetlink interface when reconfiguring
+ * the DCB state.
+ */
+static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 txdctl, vlnctrl;
+	int i, j;
+
+	ixgbe_dcb_check_config(&adapter->dcb_cfg);
+	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
+	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
+
+	/* reconfigure the hardware */
+	ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+		/* PThresh workaround for Tx hang with DFP enabled. */
+		txdctl |= 32;
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+	}
+	/* Enable VLAN tag insert/strip */
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+	} else if (hw->mac.type == ixgbe_mac_82599EB) {
+		vlnctrl |= IXGBE_VLNCTRL_VFE;
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			j = adapter->rx_ring[i].reg_idx;
+			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+			vlnctrl |= IXGBE_RXDCTL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+		}
+	}
+	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+}
+
+#endif
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
 
 	ixgbe_set_rx_mode(netdev);
 
 	ixgbe_restore_vlan(adapter);
+#ifdef CONFIG_IXGBE_DCB
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		ixgbe_configure_dcb(adapter);
+	}
+#endif
+
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].atr_sample_rate =
+			                               adapter->atr_sample_rate;
+		ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
+	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+		ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+	}
 
 	ixgbe_configure_tx(adapter);
 	ixgbe_configure_rx(adapter);
@@ -2043,7 +2657,7 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
 {
 	u32 autoneg;
-	bool link_up = false;
+	bool negotiation, link_up = false;
 	u32 ret = IXGBE_ERR_LINK_SETUP;
 
 	if (hw->mac.ops.check_link)
@@ -2053,14 +2667,12 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
 		goto link_cfg_out;
 
 	if (hw->mac.ops.get_link_capabilities)
-		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
-		                                        &hw->mac.autoneg);
+		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
 	if (ret)
 		goto link_cfg_out;
 
-	if (hw->mac.ops.setup_link_speed)
-		ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up);
-
+	if (hw->mac.ops.setup_link)
+		ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
 link_cfg_out:
 	return ret;
 }
@@ -2128,13 +2740,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 	}
 
-	/* Enable fan failure interrupt if media type is copper */
-	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
-		gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
-		gpie |= IXGBE_SDP1_GPIEN;
-		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-	}
-
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
 		gpie |= IXGBE_SDP1_GPIEN;
@@ -2142,6 +2747,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 	}
 
+#ifdef IXGBE_FCOE
+	/* adjust max frame to be able to do baby jumbo for FCoE */
+	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
+		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+
+#endif /* IXGBE_FCOE */
 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
 	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -2206,25 +2818,35 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	ixgbe_irq_enable(adapter);
 
 	/*
-	 * For hot-pluggable SFP+ devices, a new SFP+ module may have
-	 * arrived before interrupts were enabled.  We need to kick off
-	 * the SFP+ module setup first, then try to bring up link.
-	 * If we're not hot-pluggable SFP+, we just need to configure
-	 * link
-	 * and bring it up.
+	 * If this adapter has a fan, check to see if we had a failure
+	 * before we enabled the interrupt.
 	 */
+	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		if (esdp & IXGBE_ESDP_SDP1)
+			DPRINTK(DRV, CRIT,
+				"Fan has stopped, replace the adapter\n");
+	}
+
 	/*
 	 * For hot-pluggable SFP+ devices, a new SFP+ module may have
-	 * arrived before interrupts were enabled.  We need to kick off
-	 * the SFP+ module setup first, then try to bring up link.
+	 * arrived before interrupts were enabled but after probe.  Such
+	 * devices wouldn't have their type identified yet. We need to
+	 * kick off the SFP+ module setup first, then try to bring up link.
 	 * If we're not hot-pluggable SFP+, we just need to configure link
 	 * and bring it up.
 	 */
-	err = hw->phy.ops.identify(hw);
-	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-		DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
-		ixgbe_down(adapter);
-		return err;
+	if (hw->phy.type == ixgbe_phy_unknown) {
+		err = hw->phy.ops.identify(hw);
+		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+			/*
+			 * Take the device down and schedule the sfp tasklet
+			 * which will unregister_netdev and log it.
+			 */
+			ixgbe_down(adapter);
+			schedule_work(&adapter->sfp_config_module_task);
+			return err;
+		}
 	}
 
 	if (ixgbe_is_sfp(hw)) {
@@ -2235,6 +2857,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 			DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
 	}
 
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		set_bit(__IXGBE_FDIR_INIT_DONE,
+		        &(adapter->tx_ring[i].reinit_state));
+
 	/* enable transmits */
 	netif_start_queue(netdev);
 
@@ -2245,6 +2871,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	mod_timer(&adapter->watchdog_timer, jiffies);
 	return 0;
 }
+
 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
 {
 	WARN_ON(in_interrupt());
@@ -2260,20 +2887,37 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
 	/* hardware has been reset, we need to reload some things */
 	ixgbe_configure(adapter);
 
-	ixgbe_napi_add_all(adapter);
-
 	return ixgbe_up_complete(adapter);
 }
 
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	if (hw->mac.ops.init_hw(hw))
-		dev_err(&adapter->pdev->dev, "Hardware Error\n");
+	int err;
+
+	err = hw->mac.ops.init_hw(hw);
+	switch (err) {
+	case 0:
+	case IXGBE_ERR_SFP_NOT_PRESENT:
+		break;
+	case IXGBE_ERR_MASTER_REQUESTS_PENDING:
+		dev_err(&adapter->pdev->dev, "master disable timed out\n");
+		break;
+	case IXGBE_ERR_EEPROM_VERSION:
+		/* We are running on a pre-production device, log a warning */
+		dev_warn(&adapter->pdev->dev, "This device is a pre-production "
+		         "adapter/LOM.  Please be aware there may be issues "
+		         "associated with your hardware.  If you are "
+		         "experiencing problems please contact your Intel or "
+		         "hardware representative who provided you with this "
+		         "hardware.\n");
+		break;
+	default:
+		dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+	}
 
 	/* reprogram the RAR[0] in case user changed it. */
 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
 }
 
 /**
@@ -2301,14 +2945,21 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 			rx_buffer_info->dma = 0;
 		}
 		if (rx_buffer_info->skb) {
-			dev_kfree_skb(rx_buffer_info->skb);
+			struct sk_buff *skb = rx_buffer_info->skb;
 			rx_buffer_info->skb = NULL;
+			do {
+				struct sk_buff *this = skb;
+				skb = skb->prev;
+				dev_kfree_skb(this);
+			} while (skb);
 		}
 		if (!rx_buffer_info->page)
 			continue;
-		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
-		               PCI_DMA_FROMDEVICE);
-		rx_buffer_info->page_dma = 0;
+		if (rx_buffer_info->page_dma) {
+			pci_unmap_page(pdev, rx_buffer_info->page_dma,
+			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			rx_buffer_info->page_dma = 0;
+		}
 		put_page(rx_buffer_info->page);
 		rx_buffer_info->page = NULL;
 		rx_buffer_info->page_offset = 0;
@@ -2330,46 +2981,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 }
 
 /**
- * ixgbe_poll - NAPI Rx polling callback
- * @napi: structure for representing this polling device
- * @budget: how many packets driver is allowed to clean
- *
- * This function is used for legacy and MSI, NAPI mode
- **/
-static int ixgbe_poll(struct net_device *netdev, int *budget)
-{
-	struct ixgbe_q_vector *q_vector = netdev->priv;
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-	int work_to_do = min(*budget, netdev->quota);
-	int tx_clean_complete, work_done = 0;
-
-	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(adapter->netdev))
-		goto quit_polling;
-
-	tx_clean_complete = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
-	ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, work_to_do);
-
-	*budget -= work_done;
-	netdev->quota -= work_done;
-
-	if (!tx_clean_complete)
-		work_done = work_to_do;
-
-        /* If budget not fully consumed, exit the polling mode */
-        if ((work_done < work_to_do) || !netif_running(adapter->netdev)) {
-quit_polling:
-		netif_rx_complete(netdev);
-		if (adapter->itr_setting & 1)
-			ixgbe_set_itr(adapter);
-		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			ixgbe_irq_enable_queues(adapter);
-		return 0;
-	}
-	return 1;
-}
-
-/**
  * ixgbe_clean_tx_ring - Free Tx Buffers
  * @adapter: board private structure
  * @tx_ring: ring to be cleaned
@@ -2430,42 +3041,107 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
 	u32 rxctrl;
+	u32 txdctl;
+	int i, j;
 
 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	/* disable receives */
-	rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
-			rxctrl & ~IXGBE_RXCTRL_RXEN);
-
-	/* Disable the Tx DMA engine on 82599 */
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL,
-		                (IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL) &
-		                 ~IXGBE_DMATXCTL_TE));
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
 	netif_tx_disable(netdev);
 
-	/* disable transmits in the hardware */
-
-	/* flush both disables */
-	IXGBE_WRITE_FLUSH(&adapter->hw);
+	IXGBE_WRITE_FLUSH(hw);
 	msleep(10);
 
+	netif_stop_queue(netdev);
+
 	ixgbe_irq_disable(adapter);
 
 	ixgbe_napi_disable_all(adapter);
+
+	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+	del_timer_sync(&adapter->sfp_timer);
 	del_timer_sync(&adapter->watchdog_timer);
 
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		cancel_delayed_work(&adapter->fdir_reinit_task);
+
+	/* disable transmits in the hardware now that interrupts are off */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+		                (txdctl & ~IXGBE_TXDCTL_ENABLE));
+	}
+	/* Disable the Tx DMA engine on 82599 */
+	if (hw->mac.type == ixgbe_mac_82599EB)
+		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
+		                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
+		                 ~IXGBE_DMATXCTL_TE));
+
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	if (!pci_channel_offline(adapter->pdev))
 		ixgbe_reset(adapter);
 	ixgbe_clean_all_tx_rings(adapter);
 	ixgbe_clean_all_rx_rings(adapter);
+
+#ifdef CONFIG_IXGBE_DCA
+	/* since we reset the hardware DCA settings were cleared */
+	ixgbe_setup_dca(adapter);
+#endif
+}
+
+/**
+ * ixgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * This function is used for legacy and MSI, NAPI mode
+ **/
+static int ixgbe_poll(struct net_device *netdev, int *budget)
+{
+	struct ixgbe_q_vector *q_vector = netdev->priv;
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	int work_to_do = min(*budget, netdev->quota);
+	int tx_clean_complete, work_done = 0;
+
+#ifdef CONFIG_IXGBE_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+		ixgbe_update_tx_dca(adapter, adapter->tx_ring);
+		ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+	}
+#endif
+
+	tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
+	ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, work_to_do);
+
+	if (!tx_clean_complete)
+		work_done = work_to_do;
+
+	*budget -= work_done;
+	netdev->quota -= work_done;
+
+	/* If budget not fully consumed, exit the polling mode */
+	if (work_done < work_to_do) {
+		netif_rx_complete(netdev);
+		if (adapter->rx_itr_setting & 1)
+			ixgbe_set_itr(adapter);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
+		return 0;
+	}
+	return 1;
 }
 
 /**
@@ -2484,11 +3160,34 @@ static void ixgbe_reset_task(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+	    test_bit(__IXGBE_RESETTING, &adapter->state))
+		return;
+
 	adapter->tx_timeout_count++;
 
 	ixgbe_reinit_locked(adapter);
 }
 
+#ifdef CONFIG_IXGBE_DCB
+static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+	bool ret = false;
+	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
+
+	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+		return ret;
+
+	f->mask = 0x7 << 3;
+	adapter->num_rx_queues = f->indices;
+	adapter->num_tx_queues = f->indices;
+	ret = true;
+
+	return ret;
+}
+#endif
+
 /**
  * ixgbe_set_rss_queues: Allocate queues for RSS
  * @adapter: board private structure to initialize
@@ -2500,13 +3199,12 @@ static void ixgbe_reset_task(struct net_device *netdev)
 static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 {
 	bool ret = false;
+	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
 
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		adapter->ring_feature[RING_F_RSS].mask = 0xF;
-		adapter->num_rx_queues =
-		                      adapter->ring_feature[RING_F_RSS].indices;
-		adapter->num_tx_queues =
-		                      adapter->ring_feature[RING_F_RSS].indices;
+		f->mask = 0xF;
+		adapter->num_rx_queues = f->indices;
+		adapter->num_tx_queues = f->indices;
 		ret = true;
 	} else {
 		ret = false;
@@ -2515,6 +3213,84 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 	return ret;
 }
 
+/**
+ * ixgbe_set_fdir_queues: Allocate queues for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Flow Director is an advanced Rx filter, attempting to get Rx flows back
+ * to the original CPU that initiated the Tx session.  This runs in addition
+ * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
+ * Rx load across CPUs using RSS.
+ *
+ **/
+static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
+{
+	bool ret = false;
+	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+
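+	/* use at most one flow director ring per online CPU */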
+	f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
+	f_fdir->mask = 0;
+
+	/* Flow Director must have RSS enabled */
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
+	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
+		adapter->num_tx_queues = f_fdir->indices;
+		adapter->num_rx_queues = f_fdir->indices;
+		ret = true;
+	} else {
+		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+	}
+	return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
+ * @adapter: board private structure to initialize
+ *
+ * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
+ * The ring feature mask is not used as a mask for FCoE, as it can take any 8
+ * rx queues out of the max number of rx queues, instead, it is used as the
+ * index of the first rx queue used by FCoE.
+ *
+ **/
+static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+{
+	bool ret = false;
+	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+	f->indices = min((int)num_online_cpus(), f->indices);
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+		adapter->num_rx_queues = 1;
+		adapter->num_tx_queues = 1;
+#ifdef CONFIG_IXGBE_DCB
+		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+			DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n");
+			ixgbe_set_dcb_queues(adapter);
+		}
+#endif
+		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+			DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
+			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+				ixgbe_set_fdir_queues(adapter);
+			else
+				ixgbe_set_rss_queues(adapter);
+		}
+		/* adding FCoE rx rings to the end */
+		f->mask = adapter->num_rx_queues;
+		adapter->num_rx_queues += f->indices;
+		adapter->num_tx_queues += f->indices;
+
+		ret = true;
+	}
+
+	return ret;
+}
+
+#endif /* IXGBE_FCOE */
 /*
  * ixgbe_set_num_queues: Allocate queues for device, feature dependant
  * @adapter: board private structure to initialize
@@ -2528,14 +3304,26 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
-	/* Start with base case */
-	adapter->num_rx_queues = 1;
-	adapter->num_tx_queues = 1;
+#ifdef IXGBE_FCOE
+	if (ixgbe_set_fcoe_queues(adapter))
+		return;
+
+#endif /* IXGBE_FCOE */
+#ifdef CONFIG_IXGBE_DCB
+	if (ixgbe_set_dcb_queues(adapter))
+		return;
+
+#endif
+	if (ixgbe_set_fdir_queues(adapter))
+		return;
 
 	if (ixgbe_set_rss_queues(adapter))
 		return;
-}
 
+	/* fallback to base case */
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+}
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                                        int vectors)
@@ -2575,8 +3363,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 		kfree(adapter->msix_entries);
 		adapter->msix_entries = NULL;
-		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-		ixgbe_set_num_queues(adapter);
 	} else {
 		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
 		/*
@@ -2587,8 +3373,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 		adapter->num_msix_vectors = min(vectors,
 		                   adapter->max_msix_q_vectors + NON_Q_VECTORS);
 	}
-
 }
+
 /**
  * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
  * @adapter: board private structure to initialize
@@ -2610,9 +3396,180 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 	} else {
 		ret = false;
 	}
+
 	return ret;
 }
 
+#ifdef CONFIG_IXGBE_DCB
+/**
+ * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for DCB to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+{
+	int i;
+	bool ret = false;
+	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+			/* the number of queues is assumed to be symmetric */
+			for (i = 0; i < dcb_i; i++) {
+				adapter->rx_ring[i].reg_idx = i << 3;
+				adapter->tx_ring[i].reg_idx = i << 2;
+			}
+			ret = true;
+		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+			if (dcb_i == 8) {
+				/*
+				 * Tx TC0 starts at: descriptor queue 0
+				 * Tx TC1 starts at: descriptor queue 32
+				 * Tx TC2 starts at: descriptor queue 64
+				 * Tx TC3 starts at: descriptor queue 80
+				 * Tx TC4 starts at: descriptor queue 96
+				 * Tx TC5 starts at: descriptor queue 104
+				 * Tx TC6 starts at: descriptor queue 112
+				 * Tx TC7 starts at: descriptor queue 120
+				 *
+				 * Rx TC0-TC7 are offset by 16 queues each
+				 */
+				for (i = 0; i < 3; i++) {
+					adapter->tx_ring[i].reg_idx = i << 5;
+					adapter->rx_ring[i].reg_idx = i << 4;
+				}
+				for ( ; i < 5; i++) {
+					adapter->tx_ring[i].reg_idx =
+					                         ((i + 2) << 4);
+					adapter->rx_ring[i].reg_idx = i << 4;
+				}
+				for ( ; i < dcb_i; i++) {
+					adapter->tx_ring[i].reg_idx =
+					                         ((i + 8) << 3);
+					adapter->rx_ring[i].reg_idx = i << 4;
+				}
+
+				ret = true;
+			} else if (dcb_i == 4) {
+				/*
+				 * Tx TC0 starts at: descriptor queue 0
+				 * Tx TC1 starts at: descriptor queue 64
+				 * Tx TC2 starts at: descriptor queue 96
+				 * Tx TC3 starts at: descriptor queue 112
+				 *
+				 * Rx TC0-TC3 are offset by 32 queues each
+				 */
+				adapter->tx_ring[0].reg_idx = 0;
+				adapter->tx_ring[1].reg_idx = 64;
+				adapter->tx_ring[2].reg_idx = 96;
+				adapter->tx_ring[3].reg_idx = 112;
+				for (i = 0; i < dcb_i; i++)
+					adapter->rx_ring[i].reg_idx = i << 5;
+
+				ret = true;
+			} else {
+				ret = false;
+			}
+		} else {
+			ret = false;
+		}
+	} else {
+		ret = false;
+	}
+
+	return ret;
+}
+#endif
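
For reference, the 8-TC branch above lands exactly on the queue offsets the
comment promises. A standalone sketch of the shift arithmetic (plain
userspace C, not driver code):

    #include <stdio.h>

    int main(void)
    {
            int i, tx, rx;

            for (i = 0; i < 8; i++) {
                    if (i < 3)
                            tx = i << 5;            /* TC0-TC2: 0, 32, 64 */
                    else if (i < 5)
                            tx = (i + 2) << 4;      /* TC3-TC4: 80, 96 */
                    else
                            tx = (i + 8) << 3;      /* TC5-TC7: 104, 112, 120 */
                    rx = i << 4;                    /* Rx TCs sit 16 queues apart */
                    printf("TC%d: Tx reg_idx %3d, Rx reg_idx %3d\n", i, tx, rx);
            }
            return 0;
    }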
+
+/**
+ * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+{
+	int i;
+	bool ret = false;
+
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
+	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = i;
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].reg_idx = i;
+		ret = true;
+	}
+
+	return ret;
+}
+
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
+ *
+ **/
+static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
+{
+	int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
+	bool ret = false;
+	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+#ifdef CONFIG_IXGBE_DCB
+		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+			ixgbe_cache_ring_dcb(adapter);
+			/* find out queues in TC for FCoE */
+			fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
+			fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+			/*
+			 * In 82599, the number of Tx queues for each traffic
+			 * class for both 8-TC and 4-TC modes are:
+			 * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
+			 * 8 TCs:  32  32  16  16   8   8   8   8
+			 * 4 TCs:  64  64  32  32
+			 * We have max 8 queues for FCoE, where 8 is the
+			 * FCoE redirection table size.  If the TC for FCoE
+			 * is less than or equal to TC3, we have enough
+			 * queues to add a max of 8 queues for FCoE, so we
+			 * start the FCoE Tx queues at the next one, i.e.,
+			 * reg_idx + 1.
+			 * If TC for FCoE is above TC3, implying 8 TC mode,
+			 * and we need 8 for FCoE, we have to take all queues
+			 * in that traffic class for FCoE.
+			 */
+			if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
+				fcoe_tx_i--;
+		}
+#endif /* CONFIG_IXGBE_DCB */
+		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+				ixgbe_cache_ring_fdir(adapter);
+			else
+				ixgbe_cache_ring_rss(adapter);
+
+			fcoe_rx_i = f->mask;
+			fcoe_tx_i = f->mask;
+		}
+		for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+			adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
+			adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
+		}
+		ret = true;
+	}
+	return ret;
+}
+
+#endif /* IXGBE_FCOE */
 /**
  * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
@@ -2630,6 +3587,19 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 	adapter->rx_ring[0].reg_idx = 0;
 	adapter->tx_ring[0].reg_idx = 0;
 
+#ifdef IXGBE_FCOE
+	if (ixgbe_cache_ring_fcoe(adapter))
+		return;
+
+#endif /* IXGBE_FCOE */
+#ifdef CONFIG_IXGBE_DCB
+	if (ixgbe_cache_ring_dcb(adapter))
+		return;
+
+#endif
+	if (ixgbe_cache_ring_fdir(adapter))
+		return;
+
 	if (ixgbe_cache_ring_rss(adapter))
 		return;
 }
@@ -2642,7 +3612,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
  * number of queues at compile-time.  The polling_netdev array is
  * intended for Multiqueue, but should work fine with a single queue.
  **/
-static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
+static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 {
 	int i;
 
@@ -2664,7 +3634,6 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		adapter->rx_ring[i].count = adapter->rx_ring_count;
 		adapter->rx_ring[i].queue_index = i;
-		adapter->rx_ring[i].napi.dev = adapter->netdev;
 	}
 
 	ixgbe_cache_ring_register(adapter);
@@ -2712,30 +3681,23 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	 * mean we disable MSI-X capabilities of the adapter. */
 	adapter->msix_entries = kcalloc(v_budget,
 	                                sizeof(struct msix_entry), GFP_KERNEL);
-	if (!adapter->msix_entries) {
-		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-		ixgbe_set_num_queues(adapter);
-		kfree(adapter->tx_ring);
-		kfree(adapter->rx_ring);
-		err = ixgbe_alloc_queues(adapter);
-		if (err) {
-			DPRINTK(PROBE, ERR, "Unable to allocate memory "
-			        "for queues\n");
-			goto out;
-		}
-
-		goto try_msi;
-	}
+	if (adapter->msix_entries) {
+		for (vector = 0; vector < v_budget; vector++)
+			adapter->msix_entries[vector].entry = vector;
 
-	for (vector = 0; vector < v_budget; vector++)
-		adapter->msix_entries[vector].entry = vector;
+		ixgbe_acquire_msix_vectors(adapter, v_budget);
 
-	ixgbe_acquire_msix_vectors(adapter, v_budget);
+		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+			goto out;
+	}
 
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-		goto out;
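+	/*
+	 * MSI-X is unavailable: fall back to a single vector and strip
+	 * the features that need one queue (and vector) per CPU.
+	 */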
+	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+	adapter->atr_sample_rate = 0;
+	ixgbe_set_num_queues(adapter);
 
-try_msi:
 	err = pci_enable_msi(adapter->pdev);
 	if (!err) {
 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
@@ -2745,13 +3707,97 @@ try_msi:
 		/* reset err */
 		err = 0;
 	}
-
 out:
-
 	return err;
 }
 
-void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+/**
+ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+{
+	int q_idx, num_q_vectors;
+	struct ixgbe_q_vector *q_vector;
+	int napi_vectors;
+	int (*poll)(struct net_device *, int *);
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+		napi_vectors = adapter->num_rx_queues;
+		poll = &ixgbe_clean_rxtx_many;
+	} else {
+		num_q_vectors = 1;
+		napi_vectors = 1;
+		poll = &ixgbe_poll;
+	}
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+		if (!q_vector)
+			goto err_out;
+		q_vector->adapter = adapter;
+		if (q_vector->txr_count && !q_vector->rxr_count)
+			q_vector->eitr = adapter->tx_eitr_param;
+		else
+			q_vector->eitr = adapter->rx_eitr_param;
+		q_vector->v_idx = q_idx;
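+		/* old-style NAPI hangs ->poll off a struct net_device, so
+		 * give each vector its own dummy netdev to host it */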
+		q_vector->dummy_netdev = alloc_netdev(0, "", ether_setup);
+		if (!q_vector->dummy_netdev) {
+			/* this vector is not in adapter->q_vector[] yet,
+			 * so err_out would miss it; free it here */
+			kfree(q_vector);
+			goto err_out;
+		}
+		q_vector->dummy_netdev->priv = q_vector;
+		q_vector->dummy_netdev->poll = poll;
+		q_vector->dummy_netdev->weight = 64;
+		set_bit(__LINK_STATE_START, &q_vector->dummy_netdev->state);
+		adapter->q_vector[q_idx] = q_vector;
+	}
+
+	return 0;
+
+err_out:
+	while (q_idx) {
+		q_idx--;
+		q_vector = adapter->q_vector[q_idx];
+		free_netdev(q_vector->dummy_netdev);
+		q_vector->dummy_netdev = NULL;
+		kfree(q_vector);
+		adapter->q_vector[q_idx] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/**
+ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  It also
+ * releases each vector's dummy netdev (our stand-in for the NAPI context)
+ * before freeing the q_vector itself.
+ **/
+static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
+{
+	int q_idx, num_q_vectors;
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	else
+		num_q_vectors = 1;
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
+		adapter->q_vector[q_idx] = NULL;
+		free_netdev(q_vector->dummy_netdev);
+		q_vector->dummy_netdev = NULL;
+		kfree(q_vector);
+	}
+}
+
+static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
 {
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
@@ -2775,25 +3821,32 @@ void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
  * - Hardware queue count (num_*_queues)
  *   - defined by miscellaneous hardware support/features (RSS, etc.)
  **/
-int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 {
 	int err;
 
 	/* Number of supported queues */
 	ixgbe_set_num_queues(adapter);
 
-	err = ixgbe_alloc_queues(adapter);
-	if (err) {
-		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
-		goto err_alloc_queues;
-	}
-
 	err = ixgbe_set_interrupt_capability(adapter);
 	if (err) {
 		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
 		goto err_set_interrupt;
 	}
 
+	err = ixgbe_alloc_q_vectors(adapter);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
+		        "vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	err = ixgbe_alloc_queues(adapter);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
 	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
 	        "Tx Queue count = %u\n",
 	        (adapter->num_rx_queues > 1) ? "Enabled" :
@@ -2803,11 +3856,30 @@ int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 
 	return 0;
 
+err_alloc_queues:
+	ixgbe_free_q_vectors(adapter);
+err_alloc_q_vectors:
+	ixgbe_reset_interrupt_capability(adapter);
 err_set_interrupt:
+	return err;
+}
+
+/**
+ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
-err_alloc_queues:
-	return err;
+	adapter->tx_ring = NULL;
+	adapter->rx_ring = NULL;
+
+	ixgbe_free_q_vectors(adapter);
+	ixgbe_reset_interrupt_capability(adapter);
 }
 
 /**
@@ -2836,14 +3908,15 @@ static void ixgbe_sfp_task(struct ixgbe_adapter *adapter)
 	if ((hw->phy.type == ixgbe_phy_nl) &&
 	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
 		s32 ret = hw->phy.ops.identify_sfp(hw);
-		if (ret)
+		if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
 			goto reschedule;
 		ret = hw->phy.ops.reset(hw);
 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-			DPRINTK(PROBE, ERR, "failed to initialize because an "
-			        "unsupported SFP+ module type was detected.\n"
-			        "Reload the driver after installing a "
-			        "supported module.\n");
+			dev_err(&adapter->pdev->dev, "failed to initialize "
+				"because an unsupported SFP+ module type "
+				"was detected.\n"
+				"Reload the driver after installing a "
+				"supported module.\n");
 			unregister_netdev(adapter->netdev);
 		} else {
 			DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -2872,12 +3945,15 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned int rss;
+#ifdef CONFIG_IXGBE_DCB
+	int j;
+	struct tc_configuration *tc;
+#endif
 
 	/* PCI config space info */
 
 	hw->vendor_id = pdev->vendor;
 	hw->device_id = pdev->device;
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
 	hw->revision_id = pdev->revision;
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_device_id = pdev->subsystem_device;
@@ -2886,30 +3962,74 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
 	adapter->ring_feature[RING_F_RSS].indices = rss;
 	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
-
-	if (hw->mac.type == ixgbe_mac_82598EB)
+	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		if (hw->device_id == IXGBE_DEV_ID_82598AT)
+			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
 		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
-	else if (hw->mac.type == ixgbe_mac_82599EB)
+	} else if (hw->mac.type == ixgbe_mac_82599EB) {
 		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->ring_feature[RING_F_FDIR].indices =
+		                                         IXGBE_MAX_FDIR_INDICES;
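+		/* sample one packet in 20 when adding ATR flow filters */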
+		adapter->atr_sample_rate = 20;
+		adapter->fdir_pballoc = 0;
+#ifdef IXGBE_FCOE
+		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+		adapter->ring_feature[RING_F_FCOE].indices = 0;
+#endif /* IXGBE_FCOE */
+	}
+
+#ifdef CONFIG_IXGBE_DCB
+	/* Configure DCB traffic classes */
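+	/* 12% or 13% per TC; the eight classes sum to exactly 100 */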
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &adapter->dcb_cfg.tc_config[j];
+		tc->path[DCB_TX_CONFIG].bwg_id = 0;
+		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+		tc->path[DCB_RX_CONFIG].bwg_id = 0;
+		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+		tc->dcb_pfc = pfc_disabled;
+	}
+	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
+	adapter->dcb_cfg.pfc_mode_enable = false;
+	adapter->dcb_cfg.round_robin_enable = false;
+	adapter->dcb_set_bitmap = 0x00;
+	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
+	                   adapter->ring_feature[RING_F_DCB].indices);
+
+#endif
 
 	/* default flow control settings */
 	hw->fc.requested_mode = ixgbe_fc_full;
+	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
+#ifdef CONFIG_DCB
+	adapter->last_lfc_mode = hw->fc.current_mode;
+#endif
 	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
 	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
-
-	/* set default ring sizes */
-	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
-	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+	hw->fc.send_xon = true;
+	hw->fc.disable_fc_autoneg = false;
 
 	/* enable itr by default in dynamic mode */
-	adapter->itr_setting = 1;
-	adapter->eitr_param = 20000;
+	adapter->rx_itr_setting = 1;
+	adapter->rx_eitr_param = 20000;
+	adapter->tx_itr_setting = 1;
+	adapter->tx_eitr_param = 10000;
 
 	/* set defaults for eitr in MegaBytes */
 	adapter->eitr_low = 10;
 	adapter->eitr_high = 20;
 
+	/* set default ring sizes */
+	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
+	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+
 	/* initialize eeprom parameters */
 	if (ixgbe_init_eeprom_params_generic(hw)) {
 		dev_err(&pdev->dev, "EEPROM initialization failed\n");
@@ -2966,6 +4086,34 @@ err:
 }
 
 /**
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings are populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (!err)
+			continue;
+		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
+		break;
+	}
+
+	return err;
+}
+
+/**
+ * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
@@ -3007,6 +4155,32 @@ alloc_failed:
 }
 
 /**
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings are populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (!err)
+			continue;
+		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
+		break;
+	}
+
+	return err;
+}
+
+/**
  * ixgbe_free_tx_resources - Free Tx Resources per Queue
  * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
@@ -3014,7 +4188,7 @@ alloc_failed:
  * Free all transmit software resources
  **/
 void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-                                    struct ixgbe_ring *tx_ring)
+                             struct ixgbe_ring *tx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 
@@ -3081,57 +4255,6 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
-{
-	int i, err = 0;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
-		if (!err)
-			continue;
-		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
-		break;
-	}
-
-	return err;
-}
-
-/**
- * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
- * @adapter: board private structure
- *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not).  It is the
- * callers duty to clean those orphaned rings.
- *
- * Return 0 on success, negative on failure
- **/
-
-static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
-{
-	int i, err = 0;
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
-		if (!err)
-			continue;
-		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
-		break;
-	}
-
-	return err;
-}
-
-/**
  * ixgbe_change_mtu - Change the Maximum Transfer Unit
  * @netdev: network interface device structure
  * @new_mtu: new value for maximum frame size
@@ -3179,6 +4302,8 @@ int ixgbe_open(struct net_device *netdev)
 	if (test_bit(__IXGBE_TESTING, &adapter->state))
 		return -EBUSY;
 
+	netif_carrier_off(netdev);
+
 	/* allocate transmit descriptors */
 	err = ixgbe_setup_all_tx_resources(adapter);
 	if (err)
@@ -3191,8 +4316,6 @@ int ixgbe_open(struct net_device *netdev)
 
 	ixgbe_configure(adapter);
 
-	ixgbe_napi_add_all(adapter);
-
 	err = ixgbe_request_irq(adapter);
 	if (err)
 		goto err_req_irq;
@@ -3209,10 +4332,10 @@ err_up:
 	ixgbe_release_hw_control(adapter);
 	ixgbe_free_irq(adapter);
 err_req_irq:
-	ixgbe_free_all_rx_resources(adapter);
 err_setup_rx:
-	ixgbe_free_all_tx_resources(adapter);
+	ixgbe_free_all_rx_resources(adapter);
 err_setup_tx:
+	ixgbe_free_all_tx_resources(adapter);
 	ixgbe_reset(adapter);
 
 	return err;
@@ -3244,63 +4367,6 @@ int ixgbe_close(struct net_device *netdev)
 	return 0;
 }
 
-/**
- * ixgbe_napi_add_all - prep napi structs for use
- * @adapter: private struct
- *
- * helper function to napi_add each possible q_vector->napi
- */
-int ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
-{
-	int q_idx, q_vectors;
-	int (*poll)(struct net_device *, int *);
-
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		poll = &ixgbe_clean_rxonly;
-		/* Only enable as many vectors as we have rx queues. */
-		q_vectors = adapter->num_rx_queues;
-	} else {
-		poll = &ixgbe_poll;
-		/* only one q_vector for legacy modes */
-		q_vectors = 1;
-	}
-
-	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
-
-		if (!q_vector->dummy_netdev) {
-			q_vector->dummy_netdev = alloc_netdev(0, "", ether_setup);
-			if (!q_vector->dummy_netdev)
-				return -ENOMEM;
-		}
-		q_vector->dummy_netdev->priv = q_vector;
-		q_vector->dummy_netdev->poll = poll;
-		q_vector->dummy_netdev->weight = 64;
-                set_bit(__LINK_STATE_START, &q_vector->dummy_netdev->state);
-	}
-	return 0;
-}
-
-void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
-{
-	int q_idx;
-	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-	/* legacy and MSI only use one vector */
-	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-		q_vectors = 1;
-
-	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
-
-		if (!q_vector->rxr_count)
-			continue;
-
-		free_netdev(q_vector->dummy_netdev);
-		q_vector->dummy_netdev = NULL;
-	}
-}
-
 #ifdef CONFIG_PM
 static int ixgbe_resume(struct pci_dev *pdev)
 {
@@ -3310,7 +4376,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
-	err = pci_enable_device(pdev);
+
+	err = pci_enable_device_mem(pdev);
 	if (err) {
 		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
 				"suspend\n");
@@ -3330,6 +4397,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 	ixgbe_reset(adapter);
 
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
 	if (netif_running(netdev)) {
 		err = ixgbe_open(adapter->netdev);
 		if (err)
@@ -3342,7 +4411,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
 }
 #endif
 
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+
+static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -3361,10 +4431,7 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
 		ixgbe_free_all_tx_resources(adapter);
 		ixgbe_free_all_rx_resources(adapter);
 	}
-	ixgbe_reset_interrupt_capability(adapter);
-	ixgbe_napi_del_all(adapter);
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
+	ixgbe_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
@@ -3400,18 +4467,49 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
 		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
+	*enable_wake = !!wufc;
+
 	ixgbe_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
 
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int retval;
+	bool wake;
+
+	retval = __ixgbe_shutdown(pdev, &wake);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	} else {
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
 
 	return 0;
 }
+#endif /* CONFIG_PM */
 
 static void ixgbe_shutdown(struct pci_dev *pdev)
 {
-	ixgbe_suspend(pdev, PMSG_SUSPEND);
+	bool wake;
+
+	__ixgbe_shutdown(pdev, &wake);
+
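+	/* leave wake-up armed only when the system is really powering off */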
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_enable_wake(pdev, PCI_D3cold, wake);
+		pci_enable_wake(pdev, PCI_D3hot, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
 }
 
 /**
@@ -3422,29 +4520,75 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u64 total_mpc = 0;
-	u32 i, missed_rx = 0, bprc, lxon, lxoff, xon_off_tot;
+	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 
 	if (hw->mac.type == ixgbe_mac_82599EB) {
+		u64 rsc_count = 0;
 		for (i = 0; i < 16; i++)
 			adapter->hw_rx_no_dma_resources +=
 			                     IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			rsc_count += adapter->rx_ring[i].rsc_count;
+		adapter->rsc_count = rsc_count;
 	}
 
 	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+	for (i = 0; i < 8; i++) {
+		/* for packet buffers not used, the register should read 0 */
+		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+		missed_rx += mpc;
+		adapter->stats.mpc[i] += mpc;
+		total_mpc += adapter->stats.mpc[i];
+		if (hw->mac.type == ixgbe_mac_82598EB)
+			adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
+			                                    IXGBE_PXONRXCNT(i));
+			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
+			                                   IXGBE_PXOFFRXCNT(i));
+			adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+		} else {
+			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
+			                                      IXGBE_PXONRXC(i));
+			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
+			                                     IXGBE_PXOFFRXC(i));
+		}
+		adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
+		                                            IXGBE_PXONTXC(i));
+		adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
+		                                             IXGBE_PXOFFTXC(i));
+	}
 	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
 	/* work around hardware counting issue */
 	adapter->stats.gprc -= missed_rx;
 
 	/* 82598 hardware only has a 32 bit counter in the high register */
 	if (hw->mac.type == ixgbe_mac_82599EB) {
+		u64 tmp;
 		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
+		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
+		adapter->stats.gorc += (tmp << 32);
 		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
+		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
+		adapter->stats.gotc += (tmp << 32);
 		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
 		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
 		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
 		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+		adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+		adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+#ifdef IXGBE_FCOE
+		adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+		adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+		adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+		adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+		adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+		adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+#endif /* IXGBE_FCOE */
 	} else {
 		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
 		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
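
The GORC/GOTC read-out above stitches a 36-bit byte counter together from a
32-bit low register and the 4 counter bits of the high register. The
arithmetic in isolation (hypothetical register values, not driver code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t gorcl = 0xDEADBEEF;  /* low 32 bits, clear on read */
            uint32_t gorch = 0x0000000A;  /* only bits 3:0 carry count */
            uint64_t gorc;

            gorc  = gorcl;
            gorc += (uint64_t)(gorch & 0xF) << 32;  /* 36-bit total */
            printf("gorc = 0x%llx\n", (unsigned long long)gorc);
            return 0;
    }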
@@ -3506,6 +4650,54 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 }
 
 /**
+ * ixgbe_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbe_watchdog(unsigned long data)
+{
+	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 eics = 0;
+	int i;
+
+	/*
+	 * Do the watchdog outside of interrupt context due to the lovely
+	 * delays that some of the newer hardware requires
+	 */
+
+	if (test_bit(__IXGBE_DOWN, &adapter->state))
+		goto watchdog_short_circuit;
+
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+		/*
+		 * for legacy and MSI interrupts don't set any bits
+		 * that are enabled for EIAM, because this operation
+		 * would set *both* EIMS and EICS for any bit in EIAM
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_EICS,
+			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
+		goto watchdog_reschedule;
+	}
+
+	/* get one bit for every active tx/rx interrupt vector */
+	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+		struct ixgbe_q_vector *qv = adapter->q_vector[i];
+		if (qv->rxr_count || qv->txr_count)
+			eics |= ((u64)1 << i);
+	}
+
+	/* Cause software interrupt to ensure rx rings are cleaned */
+	ixgbe_irq_rearm_queues(adapter, eics);
+
+watchdog_reschedule:
+	/* Reset the timer */
+	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+
+watchdog_short_circuit:
+	schedule_work(&adapter->watchdog_task);
+}
+
+/**
  * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
  * @work: pointer to work_struct containing our data
  **/
@@ -3513,13 +4705,14 @@ static void ixgbe_multispeed_fiber_task(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 autoneg;
+	bool negotiation;
 
 	adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
-	if (hw->mac.ops.get_link_capabilities)
-		hw->mac.ops.get_link_capabilities(hw, &autoneg,
-		                                  &hw->mac.autoneg);
-	if (hw->mac.ops.setup_link_speed)
-		hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
+	autoneg = hw->phy.autoneg_advertised;
+	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+	if (hw->mac.ops.setup_link)
+		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 	adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
 }
@@ -3534,87 +4727,46 @@ static void ixgbe_sfp_config_module_task(struct ixgbe_adapter *adapter)
 	u32 err;
 
 	adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
+
+	/* Time for electrical oscillations to settle down */
+	msleep(100);
 	err = hw->phy.ops.identify_sfp(hw);
+
 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-		DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
-		ixgbe_down(adapter);
+		dev_err(&adapter->pdev->dev, "failed to initialize because "
+			"an unsupported SFP+ module type was detected.\n"
+			"Reload the driver after installing a supported "
+			"module.\n");
+		unregister_netdev(adapter->netdev);
 		return;
 	}
 	hw->mac.ops.setup_sfp(hw);
 
-	if (!adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)
+	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 		/* This will also work for DA Twinax connections */
 		schedule_work(&adapter->multispeed_fiber_task);
 	adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
 }
 
 /**
- * ixgbe_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
+ * @adapter: board private structure
  **/
-static void ixgbe_watchdog(unsigned long data)
+static void ixgbe_fdir_reinit_task(struct ixgbe_adapter *adapter)
 {
-	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
 	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
 
-	/* Do the watchdog outside of interrupt context due to the lovely
-	 * delays that some of the newer hardware requires */
-	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-		u64 eics = 0;
-		int i;
-
-		for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++)
-			eics |= (1 << i);
-
-		/* Cause software interrupt to ensure rx rings are cleaned */
-		switch (hw->mac.type) {
-		case ixgbe_mac_82598EB:
-			if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-				IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)eics);
-			} else {
-				/*
-				 * for legacy and MSI interrupts don't set any
-				 * bits that are enabled for EIAM, because this
-				 * operation would set *both* EIMS and EICS for
-				 * any bit in EIAM
-				 */
-				IXGBE_WRITE_REG(hw, IXGBE_EICS,
-				     (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-			}
-			break;
-		case ixgbe_mac_82599EB:
-			if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-				/*
-				 * EICS(0..15) first 0-15 q vectors
-				 * EICS[1] (16..31) q vectors 16-31
-				 * EICS[2] (0..31) q vectors 32-63
-				 */
-				IXGBE_WRITE_REG(hw, IXGBE_EICS,
-				                (u32)(eics & 0xFFFF));
-				IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
-				                (u32)(eics & 0xFFFF0000));
-				IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(2),
-				                (u32)(eics >> 32));
-			} else {
-				/*
-				 * for legacy and MSI interrupts don't set any
-				 * bits that are enabled for EIAM, because this
-				 * operation would set *both* EIMS and EICS for
-				 * any bit in EIAM
-				 */
-				IXGBE_WRITE_REG(hw, IXGBE_EICS,
-				     (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
-			}
-			break;
-		default:
-			break;
-		}
-		/* Reset the timer */
-		mod_timer(&adapter->watchdog_timer,
-		          round_jiffies(jiffies + 2 * HZ));
+	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
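+		/* table rebuilt: let every Tx ring resume adding ATR filters */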
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			set_bit(__IXGBE_FDIR_INIT_DONE,
+			        &(adapter->tx_ring[i].reinit_state));
+	} else {
+		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
+		        "ignoring FDIR ATR filters\n");
 	}
-
-	schedule_work(&adapter->watchdog_task);
+	/* Done FDIR Re-initialization, enable transmits */
+	netif_start_queue(adapter->netdev);
 }
 
 /**
@@ -3627,16 +4779,32 @@ static void ixgbe_watchdog_task(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 link_speed = adapter->link_speed;
 	bool link_up = adapter->link_up;
+	int i;
+	struct ixgbe_ring *tx_ring;
+	int some_tx_pending = 0;
 
 	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
 
 	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
 		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+		if (link_up) {
+#ifdef CONFIG_DCB
+			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+				for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+					hw->mac.ops.fc_enable(hw, i);
+			} else {
+				hw->mac.ops.fc_enable(hw, 0);
+			}
+#else
+			hw->mac.ops.fc_enable(hw, 0);
+#endif
+		}
+
 		if (link_up ||
 		    time_after(jiffies, (adapter->link_check_timeout +
 		                         IXGBE_TRY_LINK_TIMEOUT))) {
-			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
 			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
 		}
 		adapter->link_up = link_up;
 		adapter->link_speed = link_speed;
@@ -3649,13 +4817,13 @@ static void ixgbe_watchdog_task(struct ixgbe_adapter *adapter)
 			if (hw->mac.type == ixgbe_mac_82599EB) {
 				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
 				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-				flow_rx = (mflcn & IXGBE_MFLCN_RFCE);
-				flow_tx = (fccfg & IXGBE_FCCFG_TFCE_802_3X);
+				flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+				flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
 			} else {
 				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
-				flow_rx = (frctl & IXGBE_FCTRL_RFCE);
-				flow_tx = (rmcs & IXGBE_RMCS_TFCE_802_3X);
+				flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
+				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
 			}
 
 			printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
@@ -3670,6 +4838,7 @@ static void ixgbe_watchdog_task(struct ixgbe_adapter *adapter)
 			        (flow_tx ? "TX" : "None"))));
 
 			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
 		} else {
 			/* Force detection of hung controller */
 			adapter->detect_tx_hung = true;
@@ -3681,6 +4850,26 @@ static void ixgbe_watchdog_task(struct ixgbe_adapter *adapter)
 			printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
 			       netdev->name);
 			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+	}
+
+	if (!netif_carrier_ok(netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			tx_ring = &adapter->tx_ring[i];
+			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
+				some_tx_pending = 1;
+				break;
+			}
+		}
+
+		if (some_tx_pending) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context).
+			 */
+			schedule_work(&adapter->reset_task);
 		}
 	}
 
@@ -3698,7 +4887,6 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
 	u32 mss_l4len_idx, l4len;
-	*hdr_len = 0;
 
 	if (skb_is_gso(skb)) {
 		if (skb_header_cloned(skb)) {
@@ -3808,13 +4996,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			switch (skb->protocol) {
-			case __constant_htons(ETH_P_IP):
+			case cpu_to_be16(ETH_P_IP):
 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 					type_tucmd_mlhl |=
 					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
 				break;
-			case __constant_htons(ETH_P_IPV6):
+			case cpu_to_be16(ETH_P_IPV6):
 				/* XXX what about other V6 headers?? */
 				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 					type_tucmd_mlhl |=
@@ -3851,18 +5039,23 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         struct ixgbe_ring *tx_ring,
-                        struct sk_buff *skb, unsigned int first)
+                        struct sk_buff *skb, u32 tx_flags,
+                        unsigned int first)
 {
 	struct ixgbe_tx_buffer *tx_buffer_info;
-	unsigned int len = skb->len;
+	unsigned int len;
+	unsigned int total = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
 
-	len -= skb->data_len;
-
 	i = tx_ring->next_to_use;
 
+	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+		/* excluding fcoe_crc_eof for FCoE */
+		total -= sizeof(struct fcoe_crc_eof);
+
+	len = min(skb_headlen(skb), total);
 	while (len) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
@@ -3875,6 +5068,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 		tx_buffer_info->next_to_watch = i;
 
 		len -= size;
+		total -= size;
 		offset += size;
 		count++;
 		i++;
@@ -3886,7 +5080,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 		struct skb_frag_struct *frag;
 
 		frag = &skb_shinfo(skb)->frags[f];
-		len = frag->size;
+		len = min((unsigned int)frag->size, total);
 		offset = frag->page_offset;
 
 		while (len) {
@@ -3903,6 +5097,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			tx_buffer_info->next_to_watch = i;
 
 			len -= size;
+			total -= size;
 			offset += size;
 			count++;
 			i++;
@@ -3953,6 +5148,13 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
 		                 IXGBE_ADVTXD_POPTS_SHIFT;
 
+	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+		olinfo_status |= IXGBE_ADVTXD_CC;
+		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+		if (tx_flags & IXGBE_TX_FLAGS_FSO)
+			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+	}
+
 	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
 	i = tx_ring->next_to_use;
@@ -3982,6 +5184,51 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 	writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
+static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+	              int queue, u32 tx_flags)
+{
+	/* Right now, we support IPv4 only */
+	struct ixgbe_atr_input atr_input;
+	struct tcphdr *th;
+	struct iphdr *iph = ip_hdr(skb);
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	u16 vlan_id, src_port, dst_port, flex_bytes;
+	u32 src_ipv4_addr, dst_ipv4_addr;
+	u8 l4type = 0;
+
+	/* check if we're UDP or TCP */
+	if (iph->protocol == IPPROTO_TCP) {
+		th = tcp_hdr(skb);
+		src_port = th->source;
+		dst_port = th->dest;
+		l4type |= IXGBE_ATR_L4TYPE_TCP;
+		/* l4type IPv4 type is 0, no need to assign */
+	} else {
+		/* Unsupported L4 header, just bail here */
+		return;
+	}
+
+	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+
+	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
+	           IXGBE_TX_FLAGS_VLAN_SHIFT;
+	src_ipv4_addr = iph->saddr;
+	dst_ipv4_addr = iph->daddr;
+	flex_bytes = eth->h_proto;
+
+	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
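+	/* a sampled Tx packet a:p1 -> b:p2 matches Rx traffic b:p2 -> a:p1,
+	 * so the filter is programmed with the tuple swapped */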
+	ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
+	ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
+	ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
+	ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+	/* src and dst are inverted, think how the receiver sees them */
+	ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
+	ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+
+	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
+	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+}
+
 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                  struct ixgbe_ring *tx_ring, int size)
 {
@@ -4018,23 +5265,42 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct ixgbe_ring *tx_ring;
 	unsigned int first;
 	unsigned int tx_flags = 0;
-	u8 hdr_len;
-	int tso;
+	u8 hdr_len = 0;
+	int r_idx = 0, tso;
 	int count = 0;
 	unsigned int f;
 
-
-	tx_ring = adapter->tx_ring;
-
 	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
 		tx_flags |= vlan_tx_tag_get(skb);
+		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
+		}
 		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
+	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		if (skb->priority != TC_PRIO_CONTROL) {
+			tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+			tx_flags |= IXGBE_TX_FLAGS_VLAN;
+		}
+	}
+
+	tx_ring = &adapter->tx_ring[r_idx];
+
+	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+	    (skb->protocol == htons(ETH_P_FCOE))) {
+		tx_flags |= IXGBE_TX_FLAGS_FCOE;
+#ifdef IXGBE_FCOE
+		r_idx = smp_processor_id();
+		r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+		r_idx += adapter->ring_feature[RING_F_FCOE].mask;
+		tx_ring = &adapter->tx_ring[r_idx];
+#endif
 	}
-	/* three things can cause us to need a context descriptor */
+	/* four things can cause us to need a context descriptor */
 	if (skb_is_gso(skb) ||
 	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
+	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
+	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
 		count++;
 
 	count += TXD_USE_COUNT(skb_headlen(skb));
@@ -4046,28 +5312,57 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}
 
-	if (skb->protocol == htons(ETH_P_IP))
-		tx_flags |= IXGBE_TX_FLAGS_IPV4;
 	first = tx_ring->next_to_use;
-	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
-	if (tso < 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
-	if (tso)
-		tx_flags |= IXGBE_TX_FLAGS_TSO;
-	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
-	         (skb->ip_summed == CHECKSUM_PARTIAL))
-		tx_flags |= IXGBE_TX_FLAGS_CSUM;
-
-	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
-	               ixgbe_tx_map(adapter, tx_ring, skb, first),
-	               skb->len, hdr_len);
+	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+#ifdef IXGBE_FCOE
+		/* setup tx offload for FCoE */
+		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+		if (tso < 0) {
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+		if (tso)
+			tx_flags |= IXGBE_TX_FLAGS_FSO;
+#endif /* IXGBE_FCOE */
+	} else {
+		if (skb->protocol == htons(ETH_P_IP))
+			tx_flags |= IXGBE_TX_FLAGS_IPV4;
+		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+		if (tso < 0) {
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
 
-	netdev->trans_start = jiffies;
+		if (tso)
+			tx_flags |= IXGBE_TX_FLAGS_TSO;
+		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+			 (skb->ip_summed == CHECKSUM_PARTIAL))
+			tx_flags |= IXGBE_TX_FLAGS_CSUM;
+	}
+
+	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+	if (count) {
+		/* add the ATR filter if ATR is on */
+		if (tx_ring->atr_sample_rate) {
+			++tx_ring->atr_count;
+			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
+			    test_bit(__IXGBE_FDIR_INIT_DONE,
+				      &tx_ring->reinit_state)) {
+				ixgbe_atr(adapter, skb, tx_ring->queue_index,
+				          tx_flags);
+				tx_ring->atr_count = 0;
+			}
+		}
+		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
+			       hdr_len);
+		netdev->trans_start = jiffies;
+		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
-	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+	} else {
+		dev_kfree_skb_any(skb);
+		tx_ring->tx_buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+	}
 
 	return NETDEV_TX_OK;
 }
@@ -4120,14 +5415,21 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
 static void ixgbe_netpoll(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
 
-	disable_irq(adapter->pdev->irq);
 	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
-	ixgbe_intr(adapter->pdev->irq, netdev, NULL);
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+		for (i = 0; i < num_q_vectors; i++) {
+			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
+			ixgbe_msix_clean_many(0, q_vector, NULL);
+		}
+	} else {
+		ixgbe_intr(adapter->pdev->irq, netdev, NULL);
+	}
 	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
-	enable_irq(adapter->pdev->irq);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
 
 /**
@@ -4150,20 +5452,22 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
 	static int cards_found;
 	int i, err, pci_using_dac;
-	u16 pm_value = 0;
+#ifdef IXGBE_FCOE
+	u16 device_caps;
+#endif
 	u32 part_num, eec;
 
-	err = pci_enable_device(pdev);
+	err = pci_enable_device_mem(pdev);
 	if (err)
 		return err;
 
-	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 			if (err) {
 				dev_err(&pdev->dev, "No usable DMA "
 				        "configuration, aborting\n");
@@ -4173,11 +5477,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		pci_using_dac = 0;
 	}
 
-	err = pci_request_regions(pdev, ixgbe_driver_name);
-	if (err) {
-		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
-		goto err_pci_reg;
-	}
+	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+	                                   IORESOURCE_MEM), ixgbe_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	pci_enable_pcie_error_reporting(pdev);
 
 	pci_set_master(pdev);
 	pci_save_state(pdev);
@@ -4266,29 +5569,44 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	INIT_WORK(&adapter->sfp_config_module_task,
 	          (void (*)(void *))ixgbe_sfp_config_module_task, adapter);
 
-	err = ii->get_invariants(hw);
-	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
-		/* start a kernel thread to watch for a module to arrive */
-		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
-		mod_timer(&adapter->sfp_timer,
-		          round_jiffies(jiffies + (2 * HZ)));
-		err = 0;
-	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-		DPRINTK(PROBE, ERR, "failed to load because an "
-		        "unsupported SFP+ module type was detected.\n");
-		goto err_hw_init;
-	} else if (err) {
-		goto err_hw_init;
-	}
+	ii->get_invariants(hw);
 
 	/* setup the private structure */
 	err = ixgbe_sw_init(adapter);
 	if (err)
 		goto err_sw_init;
 
+	/*
+	 * If there is a fan on this device and it has failed log the
+	 * failure.
+	 */
+	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		if (esdp & IXGBE_ESDP_SDP1)
+			DPRINTK(PROBE, CRIT,
+				"Fan has stopped, replace the adapter\n");
+	}
+
 	/* reset_hw fills in the perm_addr as well */
 	err = hw->mac.ops.reset_hw(hw);
-	if (err) {
+	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
+	    hw->mac.type == ixgbe_mac_82598EB) {
+		/*
+		 * Start a kernel thread to watch for a module to arrive.
+		 * Only do this for 82598, since 82599 will generate
+		 * interrupts on module arrival.
+		 */
+		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+		mod_timer(&adapter->sfp_timer,
+			  round_jiffies(jiffies + (2 * HZ)));
+		err = 0;
+	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+		dev_err(&adapter->pdev->dev, "failed to initialize because "
+			"an unsupported SFP+ module type was detected.\n"
+			"Reload the driver after installing a supported "
+			"module.\n");
+		goto err_sw_init;
+	} else if (err) {
 		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
 		goto err_sw_init;
 	}
@@ -4303,6 +5621,22 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	netdev->features |= NETIF_F_TSO6;
 	netdev->features |= NETIF_F_GRO;
 
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+
+#ifdef CONFIG_IXGBE_DCB
+	netdev->dcbnl_ops = &dcbnl_ops;
+#endif
+
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+		if (hw->mac.ops.get_device_caps) {
+			hw->mac.ops.get_device_caps(hw, &device_caps);
+			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
+				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+		}
+	}
+#endif /* IXGBE_FCOE */
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
@@ -4326,10 +5660,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	adapter->watchdog_timer.function = &ixgbe_watchdog;
 	adapter->watchdog_timer.data = (unsigned long)adapter;
 
-	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
-	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
-	hw->fc.send_xon = true;
-
 	INIT_WORK(&adapter->reset_task,
 		(void (*)(void *)) ixgbe_reset_task, netdev);
 	INIT_WORK(&adapter->watchdog_task,
@@ -4341,19 +5671,21 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	switch (pdev->device) {
 	case IXGBE_DEV_ID_82599_KX4:
-#define IXGBE_PCIE_PMCSR 0x44
-		adapter->wol = IXGBE_WUFC_MAG;
-		pci_read_config_word(pdev, IXGBE_PCIE_PMCSR, &pm_value);
-		pci_write_config_word(pdev, IXGBE_PCIE_PMCSR,
-		                      (pm_value | (1 << 8)));
+		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+		                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+		/* wake via ACPI/PCI PME: clear the legacy APM enable bit */
+		IXGBE_WRITE_REG(hw, IXGBE_GRC,
+		             (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
 		break;
 	default:
 		adapter->wol = 0;
 		break;
 	}
-	device_init_wakeup(&adapter->pdev->dev, true);
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+	/* pick up the PCI bus settings for reporting later */
+	hw->mac.ops.get_bus_info(hw);
+
 	/* print bus type/speed/width info */
 	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
 	         "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -4388,31 +5720,44 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	/* reset the hardware with the new settings */
 	err = hw->mac.ops.start_hw(hw);
-	if (err)
-		goto err_sw_init;
-
-	netif_carrier_off(netdev);
-
-	err = ixgbe_napi_add_all(adapter);
-	if (err)
-		goto err_napi_add;
 
+	if (err == IXGBE_ERR_EEPROM_VERSION) {
+		/* We are running on a pre-production device, log a warning */
+		dev_warn(&pdev->dev, "This device is a pre-production "
+		         "adapter/LOM.  Please be aware there may be issues "
+		         "associated with your hardware.  If you are "
+		         "experiencing problems please contact your Intel or "
+		         "hardware representative who provided you with this "
+		         "hardware.\n");
+	}
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;
 
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		INIT_WORK(&adapter->fdir_reinit_task,
+			  (void (*)(void *))ixgbe_fdir_reinit_task,
+			  adapter);
+
+#ifdef CONFIG_IXGBE_DCA
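+	/* with DCA the chipset can place DMA'd descriptors and data
+	 * directly into the requesting CPU's cache */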
+	if (dca_add_requester(&pdev->dev) == 0) {
+		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
+		ixgbe_setup_dca(adapter);
+	}
+#endif
 	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
 	cards_found++;
 	return 0;
 
 err_register:
-	ixgbe_napi_del_all(adapter);
-err_napi_add:
 	ixgbe_release_hw_control(adapter);
-err_hw_init:
+	ixgbe_clear_interrupt_scheme(adapter);
 err_sw_init:
-	ixgbe_reset_interrupt_capability(adapter);
 err_eeprom:
 	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
 	del_timer_sync(&adapter->sfp_timer);
@@ -4423,8 +5768,8 @@ err_eeprom:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_release_regions(pdev);
-err_pci_reg:
+	pci_release_selected_regions(pdev, pci_select_bars(pdev,
+	                             IORESOURCE_MEM));
+err_pci_reg:
 err_dma:
 	pci_disable_device(pdev);
 	return err;
@@ -4456,25 +5801,42 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	cancel_delayed_work(&adapter->sfp_task);
 	cancel_delayed_work(&adapter->multispeed_fiber_task);
 	cancel_delayed_work(&adapter->sfp_config_module_task);
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		cancel_delayed_work(&adapter->fdir_reinit_task);
 	flush_scheduled_work();
 
+#ifdef CONFIG_IXGBE_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
+		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
+		dca_remove_requester(&pdev->dev);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+	}
+
+#endif
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		ixgbe_cleanup_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);
 
-	ixgbe_reset_interrupt_capability(adapter);
-	ixgbe_napi_del_all(adapter);
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
+	ixgbe_clear_interrupt_scheme(adapter);
 
 	ixgbe_release_hw_control(adapter);
 
 	iounmap(adapter->hw.hw_addr);
-
-	pci_release_regions(pdev);
+	pci_release_selected_regions(pdev, pci_select_bars(pdev,
+	                             IORESOURCE_MEM));
 
 	DPRINTK(PROBE, INFO, "complete\n");
+
 	free_netdev(netdev);
 
+	pci_disable_pcie_error_reporting(pdev);
+
 	pci_disable_device(pdev);
 }
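
For reference, the release call above mirrors the probe side of the selected-BARs
pattern this update adopts; a minimal sketch (driver name and error handling are
illustrative, not from the patch):

    #include <linux/pci.h>

    static int example_probe(struct pci_dev *pdev)
    {
    	int err;

    	/* Enable only the memory BARs; I/O-port BARs stay untouched. */
    	err = pci_enable_device_mem(pdev);
    	if (err)
    		return err;

    	/* Claim just the memory regions selected above. */
    	err = pci_request_selected_regions(pdev,
    	                pci_select_bars(pdev, IORESOURCE_MEM), "example");
    	if (err)
    		pci_disable_device(pdev);
    	return err;
    }

Requesting only the IORESOURCE_MEM BARs is what allows the driver to use
pci_enable_device_mem() rather than pci_enable_device().
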
 
@@ -4490,7 +5852,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                 pci_channel_state_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev->priv;
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	netif_device_detach(netdev);
 
@@ -4514,22 +5876,34 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev->priv;
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	pci_ers_result_t result;
+	int err;
 
-	if (pci_enable_device(pdev)) {
+	if (pci_enable_device_mem(pdev)) {
 		DPRINTK(PROBE, ERR,
 		        "Cannot re-enable PCI device after reset.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-	pci_set_master(pdev);
-	pci_restore_state(pdev);
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
 
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	ixgbe_reset(adapter);
+		ixgbe_reset(adapter);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
-	return PCI_ERS_RESULT_RECOVERED;
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	err = pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+		  "pci_cleanup_aer_uncorrect_error_status failed 0x%x\n", err);
+		/* non-fatal, continue */
+	}
+
+	return result;
 }
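
The three AER callbacks (ixgbe_io_error_detected above, ixgbe_io_slot_reset,
and ixgbe_io_resume below) are hooked into the PCI core through the driver's
err_handler field; a sketch of that wiring, with the remaining pci_driver
fields elided:

    static struct pci_error_handlers ixgbe_err_handler = {
    	.error_detected = ixgbe_io_error_detected,
    	.slot_reset     = ixgbe_io_slot_reset,
    	.resume         = ixgbe_io_resume,
    };

    static struct pci_driver ixgbe_driver = {
    	.name        = ixgbe_driver_name,
    	.probe       = ixgbe_probe,
    	.remove      = __devexit_p(ixgbe_remove),
    	.err_handler = &ixgbe_err_handler,
    	/* .id_table, .suspend, .resume, .shutdown elided */
    };
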
 
 /**
@@ -4542,7 +5916,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 static void ixgbe_io_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev->priv;
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	if (netif_running(netdev)) {
 		if (ixgbe_up(adapter)) {
@@ -4587,6 +5961,10 @@ static int __init ixgbe_init_module(void)
 
 	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
 
+#ifdef CONFIG_IXGBE_DCA
+	dca_register_notify(&dca_notifier);
+#endif
+
 	ret = pci_register_driver(&ixgbe_driver);
 	return ret;
 }
@@ -4601,8 +5979,37 @@ module_init(ixgbe_init_module);
  **/
 static void __exit ixgbe_exit_module(void)
 {
+#ifdef CONFIG_IXGBE_DCA
+	dca_unregister_notify(&dca_notifier);
+#endif
 	pci_unregister_driver(&ixgbe_driver);
 }
+
+#ifdef CONFIG_IXGBE_DCA
+static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
+                            void *p)
+{
+	int ret_val;
+
+	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
+	                                 __ixgbe_notify_dca);
+
+	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
+#endif /* CONFIG_IXGBE_DCA */
+#ifdef DEBUG
+/**
+ * ixgbe_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	return adapter->netdev->name;
+}
+
+#endif
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */
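
For context on the DCA plumbing above: dca_notifier is a struct notifier_block
whose .notifier_call is ixgbe_notify_dca, and the per-device callback handed to
driver_for_each_device() follows this shape (a condensed sketch of
__ixgbe_notify_dca(), mirroring the teardown done in ixgbe_remove()):

    static int __ixgbe_notify_dca(struct device *dev, void *data)
    {
    	struct net_device *netdev = dev_get_drvdata(dev);
    	struct ixgbe_adapter *adapter = netdev_priv(netdev);
    	unsigned long event = *(unsigned long *)data;

    	switch (event) {
    	case DCA_PROVIDER_ADD:
    		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
    			break;	/* already enabled */
    		if (dca_add_requester(dev) == 0) {
    			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
    			ixgbe_setup_dca(adapter);
    			break;
    		}
    		/* fall through: DCA could not be enabled */
    	case DCA_PROVIDER_REMOVE:
    		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
    			dca_remove_requester(dev);
    			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
    			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
    		}
    		break;
    	}
    	return 0;
    }
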
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 030409b..7408af0 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -445,23 +445,6 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 }
 
 /**
- *  ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
- *  @hw: pointer to hardware structure
- *  @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
-                                       u16 *firmware_version)
-{
-	s32 status = 0;
-
-	status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
-	                              IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-	                              firmware_version);
-
-	return status;
-}
-
-/**
  *  ixgbe_reset_phy_nl - Performs a PHY reset
  *  @hw: pointer to hardware structure
  **/
@@ -569,18 +552,30 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
 	u32 vendor_oui = 0;
+	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
 	u8 identifier = 0;
 	u8 comp_codes_1g = 0;
 	u8 comp_codes_10g = 0;
 	u8 oui_bytes[3] = {0, 0, 0};
-	u8 transmission_media = 0;
+	u8 cable_tech = 0;
 	u16 enforce_sfp = 0;
 
+	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		status = IXGBE_ERR_SFP_NOT_PRESENT;
+		goto out;
+	}
+
 	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
 	                                     &identifier);
 
-	if (status == IXGBE_ERR_SFP_NOT_PRESENT) {
+	if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
+		status = IXGBE_ERR_SFP_NOT_PRESENT;
 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		if (hw->phy.type != ixgbe_phy_nl) {
+			hw->phy.id = 0;
+			hw->phy.type = ixgbe_phy_unknown;
+		}
 		goto out;
 	}
 
@@ -589,8 +584,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 		                            &comp_codes_1g);
 		hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
 		                            &comp_codes_10g);
-		hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_TRANSMISSION_MEDIA,
-		                            &transmission_media);
+		hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
+		                            &cable_tech);
 
 		/* ID Module
 		 * =========
@@ -603,7 +598,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 		 * 6    SFP_SR/LR_CORE1 - 82599-specific
 		 */
 		if (hw->mac.type == ixgbe_mac_82598EB) {
-			if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
 				hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
 			else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
 				hw->phy.sfp_type = ixgbe_sfp_type_sr;
@@ -612,7 +607,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 			else
 				hw->phy.sfp_type = ixgbe_sfp_type_unknown;
 		} else if (hw->mac.type == ixgbe_mac_82599EB) {
-			if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
 				if (hw->bus.lan_id == 0)
 					hw->phy.sfp_type =
 					             ixgbe_sfp_type_da_cu_core0;
@@ -637,8 +632,19 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 				hw->phy.sfp_type = ixgbe_sfp_type_unknown;
 		}
 
+		if (hw->phy.sfp_type != stored_sfp_type)
+			hw->phy.sfp_setup_needed = true;
+
+		/* Determine if the SFP+ PHY is dual speed or not. */
+		hw->phy.multispeed_fiber = false;
+		if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+		   (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+		   ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+		   (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+			hw->phy.multispeed_fiber = true;
+
 		/* Determine PHY vendor */
-		if (hw->phy.type == ixgbe_phy_unknown) {
+		if (hw->phy.type != ixgbe_phy_nl) {
 			hw->phy.id = identifier;
 			hw->phy.ops.read_i2c_eeprom(hw,
 			                            IXGBE_SFF_VENDOR_OUI_BYTE0,
@@ -657,8 +663,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 
 			switch (vendor_oui) {
 			case IXGBE_SFF_VENDOR_OUI_TYCO:
-				if (transmission_media &
-				    IXGBE_SFF_TWIN_AX_CAPABLE)
+				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
 					hw->phy.type = ixgbe_phy_tw_tyco;
 				break;
 			case IXGBE_SFF_VENDOR_OUI_FTL:
@@ -671,31 +676,42 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 				hw->phy.type = ixgbe_phy_sfp_intel;
 				break;
 			default:
-				if (transmission_media &
-				    IXGBE_SFF_TWIN_AX_CAPABLE)
+				if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
 					hw->phy.type = ixgbe_phy_tw_unknown;
 				else
 					hw->phy.type = ixgbe_phy_sfp_unknown;
 				break;
 			}
 		}
-		if (hw->mac.type == ixgbe_mac_82598EB ||
-		    (hw->phy.sfp_type != ixgbe_sfp_type_sr &&
-		     hw->phy.sfp_type != ixgbe_sfp_type_lr &&
-		     hw->phy.sfp_type != ixgbe_sfp_type_srlr_core0 &&
-		     hw->phy.sfp_type != ixgbe_sfp_type_srlr_core1)) {
+
+		/* All passive DA cables are supported */
+		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
 			status = 0;
 			goto out;
 		}
 
-		hw->eeprom.ops.read(hw, IXGBE_PHY_ENFORCE_INTEL_SFP_OFFSET,
-		                    &enforce_sfp);
-		if (!(enforce_sfp & IXGBE_PHY_ALLOW_ANY_SFP)) {
+		/* 1G SFP modules are not supported */
+		if (comp_codes_10g == 0) {
+			hw->phy.type = ixgbe_phy_sfp_unsupported;
+			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+			goto out;
+		}
+
+		/* Anything else 82598-based is supported */
+		if (hw->mac.type == ixgbe_mac_82598EB) {
+			status = 0;
+			goto out;
+		}
+
+		/* This is guaranteed to be 82599, no need to check for NULL */
+		hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
 			/* Make sure we're a supported PHY type */
 			if (hw->phy.type == ixgbe_phy_sfp_intel) {
 				status = 0;
 			} else {
 				hw_dbg(hw, "SFP+ module not supported\n");
+				hw->phy.type = ixgbe_phy_sfp_unsupported;
 				status = IXGBE_ERR_SFP_NOT_SUPPORTED;
 			}
 		} else {
@@ -1314,4 +1330,20 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 	return status;
 }
 
+/**
+ *  ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+                                       u16 *firmware_version)
+{
+	s32 status = 0;
+
+	status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+	                              IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+	                              firmware_version);
+
+	return status;
+}
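
Summarizing the identify-SFP changes above: classification now keys off the SFF
cable-technology byte (offset 0x8) instead of the old transmission-media byte.
A condensed, illustrative restatement of the resulting accept/reject logic (the
helper name and the allow_any_sfp flag, normally read via get_device_caps(),
are assumptions made for the sketch):

    static bool sfp_module_supported(struct ixgbe_hw *hw, u8 cable_tech,
                                     u8 comp_codes_10g, bool allow_any_sfp)
    {
    	/* All passive direct-attach copper cables are supported. */
    	if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
    		return true;

    	/* 1G-only modules (no 10G compliance codes) are not. */
    	if (comp_codes_10g == 0)
    		return false;

    	/* Anything else on 82598 is accepted... */
    	if (hw->mac.type == ixgbe_mac_82598EB)
    		return true;

    	/* ...while 82599 accepts only Intel optics unless the EEPROM
    	 * device-caps word sets IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP. */
    	return allow_any_sfp || hw->phy.type == ixgbe_phy_sfp_intel;
    }
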
 
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9953d18..9b700f5 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -39,11 +39,12 @@
 #define IXGBE_SFF_VENDOR_OUI_BYTE2   0x27
 #define IXGBE_SFF_1GBE_COMP_CODES    0x6
 #define IXGBE_SFF_10GBE_COMP_CODES   0x3
-#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
+#define IXGBE_SFF_CABLE_TECHNOLOGY   0x8
 
 /* Bitmasks */
-#define IXGBE_SFF_TWIN_AX_CAPABLE            0x80
+#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
 #define IXGBE_SFF_1GBASESX_CAPABLE           0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE           0x2
 #define IXGBE_SFF_10GBASESR_CAPABLE          0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE          0x20
 #define IXGBE_I2C_EEPROM_READ_MASK           0x100
@@ -91,9 +92,6 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
                                        bool autoneg_wait_to_complete);
 
 /* PHY specific */
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
-                                       u16 *firmware_version);
-
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
                              ixgbe_link_speed *speed,
                              bool *link_up);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 8041fc9..7d3f712 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -41,14 +41,19 @@
 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
 #define IXGBE_DEV_ID_82598EB_SFP_LOM     0x10DB
 #define IXGBE_DEV_ID_82598AT             0x10C8
+#define IXGBE_DEV_ID_82598AT2            0x150B
 #define IXGBE_DEV_ID_82598EB_CX4         0x10DD
 #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
 #define IXGBE_DEV_ID_82598_DA_DUAL_PORT  0x10F1
 #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM      0x10E1
 #define IXGBE_DEV_ID_82598EB_XF_LR       0x10F4
-#define IXGBE_DEV_ID_82599               0x10D8
 #define IXGBE_DEV_ID_82599_KX4           0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ      0x1514
+#define IXGBE_DEV_ID_82599_CX4           0x10F9
 #define IXGBE_DEV_ID_82599_SFP           0x10FB
+#define IXGBE_DEV_ID_82599_SFP_EM        0x1507
+#define IXGBE_DEV_ID_82599_XAUI_LOM      0x10FC
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
 
 /* General Registers */
 #define IXGBE_CTRL      0x00000
@@ -230,6 +235,34 @@
 #define IXGBE_RETA(_i)  (0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
 #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
 
+/* Flow Director registers */
+#define IXGBE_FDIRCTRL  0x0EE00
+#define IXGBE_FDIRHKEY  0x0EE68
+#define IXGBE_FDIRSKEY  0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM  0x0EE44
+#define IXGBE_FDIRUDPM  0x0EE48
+#define IXGBE_FDIRIP6M  0x0EE74
+#define IXGBE_FDIRM     0x0EE70
+
+/* Flow Director Stats registers */
+#define IXGBE_FDIRFREE  0x0EE38
+#define IXGBE_FDIRLEN   0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS  0x0EE5C
+
+/* Flow Director Programming registers */
+#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
+#define IXGBE_FDIRIPSA      0x0EE18
+#define IXGBE_FDIRIPDA      0x0EE1C
+#define IXGBE_FDIRPORT      0x0EE20
+#define IXGBE_FDIRVLAN      0x0EE24
+#define IXGBE_FDIRHASH      0x0EE28
+#define IXGBE_FDIRCMD       0x0EE2C
+
 /* Transmit DMA registers */
 #define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
 #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
@@ -444,6 +477,21 @@
 
 #define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE    0x4
 
+/* HW RSC registers */
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+                          (0x0D02C + (((_i) - 64) * 0x40)))
+#define IXGBE_RSCDBU      0x03028
+#define IXGBE_RSCCTL_RSCEN          0x01
+#define IXGBE_RSCCTL_MAXDESC_1      0x00
+#define IXGBE_RSCCTL_MAXDESC_4      0x04
+#define IXGBE_RSCCTL_MAXDESC_8      0x08
+#define IXGBE_RSCCTL_MAXDESC_16     0x0C
+#define IXGBE_RXDADV_RSCCNT_SHIFT     17
+#define IXGBE_GPIE_RSC_DELAY_SHIFT    11
+#define IXGBE_RXDADV_RSCCNT_MASK    0x001E0000
+#define IXGBE_RSCDBU_RSCACKDIS      0x00000080
+#define IXGBE_RDRXCTL_RSCFRSTSIZE   0x003E0000
+
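
The RSC (hardware receive-side coalescing) controls above are programmed per
receive queue; a fragment sketching the enable path ('hw' and 'queue_index'
are assumed from the surrounding driver, and the real code picks the MAXDESC
value based on the ring's rx_buf_len):

    	u32 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(queue_index));

    	rscctrl |= IXGBE_RSCCTL_RSCEN;		/* enable coalescing */
    	rscctrl |= IXGBE_RSCCTL_MAXDESC_16;	/* up to 16 descriptors */
    	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(queue_index), rscctrl);
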
 /* DCB registers */
 #define IXGBE_RTRPCS      0x02430
 #define IXGBE_RTTDCS      0x04900
@@ -463,6 +511,63 @@
 #define IXGBE_RTTDTECC_NO_BCN   0x00000100
 #define IXGBE_RTTBCNRC    0x04984
 
+/* FCoE registers */
+#define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH    0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF    0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW   0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCINVST0  0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
+#define IXGBE_FCINVST(_i)       (IXGBE_FCINVST0 + ((_i) * 4))
+#define IXGBE_FCBUFF_VALID      (1 << 0)   /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE   (3 << 3)   /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX    (1 << 7)   /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT    0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET     0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT  3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT   8
+#define IXGBE_FCBUFF_OFFSET_SHIFT    16
+#define IXGBE_FCDMARW_WE        (1 << 14)   /* Write enable */
+#define IXGBE_FCDMARW_RE        (1 << 15)   /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL   0x000001ff  /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE  0xffff0000  /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
+
+/* FCoE SOF/EOF */
+#define IXGBE_TEOFF     0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF     0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF     0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF     0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCFLT     0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW   0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM   0x051D8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID       (1 << 0)   /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST       (1 << 1)   /* Filter First */
+#define IXGBE_FCFLT_SEQID       0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT      0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT    (1 << 13)  /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE        (1 << 14)  /* Write Enable */
+#define IXGBE_FCFLTRW_RE        (1 << 15)  /* Read Enable */
+/* FCoE Receive Control */
+#define IXGBE_FCRXCTRL  0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI  (1 << 0)   /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD   (1 << 1)   /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH  (1 << 2)   /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3)   /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH     (1 << 4)   /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5)   /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC     (1 << 6)   /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO  (1 << 7)   /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER  0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+/* FCoE Redirection */
+#define IXGBE_FCRECTL   0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0   0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i)        (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA       0x1        /* FCoE Redir Table Enable */
+#define IXGBE_FCRETA_SIZE       8          /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+
 /* Stats registers */
 #define IXGBE_CRCERRS   0x04000
 #define IXGBE_ILLERRC   0x04004
@@ -534,6 +639,13 @@
 #define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
 #define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
 #define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_FCCRC     0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC  0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST    0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC   0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC  0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC   0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC  0x08788 /* Number of FCoE DWords Transmitted */
 
 /* Management */
 #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -610,6 +722,12 @@
 #define IXGBE_ECC_STATUS_82599  0x110E0
 #define IXGBE_BAR_CTRL_82599    0x110F4
 
+/* PCI Express Control */
+#define IXGBE_GCR_CMPL_TMOUT_MASK       0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms       0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND     0x00010000
+#define IXGBE_GCR_CAP_VER2              0x00040000
+
 /* Time Sync Registers */
 #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
 #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -731,8 +849,8 @@
 /* RDRXCTL Bit Masks */
 #define IXGBE_RDRXCTL_RDMTS_1_2     0x00000000 /* Rx Desc Min Threshold Size */
 #define IXGBE_RDRXCTL_CRCSTRIP      0x00000002 /* CRC Strip */
-#define IXGBE_RDRXCTL_DMAIDONE      0x00000008 /* DMA init cycle done */
 #define IXGBE_RDRXCTL_MVMEN         0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE      0x00000008 /* DMA init cycle done */
 #define IXGBE_RDRXCTL_AGGDIS        0x00010000 /* Aggregation disable */
 
 /* RQTC Bit Masks and Shifts */
@@ -781,6 +899,9 @@
 #define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
 #define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
 #define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
 
 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
 #define IXGBE_DCA_TXCTRL_CPUID_MASK_82599  0xFF000000 /* Tx CPUID Mask */
@@ -860,6 +981,7 @@
 #define IXGBE_MDIO_PHY_EXT_ABILITY        0xB /* Ext Ability Reg */
 #define IXGBE_MDIO_PHY_10GBASET_ABILITY   0x0004 /* 10GBaseT capable */
 #define IXGBE_MDIO_PHY_1000BASET_ABILITY  0x0020 /* 1000BaseT capable */
+#define IXGBE_MDIO_PHY_100BASETX_ABILITY  0x0080 /* 100BaseTX capable */
 
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR     0xC30A /* PHY_XS SDA/SCL Addr Reg */
 #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA     0xC30B /* PHY_XS SDA/SCL Data Reg */
@@ -883,7 +1005,6 @@
 #define ATH_PHY_ID       0x03429050
 
 /* PHY Types */
-#define TNX_FW_REV       0xB
 #define IXGBE_M88E1145_E_PHY_ID  0x01410CD0
 
 /* Special PHY Init Routine */
@@ -897,8 +1018,6 @@
 #define IXGBE_CONTROL_NL         0x000F
 #define IXGBE_CONTROL_EOL_NL     0x0FFF
 #define IXGBE_CONTROL_SOL_NL     0x0000
-#define IXGBE_PHY_ENFORCE_INTEL_SFP_OFFSET 0x002C
-#define IXGBE_PHY_ALLOW_ANY_SFP            0x1
 
 /* General purpose Interrupt Enable */
 #define IXGBE_SDP0_GPIEN         0x00000001 /* SDP0 */
@@ -957,6 +1076,8 @@
 #define IXGBE_VT_CTL_DIS_DEFPL  0x20000000 /* disable default pool */
 #define IXGBE_VT_CTL_REPLEN     0x40000000 /* replication enabled */
 #define IXGBE_VT_CTL_VT_ENABLE  0x00000001  /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT 7
+#define IXGBE_VT_CTL_POOL_MASK  (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
 
 /* VMOLR bitmasks */
 #define IXGBE_VMOLR_AUPE        0x01000000 /* accept untagged packets */
@@ -1046,7 +1167,6 @@
 #define IXGBE_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
 #define IXGBE_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
 
-
 /* Extended Interrupt Cause Set */
 #define IXGBE_EICS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
 #define IXGBE_EICS_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
@@ -1059,9 +1179,7 @@
 #define IXGBE_EICS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
 #define IXGBE_EICS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
 #define IXGBE_EICS_ECC          IXGBE_EICR_ECC       /* ECC Error */
-#define IXGBE_EIMS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
+#define IXGBE_EICS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
 #define IXGBE_EICS_DHER         IXGBE_EICR_DHER      /* Desc Handler Error */
 #define IXGBE_EICS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
 #define IXGBE_EICS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
@@ -1150,6 +1268,7 @@
 
 /* Interrupt Vector Allocation Registers */
 #define IXGBE_IVAR_REG_NUM      25
+#define IXGBE_IVAR_REG_NUM_82599       64
 #define IXGBE_IVAR_TXRX_ENTRY   96
 #define IXGBE_IVAR_RX_ENTRY     64
 #define IXGBE_IVAR_RX_QUEUE(_i)    (0 + (_i))
@@ -1165,6 +1284,7 @@
 
 /* ETYPE Queue Filter/Select Bit Masks */
 #define IXGBE_MAX_ETQF_FILTERS  8
+#define IXGBE_ETQF_FCOE         0x08000000 /* bit 27 */
 #define IXGBE_ETQF_BCN          0x10000000 /* bit 28 */
 #define IXGBE_ETQF_1588         0x40000000 /* bit 30 */
 #define IXGBE_ETQF_FILTER_EN    0x80000000 /* bit 31 */
@@ -1187,6 +1307,7 @@
  */
 #define IXGBE_ETQF_FILTER_EAPOL          0
 #define IXGBE_ETQF_FILTER_BCN            1
+#define IXGBE_ETQF_FILTER_FCOE           2
 #define IXGBE_ETQF_FILTER_1588           3
 /* VLAN Control Bit Masks */
 #define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
@@ -1210,8 +1331,10 @@
 #define IXGBE_STATUS_LAN_ID_1   0x00000004 /* LAN ID 1 */
 
 /* ESDP Bit Masks */
-#define IXGBE_ESDP_SDP0 0x00000001
-#define IXGBE_ESDP_SDP1 0x00000002
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
 #define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
 #define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
 #define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
@@ -1243,6 +1366,8 @@
 #define IXGBE_AUTOC_KX4_SUPP    0x80000000
 #define IXGBE_AUTOC_KX_SUPP     0x40000000
 #define IXGBE_AUTOC_PAUSE       0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE   0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE   0x10000000
 #define IXGBE_AUTOC_RF          0x08000000
 #define IXGBE_AUTOC_PD_TMR      0x06000000
 #define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
@@ -1311,7 +1436,7 @@
 #define IXGBE_LINK_UP_TIME      90 /* 9.0 Seconds */
 #define IXGBE_AUTO_NEG_TIME     45 /* 4.5 Seconds */
 
-#define FIBER_LINK_UP_LIMIT     50
+#define IXGBE_LINKS2_AN_SUPPORTED   0x00000040
 
 /* PCS1GLSTA Bit Masks */
 #define IXGBE_PCS1GLSTA_LINK_OK         1
@@ -1333,6 +1458,11 @@
 #define IXGBE_PCS1GLCTL_AN_ENABLE       0x10000
 #define IXGBE_PCS1GLCTL_AN_RESTART      0x20000
 
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE               0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE           0x0400
+#define IXGBE_ANLP1_ASM_PAUSE           0x0800
+
 /* SW Semaphore Register bitmasks */
 #define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
 #define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
@@ -1384,6 +1514,8 @@
 #define IXGBE_FW_PTR            0x0F
 #define IXGBE_PBANUM0_PTR       0x15
 #define IXGBE_PBANUM1_PTR       0x16
+#define IXGBE_DEVICE_CAPS       0x2C
+#define IXGBE_SAN_MAC_ADDR_PTR  0x28
 #define IXGBE_PCIE_MSIX_82599_CAPS  0x72
 #define IXGBE_PCIE_MSIX_82598_CAPS  0x62
 
@@ -1427,8 +1559,26 @@
 #define IXGBE_EERD_ATTEMPTS 100000
 #endif
 
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET  0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET  0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP  0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS  0x2
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR   0x4
+#define IXGBE_FW_PATCH_VERSION_4   0x7
+
+/* Alternative SAN MAC Address Block */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR      0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET  0x0 /* Alt. SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET  0x7 /* Alt. WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET  0x8 /* Alt. WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC  0x0 /* Alt. SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN  0x1 /* Alt. WWN base exists */
+
 /* PCI Bus Info */
 #define IXGBE_PCI_LINK_STATUS     0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
 #define IXGBE_PCI_LINK_WIDTH      0x3F0
 #define IXGBE_PCI_LINK_WIDTH_1    0x10
 #define IXGBE_PCI_LINK_WIDTH_2    0x20
@@ -1439,6 +1589,7 @@
 #define IXGBE_PCI_LINK_SPEED_5000 0x2
 #define IXGBE_PCI_HEADER_TYPE_REGISTER  0x0E
 #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms  0x0005
 
 /* Number of 100 microseconds we wait for PCI Express master disable */
 #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
@@ -1555,7 +1706,8 @@
 #define IXGBE_MTQC_RT_ENA       0x1 /* DCB Enable */
 #define IXGBE_MTQC_VT_ENA       0x2 /* VMDQ2 Enable */
 #define IXGBE_MTQC_64Q_1PB      0x0 /* 64 queues 1 pack buffer */
-#define IXGBE_MTQC_64VF         0x8 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_32VF         0x8 /* 4 TX Queues per pool w/32 VFs */
+#define IXGBE_MTQC_64VF         0x4 /* 2 TX Queues per pool w/64 VFs */
 #define IXGBE_MTQC_8TC_8TQ      0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
 
 /* Receive Descriptor bit definitions */
@@ -1587,6 +1739,11 @@
 #define IXGBE_RXD_ERR_IPE       0x80    /* IP Checksum Error */
 #define IXGBE_RXDADV_ERR_MASK           0xfff00000 /* RDESC.ERRORS mask */
 #define IXGBE_RXDADV_ERR_SHIFT          20         /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_FCEOFE         0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR          0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN       0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP      0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL      0x00400000 /* FDIR Collision error */
 #define IXGBE_RXDADV_ERR_HBO    0x00800000 /*Header Buffer Overflow */
 #define IXGBE_RXDADV_ERR_CE     0x01000000 /* CRC Error */
 #define IXGBE_RXDADV_ERR_LE     0x02000000 /* Length Error */
@@ -1606,12 +1763,19 @@
 #define IXGBE_RXDADV_STAT_FLM           IXGBE_RXD_STAT_FLM /* FDir Match */
 #define IXGBE_RXDADV_STAT_VP            IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
 #define IXGBE_RXDADV_STAT_MASK          0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS        0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT        0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP  0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP    0x00000030 /* 11: Ctxt w/ DDP */
 
 /* PSRTYPE bit definitions */
 #define IXGBE_PSRTYPE_TCPHDR    0x00000010
 #define IXGBE_PSRTYPE_UDPHDR    0x00000020
 #define IXGBE_PSRTYPE_IPV4HDR   0x00000100
 #define IXGBE_PSRTYPE_IPV6HDR   0x00000200
+#define IXGBE_PSRTYPE_L2HDR     0x00001000
 
 /* SRRCTL bit definitions */
 #define IXGBE_SRRCTL_BSIZEPKT_SHIFT     10     /* so many KBs */
@@ -1712,26 +1876,81 @@
 
 #endif
 
-/* Transmit Descriptor - Legacy */
-struct ixgbe_legacy_tx_desc {
-	u64 buffer_addr;       /* Address of the descriptor's data buffer */
-	union {
-		__le32 data;
-		struct {
-			__le16 length;    /* Data buffer length */
-			u8 cso;           /* Checksum offset */
-			u8 cmd;           /* Descriptor control */
-		} flags;
-	} lower;
-	union {
-		__le32 data;
-		struct {
-			u8 status;        /* Descriptor status */
-			u8 css;           /* Checksum start */
-			__le16 vlan;
-		} fields;
-	} upper;
+enum ixgbe_fdir_pballoc_type {
+	IXGBE_FDIR_PBALLOC_64K = 0,
+	IXGBE_FDIR_PBALLOC_128K,
+	IXGBE_FDIR_PBALLOC_256K,
 };
+#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT           16
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K              0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K             0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K             0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE                0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH            0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS            0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS     0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT             8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT               16
+#define IXGBE_FDIRCTRL_SEARCHLIM                0x00800000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT         24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK         0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT        28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT             16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT             16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT               16
+#define IXGBE_FDIRM_VLANID                      0x00000001
+#define IXGBE_FDIRM_VLANP                       0x00000002
+#define IXGBE_FDIRM_POOL                        0x00000004
+#define IXGBE_FDIRM_L3P                         0x00000008
+#define IXGBE_FDIRM_L4P                         0x00000010
+#define IXGBE_FDIRM_FLEX                        0x00000020
+#define IXGBE_FDIRM_DIPv6                       0x00000040
+
+#define IXGBE_FDIRFREE_FREE_MASK                0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT               0
+#define IXGBE_FDIRFREE_COLL_MASK                0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT               16
+#define IXGBE_FDIRLEN_MAXLEN_MASK               0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT              0
+#define IXGBE_FDIRLEN_MAXHASH_MASK              0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT             16
+#define IXGBE_FDIRUSTAT_ADD_MASK                0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT               0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK             0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT            16
+#define IXGBE_FDIRFSTAT_FADD_MASK               0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT              0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK            0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT           8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT        16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT               16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT       15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT       16
+
+#define IXGBE_FDIRCMD_CMD_MASK                  0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW              0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW           0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT        0x00000003
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH        0x00000007
+#define IXGBE_FDIRCMD_FILTER_UPDATE             0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH                0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP                0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP                0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP               0x00000060
+#define IXGBE_FDIRCMD_IPV6                      0x00000080
+#define IXGBE_FDIRCMD_CLEARHT                   0x00000100
+#define IXGBE_FDIRCMD_DROP                      0x00000200
+#define IXGBE_FDIRCMD_INT                       0x00000400
+#define IXGBE_FDIRCMD_LAST                      0x00000800
+#define IXGBE_FDIRCMD_COLLISION                 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
+#define IXGBE_FDIR_INIT_DONE_POLL               10
+#define IXGBE_FDIRCMD_CMD_POLL                  10
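
Putting the FDIRCMD bits together, adding a signature filter reduces to roughly
the following fragment ('hash' and 'queue' are inputs; simplified from the
82599 programming sequence):

    	u32 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW |
    	              IXGBE_FDIRCMD_FILTER_UPDATE |
    	              IXGBE_FDIRCMD_LAST |
    	              IXGBE_FDIRCMD_QUEUE_EN |
    	              IXGBE_FDIRCMD_L4TYPE_TCP |
    	              (queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);

    	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, hash);
    	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
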
 
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
@@ -1747,16 +1966,6 @@ union ixgbe_adv_tx_desc {
 	} wb;
 };
 
-/* Receive Descriptor - Legacy */
-struct ixgbe_legacy_rx_desc {
-	__le64 buffer_addr; /* Address of the descriptor's data buffer */
-	__le16 length;      /* Length of data DMAed into data buffer */
-	__le16 csum;        /* Packet checksum */
-	u8 status;          /* Descriptor status */
-	u8 errors;          /* Descriptor Errors */
-	__le16 vlan;
-};
-
 /* Receive Descriptor - Advanced */
 union ixgbe_adv_rx_desc {
 	struct {
@@ -1838,6 +2047,16 @@ struct ixgbe_adv_tx_context_desc {
 #define IXGBE_ADVTXD_POPTS_IPSEC      0x00000400 /* IPSec offload request */
 #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
 #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
+#define IXGBE_ADVTXT_TUCMD_FCOE      0x00008000       /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK  (0x3 << 10)      /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF       ((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC    ((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE      ((1 << 4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS      ((1 << 5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N     (0x0 << 10)      /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T     (0x1 << 10)      /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI    (0x2 << 10)      /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A     (0x3 << 10)      /* 11: EOFa */
 #define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
 #define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */
 
@@ -1863,7 +2082,7 @@ typedef u32 ixgbe_physical_layer;
 #define IXGBE_PHYSICAL_LAYER_UNKNOWN      0
 #define IXGBE_PHYSICAL_LAYER_10GBASE_T    0x0001
 #define IXGBE_PHYSICAL_LAYER_1000BASE_T   0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_T    0x0004
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX   0x0004
 #define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU  0x0008
 #define IXGBE_PHYSICAL_LAYER_10GBASE_LR   0x0010
 #define IXGBE_PHYSICAL_LAYER_10GBASE_LRM  0x0020
@@ -1872,6 +2091,47 @@ typedef u32 ixgbe_physical_layer;
 #define IXGBE_PHYSICAL_LAYER_10GBASE_CX4  0x0100
 #define IXGBE_PHYSICAL_LAYER_1000BASE_KX  0x0200
 #define IXGBE_PHYSICAL_LAYER_1000BASE_BX  0x0400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR   0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+
+/* Software ATR hash keys */
+#define IXGBE_ATR_BUCKET_HASH_KEY    0xE214AD3D
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
+
+/* Software ATR input stream offsets and masks */
+#define IXGBE_ATR_VLAN_OFFSET       0
+#define IXGBE_ATR_SRC_IPV6_OFFSET   2
+#define IXGBE_ATR_SRC_IPV4_OFFSET  14
+#define IXGBE_ATR_DST_IPV6_OFFSET  18
+#define IXGBE_ATR_DST_IPV4_OFFSET  30
+#define IXGBE_ATR_SRC_PORT_OFFSET  34
+#define IXGBE_ATR_DST_PORT_OFFSET  36
+#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
+#define IXGBE_ATR_VM_POOL_OFFSET   40
+#define IXGBE_ATR_L4TYPE_OFFSET    41
+
+#define IXGBE_ATR_L4TYPE_MASK      0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_L4TYPE_UDP       0x1
+#define IXGBE_ATR_L4TYPE_TCP       0x2
+#define IXGBE_ATR_L4TYPE_SCTP      0x3
+#define IXGBE_ATR_HASH_MASK     0x7fff
+
+/* Flow Director ATR input struct. */
+struct ixgbe_atr_input {
+	/* Byte layout in order, all values with MSB first:
+	 *
+	 * vlan_id    - 2 bytes
+	 * src_ip     - 16 bytes
+	 * dst_ip     - 16 bytes
+	 * src_port   - 2 bytes
+	 * dst_port   - 2 bytes
+	 * flex_bytes - 2 bytes
+	 * vm_pool    - 1 byte
+	 * l4type     - 1 byte
+	 */
+	u8 byte_stream[42];
+};
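
A hypothetical helper showing how an IPv4/TCP flow would land in that byte
stream using the offsets above; since every field is stored MSB-first,
big-endian (__be*) values can be copied verbatim (assumes the usual driver
includes for memset/memcpy):

    static void atr_pack_ipv4_tcp(struct ixgbe_atr_input *in, u16 vlan_id,
                                  __be32 src_ip, __be32 dst_ip,
                                  __be16 src_port, __be16 dst_port)
    {
    	memset(in, 0, sizeof(*in));
    	in->byte_stream[IXGBE_ATR_VLAN_OFFSET]     = vlan_id >> 8;
    	in->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan_id & 0xff;
    	memcpy(&in->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET], &src_ip, 4);
    	memcpy(&in->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET], &dst_ip, 4);
    	memcpy(&in->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET], &src_port, 2);
    	memcpy(&in->byte_stream[IXGBE_ATR_DST_PORT_OFFSET], &dst_port, 2);
    	in->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]   = IXGBE_ATR_L4TYPE_TCP;
    }
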
 
 enum ixgbe_eeprom_type {
 	ixgbe_eeprom_uninitialized = 0,
@@ -1899,6 +2159,7 @@ enum ixgbe_phy_type {
 	ixgbe_phy_sfp_ftl,
 	ixgbe_phy_sfp_unknown,
 	ixgbe_phy_sfp_intel,
+	ixgbe_phy_sfp_unsupported,
 	ixgbe_phy_generic
 };
 
@@ -1932,6 +2193,7 @@ enum ixgbe_media_type {
 	ixgbe_media_type_fiber,
 	ixgbe_media_type_copper,
 	ixgbe_media_type_backplane,
+	ixgbe_media_type_cx4,
 	ixgbe_media_type_virtual
 };
 
@@ -1941,9 +2203,20 @@ enum ixgbe_fc_mode {
 	ixgbe_fc_rx_pause,
 	ixgbe_fc_tx_pause,
 	ixgbe_fc_full,
+#ifdef CONFIG_DCB
+	ixgbe_fc_pfc,
+#endif
 	ixgbe_fc_default
 };
 
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES	3
+enum ixgbe_smart_speed {
+	ixgbe_smart_speed_auto = 0,
+	ixgbe_smart_speed_on,
+	ixgbe_smart_speed_off
+};
+
 /* PCI bus types */
 enum ixgbe_bus_type {
 	ixgbe_bus_type_unknown = 0,
@@ -2004,6 +2277,8 @@ struct ixgbe_fc_info {
 	u16 pause_time; /* Flow Control Pause timer */
 	bool send_xon; /* Flow control send XON */
 	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
 	enum ixgbe_fc_mode current_mode; /* FC mode in effect */
 	enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
 };
@@ -2073,6 +2348,12 @@ struct ixgbe_hw_stats {
 	u64 fdirfstat_fremove;
 	u64 fdirmatch;
 	u64 fdirmiss;
+	u64 fccrc;
+	u64 fcoerpdc;
+	u64 fcoeprc;
+	u64 fcoeptc;
+	u64 fcoedwrc;
+	u64 fcoedwtc;
 };
 
 /* forward declaration */
@@ -2099,6 +2380,9 @@ struct ixgbe_mac_operations {
 	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
 	u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
 	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+	s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
 	s32 (*stop_adapter)(struct ixgbe_hw *);
 	s32 (*get_bus_info)(struct ixgbe_hw *);
 	void (*set_lan_id)(struct ixgbe_hw *);
@@ -2108,9 +2392,7 @@ struct ixgbe_mac_operations {
 	s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
 
 	/* Link */
-	s32 (*setup_link)(struct ixgbe_hw *);
-	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
-	                        bool);
+	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
 	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
 	s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
 	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2141,12 +2423,13 @@ struct ixgbe_mac_operations {
 	s32 (*init_uta_tables)(struct ixgbe_hw *);
 
 	/* Flow Control */
-	s32 (*setup_fc)(struct ixgbe_hw *, s32);
+	s32 (*fc_enable)(struct ixgbe_hw *, s32);
 };
 
 struct ixgbe_phy_operations {
 	s32 (*identify)(struct ixgbe_hw *);
 	s32 (*identify_sfp)(struct ixgbe_hw *);
+	s32 (*init)(struct ixgbe_hw *);
 	s32 (*reset)(struct ixgbe_hw *);
 	s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
 	s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
@@ -2174,18 +2457,22 @@ struct ixgbe_mac_info {
 	enum ixgbe_mac_type             type;
 	u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
 	u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8                              san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16                             wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16                             wwpn_prefix;
 	s32                             mc_filter_type;
+	u32                             mcft_size;
+	u32                             vft_size;
+	u32                             num_rar_entries;
 	u32                             max_tx_queues;
 	u32                             max_rx_queues;
-	u32                             num_rar_entries;
-	u32				mcft_size;
-	u32				vft_size;
 	u32                             max_msix_vectors;
-	u32				orig_autoc;
-	u32				orig_autoc2;
-	bool				orig_link_settings_stored;
-	bool                            autoneg;
-	bool                            autoneg_succeeded;
+	u32                             orig_autoc;
+	u32                             orig_autoc2;
+	bool                            orig_link_settings_stored;
+	bool                            autotry_restart;
 };
 
 struct ixgbe_phy_info {
@@ -2194,11 +2481,13 @@ struct ixgbe_phy_info {
 	u32                             addr;
 	u32                             id;
 	enum ixgbe_sfp_type             sfp_type;
+	bool                            sfp_setup_needed;
 	u32                             revision;
 	enum ixgbe_media_type           media_type;
 	bool                            reset_disable;
 	ixgbe_autoneg_advertised        autoneg_advertised;
-	bool                            autoneg_wait_to_complete;
+	enum ixgbe_smart_speed          smart_speed;
+	bool                            smart_speed_active;
 	bool                            multispeed_fiber;
 };
 
@@ -2250,6 +2539,8 @@ struct ixgbe_info {
 #define IXGBE_ERR_SFP_NOT_SUPPORTED             -19
 #define IXGBE_ERR_SFP_NOT_PRESENT               -20
 #define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21
+#define IXGBE_ERR_FDIR_REINIT_FAILED            -23
+#define IXGBE_ERR_EEPROM_VERSION                -24
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
 #endif /* _IXGBE_TYPE_H_ */