kernel-2.6.18-238.el5.src.rpm

From: Stefan Assmann <sassmann@redhat.com>
Date: Thu, 26 Aug 2010 07:08:03 -0400
Subject: [net] igb: update igb driver to support Portville ACS
Message-id: <4C7612D3.1090405@redhat.com>
Patchwork-id: 27820
O-Subject: [RHEL 5.6 PATCH v2] igb: update igb driver to support Portville ACS
Bugzilla: 566024
RH-Acked-by: David S. Miller <davem@redhat.com>
RH-Acked-by: Andy Gospodarek <gospo@redhat.com>

Bugzilla:
https://bugzilla.redhat.com/show_bug.cgi?id=566024

Description:
Driver update for igb to support Portville ACS. The following commits,
or parts of them, have been backported for RHEL 5.6.
v2:
- Took care of the "NETDEV WATCHDOG: transmit timeout" issue
- Added a few more commits

Upstream Status:
from net-next-2.6
3b29a56d31d585d39bf9ffe9ef1f10bd637ee0f1 intel: convert drivers to netdev_tx_t
d314737ad3bad6b4603b243fd6db572385259690 igb: do not allow phy sw reset code to make calls to null pointers
ff41f8dcc63b4d027ed314ae909df53746c40632 igb: add support for set_rx_mode netdevice operation
26ad91783c489486d3fd1a6932e5bdab9d404a38 igb: add combined function for setting rar and pool bits
68d480c4defb69d834e75fd0be9069a8447afe36 igb: make use of the uta to allow for promiscous mode filter
231835e4163cf14c90e295f1729004f571ee1cc7 igb: Fix erroneous display of stats by ethtool -S
6d9f4fc420571ab67c962ef0d94449127218f66e igb: fix memory leak when setting ring size while interface is down
047e0030f1e601233ae5b03910602ec93c620bce igb: add new data structure for handling interrupts and NAPI
094919a4b0c56d6afbfb5ea14567fbb2f8d47554 igb: remove rx checksum good counter
85b430b47736d1f59e8f9efb0e47bc46aeb2b01d igb: move the tx and rx ring specific config into seperate functions
6ec43fe635fb5c96fbc0955b2794b74fee69b723 igb: remove rx_ps_hdr_len
d43c36dc6b357fa1806800f18aa30123c747a6d1 headers: remove sched.h from interrupt.h
7d95b7170eca3f95bad939fc9eb365b823c05e39 igb: increase minimum rx buffer size to 1K
77a22941f9ed052a0fdaa92753b1a1ab8072ee76 igb: resolve panic on shutdown when SR-IOV is enabled
952f72a8ceee3996ef8476a2f05ece1627080c20 igb: move SRRCTL register configuration into ring specific config
fce99e341524c204ef3dd3e7c5f77265a7e05ddd igb: change the head and tail offsets into pointers
65689fef7e484631e996541a6772706627b0991a igb: cleanup tx dma so map & unmap use matching calls
80785298aa5b6f2005a34afb97457ae7a65af270 igb: add pci device pointer to ring structure
6366ad331f436388129dfc044db871de79604e4d igb: remove use of skb_dma_map from driver
8581145f39c39b00cd1b6a2dcb4ce5bf7d8d6070 igb/igbvf: cleanup exception handling in tx_map_adv
8c0ab70ab9cc849af59ef6592bd652a938b21c79 igb: Implement reading of reg RQDPC (Receive Queue Drop Packet Count)
4c844851d15cc08d995179ab5118172711be6eb0 igb: move rx_buffer_len into the ring structure
2844f7975837d47fe7f20aa96c1df3bcecf91935 igb: make rxcsum configuration seperate from multiqueue
8dc92f7e2ecfd93f5c57da78594a7a5482e2c15e sctp: add feature bit for SCTP offload in hardware
04a5fcaaf0e12d066411aa54e42591952aa18da7 igb: move alloc_failed and csum_err stats into per rx-ring stat
85ad76b2f9c4956ec90c86298b22bb35c326e772 igb: add a flags value to the ring
e694e964fc1241b4981873bdccce70438d5f0394 igb: place a pointer to the netdev struct in the ring itself
06cf2666c7f5cc4ba4bf2687d041c61ada76fa3c igb: move the multiple receive queue configuration into seperate function
4478a9cdf007a0418755a8a4016af8352fb1c1f3 igb: drop support for UDP hashing w/ RSS
4cb9be7ab47820a1fa747569f5f035a5f628c91b e1000/e1000e/igb/ixgb: do not use netif_wake_queue un-necessarily
d4960307ea63a5625a175cc2d7b192e68e6b4fba igb: delay VF reset notification until after interrupts are enabed
10d8e9073a320a1c9cc13f996bd600b477eb4872 igb: setup vlan tag replication stripping in igb_vmm_control
d7ee5b3a78f57a8ca9ca2392ff5d03f91ec90bdb igb: re-use ring configuration code in ethtool testing
b1a436c34c44c6e3fb03c12545d87b4c2818f31d igb: make tx ring map and free functionality non-static
ad93d17efe063b6e95f3177fa01706f3b3b15dde igb: make ethtool use core xmit map and free functionality
83ab50a56e6ea94627fea83ce7b03332bd4c2f02 igb: cleanup "todo" code found in igb_ethtool.c
4fc82adfb01bdee79ec21e44557dc409ef31419a igb: add support for seperate tx-usecs setting in ethtool
317f66bdadc31f0c037b91ae7857f5c3d2a4e3e5 igb: misc cleanups within igb_ethtool.c
d249be54745259980dcbd898bdfeb7307c9c5e10 igb: use packet buffer sizes from RXPBS register
7d5753f08c5be5440ac0385b5f2518d2630be7b7 igb: rework use of VMOLR in regards to PF and VFs
51466239fb9f95343e88c14475a0f99fe4882c54 igb: rework handling of the vfta and vlvf registers in relation to mng_vlan
a6b623e0e5787ba5ffd2a3c4448ff6d1eaa904a9 igb: move vf init into a seperate function
3f9c01648146a256d8238292ac9c63fed7e00473 igb: only process global stats in igb_update_stats
4337e993e13eb2f2e05dd65a3ab25b57c2f89d56 igb: move global_quad_port_a from global into local static define
f7ba205e823f32e634712323a221b42bfea06efa igb: make tx hang check multiqueue, check eop descriptor
439705e1d7281cc8a4631a2dc390df7ad868bad8 igb: cleanup code related to ring resource allocation and free
ee1b9f06dca9c406b159904e9b13ea2dfa5ed037 igb: change queue ordering for 82576 based adapters
645a3abd73c2ac05d375f080d2f58d56e1502562 igb: Remove invalid stats counters
2e5655e758736488abbe9c024c8cda0e367214e5 igb: cleanup igb.h header whitespace and some structure formatting
cdfd01fcc674cc1c0c7b54084d74c2b684bf67c2 igb: cleanup igb xmit frame path
42d0781a1337ec5624da0657ba57b734768f489c igb: cleanup clean_rx_irq_adv and alloc_rx_buffers_adv
19147bb5f1f7e7ee79f0c92f59f67da8c0031dc0 igb: remove skb_orphan calls
330a6d6a7c75e11ca6da52092cfa96cda45d3386 igb: replace unecessary &adapter->hw with just hw where applicable
090b17952826eb3c5d712b6d4f90a292fe4acc93 igb: add pci_dev in few spots to clean up use of dev_err/info/warn
c809d2276cb035228cd9e83e2ca7d2b902c61cef igb: limit minimum mtu to 68 to keep ip bound to interface
559e9c4987e90e278db347b0a2ba423e7e496fd3 igb: cleanup whitespace issues in igb_main.c
d6b9076f1b5dd22b63f0178923f420f4e0f81a8d igb: Fix warnings in igb_set_ringparam()
0e15439ae5fefe438056a26a00aa3c6a9de454e9 igb: change type for ring sizes to u16 in igb_set_ring_param
971d1d3a7e9f03af870909fddfc3b2fc08e4f5b1 igb: removed unused tx/rx total bytes/packets from adapter struct
dbabb065802a46d64b8869ba97674bfa90b55d83 igb: check for packets on all tx rings when link is down
d1eff35061b9346cb9bef2b79d9d99c8c096df13 igb: only recycle page if it is on our numa node
a0607fd3a25ba1848a63a0d925e36d914735ab47 drivers/net: request_irq - Remove unnecessary leading & from second arg
55cac248caa4a5f181a11cd2f269a672bef3d3b5 igb: Add full support for 82580 devices
52a1dd4d15cc28da5d3e9bfb1526c314d65fdff8 igb: cleanup usage of virtualization registers
d2ba2ed8fe3aa796a671a6922119d7171bb49515 igb: add support for Intel I350 Gigabit Network Connection
b894fa2627e28c078740dc7041cd08c7e2c353ab igb: Add support for 82576 ET2 Quad Port Server Adapter
d5aa22520dbb49e726420ca56b3dcfe56724d33e igb: restrict WoL for 82576 ET2 Quad Port Server Adapter
feeb2721a7a0bd0cfa5b8847f80aec93aa2cc00d igb: make certain to reassign legacy interrupt vectors after reset
78b1f6070fa16cb442a7c48e8f5364cd84a88b90 igb: always use adapter->itr as EITR value

Brew Build:
https://brewweb.devel.redhat.com/taskinfo?taskID=2706943

Test Status:
I have tested the changes on several systems with igb NICs.
Intel also gave positive feedback.

  Stefan
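
For reviewers unfamiliar with the netdev_tx_t conversion pulled in above
(3b29a56d31d5), the sketch below shows roughly what the converted xmit
path looks like. foo_xmit_frame is a hypothetical handler used only for
illustration, not code from this patch:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t foo_xmit_frame(struct sk_buff *skb,
					  struct net_device *netdev)
	{
		/* ring full: leave the queue stopped, the core will requeue */
		if (netif_queue_stopped(netdev))
			return NETDEV_TX_BUSY;

		/* ... map the skb, post descriptors, bump the tail ... */

		dev_kfree_skb_any(skb);	/* stand-in for the real tx path */
		return NETDEV_TX_OK;
	}

The backported igb xmit handlers (igb_xmit_frame_ring_adv below) follow
the same NETDEV_TX_OK/NETDEV_TX_BUSY convention after this update.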

diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 8355613..aaf99e0 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -94,6 +94,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 	case E1000_DEV_ID_82576_FIBER:
 	case E1000_DEV_ID_82576_SERDES:
 	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 	case E1000_DEV_ID_82576_SERDES_QUAD:
 		mac->type = e1000_82576;
 		break;
@@ -104,6 +105,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 	case E1000_DEV_ID_82580_COPPER_DUAL:
 		mac->type = e1000_82580;
 		break;
+	case E1000_DEV_ID_I350_COPPER:
+	case E1000_DEV_ID_I350_FIBER:
+	case E1000_DEV_ID_I350_SERDES:
+	case E1000_DEV_ID_I350_SGMII:
+		mac->type = e1000_i350;
+		break;
 	default:
 		return -E1000_ERR_MAC_INIT;
 		break;
@@ -153,8 +160,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
 	if (mac->type == e1000_82580)
 		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+	if (mac->type == e1000_i350)
+		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
 	/* reset */
-	if (mac->type == e1000_82580)
+	if (mac->type >= e1000_82580)
 		mac->ops.reset_hw = igb_reset_hw_82580;
 	else
 		mac->ops.reset_hw = igb_reset_hw_82575;
@@ -225,7 +234,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		phy->ops.reset              = igb_phy_hw_reset_sgmii_82575;
 		phy->ops.read_reg           = igb_read_phy_reg_sgmii_82575;
 		phy->ops.write_reg          = igb_write_phy_reg_sgmii_82575;
-	} else if (hw->mac.type == e1000_82580) {
+	} else if (hw->mac.type >= e1000_82580) {
 		phy->ops.reset              = igb_phy_hw_reset;
 		phy->ops.read_reg           = igb_read_phy_reg_82580;
 		phy->ops.write_reg          = igb_write_phy_reg_82580;
@@ -261,6 +270,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state;
 		break;
 	case I82580_I_PHY_ID:
+	case I350_I_PHY_ID:
 		phy->type                   = e1000_phy_82580;
 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
 		phy->ops.get_cable_length   = igb_get_cable_length_82580;
@@ -925,6 +935,11 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
 	for (i = 0; i < mac->mta_reg_count; i++)
 		array_wr32(E1000_MTA, i, 0);
 
+	/* Zero out the Unicast HASH table */
+	hw_dbg("Zeroing the UTA\n");
+	for (i = 0; i < mac->uta_reg_count; i++)
+		array_wr32(E1000_UTA, i, 0);
+
 	/* Setup link and flow control */
 	ret_val = igb_setup_link(hw);
 
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index bd8e275..4fb1001 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -36,9 +36,10 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
                                      (ID_LED_DEF1_DEF2 <<  4) | \
                                      (ID_LED_OFF1_ON2))
 
-#define E1000_RAR_ENTRIES_82575   16
-#define E1000_RAR_ENTRIES_82576   24
-#define E1000_RAR_ENTRIES_82580   24
+#define E1000_RAR_ENTRIES_82575        16
+#define E1000_RAR_ENTRIES_82576        24
+#define E1000_RAR_ENTRIES_82580        24
+#define E1000_RAR_ENTRIES_I350         32
 
 #define E1000_SW_SYNCH_MB              0x00000100
 #define E1000_STAT_DEV_RST_SET         0x00100000
@@ -207,6 +208,15 @@ struct e1000_adv_tx_context_desc {
 #define E1000_IOVCTL 0x05BBC
 #define E1000_IOVCTL_REUSE_VFQ 0x00000001
 
+#define E1000_RPLOLR_STRVLAN   0x40000000
+#define E1000_RPLOLR_STRCRC    0x80000000
+
+#define E1000_DTXCTL_8023LL     0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN     0x0020
+#define E1000_DTXCTL_SPOOF_INT  0x0040
+
 #define ALL_QUEUES   0xFFFF
 
 /* RX packet buffer size defines */
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 357a4da..9d60b7a 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -289,8 +289,8 @@
 #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
 
 /* Receive Checksum Control */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
 #define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
-#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
 #define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
 
 /* Header split receive */
@@ -599,6 +599,7 @@
 #define M88E1111_I_PHY_ID    0x01410CC0
 #define IGP03E1000_E_PHY_ID  0x02A80390
 #define I82580_I_PHY_ID      0x015403A0
+#define I350_I_PHY_ID        0x015403B0
 #define M88_VENDOR           0x0141
 
 /* M88E1000 Specific Registers */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 6564914..c9c6fd2 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -41,6 +41,7 @@ struct e1000_hw;
 #define E1000_DEV_ID_82576_FIBER              0x10E6
 #define E1000_DEV_ID_82576_SERDES             0x10E7
 #define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2    0x1526
 #define E1000_DEV_ID_82576_NS                 0x150A
 #define E1000_DEV_ID_82576_NS_SERDES          0x1518
 #define E1000_DEV_ID_82576_SERDES_QUAD        0x150D
@@ -52,6 +53,10 @@ struct e1000_hw;
 #define E1000_DEV_ID_82580_SERDES             0x1510
 #define E1000_DEV_ID_82580_SGMII              0x1511
 #define E1000_DEV_ID_82580_COPPER_DUAL        0x1516
+#define E1000_DEV_ID_I350_COPPER              0x1521
+#define E1000_DEV_ID_I350_FIBER               0x1522
+#define E1000_DEV_ID_I350_SERDES              0x1523
+#define E1000_DEV_ID_I350_SGMII               0x1524
 
 #define E1000_REVISION_2 2
 #define E1000_REVISION_4 4
@@ -71,6 +76,7 @@ enum e1000_mac_type {
 	e1000_82575,
 	e1000_82576,
 	e1000_82580,
+	e1000_i350,
 	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
 };
 
@@ -353,6 +359,7 @@ struct e1000_mac_info {
 	u16 ifs_ratio;
 	u16 ifs_step_size;
 	u16 mta_reg_count;
+	u16 uta_reg_count;
 
 	/* Maximum size of the MTA register table in all supported adapters */
 	#define MAX_MTA_REG 128
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index ebc02ea..bb112fb 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -58,10 +58,12 @@
 #define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
 
 #define E1000_VF_RESET            0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
-#define E1000_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
-#define E1000_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
-#define E1000_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
+#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST    0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN         0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE          0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC      0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_MULTICAST    (0x02 << E1000_VT_MSGINFO_SHIFT)
 
 #define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
 
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index d66e91a..5c9d73e 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1789,9 +1789,12 @@ out:
  **/
 s32 igb_phy_sw_reset(struct e1000_hw *hw)
 {
-	s32 ret_val;
+	s32 ret_val = 0;
 	u16 phy_ctrl;
 
+	if (!(hw->phy.ops.read_reg))
+		goto out;
+
 	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
 	if (ret_val)
 		goto out;
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 9be0f42..06fe981 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -76,6 +76,8 @@
 #define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
 #define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
 
+#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+
 /* Split and Replication RX Control - RW */
 #define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
 /*
@@ -222,10 +224,17 @@
 #define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
 #define E1000_RA       0x05400  /* Receive Address - RW Array */
 #define E1000_RA2      0x054E0  /* 2nd half of receive address array - RW Array */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
 #define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
                                        (0x054E0 + ((_i - 16) * 8)))
 #define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
                                        (0x054E4 + ((_i - 16) * 8)))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
 #define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
 #define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
 #define E1000_WUC      0x05800  /* Wakeup Control - RW */
@@ -266,6 +275,7 @@
 #define E1000_QDE       0x02408 /* Queue Drop Enable - RW */
 #define E1000_DTXSWC    0x03500 /* DMA Tx Switch Control - RW */
 #define E1000_RPLOLR    0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
 #define E1000_IOVTCL    0x05BBC /* IOV Control Register */
 /* These act per VF so an array friendly macro is used */
 #define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index e92ae1d..344356c 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -51,11 +51,13 @@ struct igb_adapter;
 #define IGB_DEFAULT_ITR                    3 /* dynamic */
 #define IGB_MAX_ITR_USECS              10000
 #define IGB_MIN_ITR_USECS                 10
+#define NON_Q_VECTORS                      1
+#define MAX_Q_VECTORS                      8
 
 /* Transmit and receive queues */
 #define IGB_MAX_RX_QUEUES     (adapter->vfs_allocated_count ? \
                                (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4)
-#define IGB_MAX_TX_QUEUES     IGB_MAX_RX_QUEUES
+#define IGB_MAX_TX_QUEUES     IGB_ABS_MAX_TX_QUEUES
 #define IGB_ABS_MAX_TX_QUEUES     1
 
 #define IGB_MAX_VF_MC_ENTRIES              30
@@ -72,6 +74,8 @@ struct vf_data_storage {
 };
 
 #define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
+#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
+#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
 
 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -84,9 +88,13 @@ struct vf_data_storage {
  *           descriptors until either it has this many to write back, or the
  *           ITR timer expires.
  */
-#define IGB_RX_PTHRESH                    16
+#define IGB_RX_PTHRESH                    (hw->mac.type <= e1000_82576 ? 16 : 8)
 #define IGB_RX_HTHRESH                     8
 #define IGB_RX_WTHRESH                     1
+#define IGB_TX_PTHRESH                     8
+#define IGB_TX_HTHRESH                     1
+#define IGB_TX_WTHRESH                     ((hw->mac.type == e1000_82576 && \
+                                             adapter->msix_entries) ? 0 : 16)
 
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -125,63 +133,91 @@ struct igb_buffer {
 			unsigned long time_stamp;
 			u16 length;
 			u16 next_to_watch;
+			u16 mapped_as_page;
 		};
 		/* RX */
 		struct {
 			struct page *page;
-			u64 page_dma;
-			unsigned int page_offset;
+			dma_addr_t page_dma;
+			u16 page_offset;
 		};
 	};
 };
 
-struct igb_queue_stats {
+struct igb_tx_queue_stats {
 	u64 packets;
 	u64 bytes;
+	u64 restart_queue;
 };
 
-struct igb_ring {
+struct igb_rx_queue_stats {
+	u64 packets;
+	u64 bytes;
+	u64 drops;
+	u64 csum_err;
+	u64 alloc_failed;
+};
+
+struct igb_q_vector {
 	struct igb_adapter *adapter; /* backlink */
-	void *desc;                  /* descriptor ring memory */
-	dma_addr_t dma;              /* phys address of the ring */
-	unsigned int size;           /* length of desc. ring in bytes */
-	unsigned int count;          /* number of desc. in the ring */
-	u16 next_to_use;
-	u16 next_to_clean;
-	u16 head;
-	u16 tail;
-	struct igb_buffer *buffer_info; /* array of buffer info structs */
+	struct igb_ring *rx_ring;
+	struct igb_ring *tx_ring;
+	struct napi_struct napi;
+	struct net_device *dummy_netdev; /* dummy for q_vector */
 
 	u32 eims_value;
-	u32 itr_val;
-	u16 itr_register;
 	u16 cpu;
 
-	u16 queue_index;
-	u16 reg_idx;
+	u16 itr_val;
+	u8 set_itr;
+	u8 itr_shift;
+	void __iomem *itr_register;
+
+	char name[IFNAMSIZ + 9];
+};
+
+struct igb_ring {
+	struct igb_q_vector *q_vector; /* backlink to q_vector */
+	struct net_device *netdev;     /* back pointer to net_device */
+	struct pci_dev *pdev;          /* pci device for dma mapping */
+	dma_addr_t dma;                /* phys address of the ring */
+	void *desc;                    /* descriptor ring memory */
+	unsigned int size;             /* length of desc. ring in bytes */
+	u16 count;                     /* number of desc. in the ring */
+	u16 next_to_use;
+	u16 next_to_clean;
+	u8 queue_index;
+	u8 reg_idx;
+	void __iomem *head;
+	void __iomem *tail;
+	struct igb_buffer *buffer_info; /* array of buffer info structs */
+
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
+	u32 flags;
+
 	union {
 		/* TX */
 		struct {
 			spinlock_t tx_clean_lock;
 			spinlock_t tx_lock;
+			struct igb_tx_queue_stats tx_stats;
 			bool detect_tx_hung;
 		};
 		/* RX */
 		struct {
-			struct igb_queue_stats rx_stats;
-			struct napi_struct napi;
-			struct net_device *netdev;
-			struct igb_ring *buddy;
-			int set_itr;
+			struct igb_rx_queue_stats rx_stats;
+			u32 rx_buffer_len;
 		};
 	};
-
-	char name[IFNAMSIZ + 5];
 };
 
+#define IGB_RING_FLAG_RX_CSUM        0x00000001 /* RX CSUM enabled */
+
+#define IGB_RING_FLAG_TX_CTX_IDX     0x00000001 /* HW requires context index */
+
+#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
 #define E1000_RX_DESC_ADV(R, i)	    \
 	(&(((union e1000_adv_rx_desc *)((R).desc))[i]))
 #define E1000_TX_DESC_ADV(R, i)	    \
@@ -189,6 +225,15 @@ struct igb_ring {
 #define E1000_TX_CTXTDESC_ADV(R, i)	    \
 	(&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
 
+/* igb_desc_unused - calculate if we have unused descriptors */
+static inline int igb_desc_unused(struct igb_ring *ring)
+{
+	if (ring->next_to_clean > ring->next_to_use)
+		return ring->next_to_clean - ring->next_to_use - 1;
+
+	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
+
 /* board specific private data structure */
 
 struct igb_adapter {
@@ -197,18 +242,14 @@ struct igb_adapter {
 	struct vlan_group *vlgrp;
 	u16 mng_vlan_id;
 	u32 bd_number;
-	u32 rx_buffer_len;
 	u32 wol;
 	u32 en_mng_pt;
 	u16 link_speed;
 	u16 link_duplex;
-	unsigned int total_tx_bytes;
-	unsigned int total_tx_packets;
-	unsigned int total_rx_bytes;
-	unsigned int total_rx_packets;
+
 	/* Interrupt Throttle Rate */
-	u32 itr;
-	u32 itr_setting;
+	u32 rx_itr_setting;
+	u32 tx_itr_setting;
 	u16 tx_itr;
 	u16 rx_itr;
 
@@ -221,13 +262,7 @@ struct igb_adapter {
 
 	/* TX */
 	struct igb_ring *tx_ring;      /* One per active queue */
-	unsigned int restart_queue;
 	unsigned long tx_queue_len;
-	u32 txd_cmd;
-	u32 gotc;
-	u64 gotc_old;
-	u64 tpt_old;
-	u64 colc_old;
 	u32 tx_timeout_count;
 
 	/* RX */
@@ -235,13 +270,6 @@ struct igb_adapter {
 	int num_tx_queues;
 	int num_rx_queues;
 
-	u64 hw_csum_err;
-	u64 hw_csum_good;
-	u32 alloc_rx_buff_failed;
-	bool rx_csum;
-	u32 gorc;
-	u64 gorc_old;
-	u16 rx_ps_hdr_size;
 	u32 max_frame_size;
 	u32 min_frame_size;
 
@@ -261,6 +289,9 @@ struct igb_adapter {
 	struct igb_ring test_rx_ring;
 
 	int msg_enable;
+
+	unsigned int num_q_vectors;
+	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
 	struct msix_entry *msix_entries;
 	u32 eims_enable_mask;
 	u32 eims_other;
@@ -272,8 +303,9 @@ struct igb_adapter {
 	unsigned int dca_enabled;
 #endif
 	u32 eeprom_wol;
-	unsigned int tx_ring_count;
-	unsigned int rx_ring_count;
+
+	u16 tx_ring_count;
+	u16 rx_ring_count;
 	unsigned int vfs_allocated_count;
 	struct vf_data_storage *vf_data;
 };
@@ -281,7 +313,7 @@ struct igb_adapter {
 #define IGB_FLAG_HAS_MSI           (1 << 0)
 #define IGB_FLAG_DCA_ENABLED       (1 << 1)
 #define IGB_FLAG_QUAD_PORT_A       (1 << 2)
-#define IGB_FLAG_NEED_CTX_IDX      (1 << 3)
+#define IGB_FLAG_QUEUE_PAIRS       (1 << 3)
 
 enum e1000_state_t {
 	__IGB_TESTING,
@@ -302,10 +334,18 @@ extern void igb_down(struct igb_adapter *);
 extern void igb_reinit_locked(struct igb_adapter *);
 extern void igb_reset(struct igb_adapter *);
 extern int igb_set_spd_dplx(struct igb_adapter *, u16);
-extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *);
+extern int igb_setup_tx_resources(struct igb_ring *);
+extern int igb_setup_rx_resources(struct igb_ring *);
 extern void igb_free_tx_resources(struct igb_ring *);
 extern void igb_free_rx_resources(struct igb_ring *);
+extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_setup_tctl(struct igb_adapter *);
+extern void igb_setup_rctl(struct igb_adapter *);
+extern int igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
+extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
+					   struct igb_buffer *);
+extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
 extern void igb_update_stats(struct igb_adapter *);
 extern void igb_set_ethtool_ops(struct net_device *);
 
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 0fa46b0..b5a033b 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -34,17 +34,23 @@
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
 #include <linux/ethtool.h>
+#include <linux/sched.h>
 
 #include "igb.h"
 
+enum {NETDEV_STATS, IGB_STATS};
+
 struct igb_stats {
 	char stat_string[ETH_GSTRING_LEN];
+	int type;
 	int sizeof_stat;
 	int stat_offset;
 };
 
-#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \
-		      offsetof(struct igb_adapter, m)
+#define IGB_STAT(m)		IGB_STATS, \
+				FIELD_SIZEOF(struct igb_adapter, m), \
+				offsetof(struct igb_adapter, m)
+
 static const struct igb_stats igb_gstrings_stats[] = {
 	{ "rx_packets", IGB_STAT(stats.gprc) },
 	{ "tx_packets", IGB_STAT(stats.gptc) },
@@ -75,7 +81,6 @@ static const struct igb_stats igb_gstrings_stats[] = {
 	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
 	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
 	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
-	{ "tx_restart_queue", IGB_STAT(restart_queue) },
 	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
 	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
 	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
@@ -86,33 +91,32 @@ static const struct igb_stats igb_gstrings_stats[] = {
 	{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
 	{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
 	{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
-	{ "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
-	{ "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
 	{ "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
-	{ "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
 	{ "tx_smbus", IGB_STAT(stats.mgptc) },
 	{ "rx_smbus", IGB_STAT(stats.mgprc) },
 	{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
 };
 
 #define IGB_QUEUE_STATS_LEN \
-	((((struct igb_adapter *)netdev->priv)->num_rx_queues + \
-	 ((struct igb_adapter *)netdev->priv)->num_tx_queues) * \
-	(sizeof(struct igb_queue_stats) / sizeof(u64)))
+	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
+	  (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
+	 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
+	  (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
 #define IGB_GLOBAL_STATS_LEN	\
-	sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
+	(sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
 #define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
 static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test  (offline)", "Eeprom test    (offline)",
 	"Interrupt test (offline)", "Loopback test  (offline)",
 	"Link test   (on/offline)"
 };
-#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN
+#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
 
 static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	u32 status;
 
 	if (hw->phy.media_type == e1000_media_type_copper) {
 
@@ -147,17 +151,20 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	ecmd->transceiver = XCVR_INTERNAL;
 
-	if (rd32(E1000_STATUS) & E1000_STATUS_LU) {
+	status = rd32(E1000_STATUS);
 
-		adapter->hw.mac.ops.get_speed_and_duplex(hw,
-					&adapter->link_speed,
-					&adapter->link_duplex);
-		ecmd->speed = adapter->link_speed;
+	if (status & E1000_STATUS_LU) {
 
-		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
-		 *          and HALF_DUPLEX != DUPLEX_HALF */
+		if ((status & E1000_STATUS_SPEED_1000) ||
+		    hw->phy.media_type != e1000_media_type_copper)
+			ecmd->speed = SPEED_1000;
+		else if (status & E1000_STATUS_SPEED_100)
+			ecmd->speed = SPEED_100;
+		else
+			ecmd->speed = SPEED_10;
 
-		if (adapter->link_duplex == FULL_DUPLEX)
+		if ((status & E1000_STATUS_FD) ||
+		    hw->phy.media_type != e1000_media_type_copper)
 			ecmd->duplex = DUPLEX_FULL;
 		else
 			ecmd->duplex = DUPLEX_HALF;
@@ -248,8 +255,9 @@ static int igb_set_pauseparam(struct net_device *netdev,
 		if (netif_running(adapter->netdev)) {
 			igb_down(adapter);
 			igb_up(adapter);
-		} else
+		} else {
 			igb_reset(adapter);
+		}
 	} else {
 		if (pause->rx_pause && pause->tx_pause)
 			hw->fc.requested_mode = e1000_fc_full;
@@ -273,13 +281,20 @@ static int igb_set_pauseparam(struct net_device *netdev,
 static u32 igb_get_rx_csum(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	return adapter->rx_csum;
+	return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
 }
 
 static int igb_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	adapter->rx_csum = data;
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		if (data)
+			adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
+		else
+			adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
+	}
 
 	return 0;
 }
@@ -485,19 +500,10 @@ static void igb_get_regs(struct net_device *netdev,
 	regs_buff[119] = adapter->stats.scvpc;
 	regs_buff[120] = adapter->stats.hrmpc;
 
-	/* These should probably be added to e1000_regs.h instead */
-	#define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
-	#define E1000_IP4AT_REG(_i)   (0x05840 + ((_i) * 8))
-	#define E1000_IP6AT_REG(_i)   (0x05880 + ((_i) * 4))
-	#define E1000_WUPM_REG(_i)    (0x05A00 + ((_i) * 4))
-	#define E1000_FFMT_REG(_i)    (0x09000 + ((_i) * 8))
-	#define E1000_FFVT_REG(_i)    (0x09800 + ((_i) * 8))
-	#define E1000_FFLT_REG(_i)    (0x05F00 + ((_i) * 8))
-
 	for (i = 0; i < 4; i++)
 		regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i));
+		regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
 	for (i = 0; i < 4; i++)
 		regs_buff[129 + i] = rd32(E1000_RDBAL(i));
 	for (i = 0; i < 4; i++)
@@ -721,18 +727,18 @@ static int igb_set_ringparam(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct igb_ring *temp_ring;
-	int i, err;
-	u32 new_rx_count, new_tx_count;
+	int i, err = 0;
+	u16 new_rx_count, new_tx_count;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD);
-	new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD);
+	new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
+	new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
 	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
-	new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD);
-	new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
+	new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
+	new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
 	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
 	if ((new_tx_count == adapter->tx_ring_count) &&
@@ -741,18 +747,30 @@ static int igb_set_ringparam(struct net_device *netdev,
 		return 0;
 	}
 
+	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (!netif_running(adapter->netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].count = new_tx_count;
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].count = new_rx_count;
+		adapter->tx_ring_count = new_tx_count;
+		adapter->rx_ring_count = new_rx_count;
+		goto clear_reset;
+	}
+
 	if (adapter->num_tx_queues > adapter->num_rx_queues)
 		temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
 	else
 		temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
-	if (!temp_ring)
-		return -ENOMEM;
 
-	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-		msleep(1);
+	if (!temp_ring) {
+		err = -ENOMEM;
+		goto clear_reset;
+	}
 
-	if (netif_running(adapter->netdev))
-		igb_down(adapter);
+	igb_down(adapter);
 
 	/*
 	 * We can't just free everything and then setup again,
@@ -765,7 +783,7 @@ static int igb_set_ringparam(struct net_device *netdev,
 
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			temp_ring[i].count = new_tx_count;
-			err = igb_setup_tx_resources(adapter, &temp_ring[i]);
+			err = igb_setup_tx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
@@ -790,7 +808,7 @@ static int igb_set_ringparam(struct net_device *netdev,
 
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			temp_ring[i].count = new_rx_count;
-			err = igb_setup_rx_resources(adapter, &temp_ring[i]);
+			err = igb_setup_rx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
@@ -809,14 +827,11 @@ static int igb_set_ringparam(struct net_device *netdev,
 
 		adapter->rx_ring_count = new_rx_count;
 	}
-
-	err = 0;
 err_setup:
-	if (netif_running(adapter->netdev))
-		igb_up(adapter);
-
-	clear_bit(__IGB_RESETTING, &adapter->state);
+	igb_up(adapter);
 	vfree(temp_ring);
+clear_reset:
+	clear_bit(__IGB_RESETTING, &adapter->state);
 	return err;
 }
 
@@ -847,6 +862,49 @@ struct igb_reg_test {
 #define TABLE64_TEST_LO	5
 #define TABLE64_TEST_HI	6
 
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_FCAH,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_FCT,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_VET,	   0x100, 1,  PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
+	{ E1000_RDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_RDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	{ E1000_RDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_RDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	/* RDH is read-only for i350, only test RDT. */
+	{ E1000_RDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_RDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_FCRTH,	   0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+	{ E1000_FCTTV,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_TIPG,	   0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+	{ E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	{ E1000_TDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_TDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_TDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	{ E1000_TDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_TDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+	{ E1000_RCTL, 	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_RA,	   0, 16, TABLE64_TEST_LO,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RA,	   0, 16, TABLE64_TEST_HI,
+						0xC3FFFFFF, 0xFFFFFFFF },
+	{ E1000_RA2,	   0, 16, TABLE64_TEST_LO,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RA2,	   0, 16, TABLE64_TEST_HI,
+						0xC3FFFFFF, 0xFFFFFFFF },
+	{ E1000_MTA,	   0, 128, TABLE32_TEST,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ 0, 0, 0, 0 }
+};
+
 /* 82580 reg test */
 static struct igb_reg_test reg_test_82580[] = {
 	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -967,7 +1025,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 pat, val;
-	u32 _test[] =
+	static const u32 _test[] =
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
 	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
 		wr32(reg, (_test[pat] & write));
@@ -980,6 +1038,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 			return 1;
 		}
 	}
+
 	return 0;
 }
 
@@ -997,6 +1056,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 		*data = reg;
 		return 1;
 	}
+
 	return 0;
 }
 
@@ -1019,18 +1079,22 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 	u32 value, before, after;
 	u32 i, toggle;
 
-	toggle = 0x7FFFF3FF;
-
 	switch (adapter->hw.mac.type) {
+	case e1000_i350:
+		test = reg_test_i350;
+		toggle = 0x7FEFF3FF;
+		break;
 	case e1000_82580:
 		test = reg_test_82580;
 		toggle = 0x7FEFF3FF;
 		break;
 	case e1000_82576:
 		test = reg_test_82576;
+		toggle = 0x7FFFF3FF;
 		break;
 	default:
 		test = reg_test_82575;
+		toggle = 0x7FFFF3FF;
 		break;
 	}
 
@@ -1108,8 +1172,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
 	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
-		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp))
-		    < 0) {
+		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
 			*data = 1;
 			break;
 		}
@@ -1125,8 +1188,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
 
 static irqreturn_t igb_test_intr(int irq, void *data, struct pt_regs *regs)
 {
-	struct net_device *netdev = (struct net_device *) data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = (struct igb_adapter *) data;
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->test_icr |= rd32(E1000_ICR);
@@ -1146,28 +1208,28 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	/* Hook up test interrupt handler just for this test */
 	if (adapter->msix_entries) {
 		if (request_irq(adapter->msix_entries[0].vector,
-		                &igb_test_intr, 0, netdev->name, adapter)) {
+		                igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
 			return -1;
 		}
-
 	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		shared_int = false;
 		if (request_irq(irq,
-		                &igb_test_intr, 0, netdev->name, adapter)) {
+		                igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
 			return -1;
 		}
-	} else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
+	} else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
 				netdev->name, adapter)) {
 		shared_int = false;
-	} else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
+	} else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
 		 netdev->name, adapter)) {
 		*data = 1;
 		return -1;
 	}
 	dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
 		(shared_int ? "shared" : "unshared"));
+
 	/* Disable all the interrupts */
 	wr32(E1000_IMC, ~0);
 	msleep(10);
@@ -1183,6 +1245,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	case e1000_82580:
 		ics_mask = 0x77DCFED5;
 		break;
+	case e1000_i350:
+		ics_mask = 0x77DCFED5;
+		break;
 	default:
 		ics_mask = 0x7FFFFFFF;
 		break;
@@ -1276,183 +1341,51 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 
 static void igb_free_desc_rings(struct igb_adapter *adapter)
 {
-	struct igb_ring *tx_ring = &adapter->test_tx_ring;
-	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i;
-
-	if (tx_ring->desc && tx_ring->buffer_info) {
-		for (i = 0; i < tx_ring->count; i++) {
-			struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
-			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma, buf->length,
-						 PCI_DMA_TODEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (rx_ring->desc && rx_ring->buffer_info) {
-		for (i = 0; i < rx_ring->count; i++) {
-			struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
-			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma,
-						 IGB_RXBUFFER_2048,
-						 PCI_DMA_FROMDEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (tx_ring->desc) {
-		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
-				    tx_ring->dma);
-		tx_ring->desc = NULL;
-	}
-	if (rx_ring->desc) {
-		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
-				    rx_ring->dma);
-		rx_ring->desc = NULL;
-	}
-
-	kfree(tx_ring->buffer_info);
-	tx_ring->buffer_info = NULL;
-	kfree(rx_ring->buffer_info);
-	rx_ring->buffer_info = NULL;
-
-	return;
+	igb_free_tx_resources(&adapter->test_tx_ring);
+	igb_free_rx_resources(&adapter->test_rx_ring);
 }
 
 static int igb_setup_desc_rings(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	struct igb_buffer *buffer_info;
-	u32 rctl;
-	int i, ret_val;
+	struct e1000_hw *hw = &adapter->hw;
+	int ret_val;
 
 	/* Setup Tx descriptor ring and Tx buffers */
+	tx_ring->count = IGB_DEFAULT_TXD;
+	tx_ring->pdev = adapter->pdev;
+	tx_ring->netdev = adapter->netdev;
+	tx_ring->reg_idx = adapter->vfs_allocated_count;
 
-	if (!tx_ring->count)
-		tx_ring->count = IGB_DEFAULT_TXD;
-
-	tx_ring->buffer_info = kcalloc(tx_ring->count,
-				       sizeof(struct igb_buffer),
-				       GFP_KERNEL);
-	if (!tx_ring->buffer_info) {
+	if (igb_setup_tx_resources(tx_ring)) {
 		ret_val = 1;
 		goto err_nomem;
 	}
 
-	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
-	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
-	if (!tx_ring->desc) {
-		ret_val = 2;
-		goto err_nomem;
-	}
-	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
-
-	wr32(E1000_TDBAL(0),
-			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-	wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
-	wr32(E1000_TDLEN(0),
-			tx_ring->count * sizeof(union e1000_adv_tx_desc));
-	wr32(E1000_TDH(0), 0);
-	wr32(E1000_TDT(0), 0);
-	wr32(E1000_TCTL,
-			E1000_TCTL_PSP | E1000_TCTL_EN |
-			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
-			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
-
-	for (i = 0; i < tx_ring->count; i++) {
-		union e1000_adv_tx_desc *tx_desc;
-		struct sk_buff *skb;
-		unsigned int size = 1024;
-
-		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
-		skb = alloc_skb(size, GFP_KERNEL);
-		if (!skb) {
-			ret_val = 3;
-			goto err_nomem;
-		}
-		skb_put(skb, size);
-		buffer_info = &tx_ring->buffer_info[i];
-		buffer_info->skb = skb;
-		buffer_info->length = skb->len;
-		buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
-		                                  PCI_DMA_TODEVICE);
-		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
-		tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
-		                              E1000_ADVTXD_PAYLEN_SHIFT;
-		tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
-		tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
-		                                          E1000_TXD_CMD_IFCS |
-		                                          E1000_TXD_CMD_RS |
-		                                          E1000_ADVTXD_DTYP_DATA |
-		                                          E1000_ADVTXD_DCMD_DEXT);
-	}
+	igb_setup_tctl(adapter);
+	igb_configure_tx_ring(adapter, tx_ring);
 
 	/* Setup Rx descriptor ring and Rx buffers */
-
-	if (!rx_ring->count)
-		rx_ring->count = IGB_DEFAULT_RXD;
-
-	rx_ring->buffer_info = kcalloc(rx_ring->count,
-				       sizeof(struct igb_buffer),
-				       GFP_KERNEL);
-	if (!rx_ring->buffer_info) {
-		ret_val = 4;
+	rx_ring->count = IGB_DEFAULT_RXD;
+	rx_ring->pdev = adapter->pdev;
+	rx_ring->netdev = adapter->netdev;
+	rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
+	rx_ring->reg_idx = adapter->vfs_allocated_count;
+
+	if (igb_setup_rx_resources(rx_ring)) {
+		ret_val = 3;
 		goto err_nomem;
 	}
 
-	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
-	if (!rx_ring->desc) {
-		ret_val = 5;
-		goto err_nomem;
-	}
-	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+	/* set the default queue to queue 0 of PF */
+	wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
 
-	rctl = rd32(E1000_RCTL);
-	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	wr32(E1000_RDBAL(0),
-			((u64) rx_ring->dma & 0xFFFFFFFF));
-	wr32(E1000_RDBAH(0),
-			((u64) rx_ring->dma >> 32));
-	wr32(E1000_RDLEN(0), rx_ring->size);
-	wr32(E1000_RDH(0), 0);
-	wr32(E1000_RDT(0), 0);
-	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
-		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-	wr32(E1000_RCTL, rctl);
-	wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
-
-	for (i = 0; i < rx_ring->count; i++) {
-		union e1000_adv_rx_desc *rx_desc;
-		struct sk_buff *skb;
-
-		buffer_info = &rx_ring->buffer_info[i];
-		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
-		skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
-				GFP_KERNEL);
-		if (!skb) {
-			ret_val = 6;
-			goto err_nomem;
-		}
-		skb_reserve(skb, NET_IP_ALIGN);
-		buffer_info->skb = skb;
-		buffer_info->dma = pci_map_single(pdev, skb->data,
-		                                  IGB_RXBUFFER_2048,
-		                                  PCI_DMA_FROMDEVICE);
-		rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
-		memset(skb->data, 0x00, skb->len);
-	}
+	/* enable receive ring */
+	igb_setup_rctl(adapter);
+	igb_configure_rx_ring(adapter, rx_ring);
+
+	igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
 
 	return 0;
 
@@ -1531,7 +1464,10 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 reg;
 
-	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+	reg = rd32(E1000_CTRL_EXT);
+
+	/* use CTRL_EXT to identify link type as SGMII can appear as copper */
+	if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
 		reg = rd32(E1000_RCTL);
 		reg |= E1000_RCTL_LBM_TCVR;
 		wr32(E1000_RCTL, reg);
@@ -1562,11 +1498,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 		wr32(E1000_PCS_LCTL, reg);
 
 		return 0;
-	} else if (hw->phy.media_type == e1000_media_type_copper) {
-		return igb_set_phy_loopback(adapter);
 	}
 
-	return 7;
+	return igb_set_phy_loopback(adapter);
 }
 
 static void igb_loopback_cleanup(struct igb_adapter *adapter)
@@ -1592,35 +1526,99 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
 				    unsigned int frame_size)
 {
 	memset(skb->data, 0xFF, frame_size);
-	frame_size &= ~1;
-	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
-	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
-	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+	frame_size /= 2;
+	memset(&skb->data[frame_size], 0xAA, frame_size - 1);
+	memset(&skb->data[frame_size + 10], 0xBE, 1);
+	memset(&skb->data[frame_size + 12], 0xAF, 1);
 }
 
 static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
-	frame_size &= ~1;
-	if (*(skb->data + 3) == 0xFF)
-		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
-		   (*(skb->data + frame_size / 2 + 12) == 0xAF))
+	frame_size /= 2;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size + 10) == 0xBE) &&
+		   (*(skb->data + frame_size + 12) == 0xAF)) {
 			return 0;
+		}
+	}
 	return 13;
 }
 
+static int igb_clean_test_rings(struct igb_ring *rx_ring,
+                                struct igb_ring *tx_ring,
+                                unsigned int size)
+{
+	union e1000_adv_rx_desc *rx_desc;
+	struct igb_buffer *buffer_info;
+	int rx_ntc, tx_ntc, count = 0;
+	u32 staterr;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		/* check rx buffer */
+		buffer_info = &rx_ring->buffer_info[rx_ntc];
+
+		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
+		pci_unmap_single(rx_ring->pdev,
+		                 buffer_info->dma,
+				 rx_ring->rx_buffer_len,
+				 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		/* verify contents of skb */
+		if (!igb_check_lbtest_frame(buffer_info->skb, size))
+			count++;
+
+		/* unmap buffer on tx side */
+		buffer_info = &tx_ring->buffer_info[tx_ntc];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+
+		/* increment rx/tx next to clean counters */
+		rx_ntc++;
+		if (rx_ntc == rx_ring->count)
+			rx_ntc = 0;
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+
+		/* fetch next descriptor */
+		rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	/* re-map buffers to ring, store next to clean values */
+	igb_alloc_rx_buffers_adv(rx_ring, count);
+	rx_ring->next_to_clean = rx_ntc;
+	tx_ring->next_to_clean = tx_ntc;
+
+	return count;
+}
+
 static int igb_run_loopback_test(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i, j, k, l, lc, good_cnt;
-	int ret_val = 0;
-	unsigned long time;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size = 1024;
+	int tx_ret_val;
+	struct sk_buff *skb;
+
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return 11;
 
-	wr32(E1000_RDT(0), rx_ring->count - 1);
+	/* place data into test skb */
+	igb_create_lbtest_frame(skb, size);
+	skb_put(skb, size);
 
-	/* Calculate the loop count based on the largest descriptor ring
+	/*
+	 * Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
 	 * send/receive pairs during each loop
 	 */
@@ -1630,50 +1628,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 	else
 		lc = ((rx_ring->count / 64) * 2) + 1;
 
-	k = l = 0;
 	for (j = 0; j <= lc; j++) { /* loop count loop */
-		for (i = 0; i < 64; i++) { /* send the packets */
-			igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
-						1024);
-			pci_dma_sync_single_for_device(pdev,
-				tx_ring->buffer_info[k].dma,
-				tx_ring->buffer_info[k].length,
-				PCI_DMA_TODEVICE);
-			k++;
-			if (k == tx_ring->count)
-				k = 0;
-		}
-		wr32(E1000_TDT(0), k);
-		msleep(200);
-		time = jiffies; /* set the start time for the receive */
+		/* reset count of good packets */
 		good_cnt = 0;
-		do { /* receive the sent packets */
-			pci_dma_sync_single_for_cpu(pdev,
-					rx_ring->buffer_info[l].dma,
-					IGB_RXBUFFER_2048,
-					PCI_DMA_FROMDEVICE);
-
-			ret_val = igb_check_lbtest_frame(
-					     rx_ring->buffer_info[l].skb, 1024);
-			if (!ret_val)
+
+		/* place 64 packets on the transmit queue*/
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
 				good_cnt++;
-			l++;
-			if (l == rx_ring->count)
-				l = 0;
-			/* time + 20 msecs (200 msecs on 2.4) is more than
-			 * enough time to complete the receives, if it's
-			 * exceeded, break and error off
-			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		}
+
 		if (good_cnt != 64) {
-			ret_val = 13; /* ret_val is the same as mis-compare */
+			ret_val = 12;
 			break;
 		}
-		if (jiffies >= (time + 20)) {
-			ret_val = 14; /* error code for time out error */
+
+		/* allow 200 milliseconds for packets to go from tx to rx */
+		msleep(200);
+
+		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
+		if (good_cnt != 64) {
+			ret_val = 13;
 			break;
 		}
 	} /* end loop count loop */
+
+	/* free the original skb */
+	kfree_skb(skb);
+
 	return ret_val;
 }
 
@@ -1726,8 +1710,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
 		if (hw->mac.autoneg)
 			msleep(4000);
 
-		if (!(rd32(E1000_STATUS) &
-		      E1000_STATUS_LU))
+		if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
 			*data = 1;
 	}
 	return *data;
@@ -1831,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
 		retval = 0;
 		break;
 	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 		/* quad port adapters only support WoL on port A */
 		if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
 			wol->supported = 0;
@@ -1909,7 +1893,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 		adapter->wol |= E1000_WUFC_BC;
 	if (wol->wolopts & WAKE_MAGIC)
 		adapter->wol |= E1000_WUFC_MAG;
-
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
 	return 0;
@@ -1922,12 +1905,19 @@ static int igb_phys_id(struct net_device *netdev, u32 data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	unsigned long timeout;
 
-	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
-		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+	timeout = data * 1000;
+
+	/*
+	 *  msleep_interruptable only accepts unsigned int so we are limited
+	 * in how long a duration we can wait
+	 */
+	if (!timeout || timeout > UINT_MAX)
+		timeout = UINT_MAX;
 
 	igb_blink_led(hw);
-	msleep_interruptible(data * 1000);
+	msleep_interruptible(timeout);
 
 	igb_led_off(hw);
 	clear_bit(IGB_LED_ON, &adapter->led_status);
@@ -1940,7 +1930,6 @@ static int igb_set_coalesce(struct net_device *netdev,
 			    struct ethtool_coalesce *ec)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
 	int i;
 
 	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1949,17 +1938,39 @@ static int igb_set_coalesce(struct net_device *netdev,
 	    (ec->rx_coalesce_usecs == 2))
 		return -EINVAL;
 
+	if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+	    ((ec->tx_coalesce_usecs > 3) &&
+	     (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+	    (ec->tx_coalesce_usecs == 2))
+		return -EINVAL;
+
+	if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+		return -EINVAL;
+
 	/* convert to rate of irq's per second */
-	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
-		adapter->itr_setting = ec->rx_coalesce_usecs;
-		adapter->itr = IGB_START_ITR;
-	} else {
-		adapter->itr_setting = ec->rx_coalesce_usecs << 2;
-		adapter->itr = adapter->itr_setting;
-	}
+	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+	else
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		wr32(adapter->rx_ring[i].itr_register, adapter->itr);
+	/* convert to rate of irq's per second */
+	if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
+		adapter->tx_itr_setting = adapter->rx_itr_setting;
+	else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+	else
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		if (q_vector->rx_ring)
+			q_vector->itr_val = adapter->rx_itr_setting;
+		else
+			q_vector->itr_val = adapter->tx_itr_setting;
+		if (q_vector->itr_val && q_vector->itr_val <= 3)
+			q_vector->itr_val = IGB_START_ITR;
+		q_vector->set_itr = 1;
+	}
 
 	return 0;
 }
@@ -1969,15 +1980,21 @@ static int igb_get_coalesce(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->itr_setting <= 3)
-		ec->rx_coalesce_usecs = adapter->itr_setting;
+	if (adapter->rx_itr_setting <= 3)
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
 	else
-		ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
+		if (adapter->tx_itr_setting <= 3)
+			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+		else
+			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+	}
 
 	return 0;
 }
 
-
 static int igb_nway_reset(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1996,20 +2013,36 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	u64 *queue_stat;
-	int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
+	int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64);
+	int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64);
 	int j;
 	int i;
+	char *p = NULL;
 
 	igb_update_stats(adapter);
+
 	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
-		char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset;
+		switch (igb_gstrings_stats[i].type) {
+		case IGB_STATS:
+			p = (char *) adapter +
+					igb_gstrings_stats[i].stat_offset;
+			break;
+		}
+
 		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		int k;
+		queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
+		for (k = 0; k < stat_count_tx; k++)
+			data[i + k] = queue_stat[k];
+		i += k;
+	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
 		int k;
 		queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
-		for (k = 0; k < stat_count; k++)
+		for (k = 0; k < stat_count_rx; k++)
 			data[i + k] = queue_stat[k];
 		i += k;
 	}
@@ -2037,12 +2070,20 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_restart", i);
+			p += ETH_GSTRING_LEN;
 		}
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_drops", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_csum_err", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_alloc_failed", i);
+			p += ETH_GSTRING_LEN;
 		}
 /*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index fc9e5b8..bfddb5b 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -59,7 +59,11 @@ static const struct e1000_info *igb_info_tbl[] = {
 	[board_82575] = &e1000_82575_info,
 };
 
-static struct pci_device_id igb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -71,6 +75,7 @@ static struct pci_device_id igb_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
@@ -86,6 +91,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -94,35 +100,30 @@ static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
 static void igb_configure_tx(struct igb_adapter *);
 static void igb_configure_rx(struct igb_adapter *);
-static void igb_setup_rctl(struct igb_adapter *);
 static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
 static void igb_clean_rx_ring(struct igb_ring *);
-static void igb_set_multi(struct net_device *);
+static void igb_set_rx_mode(struct net_device *);
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct igb_adapter *);
-static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
-				  struct igb_ring *);
 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
+static void igb_set_uta(struct igb_adapter *adapter);
 static irqreturn_t igb_intr(int irq, void *, struct pt_regs *);
 static irqreturn_t igb_intr_msi(int irq, void *, struct pt_regs *);
 static irqreturn_t igb_msix_other(int irq, void *, struct pt_regs *);
-static irqreturn_t igb_msix_rx(int irq, void *, struct pt_regs *);
-static irqreturn_t igb_msix_tx(int irq, void *, struct pt_regs *);
+static irqreturn_t igb_msix_ring(int irq, void *,  struct pt_regs *);
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *);
-static void igb_update_tx_dca(struct igb_ring *);
+static void igb_update_dca(struct igb_q_vector *);
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_ring *);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct net_device *, int *);
-static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
-static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct igb_adapter *);
@@ -130,56 +131,14 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
 static void igb_vlan_rx_add_vid(struct net_device *, u16);
 static void igb_vlan_rx_kill_vid(struct net_device *, u16);
 static void igb_restore_vlan(struct igb_adapter *);
+static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
 static void igb_ping_all_vfs(struct igb_adapter *);
 static void igb_msg_task(struct igb_adapter *);
-static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
+static void igb_rcv_msg_from_vf(struct igb_adapter *, u32);
 static void igb_vmm_control(struct igb_adapter *);
 static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
 
-static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
-{
-	u32 reg_data;
-
-	reg_data = rd32(E1000_VMOLR(vfn));
-	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
-	            E1000_VMOLR_ROPE |   /* Accept packets matched in UTA */
-	            E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
-	            E1000_VMOLR_AUPE |   /* Accept untagged packets */
-	            E1000_VMOLR_STRVLAN; /* Strip vlan tags */
-	wr32(E1000_VMOLR(vfn), reg_data);
-}
-
-static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
-                                 int vfn)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	u32 vmolr;
-
-	/* if it isn't the PF check to see if VFs are enabled and
-	 * increase the size to support vlan tags */
-	if (vfn < adapter->vfs_allocated_count &&
-	    adapter->vf_data[vfn].vlans_enabled)
-		size += VLAN_TAG_SIZE;
-
-	vmolr = rd32(E1000_VMOLR(vfn));
-	vmolr &= ~E1000_VMOLR_RLPML_MASK;
-	vmolr |= size | E1000_VMOLR_LPE;
-	wr32(E1000_VMOLR(vfn), vmolr);
-
-	return 0;
-}
-
-static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
-{
-	u32 reg_data;
-
-	reg_data = rd32(E1000_RAH(entry));
-	reg_data &= ~E1000_RAH_POOL_MASK;
-	reg_data |= E1000_RAH_POOL_1 << pool;;
-	wr32(E1000_RAH(entry), reg_data);
-}
-
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);
 static int igb_resume(struct pci_dev *);
@@ -231,8 +190,6 @@ static struct pci_driver igb_driver = {
 	.err_handler = &igb_err_handler
 };
 
-static int global_quad_port_a; /* global quad port a indication */
-
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL");
@@ -251,17 +208,6 @@ char *igb_get_hw_dev_name(struct e1000_hw *hw)
 #endif
 
 /**
- * igb_desc_unused - calculate if we have unused descriptors
- **/
-static int igb_desc_unused(struct igb_ring *ring)
-{
-	if (ring->next_to_clean > ring->next_to_use)
-		return ring->next_to_clean - ring->next_to_use - 1;
-
-	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
-/**
  * igb_init_module - Driver Registration Routine
  *
  * igb_init_module is the first routine called when the driver is
@@ -275,12 +221,9 @@ static int __init igb_init_module(void)
 
 	printk(KERN_INFO "%s\n", igb_copyright);
 
-	global_quad_port_a = 0;
-
 #ifdef CONFIG_IGB_DCA
 	dca_register_notify(&dca_notifier);
 #endif
-
 	ret = pci_register_driver(&igb_driver);
 	return ret;
 }
@@ -313,8 +256,8 @@ module_exit(igb_exit_module);
  **/
 static void igb_cache_ring_register(struct igb_adapter *adapter)
 {
-	int i;
-	unsigned int rbase_offset = adapter->vfs_allocated_count;
+	int i = 0, j = 0;
+	u32 rbase_offset = adapter->vfs_allocated_count;
 
 	switch (adapter->hw.mac.type) {
 	case e1000_82576:
@@ -323,46 +266,36 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 		 * In order to avoid collision we start at the first free queue
 		 * and continue consuming queues in the same sequence
 		 */
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = rbase_offset +
-			                              Q_IDX_82576(i);
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].reg_idx = rbase_offset +
-			                              Q_IDX_82576(i);
-		break;
+		if (adapter->vfs_allocated_count) {
+			for (; i < adapter->num_rx_queues; i++)
+				adapter->rx_ring[i].reg_idx = rbase_offset +
+				                              Q_IDX_82576(i);
+			for (; j < adapter->num_tx_queues; j++)
+				adapter->tx_ring[j].reg_idx = rbase_offset +
+				                              Q_IDX_82576(j);
+		}
 	case e1000_82575:
 	case e1000_82580:
+	case e1000_i350:
 	default:
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].reg_idx = i;
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].reg_idx = i;
+		for (; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = rbase_offset + i;
+		for (; j < adapter->num_tx_queues; j++)
+			adapter->tx_ring[j].reg_idx = rbase_offset + j;
 		break;
 	}
 }
 
-/**
- * igb_free_queues - Free memory for all rings
- * @adapter: board private structure to initialize
- *
- * We free one ring per queue at run-time since we don't know the
- * number of queues at compile-time.
- **/
 static void igb_free_queues(struct igb_adapter *adapter)
 {
-	int i;
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &(adapter->rx_ring[i]);
-		ring->count = adapter->rx_ring_count;
-		free_netdev(ring->netdev);
-	}
+	adapter->tx_ring = NULL;
+	adapter->rx_ring = NULL;
 
 	adapter->num_rx_queues = 0;
 	adapter->num_tx_queues = 0;
-
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
 }
 
 /**
@@ -379,58 +312,58 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
 				   sizeof(struct igb_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
-		return -ENOMEM;
+		goto err;
 
 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
 				   sizeof(struct igb_ring), GFP_KERNEL);
-	if (!adapter->rx_ring) {
-		kfree(adapter->tx_ring);
-		return -ENOMEM;
-	}
-
-	adapter->rx_ring->buddy = adapter->tx_ring;
+	if (!adapter->rx_ring)
+		goto err;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = &(adapter->tx_ring[i]);
 		ring->count = adapter->tx_ring_count;
-		ring->adapter = adapter;
+		ring->queue_index = i;
+		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
+		/* For 82575, context index must be unique per ring. */
+		if (adapter->hw.mac.type == e1000_82575)
+			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
 	}
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		ring->count = adapter->rx_ring_count;
-		ring->adapter = adapter;
-		ring->itr_register = E1000_ITR;
-
-		if (!ring->netdev) {
-			ring->netdev = alloc_netdev(0, "", ether_setup);
-			if (!ring->netdev)
-				goto err;
-
-			ring->netdev->priv = (void *)ring;
-			ring->netdev->poll = igb_poll;
-			ring->netdev->weight = 64;
-			set_bit(__LINK_STATE_START, &ring->netdev->state);
-		}
-
-		ring->napi.dev = adapter->netdev;
+		ring->queue_index = i;
+		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
+		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
 	}
 
 	igb_cache_ring_register(adapter);
+
 	return 0;
+
 err:
 	igb_free_queues(adapter);
-	return -ENOMEM;
 
+	return -ENOMEM;
 }
 
 #define IGB_N0_QUEUE -1
-static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
-			      int tx_queue, int msix_vector)
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 {
 	u32 msixbm = 0;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ivar, index;
+	int rx_queue = IGB_N0_QUEUE;
+	int tx_queue = IGB_N0_QUEUE;
+
+	if (q_vector->rx_ring)
+		rx_queue = q_vector->rx_ring->reg_idx;
+	if (q_vector->tx_ring)
+		tx_queue = q_vector->tx_ring->reg_idx;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -438,16 +371,14 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
 		   or more queues to a vector, we write the appropriate bits
 		   into the MSIXBM register for that vector. */
-		if (rx_queue > IGB_N0_QUEUE) {
+		if (rx_queue > IGB_N0_QUEUE)
 			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
-			adapter->rx_ring[rx_queue].eims_value = msixbm;
-		}
-		if (tx_queue > IGB_N0_QUEUE) {
+		if (tx_queue > IGB_N0_QUEUE)
 			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-			adapter->tx_ring[tx_queue].eims_value =
-				  E1000_EICR_TX_QUEUE0 << tx_queue;
-		}
+		if (!adapter->msix_entries && msix_vector == 0)
+			msixbm |= E1000_EIMS_OTHER;
 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+		q_vector->eims_value = msixbm;
 		break;
 	case e1000_82576:
 		/* 82576 uses a table-based method for assigning vectors.
@@ -455,37 +386,37 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 		   a vector number along with a "valid" bit.  Sadly, the layout
 		   of the table is somewhat counterintuitive. */
 		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (rx_queue & 0x7);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (rx_queue & 0x1) {
-				/* vector goes into third byte of register */
-				ivar = ivar & 0xFF00FFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
-			} else {
+			if (rx_queue < 8) {
 				/* vector goes into low byte of register */
 				ivar = ivar & 0xFFFFFF00;
 				ivar |= msix_vector | E1000_IVAR_VALID;
+			} else {
+				/* vector goes into third byte of register */
+				ivar = ivar & 0xFF00FFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
 			}
-			adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
 		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (tx_queue & 0x7);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (tx_queue & 0x1) {
-				/* vector goes into high byte of register */
-				ivar = ivar & 0x00FFFFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
-			} else {
+			if (tx_queue < 8) {
 				/* vector goes into second byte of register */
 				ivar = ivar & 0xFFFF00FF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+			} else {
+				/* vector goes into high byte of register */
+				ivar = ivar & 0x00FFFFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
 			}
-			adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
+		q_vector->eims_value = 1 << msix_vector;
 		break;
 	case e1000_82580:
+	case e1000_i350:
 		/* 82580 uses the same table-based approach as 82576 but has fewer
 		   entries as a result we carry over for queues greater than 4. */
 		if (rx_queue > IGB_N0_QUEUE) {
@@ -500,7 +431,6 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 				ivar = ivar & 0xFFFFFF00;
 				ivar |= msix_vector | E1000_IVAR_VALID;
 			}
-			adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
 		if (tx_queue > IGB_N0_QUEUE) {
@@ -515,9 +445,9 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 				ivar = ivar & 0xFFFF00FF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
 			}
-			adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
+		q_vector->eims_value = 1 << msix_vector;
 		break;
 	default:
 		BUG();
@@ -538,46 +468,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->eims_enable_mask = 0;
-	if (hw->mac.type == e1000_82576 || hw->mac.type == e1000_82580)
-		/* Turn on MSI-X capability first, or our settings
-		 * won't stick.  And it will take days to debug. */
-		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
-				   E1000_GPIE_PBA | E1000_GPIE_EIAME |
- 				   E1000_GPIE_NSICR);
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *tx_ring = &adapter->tx_ring[i];
-		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
-		adapter->eims_enable_mask |= tx_ring->eims_value;
-		if (tx_ring->itr_val)
-			writel(tx_ring->itr_val,
-			       hw->hw_addr + tx_ring->itr_register);
-		else
-			writel(1, hw->hw_addr + tx_ring->itr_register);
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = &adapter->rx_ring[i];
-		rx_ring->buddy = NULL;
-		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
-		adapter->eims_enable_mask |= rx_ring->eims_value;
-		if (rx_ring->itr_val)
-			writel(rx_ring->itr_val,
-			       hw->hw_addr + rx_ring->itr_register);
-		else
-			writel(1, hw->hw_addr + rx_ring->itr_register);
-	}
-
 
 	/* set vector for other causes, i.e. link changes */
 	switch (hw->mac.type) {
 	case e1000_82575:
-		array_wr32(E1000_MSIXBM(0), vector++,
-				      E1000_EIMS_OTHER);
-
-		/* disable IAM for ICR interrupt bits */
-		wr32(E1000_IAM, 0);
-
 		tmp = rd32(E1000_CTRL_EXT);
 		/* enable MSI-X PBA support*/
 		tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -587,22 +481,42 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 		tmp |= E1000_CTRL_EXT_IRCA;
 
 		wr32(E1000_CTRL_EXT, tmp);
-		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
+
+		/* enable msix_other interrupt */
+		array_wr32(E1000_MSIXBM(0), vector++,
+		                      E1000_EIMS_OTHER);
 		adapter->eims_other = E1000_EIMS_OTHER;
 
 		break;
 
 	case e1000_82576:
+	case e1000_82580:
+	case e1000_i350:
+		/* Turn on MSI-X capability first, or our settings
+		 * won't stick.  And it will take days to debug. */
+		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
+		                E1000_GPIE_NSICR);
+
+		/* enable msix_other interrupt */
+		adapter->eims_other = 1 << vector;
 		tmp = (vector++ | E1000_IVAR_VALID) << 8;
-		wr32(E1000_IVAR_MISC, tmp);
 
-		adapter->eims_enable_mask = (1 << (vector)) - 1;
-		adapter->eims_other = 1 << (vector - 1);
+		wr32(E1000_IVAR_MISC, tmp);
 		break;
 	default:
 		/* do nothing, since nothing else supports MSI-X */
 		break;
 	} /* switch (hw->mac.type) */
+
+	adapter->eims_enable_mask |= adapter->eims_other;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		igb_assign_vector(q_vector, vector++);
+		adapter->eims_enable_mask |= q_vector->eims_value;
+	}
+
 	wrfl();
 }
 
@@ -615,43 +529,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 static int igb_request_msix(struct igb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	int i, err = 0, vector = 0;
 
-	vector = 0;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &(adapter->tx_ring[i]);
-		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
-		err = request_irq(adapter->msix_entries[vector].vector,
-				  &igb_msix_tx, 0, ring->name,
-				  &(adapter->tx_ring[i]));
-		if (err)
-			goto out;
-		ring->itr_register = E1000_EITR(0) + (vector << 2);
-		ring->itr_val = 976; /* ~4000 ints/sec */
-		vector++;
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &(adapter->rx_ring[i]);
-		if (strlen(netdev->name) < (IFNAMSIZ - 5))
-			sprintf(ring->netdev->name, "%s-rx-%d", netdev->name, i);
+	err = request_irq(adapter->msix_entries[vector].vector,
+	                  igb_msix_other, 0, netdev->name, adapter);
+	if (err)
+		goto out;
+	vector++;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+		if (q_vector->rx_ring && q_vector->tx_ring)
+			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+			        q_vector->rx_ring->queue_index);
+		else if (q_vector->tx_ring)
+			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+			        q_vector->tx_ring->queue_index);
+		else if (q_vector->rx_ring)
+			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+			        q_vector->rx_ring->queue_index);
 		else
-			memcpy(ring->netdev->name, netdev->name, IFNAMSIZ);
+			sprintf(q_vector->name, "%s-unused", netdev->name);
+
 		err = request_irq(adapter->msix_entries[vector].vector,
-				  &igb_msix_rx, 0, ring->netdev->name,
-				  &(adapter->rx_ring[i]));
+		                  igb_msix_ring, 0, q_vector->name,
+		                  q_vector);
 		if (err)
 			goto out;
-		ring->itr_register = E1000_EITR(0) + (vector << 2);
-		ring->itr_val = adapter->itr;
 		vector++;
 	}
 
-	err = request_irq(adapter->msix_entries[vector].vector,
-			  &igb_msix_other, 0, netdev->name, netdev);
-	if (err)
-		goto out;
-
 	igb_configure_msix(adapter);
 	return 0;
 out:
@@ -664,11 +575,45 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
 		pci_disable_msix(adapter->pdev);
 		kfree(adapter->msix_entries);
 		adapter->msix_entries = NULL;
-	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
+	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		pci_disable_msi(adapter->pdev);
-	return;
+	}
 }
 
+/**
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+	int v_idx;
+
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+		adapter->q_vector[v_idx] = NULL;
+		free_netdev(q_vector->dummy_netdev);
+		q_vector->dummy_netdev = NULL;
+		kfree(q_vector);
+	}
+	adapter->num_q_vectors = 0;
+}
+
+/**
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *
+ * This function resets the device so that it has 0 rx queues, tx queues, and
+ * MSI-X interrupts allocated.
+ */
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+	igb_free_queues(adapter);
+	igb_free_q_vectors(adapter);
+	igb_reset_interrupt_capability(adapter);
+}
 
 /**
  * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -682,11 +627,20 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	int numvecs, i;
 
 	/* Number of supported queues. */
-	/* Having more queues than CPUs doesn't make sense. */
 	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
 	adapter->num_tx_queues = 1;
 
-	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
+	/* start with one vector for every rx queue */
+	numvecs = adapter->num_rx_queues;
+
+	/* if the tx handler is separate, add 1 for every tx queue */
+	numvecs += adapter->num_tx_queues;
+
+	/* store the number of vectors reserved for queues */
+	adapter->num_q_vectors = numvecs;
+
+	/* add 1 vector for link status interrupts */
+	numvecs++;
 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
 					GFP_KERNEL);
 	if (!adapter->msix_entries)
@@ -720,9 +674,11 @@ msi_only:
 		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
 	}
 #endif
-
+	adapter->vfs_allocated_count = 0;
+	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
+	adapter->num_q_vectors = 1;
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
@@ -730,6 +686,154 @@ out:
 }
 
 /**
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+	struct igb_q_vector *q_vector;
+	struct e1000_hw *hw = &adapter->hw;
+	int v_idx;
+
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
+		if (!q_vector)
+			goto err_out;
+		q_vector->adapter = adapter;
+		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
+		q_vector->itr_val = IGB_START_ITR;
+		q_vector->set_itr = 1;
+
+		if (!q_vector->dummy_netdev) {
+			q_vector->dummy_netdev = alloc_netdev(0, "", ether_setup);
+			if (!q_vector->dummy_netdev)
+				return -ENOMEM;
+		}
+		q_vector->dummy_netdev->priv = q_vector;
+		q_vector->dummy_netdev->poll = igb_poll;
+		q_vector->dummy_netdev->weight = 64;
+		set_bit(__LINK_STATE_START, &q_vector->dummy_netdev->state);
+
+		adapter->q_vector[v_idx] = q_vector;
+	}
+	return 0;
+
+err_out:
+	while (v_idx) {
+		v_idx--;
+		q_vector = adapter->q_vector[v_idx];
+		free_netdev(q_vector->dummy_netdev);
+		q_vector->dummy_netdev = NULL;
+		kfree(q_vector);
+		adapter->q_vector[v_idx] = NULL;
+	}
+	return -ENOMEM;
+}
+
+static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+	struct igb_q_vector *q_vector;
+
+	q_vector = adapter->q_vector[v_idx];
+	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
+	q_vector->rx_ring->q_vector = q_vector;
+	q_vector->itr_val = adapter->rx_itr_setting;
+	if (q_vector->itr_val && q_vector->itr_val <= 3)
+		q_vector->itr_val = IGB_START_ITR;
+}
+
+static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
+                                      int ring_idx, int v_idx)
+{
+	struct igb_q_vector *q_vector;
+
+	q_vector = adapter->q_vector[v_idx];
+	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
+	q_vector->tx_ring->q_vector = q_vector;
+	q_vector->itr_val = adapter->tx_itr_setting;
+	if (q_vector->itr_val && q_vector->itr_val <= 3)
+		q_vector->itr_val = IGB_START_ITR;
+}
+
+/**
+ * igb_map_ring_to_vector - maps allocated queues to vectors
+ *
+ * This function maps the recently allocated queues to vectors.
+ **/
+static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+{
+	int i;
+	int v_idx = 0;
+
+	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
+	    (adapter->num_q_vectors < adapter->num_tx_queues))
+		return -ENOMEM;
+
+	if (adapter->num_q_vectors >=
+	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+	} else {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			if (i < adapter->num_tx_queues)
+				igb_map_tx_ring_to_vector(adapter, i, v_idx);
+			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+		}
+		for (; i < adapter->num_tx_queues; i++)
+			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+	}
+	return 0;
+}
+
+/**
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	igb_set_interrupt_capability(adapter);
+
+	err = igb_alloc_q_vectors(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	err = igb_alloc_queues(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	err = igb_map_ring_to_vector(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
+		goto err_map_queues;
+	}
+
+
+	return 0;
+err_map_queues:
+	igb_free_queues(adapter);
+err_alloc_queues:
+	igb_free_q_vectors(adapter);
+err_alloc_q_vectors:
+	igb_reset_interrupt_capability(adapter);
+	return err;
+}
+
+/**
  * igb_request_irq - initialize interrupts
  *
  * Attempts to configure interrupts using the best available
@@ -738,6 +842,7 @@ out:
 static int igb_request_irq(struct igb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	int err = 0;
 
@@ -750,40 +855,46 @@ static int igb_request_irq(struct igb_adapter *adapter)
 			goto request_done;
 		}
 		/* fall back to MSI */
-		igb_reset_interrupt_capability(adapter);
+		igb_clear_interrupt_scheme(adapter);
 		if (!pci_enable_msi(adapter->pdev))
 			adapter->flags |= IGB_FLAG_HAS_MSI;
 		igb_free_all_tx_resources(adapter);
 		igb_free_all_rx_resources(adapter);
+		adapter->num_tx_queues = 1;
 		adapter->num_rx_queues = 1;
-		igb_alloc_queues(adapter);
-	} else {
-		switch (hw->mac.type) {
-		case e1000_82575:
-			wr32(E1000_MSIXBM(0),
-			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
-			break;
-		case e1000_82580:
-		case e1000_82576:
-			wr32(E1000_IVAR0, E1000_IVAR_VALID);
-			break;
-		default:
-			break;
+		adapter->num_q_vectors = 1;
+		err = igb_alloc_q_vectors(adapter);
+		if (err) {
+			dev_err(&pdev->dev,
+			        "Unable to allocate memory for vectors\n");
+			goto request_done;
+		}
+		err = igb_alloc_queues(adapter);
+		if (err) {
+			dev_err(&pdev->dev,
+			        "Unable to allocate memory for queues\n");
+			igb_free_q_vectors(adapter);
+			goto request_done;
 		}
+		igb_setup_all_tx_resources(adapter);
+		igb_setup_all_rx_resources(adapter);
+	} else {
+		igb_assign_vector(adapter->q_vector[0], 0);
 	}
 
 	if (adapter->flags & IGB_FLAG_HAS_MSI) {
-		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
-				  netdev->name, netdev);
+		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
+				  netdev->name, adapter);
 		if (!err)
 			goto request_done;
+
 		/* fall back to legacy interrupts */
 		igb_reset_interrupt_capability(adapter);
 		adapter->flags &= ~IGB_FLAG_HAS_MSI;
 	}
 
-	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
-			  netdev->name, netdev);
+	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
+			  netdev->name, adapter);
 
 	if (err)
 		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -795,23 +906,19 @@ request_done:
 
 static void igb_free_irq(struct igb_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
 	if (adapter->msix_entries) {
 		int vector = 0, i;
 
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			free_irq(adapter->msix_entries[vector++].vector,
-				&(adapter->tx_ring[i]));
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			free_irq(adapter->msix_entries[vector++].vector,
-				&(adapter->rx_ring[i]));
+		free_irq(adapter->msix_entries[vector++].vector, adapter);
 
-		free_irq(adapter->msix_entries[vector++].vector, netdev);
-		return;
+		for (i = 0; i < adapter->num_q_vectors; i++) {
+			struct igb_q_vector *q_vector = adapter->q_vector[i];
+			free_irq(adapter->msix_entries[vector++].vector,
+			         q_vector);
+		}
+	} else {
+		free_irq(adapter->pdev->irq, adapter);
 	}
-
-	free_irq(adapter->pdev->irq, netdev);
 }
 
 /**
@@ -858,32 +965,37 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 			wr32(E1000_MBVFIMR, 0xFF);
 			ims |= E1000_IMS_VMMB;
 		}
+		if (adapter->hw.mac.type == e1000_82580)
+			ims |= E1000_IMS_DRSTA;
+
 		wr32(E1000_IMS, ims);
 	} else {
-		wr32(E1000_IMS, IMS_ENABLE_MASK);
+		wr32(E1000_IMS, IMS_ENABLE_MASK |
+				E1000_IMS_DRSTA);
+		wr32(E1000_IAM, IMS_ENABLE_MASK |
+				E1000_IMS_DRSTA);
 	}
 }
 
 static void igb_update_mng_vlan(struct igb_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	u16 vid = adapter->hw.mng_cookie.vlan_id;
 	u16 old_vid = adapter->mng_vlan_id;
-	if (adapter->vlgrp) {
-		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
-			if (adapter->hw.mng_cookie.status &
-				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
-				igb_vlan_rx_add_vid(netdev, vid);
-				adapter->mng_vlan_id = vid;
-			} else
-				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
 
-			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
-					(vid != old_vid) &&
-			    !vlan_group_get_device(adapter->vlgrp, old_vid))
-				igb_vlan_rx_kill_vid(netdev, old_vid);
-		} else
-			adapter->mng_vlan_id = vid;
+	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+		/* add VID to filter table */
+		igb_vfta_set(hw, vid, true);
+		adapter->mng_vlan_id = vid;
+	} else {
+		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+	}
+
+	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+	    (vid != old_vid) &&
+	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
+		/* remove VID from filter table */
+		igb_vfta_set(hw, old_vid, false);
 	}
 }
 
@@ -907,7 +1019,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
 			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 }
 
-
 /**
  * igb_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
@@ -938,12 +1049,15 @@ static void igb_configure(struct igb_adapter *adapter)
 	int i;
 
 	igb_get_hw_control(adapter);
-	igb_set_multi(netdev);
+	igb_set_rx_mode(netdev);
 
 	igb_restore_vlan(adapter);
 
-	igb_configure_tx(adapter);
+	igb_setup_tctl(adapter);
+	igb_setup_mrqc(adapter);
 	igb_setup_rctl(adapter);
+
+	igb_configure_tx(adapter);
 	igb_configure_rx(adapter);
 
 	igb_rx_fifo_flush_82575(&adapter->hw);
@@ -965,7 +1079,6 @@ static void igb_configure(struct igb_adapter *adapter)
  * igb_up - Open the interface and prepare it to handle traffic
  * @adapter: board private structure
  **/
-
 int igb_up(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
@@ -976,15 +1089,14 @@ int igb_up(struct igb_adapter *adapter)
 
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		netif_poll_enable(adapter->rx_ring[i].netdev);
-
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		netif_poll_enable(q_vector->dummy_netdev);
+	}
 	if (adapter->msix_entries)
 		igb_configure_msix(adapter);
-
-	igb_vmm_control(adapter);
-	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
-	igb_set_vmolr(hw, adapter->vfs_allocated_count);
+	else
+		igb_assign_vector(adapter->q_vector[0], 0);
 
 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
@@ -997,6 +1109,15 @@ int igb_up(struct igb_adapter *adapter)
 		wr32(E1000_CTRL_EXT, reg_data);
 	}
 
+	netif_start_queue(adapter->netdev);
+
+	/* notify VFs that reset has been completed */
+	if (adapter->vfs_allocated_count) {
+		u32 reg_data = rd32(E1000_CTRL_EXT);
+		reg_data |= E1000_CTRL_EXT_PFRSTD;
+		wr32(E1000_CTRL_EXT, reg_data);
+	}
+
 	/* start the watchdog. */
 	hw->mac.get_link_status = 1;
 	schedule_work(&adapter->watchdog_task);
@@ -1006,8 +1127,8 @@ int igb_up(struct igb_adapter *adapter)
 
 void igb_down(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl, rctl;
 	int i;
 
@@ -1030,8 +1151,10 @@ void igb_down(struct igb_adapter *adapter)
 	wrfl();
 	msleep(10);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		netif_poll_disable(adapter->rx_ring[i].netdev);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		netif_poll_disable(q_vector->dummy_netdev);
+	}
 
 	igb_irq_disable(adapter);
 
@@ -1070,6 +1193,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 
 void igb_reset(struct igb_adapter *adapter)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_mac_info *mac = &hw->mac;
 	struct e1000_fc_info *fc = &hw->fc;
@@ -1080,7 +1204,9 @@ void igb_reset(struct igb_adapter *adapter)
 	 * To take effect CTRL.RST is required.
 	 */
 	switch (mac->type) {
+	case e1000_i350:
 	case e1000_82580:
+		pba = rd32(E1000_RXPBS);
 		pba = igb_rxpbs_adjust_82580(pba);
 		break;
 	case e1000_82576:
@@ -1171,11 +1297,11 @@ void igb_reset(struct igb_adapter *adapter)
 	}
 
 	/* Allow time for pending master requests to run */
-	adapter->hw.mac.ops.reset_hw(&adapter->hw);
+	hw->mac.ops.reset_hw(hw);
 	wr32(E1000_WUC, 0);
 
-	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
-		dev_err(&adapter->pdev->dev, "Hardware Error\n");
+	if (hw->mac.ops.init_hw(hw))
+		dev_err(&pdev->dev, "Hardware Error\n");
 
 	if (hw->mac.type == e1000_82580) {
 		u32 reg = rd32(E1000_PCIEMISC);
@@ -1187,8 +1313,8 @@ void igb_reset(struct igb_adapter *adapter)
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
-	igb_reset_adaptive(&adapter->hw);
-	igb_get_phy_info(&adapter->hw);
+	igb_reset_adaptive(hw);
+	igb_get_phy_info(hw);
 }
 
 /**
@@ -1213,6 +1339,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	unsigned long mmio_start, mmio_len;
 	int err, pci_using_dac, pos;
 	u16 eeprom_data = 0, state = 0;
+	static int global_quad_port_a; /* global quad port a indication */
 	u16 eeprom_apme_mask = IGB_EEPROM_APME;
 	u32 part_num;
 
@@ -1298,7 +1425,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	netdev->open = &igb_open;
 	netdev->stop = &igb_close;
 	netdev->get_stats = &igb_get_stats;
-	netdev->set_multicast_list = &igb_set_multi;
+	netdev->set_multicast_list = &igb_set_rx_mode;
 	netdev->set_mac_address = &igb_set_mac;
 	netdev->change_mtu = &igb_change_mtu;
 	netdev->do_ioctl = &igb_ioctl;
@@ -1324,10 +1451,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_device_id = pdev->subsystem_device;
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
-
-	/* setup the private structure */
-	hw->back = adapter;
 	/* Copy the default MAC, PHY and NVM function pointers */
 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -1338,46 +1461,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_sw_init;
 
-#ifdef CONFIG_PCI_IOV
-	/* since iov functionality isn't critical to base device function we
-	 * can accept failure.  If it fails we don't allow iov to be enabled */
-	if (hw->mac.type == e1000_82576) {
-		/* 82576 supports a maximum of 7 VFs in addition to the PF */
-		unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
-		int i;
-		unsigned char mac_addr[ETH_ALEN];
-
-		if (num_vfs) {
-			adapter->vf_data = kcalloc(num_vfs,
-						sizeof(struct vf_data_storage),
-						GFP_KERNEL);
-			if (!adapter->vf_data) {
-				dev_err(&pdev->dev,
-				        "Could not allocate VF private data - "
-					"IOV enable failed\n");
-			} else {
-				err = pci_enable_sriov(pdev, num_vfs);
-				if (!err) {
-					adapter->vfs_allocated_count = num_vfs;
-					dev_info(&pdev->dev,
-					         "%d vfs allocated\n",
-					         num_vfs);
-					for (i = 0;
-					     i < adapter->vfs_allocated_count;
-					     i++) {
-						random_ether_addr(mac_addr);
-						igb_set_vf_mac(adapter, i,
-						               mac_addr);
-					}
-				} else {
-					kfree(adapter->vf_data);
-					adapter->vf_data = NULL;
-				}
-			}
-		}
-	}
-
-#endif
 	/* setup the private structure */
 	err = igb_sw_init(adapter);
 	if (err)
@@ -1385,16 +1468,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 	igb_get_bus_info_pcie(hw);
 
-	/* set flags */
-	switch (hw->mac.type) {
-	case e1000_82575:
-		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
-		break;
-	case e1000_82576:
-	default:
-		break;
-	}
-
 	hw->phy.autoneg_wait_to_complete = false;
 	hw->mac.adaptive_ifs = true;
 
@@ -1417,13 +1490,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_TSO6;
-
 	netdev->features |= NETIF_F_GRO;
 
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
+	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
 
 	/* before reading the NVM, reset the controller to put the device in a
 	 * known good starting state */
@@ -1466,13 +1538,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	hw->fc.requested_mode = e1000_fc_default;
 	hw->fc.current_mode = e1000_fc_default;
 
-	adapter->itr_setting = IGB_DEFAULT_ITR;
-	adapter->itr = IGB_START_ITR;
-
 	igb_validate_mdi_setting(hw);
 
-	adapter->rx_csum = 1;
-
 	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
 	 * enable the ACPI Magic Packet filter
 	 */
@@ -1505,6 +1572,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 			adapter->eeprom_wol = 0;
 		break;
 	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 		/* if quad port adapter, disable WoL on all but port A */
 		if (global_quad_port_a != 0)
 			adapter->eeprom_wol = 0;
@@ -1552,8 +1620,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	dev_info(&pdev->dev,
 		 "%s: (PCIe:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
 		 netdev->name,
-		 ((hw->bus.speed == e1000_bus_speed_2500)
-		  ? "2.5Gb/s" : "unknown"),
+		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		                                            "unknown"),
 		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
 		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
 		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
@@ -1581,15 +1649,14 @@ err_eeprom:
 
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
-
-	igb_free_queues(adapter);
 err_sw_init:
+	igb_clear_interrupt_scheme(adapter);
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_release_selected_regions(pdev, pci_select_bars(pdev,
-	                             IORESOURCE_MEM));
+	pci_release_selected_regions(pdev,
+	                             pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
 	pci_disable_device(pdev);
@@ -1634,12 +1701,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
 	unregister_netdev(netdev);
 
-	if (!igb_check_reset_block(&adapter->hw))
-		igb_reset_phy(&adapter->hw);
-
-	igb_reset_interrupt_capability(adapter);
+	if (!igb_check_reset_block(hw))
+		igb_reset_phy(hw);
 
-	igb_free_queues(adapter);
+	igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PCI_IOV
 	/* reclaim resources allocated to VFs */
@@ -1655,11 +1720,12 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 		dev_info(&pdev->dev, "IOV Disabled\n");
 	}
 #endif
+
 	iounmap(hw->hw_addr);
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
-	pci_release_selected_regions(pdev, pci_select_bars(pdev,
-	                             IORESOURCE_MEM));
+	pci_release_selected_regions(pdev,
+	                             pci_select_bars(pdev, IORESOURCE_MEM));
 
 	free_netdev(netdev);
 
@@ -1669,6 +1735,54 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 }
 
 /**
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs.  The reason for ordering it this way is because it is much
+ * more expensive time-wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
+{
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *pdev = adapter->pdev;
+
+	if (adapter->vfs_allocated_count > 7)
+		adapter->vfs_allocated_count = 7;
+
+	if (adapter->vfs_allocated_count) {
+		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+		                           sizeof(struct vf_data_storage),
+		                           GFP_KERNEL);
+		/* if allocation failed then we do not support SR-IOV */
+		if (!adapter->vf_data) {
+			adapter->vfs_allocated_count = 0;
+			dev_err(&pdev->dev, "Unable to allocate memory for VF "
+			        "Data Storage\n");
+		}
+	}
+
+	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
+		kfree(adapter->vf_data);
+		adapter->vf_data = NULL;
+#endif /* CONFIG_PCI_IOV */
+		adapter->vfs_allocated_count = 0;
+#ifdef CONFIG_PCI_IOV
+	} else {
+		unsigned char mac_addr[ETH_ALEN];
+		int i;
+		dev_info(&pdev->dev, "%d vfs allocated\n",
+		         adapter->vfs_allocated_count);
+		for (i = 0; i < adapter->vfs_allocated_count; i++) {
+			random_ether_addr(mac_addr);
+			igb_set_vf_mac(adapter, i, mac_addr);
+		}
+	}
+#endif /* CONFIG_PCI_IOV */
+}
+
+/**
  * igb_sw_init - Initialize general software structures (struct igb_adapter)
  * @adapter: board private structure to initialize
  *
@@ -1686,20 +1800,25 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
 	adapter->tx_ring_count = IGB_DEFAULT_TXD;
 	adapter->rx_ring_count = IGB_DEFAULT_RXD;
-	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	adapter->rx_ps_hdr_size = 0; /* disable packet split */
+	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-	/* This call may decrease the number of queues depending on
-	 * interrupt mode. */
-	igb_set_interrupt_capability(adapter);
+#ifdef CONFIG_PCI_IOV
+	if (hw->mac.type == e1000_82576)
+		adapter->vfs_allocated_count = max_vfs;
 
-	if (igb_alloc_queues(adapter)) {
+#endif /* CONFIG_PCI_IOV */
+	/* This call may decrease the number of queues */
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
 
+	igb_probe_vfs(adapter);
+
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	igb_irq_disable(adapter);
 
@@ -1742,21 +1861,12 @@ static int igb_open(struct net_device *netdev)
 
 	/* e1000_power_up_phy(adapter); */
 
-	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-	if ((adapter->hw.mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
-		igb_update_mng_vlan(adapter);
-
 	/* before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 	 * as soon as we call pci_request_irq, so we have to setup our
 	 * clean_rx handler before we do so.  */
 	igb_configure(adapter);
 
-	igb_vmm_control(adapter);
-	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
-	igb_set_vmolr(hw, adapter->vfs_allocated_count);
-
 	err = igb_request_irq(adapter);
 	if (err)
 		goto err_req_irq;
@@ -1764,8 +1874,10 @@ static int igb_open(struct net_device *netdev)
 	/* From here on the code is the same as igb_up() */
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		netif_poll_enable(adapter->rx_ring[i].netdev);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		netif_poll_enable(q_vector->dummy_netdev);
+	}
 
 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
@@ -1822,28 +1934,18 @@ static int igb_close(struct net_device *netdev)
 	igb_free_all_tx_resources(adapter);
 	igb_free_all_rx_resources(adapter);
 
-	/* kill manageability vlan ID if supported, but not if a vlan with
-	 * the same ID is registered on the host OS (let 8021q kill it) */
-	if ((adapter->hw.mng_cookie.status &
-			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-	     !(adapter->vlgrp &&
-	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
-		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-
 	return 0;
 }
 
 /**
  * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int igb_setup_tx_resources(struct igb_adapter *adapter,
-			   struct igb_ring *tx_ring)
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *pdev = tx_ring->pdev;
 	int size;
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1856,13 +1958,13 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+	tx_ring->desc = pci_alloc_consistent(pdev,
+	                                     tx_ring->size,
 					     &tx_ring->dma);
 
 	if (!tx_ring->desc)
 		goto err;
 
-	tx_ring->adapter = adapter;
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 	spin_lock_init(&tx_ring->tx_clean_lock);
@@ -1871,7 +1973,7 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
 
 err:
 	vfree(tx_ring->buffer_info);
-	dev_err(&adapter->pdev->dev,
+	dev_err(&pdev->dev,
 		"Unable to allocate memory for the transmit descriptor ring\n");
 	return -ENOMEM;
 }
@@ -1885,12 +1987,13 @@ err:
  **/
 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		err = igb_setup_tx_resources(&adapter->tx_ring[i]);
 		if (err) {
-			dev_err(&adapter->pdev->dev,
+			dev_err(&pdev->dev,
 				"Allocation for Tx Queue %u failed\n", i);
 			for (i--; i >= 0; i--)
 				igb_free_tx_resources(&adapter->tx_ring[i]);
@@ -1902,49 +2005,16 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 }
 
 /**
- * igb_configure_tx - Configure transmit Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
  **/
-static void igb_configure_tx(struct igb_adapter *adapter)
+void igb_setup_tctl(struct igb_adapter *adapter)
 {
-	u64 tdba;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl;
-	u32 txdctl, txctrl;
-	int i, j;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &adapter->tx_ring[i];
-		j = ring->reg_idx;
-		wr32(E1000_TDLEN(j),
-		     ring->count * sizeof(union e1000_adv_tx_desc));
-		tdba = ring->dma;
-		wr32(E1000_TDBAL(j),
-		     tdba & 0x00000000ffffffffULL);
-		wr32(E1000_TDBAH(j), tdba >> 32);
-
-		ring->head = E1000_TDH(j);
-		ring->tail = E1000_TDT(j);
-		writel(0, hw->hw_addr + ring->tail);
-		writel(0, hw->hw_addr + ring->head);
-		txdctl = rd32(E1000_TXDCTL(j));
-		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-		wr32(E1000_TXDCTL(j), txdctl);
-
-		/* Turn off Relaxed Ordering on head write-backs.  The
-		 * writebacks MUST be delivered in order or it will
-		 * completely screw up our bookeeping.
-		 */
-		txctrl = rd32(E1000_DCA_TXCTRL(j));
-		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-		wr32(E1000_DCA_TXCTRL(j), txctrl);
-	}
 
-	/* disable queue 0 to prevent tail bump w/o re-configuration */
-	if (adapter->vfs_allocated_count)
-		wr32(E1000_TXDCTL(0), 0);
+	/* disable queue 0 which is enabled by default on 82575 and 82576 */
+	wr32(E1000_TXDCTL(0), 0);
 
 	/* Program the Transmit Control Register */
 	tctl = rd32(E1000_TCTL);
@@ -1954,9 +2024,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
 	igb_config_collision_dist(hw);
 
-	/* Setup Transmit Descriptor Settings for eop descriptor */
-	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
-
 	/* Enable transmits */
 	tctl |= E1000_TCTL_EN;
 
@@ -1964,16 +2031,69 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 }
 
 /**
- * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * igb_configure_tx_ring - Configure transmit ring after Reset
  * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ **/
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 txdctl;
+	u64 tdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+
+	/* disable the queue */
+	txdctl = rd32(E1000_TXDCTL(reg_idx));
+	wr32(E1000_TXDCTL(reg_idx),
+	                txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+	wrfl();
+	mdelay(10);
+
+	wr32(E1000_TDLEN(reg_idx),
+	                ring->count * sizeof(union e1000_adv_tx_desc));
+	wr32(E1000_TDBAL(reg_idx),
+	                tdba & 0x00000000ffffffffULL);
+	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
+	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
+	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+	writel(0, ring->head);
+	writel(0, ring->tail);
+
+	txdctl |= IGB_TX_PTHRESH;
+	txdctl |= IGB_TX_HTHRESH << 8;
+	txdctl |= IGB_TX_WTHRESH << 16;
+
+	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+	wr32(E1000_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ * igb_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int igb_setup_rx_resources(struct igb_adapter *adapter,
-			   struct igb_ring *rx_ring)
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *pdev = rx_ring->pdev;
 	int size, desc_len;
 
 	size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -1997,13 +2117,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
-	rx_ring->adapter = adapter;
-
 	return 0;
 
 err:
 	vfree(rx_ring->buffer_info);
-	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
+	rx_ring->buffer_info = NULL;
+	dev_err(&pdev->dev, "Unable to allocate memory for "
 		"the receive descriptor ring\n");
 	return -ENOMEM;
 }
@@ -2017,12 +2136,13 @@ err:
  **/
 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		err = igb_setup_rx_resources(&adapter->rx_ring[i]);
 		if (err) {
-			dev_err(&adapter->pdev->dev,
+			dev_err(&pdev->dev,
 				"Allocation for Rx Queue %u failed\n", i);
 			for (i--; i >= 0; i--)
 				igb_free_rx_resources(&adapter->rx_ring[i]);
@@ -2034,15 +2154,127 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
 }
 
 /**
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 mrqc, rxcsum;
+	u32 j, num_rx_queues, shift = 0, shift2 = 0;
+	union e1000_reta {
+		u32 dword;
+		u8  bytes[4];
+	} reta;
+	static const u8 rsshash[40] = {
+		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
+		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
+		0xae, 0x7b, 0x30, 0xb4,	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
+		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+
+	/* Fill out hash function seeds */
+	for (j = 0; j < 10; j++) {
+		u32 rsskey = rsshash[(j * 4)];
+		rsskey |= rsshash[(j * 4) + 1] << 8;
+		rsskey |= rsshash[(j * 4) + 2] << 16;
+		rsskey |= rsshash[(j * 4) + 3] << 24;
+		array_wr32(E1000_RSSRK(0), j, rsskey);
+	}
+
+	num_rx_queues = adapter->num_rx_queues;
+
+	if (adapter->vfs_allocated_count) {
+		/* 82575 and 82576 support 2 RSS queues for VMDq */
+		switch (hw->mac.type) {
+		case e1000_i350:
+		case e1000_82580:
+			num_rx_queues = 1;
+			shift = 0;
+			break;
+		case e1000_82576:
+			shift = 3;
+			num_rx_queues = 2;
+			break;
+		case e1000_82575:
+			shift = 2;
+			shift2 = 6;
+		default:
+			break;
+		}
+	} else {
+		if (hw->mac.type == e1000_82575)
+			shift = 6;
+	}
+
+	for (j = 0; j < (32 * 4); j++) {
+		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
+		if (shift2)
+			reta.bytes[j & 3] |= num_rx_queues << shift2;
+		if ((j & 3) == 3)
+			wr32(E1000_RETA(j >> 2), reta.dword);
+	}
+
+	/*
+	 * Disable raw packet checksumming so that RSS hash is placed in
+	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+	 * offloads as they are enabled by default
+	 */
+	rxcsum = rd32(E1000_RXCSUM);
+	rxcsum |= E1000_RXCSUM_PCSD;
+
+#if 0	/* Receive Checksum Offload not supported in RHEL5 */
+	if (adapter->hw.mac.type >= e1000_82576)
+		/* Enable Receive Checksum Offload for SCTP */
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+#endif
+
+	/* Don't need to set TUOFL or IPOFL, they default to 1 */
+	wr32(E1000_RXCSUM, rxcsum);
+
+	/* If VMDq is enabled then we set the appropriate mode for that, else
+	 * we default to RSS so that an RSS hash is calculated per packet even
+	 * if we are only using one queue */
+	if (adapter->vfs_allocated_count) {
+		if (hw->mac.type > e1000_82575) {
+			/* Set the default pool for the PF's first queue */
+			u32 vtctl = rd32(E1000_VT_CTL);
+			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+				   E1000_VT_CTL_DISABLE_DEF_POOL);
+			vtctl |= adapter->vfs_allocated_count <<
+				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+			wr32(E1000_VT_CTL, vtctl);
+		}
+		if (adapter->num_rx_queues > 1)
+			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+		else
+			mrqc = E1000_MRQC_ENABLE_VMDQ;
+	} else {
+		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+	}
+	igb_vmm_control(adapter);
+
+	/*
+	 * Generate RSS hash based on TCP port numbers and/or
+	 * IPv4/v6 src and dst addresses since UDP cannot be
+	 * hashed reliably due to IP fragmentation
+	 */
+	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+		E1000_MRQC_RSS_FIELD_IPV4_TCP |
+		E1000_MRQC_RSS_FIELD_IPV6 |
+		E1000_MRQC_RSS_FIELD_IPV6_TCP |
+		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+	wr32(E1000_MRQC, mrqc);
+}
+
+/**
  * igb_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
-static void igb_setup_rctl(struct igb_adapter *adapter)
+void igb_setup_rctl(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
-	u32 srrctl = 0;
-	int i, j;
 
 	rctl = rd32(E1000_RCTL);
 
@@ -2059,67 +2291,45 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	 */
 	rctl |= E1000_RCTL_SECRC;
 
-	/*
-	 * disable store bad packets and clear size bits.
-	 */
+	/* disable store bad packets and clear size bits. */
 	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
 
-	/* enable LPE when to prevent packets larger than max_frame_size */
-		rctl |= E1000_RCTL_LPE;
+	/* enable LPE to prevent packets larger than max_frame_size */
+	rctl |= E1000_RCTL_LPE;
 
-	/* Setup buffer sizes */
-	srrctl = ALIGN(adapter->rx_buffer_len, 1024)
-	         >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-
-	/* 82575 and greater support packet-split where the protocol
-	 * header is placed in skb->data and the packet data is
-	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
-	 * In the case of a non-split, skb->data is linearly filled,
-	 * followed by the page buffers.  Therefore, skb->data is
-	 * sized to hold the largest protocol header.
-	 */
-	/* allocations using alloc_page take too long for regular MTU
-	 * so only enable packet split for jumbo frames */
-	if (adapter->netdev->mtu > ETH_DATA_LEN) {
-		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
-		srrctl |= adapter->rx_ps_hdr_size <<
-			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-	} else {
-		adapter->rx_ps_hdr_size = 0;
-		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-	}
+	/* disable queue 0 to prevent tail write w/o re-config */
+	wr32(E1000_RXDCTL(0), 0);
 
 	/* Attention!!!  For SR-IOV PF driver operations you must enable
 	 * queue drop for all VF and PF queues to prevent head of line blocking
 	 * if an un-trusted VF does not provide descriptors to hardware.
 	 */
 	if (adapter->vfs_allocated_count) {
-		u32 vmolr;
-
-		j = adapter->rx_ring[0].reg_idx;
-
 		/* set all queue drop enable bits */
 		wr32(E1000_QDE, ALL_QUEUES);
-		srrctl |= E1000_SRRCTL_DROP_EN;
+	}
 
-		/* disable queue 0 to prevent tail write w/o re-config */
-		wr32(E1000_RXDCTL(0), 0);
+	wr32(E1000_RCTL, rctl);
+}
 
-		vmolr = rd32(E1000_VMOLR(j));
-		if (rctl & E1000_RCTL_LPE)
-			vmolr |= E1000_VMOLR_LPE;
-		if (adapter->num_rx_queues > 0)
-			vmolr |= E1000_VMOLR_RSSE;
-		wr32(E1000_VMOLR(j), vmolr);
-	}
+static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+                                   int vfn)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		j = adapter->rx_ring[i].reg_idx;
-		wr32(E1000_SRRCTL(j), srrctl);
-	}
+	/* if it isn't the PF, check to see if VFs are enabled and
+	 * increase the size to support vlan tags */
+	if (vfn < adapter->vfs_allocated_count &&
+	    adapter->vf_data[vfn].vlans_enabled)
+		size += VLAN_TAG_SIZE;
 
-	wr32(E1000_RCTL, rctl);
+	vmolr = rd32(E1000_VMOLR(vfn));
+	vmolr &= ~E1000_VMOLR_RLPML_MASK;
+	vmolr |= size | E1000_VMOLR_LPE;
+	wr32(E1000_VMOLR(vfn), vmolr);
+
+	return 0;
 }
 
 /**
@@ -2141,33 +2351,107 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
 	 * size and set the VMOLR RLPML to the size we need */
 	if (pf_id) {
 		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
-		max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
+		max_frame_size = MAX_JUMBO_FRAME_SIZE;
 	}
 
 	wr32(E1000_RLPML, max_frame_size);
 }
 
+static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr;
+
+	/*
+	 * This register exists only on 82576 and newer, so if we are older
+	 * than that we should exit and do nothing
+	 */
+	if (hw->mac.type < e1000_82576)
+		return;
+
+	vmolr = rd32(E1000_VMOLR(vfn));
+	vmolr |= E1000_VMOLR_AUPE |        /* Accept untagged packets */
+	         E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
+
+	/* clear all bits that might not be set */
+	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
+
+	if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
+		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+	/*
+	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
+	 * multicast packets
+	 */
+	if (vfn <= adapter->vfs_allocated_count)
+		vmolr |= E1000_VMOLR_BAM;	   /* Accept broadcast */
+
+	wr32(E1000_VMOLR(vfn), vmolr);
+}
+
 /**
- * igb_configure_vt_default_pool - Configure VT default pool
+ * igb_configure_rx_ring - Configure a receive ring after Reset
  * @adapter: board private structure
+ * @ring: receive ring to be configured
  *
- * Configure the default pool
+ * Configure the Rx unit of the MAC after a reset.
  **/
-static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+                           struct igb_ring *ring)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u16 pf_id = adapter->vfs_allocated_count;
-	u32 vtctl;
+	u64 rdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+	u32 srrctl, rxdctl;
+
+	/* disable the queue */
+	rxdctl = rd32(E1000_RXDCTL(reg_idx));
+	wr32(E1000_RXDCTL(reg_idx),
+	                rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+
+	/* Set DMA base address registers */
+	wr32(E1000_RDBAL(reg_idx),
+	     rdba & 0x00000000ffffffffULL);
+	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+	wr32(E1000_RDLEN(reg_idx),
+	               ring->count * sizeof(union e1000_adv_rx_desc));
+
+	/* initialize head and tail */
+	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
+	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+	writel(0, ring->head);
+	writel(0, ring->tail);
+
+	/* set descriptor configuration */
+	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
+		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
+		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
+		srrctl |= IGB_RXBUFFER_16384 >>
+		          E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else
+		srrctl |= (PAGE_SIZE / 2) >>
+		          E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif
+		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+	} else {
+		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+		         E1000_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+	}
 
-	/* not in sr-iov mode - do nothing */
-	if (!pf_id)
-		return;
+	wr32(E1000_SRRCTL(reg_idx), srrctl);
+
+	/* set filtering for VMDQ pools */
+	igb_set_vmolr(adapter, reg_idx & 0x7);
 
-	vtctl = rd32(E1000_VT_CTL);
-	vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
-		   E1000_VT_CTL_DISABLE_DEF_POOL);
-	vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
-	wr32(E1000_VT_CTL, vtctl);
+	/* enable receive descriptor fetching */
+	rxdctl = rd32(E1000_RXDCTL(reg_idx));
+	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+	rxdctl &= 0xFFF00000;
+	rxdctl |= IGB_RX_PTHRESH;
+	rxdctl |= IGB_RX_HTHRESH << 8;
+	rxdctl |= IGB_RX_WTHRESH << 16;
+	wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
 /**
@@ -2178,118 +2462,19 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
  **/
 static void igb_configure_rx(struct igb_adapter *adapter)
 {
-	u64 rdba;
-	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl, rxcsum;
-	u32 rxdctl;
 	int i;
 
-	/* disable receives while setting up the descriptors */
-	rctl = rd32(E1000_RCTL);
-	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	wrfl();
-	mdelay(10);
+	/* set UTA to appropriate mode */
+	igb_set_uta(adapter);
 
-	if (adapter->itr_setting > 3)
-		wr32(E1000_ITR, adapter->itr);
+	/* set the correct pool for the PF default MAC address in entry 0 */
+	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
+	                 adapter->vfs_allocated_count);
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &adapter->rx_ring[i];
-		int j = ring->reg_idx;
-		rdba = ring->dma;
-		wr32(E1000_RDBAL(j),
-		     rdba & 0x00000000ffffffffULL);
-		wr32(E1000_RDBAH(j), rdba >> 32);
-		wr32(E1000_RDLEN(j),
-		     ring->count * sizeof(union e1000_adv_rx_desc));
-
-		ring->head = E1000_RDH(j);
-		ring->tail = E1000_RDT(j);
-		writel(0, hw->hw_addr + ring->tail);
-		writel(0, hw->hw_addr + ring->head);
-
-		rxdctl = rd32(E1000_RXDCTL(j));
-		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
-		rxdctl &= 0xFFF00000;
-		rxdctl |= IGB_RX_PTHRESH;
-		rxdctl |= IGB_RX_HTHRESH << 8;
-		rxdctl |= IGB_RX_WTHRESH << 16;
-		wr32(E1000_RXDCTL(j), rxdctl);
-	}
-
-	if (adapter->num_rx_queues > 1) {
-		u32 random[10];
-		u32 mrqc;
-		u32 j, shift;
-		union e1000_reta {
-			u32 dword;
-			u8  bytes[4];
-		} reta;
-
-		get_random_bytes(&random[0], 40);
-
-		if (hw->mac.type >= e1000_82576)
-			shift = 0;
-		else
-			shift = 6;
-		for (j = 0; j < (32 * 4); j++) {
-			reta.bytes[j & 3] =
-				adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
-			if ((j & 3) == 3)
-				writel(reta.dword,
-				       hw->hw_addr + E1000_RETA(0) + (j & ~3));
-		}
-		if (adapter->vfs_allocated_count)
-			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
-		else
-			mrqc = E1000_MRQC_ENABLE_RSS_4Q;
-
-		/* Fill out hash function seeds */
-		for (j = 0; j < 10; j++)
-			array_wr32(E1000_RSSRK(0), j, random[j]);
-
-		/*
-		 * Generate RSS hash based on TCP port numbers and/or
-		 * IPv4/v6 src and dst addresses since UDP cannot be
-		 * hashed reliably due to IP fragmentation
-		 */
-		mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
-			E1000_MRQC_RSS_FIELD_IPV4_TCP |
-			E1000_MRQC_RSS_FIELD_IPV6 |
-			E1000_MRQC_RSS_FIELD_IPV6_TCP |
-			E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-
-		wr32(E1000_MRQC, mrqc);
-
-		/* Multiqueue and raw packet checksumming are mutually
-		 * exclusive.  Note that this not the same as TCP/IP
-		 * checksumming, which works fine. */
-		rxcsum = rd32(E1000_RXCSUM);
-		rxcsum |= E1000_RXCSUM_PCSD;
-		wr32(E1000_RXCSUM, rxcsum);
-	} else {
-		/* Enable multi-queue for sr-iov */
-		if (adapter->vfs_allocated_count)
-			wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
-		/* Enable Receive Checksum Offload for TCP and UDP */
-		rxcsum = rd32(E1000_RXCSUM);
-		if (adapter->rx_csum)
-			rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
-		else
-			rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);
-
-		wr32(E1000_RXCSUM, rxcsum);
-	}
-
-	/* Set the default pool for the PF's first queue */
-	igb_configure_vt_default_pool(adapter);
-
-	igb_rlpml_set(adapter);
-
-	/* Enable Receives */
-	wr32(E1000_RCTL, rctl);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
@@ -2300,14 +2485,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
  **/
 void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
-	struct pci_dev *pdev = tx_ring->adapter->pdev;
-
 	igb_clean_tx_ring(tx_ring);
 
 	vfree(tx_ring->buffer_info);
 	tx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	pci_free_consistent(tx_ring->pdev, tx_ring->size,
+	                    tx_ring->desc, tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -2326,14 +2514,20 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
 		igb_free_tx_resources(&adapter->tx_ring[i]);
 }
 
-static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
-					   struct igb_buffer *buffer_info)
+void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
+				    struct igb_buffer *buffer_info)
 {
 	if (buffer_info->dma) {
-		pci_unmap_page(adapter->pdev,
-				buffer_info->dma,
-				buffer_info->length,
-				PCI_DMA_TODEVICE);
+		if (buffer_info->mapped_as_page)
+			pci_unmap_page(tx_ring->pdev,
+					buffer_info->dma,
+					buffer_info->length,
+					PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(tx_ring->pdev,
+					buffer_info->dma,
+					buffer_info->length,
+					PCI_DMA_TODEVICE);
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
@@ -2341,8 +2535,9 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
 		buffer_info->skb = NULL;
 	}
 	buffer_info->time_stamp = 0;
+	buffer_info->length = 0;
 	buffer_info->next_to_watch = 0;
-	/* buffer_info must be completely set up in the transmit path */
+	buffer_info->mapped_as_page = false;
 }
 
 /**
@@ -2351,7 +2546,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
  **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = tx_ring->adapter;
 	struct igb_buffer *buffer_info;
 	unsigned long size;
 	unsigned int i;
@@ -2362,21 +2556,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 
 	for (i = 0; i < tx_ring->count; i++) {
 		buffer_info = &tx_ring->buffer_info[i];
-		igb_unmap_and_free_tx_resource(adapter, buffer_info);
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 	}
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
 	memset(tx_ring->buffer_info, 0, size);
 
 	/* Zero out the descriptor ring */
-
 	memset(tx_ring->desc, 0, tx_ring->size);
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-
-	writel(0, adapter->hw.hw_addr + tx_ring->head);
-	writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -2399,14 +2589,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
  **/
 void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
-	struct pci_dev *pdev = rx_ring->adapter->pdev;
-
 	igb_clean_rx_ring(rx_ring);
 
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	pci_free_consistent(rx_ring->pdev, rx_ring->size,
+	                    rx_ring->desc, rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -2432,26 +2625,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
 	struct igb_buffer *buffer_info;
-	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
 	unsigned int i;
 
 	if (!rx_ring->buffer_info)
 		return;
+
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
 		if (buffer_info->dma) {
-			if (adapter->rx_ps_hdr_size)
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_ps_hdr_size,
-						 PCI_DMA_FROMDEVICE);
-			else
-				pci_unmap_single(pdev, buffer_info->dma,
-						 adapter->rx_buffer_len,
-						 PCI_DMA_FROMDEVICE);
+			pci_unmap_single(rx_ring->pdev,
+			                 buffer_info->dma,
+					 rx_ring->rx_buffer_len,
+					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
 		}
 
@@ -2459,14 +2647,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 			dev_kfree_skb(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
+		if (buffer_info->page_dma) {
+			pci_unmap_page(rx_ring->pdev,
+			               buffer_info->page_dma,
+				       PAGE_SIZE / 2,
+				       PCI_DMA_FROMDEVICE);
+			buffer_info->page_dma = 0;
+		}
 		if (buffer_info->page) {
-			if (buffer_info->page_dma)
-				pci_unmap_page(pdev, buffer_info->page_dma,
-					       PAGE_SIZE / 2,
-					       PCI_DMA_FROMDEVICE);
 			put_page(buffer_info->page);
 			buffer_info->page = NULL;
-			buffer_info->page_dma = 0;
 			buffer_info->page_offset = 0;
 		}
 	}
@@ -2479,9 +2669,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
-
-	writel(0, adapter->hw.hw_addr + rx_ring->head);
-	writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
@@ -2515,61 +2702,44 @@ static int igb_set_mac(struct net_device *netdev, void *p)
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
-
-	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
+	/* set the correct pool for the new PF MAC address in entry 0 */
+	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
+	                 adapter->vfs_allocated_count);
 
 	return 0;
 }
 
 /**
- * igb_set_multi - Multicast and Promiscuous mode set
+ * igb_write_mc_addr_list - write multicast addresses to MTA
  * @netdev: network interface device structure
  *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *                0 on no addresses written
+ *                X on writing X addresses to MTA
  **/
-static void igb_set_multi(struct net_device *netdev)
+static int igb_write_mc_addr_list(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
-	u8  *mta_list = NULL;
-	u32 rctl;
+	struct dev_mc_list *mc_ptr = netdev->mc_list;
+	u8  *mta_list;
+	u32 vmolr = 0;
 	int i;
 
-	/* Check for Promiscuous and All Multicast modes */
-
-	rctl = rd32(E1000_RCTL);
-
-	if (netdev->flags & IFF_PROMISC) {
-		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-		rctl &= ~E1000_RCTL_VFE;
-	} else {
-		if (netdev->flags & IFF_ALLMULTI) {
-			rctl |= E1000_RCTL_MPE;
-			rctl &= ~E1000_RCTL_UPE;
-		} else
-			rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
-		rctl |= E1000_RCTL_VFE;
-	}
-	wr32(E1000_RCTL, rctl);
-
 	if (!netdev->mc_count) {
 		/* nothing to program, so clear mc list */
 		igb_update_mc_addr_list(hw, NULL, 0);
 		igb_restore_vf_multicasts(adapter);
-		return;
+		return 0;
 	}
 
 	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
-	if (!mta_list) {
-		dev_err(&adapter->pdev->dev,
-		        "failed to allocate multicast filter list\n");
-		return;
-	}
+	if (!mta_list)
+		return -ENOMEM;
+
+	/* set vmolr receive overflow multicast bit */
+	vmolr |= E1000_VMOLR_ROMPE;
 
 	/* The shared function expects a packed array of only addresses. */
 	mc_ptr = netdev->mc_list;
@@ -2582,6 +2752,94 @@ static void igb_set_multi(struct net_device *netdev)
 	}
 	igb_update_mc_addr_list(hw, mta_list, i);
 	kfree(mta_list);
+
+	return netdev->mc_count;
+}
+
+/**
+ * igb_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *                0 on no addresses written
+ *                X on writing X addresses to the RAR table
+ **/
+static int igb_write_uc_addr_list(struct net_device *netdev)
+{
+	return 0;
+}
+
+/**
+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void igb_set_rx_mode(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = adapter->vfs_allocated_count;
+	u32 rctl, vmolr = 0;
+	int count;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = rd32(E1000_RCTL);
+
+	/* clear the affected bits */
+	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+			vmolr |= E1000_VMOLR_MPME;
+		} else {
+			/*
+			 * Write addresses to the MTA; if the attempt fails
+			 * then we should just turn on promiscuous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			count = igb_write_mc_addr_list(netdev);
+			if (count < 0) {
+				rctl |= E1000_RCTL_MPE;
+				vmolr |= E1000_VMOLR_MPME;
+			} else if (count) {
+				vmolr |= E1000_VMOLR_ROMPE;
+			}
+		}
+		/*
+		 * Write addresses to available RAR registers; if there is not
+		 * sufficient space to store all the addresses then enable
+		 * unicast promiscuous mode
+		 */
+		count = igb_write_uc_addr_list(netdev);
+		if (count < 0) {
+			rctl |= E1000_RCTL_UPE;
+			vmolr |= E1000_VMOLR_ROPE;
+		}
+		rctl |= E1000_RCTL_VFE;
+	}
+	wr32(E1000_RCTL, rctl);
+
+	/*
+	 * In order to support SR-IOV and eventually VMDq it is necessary to set
+	 * the VMOLR to enable the appropriate modes.  Without this workaround
+	 * we will have issues with VLAN tag stripping not being done for frames
+	 * that are only arriving because we are the default pool
+	 */
+	if (hw->mac.type < e1000_82576)
+		return;
+
+	vmolr |= rd32(E1000_VMOLR(vfn)) &
+	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+	wr32(E1000_VMOLR(vfn), vmolr);
 	igb_restore_vf_multicasts(adapter);
 }
 
@@ -2644,34 +2902,29 @@ static void igb_watchdog_task(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
-	struct igb_ring *tx_ring = adapter->tx_ring;
 	u32 link;
-	u32 eics = 0;
 	int i;
 
 	link = igb_has_link(adapter);
-	if ((netif_carrier_ok(netdev)) && link)
-		goto link_up;
-
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			u32 ctrl;
-			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
-						   &adapter->link_speed,
-						   &adapter->link_duplex);
+			hw->mac.ops.get_speed_and_duplex(hw,
+			                                 &adapter->link_speed,
+			                                 &adapter->link_duplex);
 
 			ctrl = rd32(E1000_CTRL);
 			/* Links status message must follow this format */
 			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
 				 "Flow Control: %s\n",
-			         netdev->name,
-				 adapter->link_speed,
-				 adapter->link_duplex == FULL_DUPLEX ?
+			       netdev->name,
+			       adapter->link_speed,
+			       adapter->link_duplex == FULL_DUPLEX ?
 				 "Full Duplex" : "Half Duplex",
-				 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
-				 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
-				 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
-				 E1000_CTRL_TFCE) ? "TX" : "None")));
+			       ((ctrl & E1000_CTRL_TFCE) &&
+			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
+			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
+			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
 
 			/* tweak tx_queue_len according to speed/duplex and
 			 * adjust the timeout factor */
@@ -2689,7 +2942,6 @@ static void igb_watchdog_task(struct igb_adapter *adapter)
 			}
 
 			netif_carrier_on(netdev);
-			netif_wake_queue(netdev);
 
 			igb_ping_all_vfs(adapter);
 
@@ -2706,7 +2958,6 @@ static void igb_watchdog_task(struct igb_adapter *adapter)
 			printk(KERN_INFO "igb: %s NIC Link is Down\n",
 			       netdev->name);
 			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
 
 			igb_ping_all_vfs(adapter);
 
@@ -2717,44 +2968,40 @@ static void igb_watchdog_task(struct igb_adapter *adapter)
 		}
 	}
 
-link_up:
 	igb_update_stats(adapter);
+	igb_update_adaptive(hw);
 
-	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
-	adapter->tpt_old = adapter->stats.tpt;
-	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
-	adapter->colc_old = adapter->stats.colc;
-
-	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
-	adapter->gorc_old = adapter->stats.gorc;
-	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
-	adapter->gotc_old = adapter->stats.gotc;
-
-	igb_update_adaptive(&adapter->hw);
-
-	if (!netif_carrier_ok(netdev)) {
-		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *tx_ring = &adapter->tx_ring[i];
+		if (!netif_carrier_ok(netdev)) {
 			/* We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
 			 * to get done, so reset controller to flush Tx.
 			 * (Do the reset outside of interrupt context). */
-			adapter->tx_timeout_count++;
-			schedule_work(&adapter->reset_task);
+			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+				adapter->tx_timeout_count++;
+				schedule_work(&adapter->reset_task);
+				/* return immediately since reset is imminent */
+				return;
+			}
 		}
+
+		/* Force detection of hung controller every watchdog period */
+		tx_ring->detect_tx_hung = true;
 	}
 
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	if (adapter->msix_entries) {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			eics |= adapter->rx_ring[i].eims_value;
+		u32 eics = 0;
+		for (i = 0; i < adapter->num_q_vectors; i++) {
+			struct igb_q_vector *q_vector = adapter->q_vector[i];
+			eics |= q_vector->eims_value;
+		}
 		wr32(E1000_EICS, eics);
 	} else {
 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
 	}
 
-	/* Force detection of hung controller every watchdog period */
-	tx_ring->detect_tx_hung = true;
-
 	/* Reset the timer */
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		mod_timer(&adapter->watchdog_timer,
@@ -2768,7 +3015,6 @@ enum latency_range {
 	latency_invalid = 255
 };
 
-
 /**
  * igb_update_ring_itr - update the dynamic ITR value based on packet size
  *
@@ -2783,25 +3029,37 @@ enum latency_range {
  *      parameter (see igb_param.c)
  *      NOTE:  This function is called only when operating in a multiqueue
  *             receive environment.
- * @rx_ring: pointer to ring
+ * @q_vector: pointer to q_vector
  **/
-static void igb_update_ring_itr(struct igb_ring *rx_ring)
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 {
-	int new_val = rx_ring->itr_val;
+	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
-	struct igb_adapter *adapter = rx_ring->adapter;
-
-	if (!rx_ring->total_packets)
-		goto clear_counts; /* no packets, so don't do anything */
+	struct igb_adapter *adapter = q_vector->adapter;
 
 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 	 * ints/sec - ITR timer value of 120 ticks.
 	 */
 	if (adapter->link_speed != SPEED_1000) {
-		new_val = 120;
+		new_val = 976;
 		goto set_itr_val;
 	}
-	avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
+
+	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
+		struct igb_ring *ring = q_vector->rx_ring;
+		avg_wire_size = ring->total_bytes / ring->total_packets;
+	}
+
+	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
+		struct igb_ring *ring = q_vector->tx_ring;
+		avg_wire_size = max_t(u32, avg_wire_size,
+		                      (ring->total_bytes /
+		                       ring->total_packets));
+	}
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
 
 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
 	avg_wire_size += 24;
@@ -2815,14 +3073,24 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
 	else
 		new_val = avg_wire_size / 2;
 
+	/* when in itr mode 3 do not exceed 20K ints/sec */
+	if (adapter->rx_itr_setting == 3 && new_val < 196)
+		new_val = 196;
+
 set_itr_val:
-	if (new_val != rx_ring->itr_val) {
-		rx_ring->itr_val = new_val;
-		rx_ring->set_itr = 1;
+	if (new_val != q_vector->itr_val) {
+		q_vector->itr_val = new_val;
+		q_vector->set_itr = 1;
 	}
 clear_counts:
-	rx_ring->total_bytes = 0;
-	rx_ring->total_packets = 0;
+	if (q_vector->rx_ring) {
+		q_vector->rx_ring->total_bytes = 0;
+		q_vector->rx_ring->total_packets = 0;
+	}
+	if (q_vector->tx_ring) {
+		q_vector->tx_ring->total_bytes = 0;
+		q_vector->tx_ring->total_packets = 0;
+	}
 }
 
 /**
@@ -2839,7 +3107,7 @@ clear_counts:
  *      NOTE:  These calculations are only valid when operating in a single-
  *             queue environment.
  * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
+ * @itr_setting: current q_vector->itr_val
  * @packets: the number of packets during this measurement interval
  * @bytes: the number of bytes during this measurement interval
  **/
@@ -2891,8 +3159,9 @@ update_itr_done:
 
 static void igb_set_itr(struct igb_adapter *adapter)
 {
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	u16 current_itr;
-	u32 new_itr = adapter->itr;
+	u32 new_itr = q_vector->itr_val;
 
 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
 	if (adapter->link_speed != SPEED_1000) {
@@ -2906,30 +3175,26 @@ static void igb_set_itr(struct igb_adapter *adapter)
 				    adapter->rx_ring->total_packets,
 				    adapter->rx_ring->total_bytes);
 
-	if (adapter->rx_ring->buddy) {
-		adapter->tx_itr = igb_update_itr(adapter,
-					    adapter->tx_itr,
-					    adapter->tx_ring->total_packets,
-					    adapter->tx_ring->total_bytes);
-		current_itr = max(adapter->rx_itr, adapter->tx_itr);
-	} else {
-		current_itr = adapter->rx_itr;
-	}
+	adapter->tx_itr = igb_update_itr(adapter,
+				    adapter->tx_itr,
+				    adapter->tx_ring->total_packets,
+				    adapter->tx_ring->total_bytes);
+	current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
-	if (adapter->itr_setting == 3 && current_itr == lowest_latency)
+	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
 		current_itr = low_latency;
 
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
 	case lowest_latency:
-		new_itr = 70000;
+		new_itr = 56;  /* aka 70,000 ints/sec */
 		break;
 	case low_latency:
-		new_itr = 20000; /* aka hwitr = ~200 */
+		new_itr = 196; /* aka 20,000 ints/sec */
 		break;
 	case bulk_latency:
-		new_itr = 4000;
+		new_itr = 980; /* aka 4,000 ints/sec */
 		break;
 	default:
 		break;
@@ -2938,17 +3203,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
 set_itr_now:
 	adapter->rx_ring->total_bytes = 0;
 	adapter->rx_ring->total_packets = 0;
-	if (adapter->rx_ring->buddy) {
-		adapter->rx_ring->buddy->total_bytes = 0;
-		adapter->rx_ring->buddy->total_packets = 0;
-	}
+	adapter->tx_ring->total_bytes = 0;
+	adapter->tx_ring->total_packets = 0;
 
-	if (new_itr != adapter->itr) {
+	if (new_itr != q_vector->itr_val) {
 		/* this attempts to bias the interrupt rate towards Bulk
 		 * by adding intermediate steps when interrupt rate is
 		 * increasing */
-		new_itr = new_itr > adapter->itr ?
-			     min(adapter->itr + (new_itr >> 2), new_itr) :
+		new_itr = new_itr > q_vector->itr_val ?
+		             max((new_itr * q_vector->itr_val) /
+		                 (new_itr + (q_vector->itr_val >> 2)),
+		                 new_itr) :
 			     new_itr;
 		/* Don't write the value here; it resets the adapter's
 		 * internal timer, and causes us to delay far longer than
@@ -2956,24 +3221,21 @@ set_itr_now:
 		 * value at the beginning of the next interrupt so the timing
 		 * ends up being correct.
 		 */
-		adapter->itr = new_itr;
-		adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
-		adapter->rx_ring->set_itr = 1;
+		q_vector->itr_val = new_itr;
+		q_vector->set_itr = 1;
 	}
 
 	return;
 }
 
-
 #define IGB_TX_FLAGS_CSUM		0x00000001
 #define IGB_TX_FLAGS_VLAN		0x00000002
 #define IGB_TX_FLAGS_TSO		0x00000004
 #define IGB_TX_FLAGS_IPV4		0x00000008
-#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
-#define IGB_TX_FLAGS_VLAN_SHIFT	16
+#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT		16
 
-static inline int igb_tso_adv(struct igb_adapter *adapter,
-			      struct igb_ring *tx_ring,
+static inline int igb_tso_adv(struct igb_ring *tx_ring,
 			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
@@ -3035,8 +3297,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
 	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
 	/* For 82575, context index must be unique per ring. */
-	if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
-		mss_l4len_idx |= tx_ring->eims_value >> 4;
+	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 	context_desc->seqnum_seed = 0;
@@ -3053,14 +3315,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
 	return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
-					struct igb_ring *tx_ring,
-					struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
+				   struct sk_buff *skb, u32 tx_flags)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
-	unsigned int i;
+	struct pci_dev *pdev = tx_ring->pdev;
 	struct igb_buffer *buffer_info;
 	u32 info = 0, tu_cmd = 0;
+	unsigned int i;
 
 	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
 	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3070,6 +3332,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
 		if (tx_flags & IGB_TX_FLAGS_VLAN)
 			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
+
 		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			info |= skb_network_header_len(skb);
@@ -3103,7 +3366,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 				break;
 			default:
 				if (unlikely(net_ratelimit()))
-					dev_warn(&adapter->pdev->dev,
+					dev_warn(&pdev->dev,
 					    "partial checksum but proto=%x!\n",
 					    skb->protocol);
 				break;
@@ -3112,11 +3375,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
 		context_desc->seqnum_seed = 0;
-		if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
 			context_desc->mss_l4len_idx =
-				cpu_to_le32(tx_ring->eims_value >> 4);
-		else
-			context_desc->mss_l4len_idx = 0;
+				cpu_to_le32(tx_ring->reg_idx << 4);
 
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
@@ -3135,11 +3396,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 #define IGB_MAX_TXD_PWR	16
 #define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
 
-static inline int igb_tx_map_adv(struct igb_adapter *adapter,
-				 struct igb_ring *tx_ring, struct sk_buff *skb,
+static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 				 unsigned int first)
 {
 	struct igb_buffer *buffer_info;
+	struct pci_dev *pdev = tx_ring->pdev;
 	unsigned int len = skb_headlen(skb);
 	unsigned int count = 0, i;
 	unsigned int f;
@@ -3152,16 +3413,19 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
-	buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
+	buffer_info->dma = pci_map_single(pdev, skb->data, len,
 					  PCI_DMA_TODEVICE);
-	count++;
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
+	if (pci_dma_mapping_error(buffer_info->dma))
+		goto dma_error;
 
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 		struct skb_frag_struct *frag;
 
+		count++;
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+
 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
 
@@ -3170,34 +3434,54 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 		buffer_info->length = len;
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
-		buffer_info->dma = pci_map_page(adapter->pdev,
+		buffer_info->mapped_as_page = true;
+		buffer_info->dma = pci_map_page(pdev,
 						frag->page,
 						frag->page_offset,
 						len,
 						PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(buffer_info->dma))
+			goto dma_error;
 
-		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
 	}
 
-	i = ((i == 0) ? tx_ring->count - 1 : i - 1);
 	tx_ring->buffer_info[i].skb = skb;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
-	return count;
+	return ++count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+
+	/* clear timestamp and dma mappings for failed buffer_info mapping */
+	buffer_info->dma = 0;
+	buffer_info->time_stamp = 0;
+	buffer_info->length = 0;
+	buffer_info->next_to_watch = 0;
+	buffer_info->mapped_as_page = false;
+	count--;
+
+	/* clear timestamp and dma mappings for remaining portion of packet */
+	while (count >= 0) {
+		count--;
+		i--;
+		if (i < 0)
+			i += tx_ring->count;
+		buffer_info = &tx_ring->buffer_info[i];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+	}
+
+	return 0;
 }
 
-static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
-				    struct igb_ring *tx_ring,
+static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
 				    int tx_flags, int count, u32 paylen,
 				    u8 hdr_len)
 {
-	union e1000_adv_tx_desc *tx_desc = NULL;
+	union e1000_adv_tx_desc *tx_desc;
 	struct igb_buffer *buffer_info;
 	u32 olinfo_status = 0, cmd_type_len;
-	unsigned int i;
+	unsigned int i = tx_ring->next_to_use;
 
 	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
 			E1000_ADVTXD_DCMD_DEXT);
@@ -3219,27 +3503,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
 	}
 
-	if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
-	    (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
+	    (tx_flags & (IGB_TX_FLAGS_CSUM |
+	                 IGB_TX_FLAGS_TSO |
 			 IGB_TX_FLAGS_VLAN)))
-		olinfo_status |= tx_ring->eims_value >> 4;
+		olinfo_status |= tx_ring->reg_idx << 4;
 
 	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
-	i = tx_ring->next_to_use;
-	while (count--) {
+	do {
 		buffer_info = &tx_ring->buffer_info[i];
 		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 		tx_desc->read.cmd_type_len =
 			cpu_to_le32(cmd_type_len | buffer_info->length);
 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		count--;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
-	}
+	} while (count > 0);
 
-	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -3247,18 +3532,18 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 	wmb();
 
 	tx_ring->next_to_use = i;
-	writel(i, adapter->hw.hw_addr + tx_ring->tail);
+	writel(i, tx_ring->tail);
 	/* we need this if more than one processor can write to our tail
 	 * at a time, it synchronizes IO on IA64/Altix systems */
 	mmiowb();
 }
 
-static int __igb_maybe_stop_tx(struct net_device *netdev,
-			       struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct net_device *netdev = tx_ring->netdev;
 
 	netif_stop_queue(netdev);
+
 	/* Herbert's original patch had:
 	 *  smp_mb__after_netif_stop_queue();
 	 * but since that doesn't exist yet, just open code it. */
@@ -3271,50 +3556,37 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 
 	/* A reprieve! */
 	netif_start_queue(netdev);
-	++adapter->restart_queue;
+	tx_ring->tx_stats.restart_queue++;
 	return 0;
 }
 
-static int igb_maybe_stop_tx(struct net_device *netdev,
-			     struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
 	if (igb_desc_unused(tx_ring) >= size)
 		return 0;
-	return __igb_maybe_stop_tx(netdev, tx_ring, size);
+	return __igb_maybe_stop_tx(tx_ring, size);
 }
 
-static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
-				   struct net_device *netdev,
-				   struct igb_ring *tx_ring)
+int igb_xmit_frame_ring_adv(struct sk_buff *skb,
+				    struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
-	int tso = 0;
-
-	if (test_bit(__IGB_DOWN, &adapter->state)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
-	if (skb->len <= 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
+	int tso = 0, count;
 
 	/* need: 1 descriptor per page,
 	 *       + 2 desc gap to keep tail from touching head,
 	 *       + 1 desc for skb->data,
 	 *       + 1 desc for context descriptor,
 	 * otherwise try next time */
-	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
-	skb_orphan(skb);
 
-	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
 		tx_flags |= IGB_TX_FLAGS_VLAN;
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
@@ -3323,42 +3595,64 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		tx_flags |= IGB_TX_FLAGS_IPV4;
 
 	first = tx_ring->next_to_use;
-	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
-					      &hdr_len) : 0;
+	if (skb_is_gso(skb)) {
+		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
 
-	if (tso < 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
+		if (tso < 0) {
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
 	}
 
 	if (tso)
 		tx_flags |= IGB_TX_FLAGS_TSO;
-	else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
+	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
 	         (skb->ip_summed == CHECKSUM_PARTIAL))
 		tx_flags |= IGB_TX_FLAGS_CSUM;
 
-	igb_tx_queue_adv(adapter, tx_ring, tx_flags,
-			 igb_tx_map_adv(adapter, tx_ring, skb, first),
-			 skb->len, hdr_len);
+	/* important: record TX start */
+	tx_ring->netdev->trans_start = jiffies;
 
-	netdev->trans_start = jiffies;
+	/*
+	 * count reflects descriptors mapped; if 0 or less then a mapping error
+	 * has occurred and we need to rewind the descriptor queue
+	 */
+	count = igb_tx_map_adv(tx_ring, skb, first);
+	if (!count) {
+		dev_kfree_skb_any(skb);
+		tx_ring->buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+		return NETDEV_TX_OK;
+	}
+	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
 
 	/* Make sure there is space in the ring for the next send. */
-	igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
 
 	return NETDEV_TX_OK;
 }
 
-static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
+static int igb_xmit_frame_adv(struct sk_buff *skb,
+				      struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct igb_ring *tx_ring = &adapter->tx_ring[0];
 
+	if (test_bit(__IGB_DOWN, &adapter->state)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb->len <= 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
 	/* This goes back to the question of how to logically map a tx queue
 	 * to a flow.  Right now, performance is impacted slightly negatively
 	 * if using multiple tx queues.  If the stack breaks away from a
 	 * single qdisc implementation, we can look at this again. */
-	return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
+	return igb_xmit_frame_ring_adv(skb, tx_ring);
 }
 
 /**
@@ -3411,16 +3705,17 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev)
 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	u32 rx_buffer_len, i;
 
-	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
-	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		dev_err(&pdev->dev, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
 
 	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
+		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
 		return -EINVAL;
 	}
 
@@ -3429,8 +3724,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 
 	/* igb_down has a dependency on max_frame_size */
 	adapter->max_frame_size = max_frame;
-	if (netif_running(netdev))
-		igb_down(adapter);
 
 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
@@ -3439,25 +3732,22 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
 	 */
 
 	if (max_frame <= IGB_RXBUFFER_1024)
-		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
-	else if (max_frame <= IGB_RXBUFFER_2048)
-		adapter->rx_buffer_len = IGB_RXBUFFER_2048;
+		rx_buffer_len = IGB_RXBUFFER_1024;
+	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
+		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 	else
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-		adapter->rx_buffer_len = IGB_RXBUFFER_16384;
-#else
-		adapter->rx_buffer_len = PAGE_SIZE / 2;
-#endif
+		rx_buffer_len = IGB_RXBUFFER_128;
 
-	/* adjust allocation if LPE protects us, and we aren't using SBP */
-	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
-		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	if (netif_running(netdev))
+		igb_down(adapter);
 
-	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
+	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
 		 netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
 
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+
 	if (netif_running(netdev))
 		igb_up(adapter);
 	else
@@ -3477,6 +3767,7 @@ void igb_update_stats(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
+	u32 rnbc;
 	u16 phy_tmp;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
@@ -3490,6 +3781,7 @@ void igb_update_stats(struct igb_adapter *adapter)
 	if (pdev->error_state != pci_channel_io_normal)
 		return;
 
+	/* read stats registers */
 	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
 	adapter->stats.gprc += rd32(E1000_GPRC);
 	adapter->stats.gorc += rd32(E1000_GORCL);
@@ -3522,7 +3814,8 @@ void igb_update_stats(struct igb_adapter *adapter)
 	adapter->stats.gptc += rd32(E1000_GPTC);
 	adapter->stats.gotc += rd32(E1000_GOTCL);
 	rd32(E1000_GOTCH); /* clear GOTCL */
-	adapter->stats.rnbc += rd32(E1000_RNBC);
+	rnbc = rd32(E1000_RNBC);
+	adapter->stats.rnbc += rnbc;
 	adapter->stats.ruc += rd32(E1000_RUC);
 	adapter->stats.rfc += rd32(E1000_RFC);
 	adapter->stats.rjc += rd32(E1000_RJC);
@@ -3541,7 +3834,6 @@ void igb_update_stats(struct igb_adapter *adapter)
 	adapter->stats.bptc += rd32(E1000_BPTC);
 
 	/* used for adaptive IFS */
-
 	hw->mac.tx_packet_delta = rd32(E1000_TPT);
 	adapter->stats.tpt += hw->mac.tx_packet_delta;
 	hw->mac.collision_delta = rd32(E1000_COLC);
@@ -3570,7 +3862,7 @@ void igb_update_stats(struct igb_adapter *adapter)
 	/* Rx Errors */
 
 	/* RLEC on some newer hardware can be incorrect so build
-	* our own version based on RUC and ROC */
+	 * our own version based on RUC and ROC */
 	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
@@ -3607,13 +3899,12 @@ void igb_update_stats(struct igb_adapter *adapter)
 
 static irqreturn_t igb_msix_other(int irq, void *data, struct pt_regs *regs)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = rd32(E1000_ICR);
-
 	/* reading ICR causes bit 31 of EICR to be cleared */
-	if(icr & E1000_ICR_DOUTSYNC) {
+
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -3640,70 +3931,146 @@ static irqreturn_t igb_msix_other(int irq, void *data, struct pt_regs *regs)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t igb_msix_tx(int irq, void *data, struct pt_regs *regs)
+static void igb_write_itr(struct igb_q_vector *q_vector)
 {
-	struct igb_ring *tx_ring = data;
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct e1000_hw *hw = &adapter->hw;
+	u32 itr_val = q_vector->itr_val & 0x7FFC;
 
-#ifdef CONFIG_IGB_DCA
-	if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_tx_dca(tx_ring);
-#endif
+	if (!q_vector->set_itr)
+		return;
 
-	tx_ring->total_bytes = 0;
-	tx_ring->total_packets = 0;
+	if (!itr_val)
+		itr_val = 0x4;
 
-	if (!igb_clean_tx_irq(tx_ring))
-		/* Ring was not completely cleaned, so fire another interrupt */
-		wr32(E1000_EICS, tx_ring->eims_value);
+	if (q_vector->itr_shift)
+		itr_val |= itr_val << q_vector->itr_shift;
 	else
-		wr32(E1000_EIMS, tx_ring->eims_value);
+		itr_val |= 0x8000000;
+
+	writel(itr_val, q_vector->itr_register);
+	q_vector->set_itr = 0;
+}
+
+static irqreturn_t igb_msix_ring(int irq, void *data, struct pt_regs *regs)
+{
+	struct igb_q_vector *q_vector = data;
+
+	/* Write the ITR value calculated from the previous interrupt. */
+	igb_write_itr(q_vector);
+
+	netif_rx_schedule(q_vector->dummy_netdev);
 
 	return IRQ_HANDLED;
 }
 
-static void igb_write_itr(struct igb_ring *ring)
+#ifdef CONFIG_IGB_DCA
+static void igb_update_dca(struct igb_q_vector *q_vector)
 {
-	struct e1000_hw *hw = &ring->adapter->hw;
-	if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
-		switch (hw->mac.type) {
-		case e1000_82576:
-			wr32(ring->itr_register, ring->itr_val |
-			     0x80000000);
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct e1000_hw *hw = &adapter->hw;
+	int cpu = get_cpu();
+
+	if (q_vector->cpu == cpu)
+		goto out_no_update;
+
+	if (q_vector->tx_ring) {
+		int q = q_vector->tx_ring->reg_idx;
+		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
+		if (hw->mac.type == e1000_82575) {
+			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		} else {
+			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
+			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+			              E1000_DCA_TXCTRL_CPUID_SHIFT;
+		}
+		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
+		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
+	}
+	if (q_vector->rx_ring) {
+		int q = q_vector->rx_ring->reg_idx;
+		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
+		if (hw->mac.type == e1000_82575) {
+			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
+			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		} else {
+			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
+			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
+			              E1000_DCA_RXCTRL_CPUID_SHIFT;
+		}
+		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
+		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
+		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
+		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
+	}
+	q_vector->cpu = cpu;
+out_no_update:
+	put_cpu();
+}
+
+static void igb_setup_dca(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int i;
+
+	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
+		return;
+
+	/* Always use CB2 mode, difference is masked in the CB driver. */
+	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		q_vector->cpu = -1;
+		igb_update_dca(q_vector);
+	}
+}
+
+static int __igb_notify_dca(struct device *dev, void *data)
+{
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned long event = *(unsigned long *)data;
+
+	switch (event) {
+	case DCA_PROVIDER_ADD:
+		/* if already enabled, don't do it again */
+		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
 			break;
-		default:
-			wr32(ring->itr_register, ring->itr_val |
-			     (ring->itr_val << 16));
+		if (dca_add_requester(dev) == 0) {
+			adapter->flags |= IGB_FLAG_DCA_ENABLED;
+			dev_info(&pdev->dev, "DCA enabled\n");
+			igb_setup_dca(adapter);
 			break;
 		}
-		ring->set_itr = 0;
+		/* Fall Through since DCA is disabled. */
+	case DCA_PROVIDER_REMOVE:
+		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
+			/* without this a class_device is left
+			 * hanging around in the sysfs model */
+			dca_remove_requester(dev);
+			dev_info(&pdev->dev, "DCA disabled\n");
+			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
+			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
+		}
+		break;
 	}
+
+	return 0;
 }
 
-static irqreturn_t igb_msix_rx(int irq, void *data, struct pt_regs *regs)
+static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
+                          void *p)
 {
-	struct igb_ring *rx_ring = data;
-
-	/* Write the ITR value calculated at the end of the
-	 * previous interrupt.
-	 */
-
-	igb_write_itr(rx_ring);
+	int ret_val;
 
-	if (netif_rx_schedule_prep(rx_ring->netdev)) {
-		rx_ring->total_bytes = 0;
-		rx_ring->total_packets = 0;
-		__netif_rx_schedule(rx_ring->netdev);
-	}
+	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
+	                                 __igb_notify_dca);
 
-#ifdef CONFIG_IGB_DCA
-	/* Do not call igb_update_rx_dca here, it should only be
-	 * called from igb_poll.
-	 */
-#endif
-	return IRQ_HANDLED;
+	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
+#endif /* CONFIG_IGB_DCA */
 
 static void igb_ping_all_vfs(struct igb_adapter *adapter)
 {
@@ -3719,6 +4086,45 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
 	}
 }
 
+static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr = rd32(E1000_VMOLR(vf));
+	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+
+	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
+	                    IGB_VF_FLAG_MULTI_PROMISC);
+	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
+	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
+		vmolr |= E1000_VMOLR_MPME;
+		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
+	} else {
+		/*
+		 * if we have hashes and we are clearing a multicast promisc
+		 * flag we need to write the hashes to the MTA as this step
+		 * was previously skipped
+		 */
+		if (vf_data->num_vf_mc_hashes > 30) {
+			vmolr |= E1000_VMOLR_MPME;
+		} else if (vf_data->num_vf_mc_hashes) {
+			int j;
+			vmolr |= E1000_VMOLR_ROMPE;
+			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+		}
+	}
+
+	wr32(E1000_VMOLR(vf), vmolr);
+
+	/* there are flags left unprocessed, likely not supported */
+	if (*msgbuf & E1000_VT_MSGINFO_MASK)
+		return -EINVAL;
+
+	return 0;
+
+}
+
 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
 				  u32 *msgbuf, u32 vf)
 {
@@ -3728,23 +4134,22 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 	int i;
 
-	/* only up to 30 hash values supported */
-	if (n > 30)
-		n = 30;
-
-	/* salt away the number of multi cast addresses assigned
+	/* salt away the number of multicast addresses assigned
 	 * to this VF for later use to restore when the PF multi cast
 	 * list changes
 	 */
 	vf_data->num_vf_mc_hashes = n;
 
-	/* VFs are limited to using the MTA hash table for their multicast
-	 * addresses */
+	/* only up to 30 hash values supported */
+	if (n > 30)
+		n = 30;
+
+	/* store the hashes for later use */
 	for (i = 0; i < n; i++)
 		vf_data->vf_mc_hashes[i] = hash_list[i];
 
 	/* Flush and reset the mta with the new values */
-	igb_set_multi(adapter->netdev);
+	igb_set_rx_mode(adapter->netdev);
 
 	return 0;
 }
@@ -3756,9 +4161,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
 	int i, j;
 
 	for (i = 0; i < adapter->vfs_allocated_count; i++) {
+		u32 vmolr = rd32(E1000_VMOLR(i));
+		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+
 		vf_data = &adapter->vf_data[i];
-		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
-			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+
+		if ((vf_data->num_vf_mc_hashes > 30) ||
+		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
+			vmolr |= E1000_VMOLR_MPME;
+		} else if (vf_data->num_vf_mc_hashes) {
+			vmolr |= E1000_VMOLR_ROMPE;
+			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+		}
+		wr32(E1000_VMOLR(i), vmolr);
 	}
 }
 
@@ -3796,7 +4212,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 reg, i;
 
-	/* It is an error to call this function when VFs are not enabled */
+	/* The vlvf table only exists on 82576 hardware and newer */
+	if (hw->mac.type < e1000_82576)
+		return -1;
+
+	/* we only need to do this if VMDq is enabled */
 	if (!adapter->vfs_allocated_count)
 		return -1;
 
@@ -3826,16 +4246,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 
 			/* if !enabled we need to set this up in vfta */
 			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
-				/* add VID to filter table, if bit already set
-				 * PF must have added it outside of table */
-				if (igb_vfta_set(hw, vid, true))
-					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
-						adapter->vfs_allocated_count);
+				/* add VID to filter table */
+				igb_vfta_set(hw, vid, true);
 				reg |= E1000_VLVF_VLANID_ENABLE;
 			}
 			reg &= ~E1000_VLVF_VLANID_MASK;
 			reg |= vid;
-
 			wr32(E1000_VLVF(i), reg);
 
 			/* do not modify RLPML for PF devices */
@@ -3851,8 +4267,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 				reg |= size;
 				wr32(E1000_VMOLR(vf), reg);
 			}
-			adapter->vf_data[vf].vlans_enabled++;
 
+			adapter->vf_data[vf].vlans_enabled++;
 			return 0;
 		}
 	} else {
@@ -3901,7 +4317,7 @@ static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
 	adapter->vf_data[vf].last_nack = jiffies;
 
 	/* reset offloads to defaults */
-	igb_set_vmolr(&adapter->hw, vf);
+	igb_set_vmolr(adapter, vf);
 
 	/* reset vlans for device */
 	igb_clear_vf_vfta(adapter, vf);
@@ -3910,7 +4326,7 @@ static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
 	adapter->vf_data[vf].num_vf_mc_hashes = 0;
 
 	/* Flush and reset the mta with the new values */
-	igb_set_multi(adapter->netdev);
+	igb_set_rx_mode(adapter->netdev);
 }
 
 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
@@ -3928,6 +4344,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
 	u32 reg, msgbuf[3];
 	u8 *addr = (u8 *)(&msgbuf[1]);
 
@@ -3935,8 +4352,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 	igb_vf_reset(adapter, vf);
 
 	/* set vf mac address */
-	igb_rar_set(hw, vf_mac, vf + 1);
-	igb_set_rah_pool(hw, vf, vf + 1);
+	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
 
 	/* enable transmit and receive for vf */
 	reg = rd32(E1000_VFTE);
@@ -3987,8 +4403,14 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 
 	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
 
-	if (retval)
+	if (retval) {
+		/* if receive failed revoke VF CTS stats and restart init */
 		dev_err(&pdev->dev, "Error receiving message from VF\n");
+		vf_data->flags &= ~IGB_VF_FLAG_CTS;
+		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+			return;
+		goto out;
+	}
 
 	/* this is a message we already processed, do nothing */
 	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
@@ -4005,18 +4427,19 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 	}
 
 	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
-		msgbuf[0] = E1000_VT_MSGTYPE_NACK;
-		if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
-			igb_write_mbx(hw, msgbuf, 1, vf);
-			vf_data->last_nack = jiffies;
-		}
-		return;
+		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+			return;
+		retval = -1;
+		goto out;
 	}
 
 	switch ((msgbuf[0] & 0xFFFF)) {
 	case E1000_VF_SET_MAC_ADDR:
 		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
 		break;
+	case E1000_VF_SET_PROMISC:
+		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
+		break;
 	case E1000_VF_SET_MULTICAST:
 		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
 		break;
@@ -4027,19 +4450,19 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
 		break;
 	default:
-		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
+		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
 		retval = -1;
 		break;
 	}
 
+	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+out:
 	/* notify the VF of the results of what it sent us */
 	if (retval)
 		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
 	else
 		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
 
-	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
-
 	igb_write_mbx(hw, msgbuf, 1, vf);
 }
 
@@ -4063,128 +4486,32 @@ static void igb_msg_task(struct igb_adapter *adapter)
 	}
 }
 
-#ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *rx_ring)
-{
-	u32 dca_rxctrl;
-	struct igb_adapter *adapter = rx_ring->adapter;
-	struct e1000_hw *hw = &adapter->hw;
-	int cpu = get_cpu();
-	int q = rx_ring->reg_idx;
-
-	if (rx_ring->cpu != cpu) {
-		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
-		if (hw->mac.type >= e1000_82576) {
-			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
-			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-			              E1000_DCA_RXCTRL_CPUID_SHIFT;
-		} else {
-			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
-			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-		}
-		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
-		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
-		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
-		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
-		rx_ring->cpu = cpu;
-	}
-	put_cpu();
-}
-
-static void igb_update_tx_dca(struct igb_ring *tx_ring)
-{
-	u32 dca_txctrl;
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct e1000_hw *hw = &adapter->hw;
-	int cpu = get_cpu();
-	int q = tx_ring->reg_idx;
-
-	if (tx_ring->cpu != cpu) {
-		dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
-		if (hw->mac.type >= e1000_82576) {
-			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
-			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-			              E1000_DCA_TXCTRL_CPUID_SHIFT;
-		} else {
-			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
-			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-		}
-		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
-		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
-		tx_ring->cpu = cpu;
-	}
-	put_cpu();
-}
-
-static void igb_setup_dca(struct igb_adapter *adapter)
+/**
+ *  igb_set_uta - Set unicast filter table address
+ *  @adapter: board private structure
+ *
+ *  The unicast table address is a register array of 32-bit registers.
+ *  The table is meant to be used in a way similar to how the MTA is used,
+ *  however due to certain limitations in the hardware it is necessary to
+ *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
+ **/
+static void igb_set_uta(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	int i;
 
-	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
+	/* The UTA table only exists on 82576 hardware and newer */
+	if (hw->mac.type < e1000_82576)
 		return;
 
-	/* Always use CB2 mode, difference is masked in the CB driver. */
-	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i].cpu = -1;
-		igb_update_tx_dca(&adapter->tx_ring[i]);
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].cpu = -1;
-		igb_update_rx_dca(&adapter->rx_ring[i]);
-	}
-}
-
-static int __igb_notify_dca(struct device *dev, void *data)
-{
-	struct net_device *netdev = dev_get_drvdata(dev);
-	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
-	unsigned long event = *(unsigned long *)data;
-
-	switch (event) {
-	case DCA_PROVIDER_ADD:
-		/* if already enabled, don't do it again */
-		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-			break;
-		/* Always use CB2 mode, difference is masked
-		 * in the CB driver. */
-		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
-		if (dca_add_requester(dev) == 0) {
-			adapter->flags |= IGB_FLAG_DCA_ENABLED;
-			dev_info(&adapter->pdev->dev, "DCA enabled\n");
-			igb_setup_dca(adapter);
-			break;
-		}
-		/* Fall Through since DCA is disabled. */
-	case DCA_PROVIDER_REMOVE:
-		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
-			/* without this a class_device is left
- 			 * hanging around in the sysfs model */
-			dca_remove_requester(dev);
-			dev_info(&adapter->pdev->dev, "DCA disabled\n");
-			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
-			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
-		}
-		break;
-	}
-
-	return 0;
-}
-
-static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
-                          void *p)
-{
-	int ret_val;
-
-	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
-	                                 __igb_notify_dca);
+	/* we only need to do this if VMDq is enabled */
+	if (!adapter->vfs_allocated_count)
+		return;
 
-	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
+	for (i = 0; i < hw->mac.uta_reg_count; i++)
+		array_wr32(E1000_UTA, i, ~0);
 }
-#endif /* CONFIG_IGB_DCA */
 
 /**
  * igb_intr_msi - Interrupt Handler
@@ -4194,15 +4521,15 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
  **/
 static irqreturn_t igb_intr_msi(int irq, void *data, struct pt_regs *regs)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	struct e1000_hw *hw = &adapter->hw;
 	/* read ICR disables interrupts using IAM */
 	u32 icr = rd32(E1000_ICR);
 
-	igb_write_itr(adapter->rx_ring);
+	igb_write_itr(q_vector);
 
-	if(icr & E1000_ICR_DOUTSYNC) {
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -4213,13 +4540,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data, struct pt_regs *regs)
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (netif_rx_schedule_prep(adapter->rx_ring[0].netdev)) {
-		adapter->tx_ring->total_bytes = 0;
-		adapter->tx_ring->total_packets = 0;
-		adapter->rx_ring->total_bytes = 0;
-		adapter->rx_ring->total_packets = 0;
-		__netif_rx_schedule(adapter->rx_ring[0].netdev);
-	}
+	netif_rx_schedule(q_vector->dummy_netdev);
 
 	return IRQ_HANDLED;
 }
@@ -4231,8 +4552,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data, struct pt_regs *regs)
  **/
 static irqreturn_t igb_intr(int irq, void *data, struct pt_regs *regs)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	struct e1000_hw *hw = &adapter->hw;
 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
 	 * need for the IMC write */
@@ -4240,14 +4561,14 @@ static irqreturn_t igb_intr(int irq, void *data, struct pt_regs *regs)
 	if (!icr)
 		return IRQ_NONE;  /* Not our interrupt */
 
-	igb_write_itr(adapter->rx_ring);
+	igb_write_itr(q_vector);
 
 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
 	 * not set, then the adapter didn't send an interrupt */
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
-	if(icr & E1000_ICR_DOUTSYNC) {
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -4259,32 +4580,27 @@ static irqreturn_t igb_intr(int irq, void *data, struct pt_regs *regs)
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (netif_rx_schedule_prep(adapter->rx_ring[0].netdev)) {
-		adapter->tx_ring->total_bytes = 0;
-		adapter->rx_ring->total_bytes = 0;
-		adapter->tx_ring->total_packets = 0;
-		adapter->rx_ring->total_packets = 0;
-		__netif_rx_schedule(adapter->rx_ring[0].netdev);
-	}
+	netif_rx_schedule(q_vector->dummy_netdev);
 
 	return IRQ_HANDLED;
 }
 
-static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
+static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 
-	if (adapter->itr_setting & 3) {
-		if (adapter->num_rx_queues == 1)
+	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
+	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
+		if (!adapter->msix_entries)
 			igb_set_itr(adapter);
 		else
-			igb_update_ring_itr(rx_ring);
+			igb_update_ring_itr(q_vector);
 	}
 
 	if (!test_bit(__IGB_DOWN, &adapter->state)) {
 		if (adapter->msix_entries)
-			wr32(E1000_EIMS, rx_ring->eims_value);
+			wr32(E1000_EIMS, q_vector->eims_value);
 		else
 			igb_irq_enable(adapter);
 	}
@@ -4297,53 +4613,53 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
  **/
 static int igb_poll(struct net_device *netdev, int *budget)
 {
-	struct igb_ring *rx_ring = netdev->priv;
-	struct net_device *real_netdev = rx_ring->adapter->netdev;
+	struct igb_q_vector *q_vector = netdev->priv;
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct net_device *real_netdev = adapter->netdev;
 	int work_to_do = min(*budget, netdev->quota);
-	int work_done = 0;
+	int tx_clean_complete = 1, work_done = 0;
 
 	/* Keep link state information with original netdev */
 	if (!netif_carrier_ok(real_netdev))
 		goto quit_polling;
 
 #ifdef CONFIG_IGB_DCA
-	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_rx_dca(rx_ring);
+	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
+		igb_update_dca(q_vector);
 #endif
-	igb_clean_rx_irq_adv(rx_ring, &work_done, work_to_do);
+	if (q_vector->tx_ring)
+		tx_clean_complete = igb_clean_tx_irq(q_vector);
+
+	if (q_vector->rx_ring)
+		igb_clean_rx_irq_adv(q_vector, &work_done, work_to_do);
+
+	if (!tx_clean_complete)
+		work_done = *budget;
 
 	*budget -= work_done;
 	netdev->quota -= work_done;
 
-	if (rx_ring->buddy) {
-#ifdef CONFIG_IGB_DCA
-		if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-			igb_update_tx_dca(rx_ring->buddy);
-#endif
-		if (!igb_clean_tx_irq(rx_ring->buddy))
-			work_done = work_to_do;
-	}
-
 	/* If not enough Rx work done, exit the polling mode */
 	if ((work_done < work_to_do) || !netif_running(real_netdev)) {
 quit_polling:
 		netif_rx_complete(netdev);
-		igb_rx_irq_enable(rx_ring);
+		igb_ring_irq_enable(q_vector);
 		return 0;
 	}
 
-	return 1;
-}	
+	return work_done;
+}
 
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: pointer to q_vector containing needed info
  * returns true if ring is completely cleaned
  **/
-static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct net_device *netdev = adapter->netdev;
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *tx_ring = q_vector->tx_ring;
+	struct net_device *netdev = tx_ring->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -4375,7 +4691,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 				total_bytes += bytecount;
 			}
 
-			igb_unmap_and_free_tx_resource(adapter, buffer_info);
+			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 			tx_desc->wb.status = 0;
 
 			i++;
@@ -4398,7 +4714,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 		if (netif_queue_stopped(netdev) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_queue(netdev);
-			++adapter->restart_queue;
+			tx_ring->tx_stats.restart_queue++;
 		}
 	}
 
@@ -4413,9 +4729,9 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 			 E1000_STATUS_TXOFF)) {
 
 			/* detected Tx unit hang */
-			dev_err(&adapter->pdev->dev,
+			dev_err(&tx_ring->pdev->dev,
 				"Detected Tx Unit Hang\n"
-				"  Tx Queue             <%lu>\n"
+				"  Tx Queue             <%d>\n"
 				"  TDH                  <%x>\n"
 				"  TDT                  <%x>\n"
 				"  next_to_use          <%x>\n"
@@ -4425,13 +4741,12 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 				"  next_to_watch        <%x>\n"
 				"  jiffies              <%lx>\n"
 				"  desc.status          <%x>\n",
-				(unsigned long)((tx_ring - adapter->tx_ring) /
-				sizeof(struct igb_ring)),
-				readl(adapter->hw.hw_addr + tx_ring->head),
-				readl(adapter->hw.hw_addr + tx_ring->tail),
+				tx_ring->queue_index,
+				readl(tx_ring->head),
+				readl(tx_ring->tail),
 				tx_ring->next_to_use,
 				tx_ring->next_to_clean,
-				tx_ring->buffer_info[i].time_stamp,
+				tx_ring->buffer_info[eop].time_stamp,
 				eop,
 				jiffies,
 				eop_desc->wb.status);
@@ -4440,65 +4755,54 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
 	}
 	tx_ring->total_bytes += total_bytes;
 	tx_ring->total_packets += total_packets;
-	adapter->net_stats.tx_bytes += total_bytes;
-	adapter->net_stats.tx_packets += total_packets;
+	tx_ring->tx_stats.bytes += total_bytes;
+	tx_ring->tx_stats.packets += total_packets;
 	return (count < tx_ring->count);
 }
 
 /**
  * igb_receive_skb - helper function to handle rx indications
- * @ring: pointer to receive ring receving this packet
- * @status: descriptor status field as written by hardware
- * @rx_desc: receive descriptor containing vlan and type information.
- * @skb: pointer to sk_buff to be indicated to stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
  **/
-static void igb_receive_skb(struct igb_ring *ring, u8 status,
-                            union e1000_adv_rx_desc * rx_desc,
-                            struct sk_buff *skb)
-{
-	struct igb_adapter * adapter = ring->adapter;
-	bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
-
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-		if (vlan_extracted)
-			vlan_gro_receive(&ring->napi, adapter->vlgrp,
-			                 le16_to_cpu(rx_desc->wb.upper.vlan),
-			                 skb);
-		else
-			napi_gro_receive(&ring->napi, skb);
-	} else {
-		if (vlan_extracted)
-			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-			                  le16_to_cpu(rx_desc->wb.upper.vlan));
-		else
+static void igb_receive_skb(struct igb_q_vector *q_vector,
+                            struct sk_buff *skb,
+                            u16 vlan_tag)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
 
-			netif_receive_skb(skb);
-	}
+	if (vlan_tag)
+		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
+		                 vlan_tag, skb);
+	else
+		napi_gro_receive(&q_vector->napi, skb);
 }
 
-static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
+static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
-	if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
+	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+	     (status_err & E1000_RXD_STAT_IXSM))
 		return;
+
 	/* TCP/UDP checksum error bit is set */
 	if (status_err &
 	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
 		/* let the stack verify checksum errors */
-		adapter->hw_csum_err++;
 		return;
 	}
 	/* It must be a TCP or UDP packet with a valid checksum */
 	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	adapter->hw_csum_good++;
+	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
 }
 
-static inline u16 igb_get_hlen(struct igb_adapter *adapter,
+static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                                union e1000_adv_rx_desc *rx_desc)
 {
 	/* HW will not DMA in data larger than the given buffer, even if it
@@ -4507,26 +4811,29 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
 	 */
 	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
 	           E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
-	if (hlen > adapter->rx_ps_hdr_size)
-		hlen = adapter->rx_ps_hdr_size;
+	if (hlen > rx_ring->rx_buffer_len)
+		hlen = rx_ring->rx_buffer_len;
 	return hlen;
 }
 
-static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
-				 int *work_done, int budget)
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
+                                 int *work_done, int budget)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *rx_ring = q_vector->rx_ring;
+	struct net_device *netdev = rx_ring->netdev;
+	struct pci_dev *pdev = rx_ring->pdev;
 	union e1000_adv_rx_desc *rx_desc , *next_rxd;
 	struct igb_buffer *buffer_info , *next_buffer;
 	struct sk_buff *skb;
 	bool cleaned = false;
 	int cleaned_count = 0;
+	int current_node = numa_node_id();
 	unsigned int total_bytes = 0, total_packets = 0;
 	unsigned int i;
 	u32 staterr;
 	u16 length;
+	u16 vlan_tag;
 
 	i = rx_ring->next_to_clean;
 	buffer_info = &rx_ring->buffer_info[i];
@@ -4545,6 +4852,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
+
 		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
 		prefetch(next_rxd);
 		next_buffer = &rx_ring->buffer_info[i];
@@ -4553,23 +4861,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 		cleaned = true;
 		cleaned_count++;
 
-		/* this is the fast path for the non-packet split case */
-		if (!adapter->rx_ps_hdr_size) {
-			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_buffer_len,
-					 PCI_DMA_FROMDEVICE);
-			buffer_info->dma = 0;
-			skb_put(skb, length);
-			goto send_up;
-		}
-
 		if (buffer_info->dma) {
-			u16 hlen = igb_get_hlen(adapter, rx_desc);
 			pci_unmap_single(pdev, buffer_info->dma,
-					 adapter->rx_ps_hdr_size,
+					 rx_ring->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
 			buffer_info->dma = 0;
-			skb_put(skb, hlen);
+			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
+				skb_put(skb, length);
+				goto send_up;
+			}
+			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
 		}
 
 		if (length) {
@@ -4582,15 +4883,14 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
 						buffer_info->page_offset,
 						length);
 
-			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
-			    (page_count(buffer_info->page) != 1))
+			if ((page_count(buffer_info->page) != 1) ||
+			    (page_to_nid(buffer_info->page) != current_node))
 				buffer_info->page = NULL;
 			else
 				get_page(buffer_info->page);
 
 			skb->len += length;
 			skb->data_len += length;
-
 			skb->truesize += length;
 		}
 
@@ -4610,11 +4910,15 @@ send_up:
 		total_bytes += skb->len;
 		total_packets++;
 
-		igb_rx_checksum_adv(adapter, staterr, skb);
+		igb_rx_checksum_adv(rx_ring, staterr, skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
+		/* skb_record_rx_queue(skb, rx_ring->queue_index); noop */
 
-		igb_receive_skb(rx_ring, staterr, rx_desc, skb);
+		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
+		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
+
+		igb_receive_skb(q_vector, skb, vlan_tag);
 
 		netdev->last_rx = jiffies;
 
@@ -4636,7 +4940,7 @@ next_desc:
 	rx_ring->next_to_clean = i;
 	cleaned_count = igb_desc_unused(rx_ring);
 
-	napi_gro_flush(&rx_ring->napi);
+	napi_gro_flush(&q_vector->napi);
 
 	if (cleaned_count)
 		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
@@ -4654,12 +4958,9 @@ next_desc:
  * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
-				     int cleaned_count)
+void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
+	struct net_device *netdev = rx_ring->netdev;
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -4669,19 +4970,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
 
-	if (adapter->rx_ps_hdr_size)
-		bufsz = adapter->rx_ps_hdr_size;
-	else
-		bufsz = adapter->rx_buffer_len;
+	bufsz = rx_ring->rx_buffer_len;
 
 	while (cleaned_count--) {
 		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
 
-		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
+		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
 			if (!buffer_info->page) {
 				buffer_info->page = alloc_page(GFP_ATOMIC);
 				if (!buffer_info->page) {
-					adapter->alloc_rx_buff_failed++;
+					rx_ring->rx_stats.alloc_failed++;
 					goto no_buffers;
 				}
 				buffer_info->page_offset = 0;
@@ -4689,16 +4987,22 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 				buffer_info->page_offset ^= PAGE_SIZE / 2;
 			}
 			buffer_info->page_dma =
-				pci_map_page(pdev, buffer_info->page,
+				pci_map_page(rx_ring->pdev, buffer_info->page,
 					     buffer_info->page_offset,
 					     PAGE_SIZE / 2,
 					     PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(buffer_info->page_dma)) {
+				buffer_info->page_dma = 0;
+				rx_ring->rx_stats.alloc_failed++;
+				goto no_buffers;
+			}
 		}
 
-		if (!buffer_info->skb) {
+		skb = buffer_info->skb;
+		if (!skb) {
 			skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
 			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_failed++;
 				goto no_buffers;
 			}
 
@@ -4709,19 +5013,26 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 			skb_reserve(skb, NET_IP_ALIGN);
 
 			buffer_info->skb = skb;
-			buffer_info->dma = pci_map_single(pdev, skb->data,
+		}
+		if (!buffer_info->dma) {
+			buffer_info->dma = pci_map_single(rx_ring->pdev,
+			                                  skb->data,
 							  bufsz,
 							  PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(buffer_info->dma)) {
+				buffer_info->dma = 0;
+				rx_ring->rx_stats.alloc_failed++;
+				goto no_buffers;
+			}
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
 		 * each write-back erases this info. */
-		if (adapter->rx_ps_hdr_size) {
+		if (bufsz < IGB_RXBUFFER_1024) {
 			rx_desc->read.pkt_addr =
 			     cpu_to_le64(buffer_info->page_dma);
 			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
 		} else {
-			rx_desc->read.pkt_addr =
-			     cpu_to_le64(buffer_info->dma);
+			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
 			rx_desc->read.hdr_addr = 0;
 		}
 
@@ -4744,7 +5055,7 @@ no_buffers:
 		 * applicable for weak-ordered memory model archs,
 		 * such as IA-64). */
 		wmb();
-		writel(i, adapter->hw.hw_addr + rx_ring->tail);
+		writel(i, rx_ring->tail);
 	}
 }
 
@@ -4840,21 +5151,15 @@ static void igb_vlan_rx_register(struct net_device *netdev,
 		ctrl |= E1000_CTRL_VME;
 		wr32(E1000_CTRL, ctrl);
 
-		/* enable VLAN receive filtering */
+		/* Disable CFI check */
 		rctl = rd32(E1000_RCTL);
 		rctl &= ~E1000_RCTL_CFIEN;
 		wr32(E1000_RCTL, rctl);
-		igb_update_mng_vlan(adapter);
 	} else {
 		/* disable VLAN tag insert/strip */
 		ctrl = rd32(E1000_CTRL);
 		ctrl &= ~E1000_CTRL_VME;
 		wr32(E1000_CTRL, ctrl);
-
-		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
-			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
-		}
 	}
 
 	igb_rlpml_set(adapter);
@@ -4869,16 +5174,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 	struct e1000_hw *hw = &adapter->hw;
 	int pf_id = adapter->vfs_allocated_count;
 
-	if ((hw->mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-	    (vid == adapter->mng_vlan_id))
-		return;
-
-	/* add vid to vlvf if sr-iov is enabled,
-	 * if that fails add directly to filter table */
-	if (igb_vlvf_set(adapter, vid, true, pf_id))
-		igb_vfta_set(hw, vid, true);
+	/* attempt to add filter to vlvf array */
+	igb_vlvf_set(adapter, vid, true, pf_id);
 
+	/* add the filter since PF can receive vlans w/o entry in vlvf */
+	igb_vfta_set(hw, vid, true);
 }
 
 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -4886,6 +5186,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	int pf_id = adapter->vfs_allocated_count;
+	s32 err;
 
 	igb_irq_disable(adapter);
 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -4893,17 +5194,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	if (!test_bit(__IGB_DOWN, &adapter->state))
 		igb_irq_enable(adapter);
 
-	if ((adapter->hw.mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
-	    (vid == adapter->mng_vlan_id)) {
-		/* release control to f/w */
-		igb_release_hw_control(adapter);
-		return;
-	}
+	/* remove vlan from VLVF table array */
+	err = igb_vlvf_set(adapter, vid, false, pf_id);
 
-	/* remove vid from vlvf if sr-iov is enabled,
-	 * if not in vlvf remove from vfta */
-	if (igb_vlvf_set(adapter, vid, false, pf_id))
+	/* if vid was not present in VLVF just remove it from table */
+	if (err)
 		igb_vfta_set(hw, vid, false);
 }
 
@@ -4923,6 +5218,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
 
 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 {
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_mac_info *mac = &adapter->hw.mac;
 
 	mac->autoneg = 0;
@@ -4946,8 +5242,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 		break;
 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
 	default:
-		dev_err(&adapter->pdev->dev,
-			"Unsupported Speed/Duplex configuration\n");
+		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -4969,9 +5264,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	if (netif_running(netdev))
 		igb_close(netdev);
 
-	igb_reset_interrupt_capability(adapter);
-
-	igb_free_queues(adapter);
+	igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
@@ -4985,7 +5278,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
 
 	if (wufc) {
 		igb_setup_rctl(adapter);
-		igb_set_multi(netdev);
+		igb_set_rx_mode(netdev);
 
 		/* turn on all-multi mode if wake on multicast is enabled */
 		if (wufc & E1000_WUFC_MC) {
@@ -5003,7 +5296,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		wr32(E1000_CTRL, ctrl);
 
 		/* Allow time for pending master requests to run */
-		igb_disable_pcie_master(&adapter->hw);
+		igb_disable_pcie_master(hw);
 
 		wr32(E1000_WUC, E1000_WUC_PME_EN);
 		wr32(E1000_WUFC, wufc);
@@ -5068,9 +5361,7 @@ static int igb_resume(struct pci_dev *pdev)
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	igb_set_interrupt_capability(adapter);
-
-	if (igb_alloc_queues(adapter)) {
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
@@ -5128,22 +5419,16 @@ static void igb_netpoll(struct net_device *netdev)
 	int i;
 
 	if (!adapter->msix_entries) {
+		struct igb_q_vector *q_vector = adapter->q_vector[0];
 		igb_irq_disable(adapter);
-		netif_rx_schedule(adapter->rx_ring[0].netdev);
+		netif_rx_schedule(q_vector->dummy_netdev);
 		return;
 	}
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *tx_ring = &adapter->tx_ring[i];
-		wr32(E1000_EIMC, tx_ring->eims_value);
-		igb_clean_tx_irq(tx_ring);
-		wr32(E1000_EIMS, tx_ring->eims_value);
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = &adapter->rx_ring[i];
-		wr32(E1000_EIMC, rx_ring->eims_value);
-		netif_rx_schedule(rx_ring->netdev);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		wr32(E1000_EIMC, q_vector->eims_value);
+		netif_rx_schedule(q_vector->dummy_netdev);
 	}
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -5236,17 +5521,44 @@ static void igb_io_resume(struct pci_dev *pdev)
 	igb_get_hw_control(adapter);
 }
 
+static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
+                             u8 qsel)
+{
+	u32 rar_low, rar_high;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+	          ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	/* Indicate to hardware the Address is Valid. */
+	rar_high |= E1000_RAH_AV;
+
+	if (hw->mac.type == e1000_82575)
+		rar_high |= E1000_RAH_POOL_1 * qsel;
+	else
+		rar_high |= E1000_RAH_POOL_1 << qsel;
+
+	wr32(E1000_RAL(index), rar_low);
+	wrfl();
+	wr32(E1000_RAH(index), rar_high);
+	wrfl();
+}
+
 static int igb_set_vf_mac(struct igb_adapter *adapter,
                           int vf, unsigned char *mac_addr)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */
-
-	igb_rar_set(hw, mac_addr, rar_entry);
+	/* VF MAC addresses start at end of receive addresses and move
+	 * towards the first, as a result a collision should not be possible */
+	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
 
 	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
 
-	igb_set_rah_pool(hw, vf, rar_entry);
+	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
 
 	return 0;
 }
@@ -5254,10 +5566,37 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
 static void igb_vmm_control(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
+	u32 reg;
 
-	/* replication is not supported for 82575 */
-	if (hw->mac.type == e1000_82575)
+	switch (hw->mac.type) {
+	case e1000_82575:
+	default:
+		/* replication is not supported for 82575 */
 		return;
+	case e1000_82576:
+		/* notify HW that the MAC is adding vlan tags */
+		reg = rd32(E1000_DTXCTL);
+		reg |= E1000_DTXCTL_VLAN_ADDED;
+		wr32(E1000_DTXCTL, reg);
+	case e1000_82580:
+		/* enable replication vlan tag stripping */
+		reg = rd32(E1000_RPLOLR);
+		reg |= E1000_RPLOLR_STRVLAN;
+		wr32(E1000_RPLOLR, reg);
+	case e1000_i350:
+		/* none of the above registers are supported by i350 */
+		break;
+	}
+
+	/* enable replication vlan tag stripping */
+	reg = rd32(E1000_RPLOLR);
+	reg |= E1000_RPLOLR_STRVLAN;
+	wr32(E1000_RPLOLR, reg);
+
+	/* notify HW that the MAC is adding vlan tags */
+	reg = rd32(E1000_DTXCTL);
+	reg |= E1000_DTXCTL_VLAN_ADDED;
+	wr32(E1000_DTXCTL, reg);
 
 	if (adapter->vfs_allocated_count) {
 		igb_vmdq_set_loopback_pf(hw, true);