From: Andy Gospodarek <gospo@redhat.com>
Date: Thu, 18 Feb 2010 20:51:49 -0500
Subject: [net] ixgbe: initial support of ixgbe PF and VF drivers
Message-id: <20100218205148.GS24578@gospo.rdu.redhat.com>
Patchwork-id: 23354
O-Subject: [RHEL5.5 PATCH v2] ixgbe and ixgbevf: initial support of ixgbe PF
	and VF drivers
Bugzilla: 525577
RH-Acked-by: Chris Wright <chrisw@redhat.com>
RH-Acked-by: David S. Miller <davem@redhat.com>

This large patch includes a rather long list of upstream commits to add
PF support to the ixgbe driver and VF support via the new ixgbevf
driver.  I've also added some IPv6 GSO infrastructure and a PCI quirk
for 82599 VF adapters.  VFs are allocated by loading ixgbe with the new
max_vfs module parameter; a guest that supports MSI-X interrupts can
then drive an assigned VF with ixgbevf.

This patch includes the following commits for ixgbevf:

        commit 257ddbdad13cd3c4f7d03b85af632c508aa8abc9
        Author: Alexey Dobriyan <adobriyan@gmail.com>

            netdev: remove HAVE_ leftovers

        commit a9ee25a2b87c8077042ce23d3231f67f026719b0
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbevf: Take action when the PF notifies the VF it is resetting.

        commit c0456c231a8b2981128cc29e211b7bad1bd997df
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbevf: Fix panics in the VF driver

        commit da6b33306801af7ee6479c177051e70842974932
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbevf: Tell network stack to stop tx when the VF detects PF reset

        commit 9010bc3364db56dd88a1851e0797e597e322ce08
        Author: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

            ixgbevf: Fix IPv6 GSO type checks

        commit 0d3592fa28965a3083d670b430bb59c19efb6abe
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbevf: Driver Makefile

        commit 92915f71201b43762fbe05dbfb1a1a0de9c8adb9
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbevf: Driver main and ethool interface module and main header

        commit cbf698dbba25741ee1b2e734a6dd4f3070a1706f
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbevf: Mailbox communication

        commit 3047f90bd515b38e62094eff90b299f379ab7243
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbevf: 82599 Virtual Function core functions and header

        commit 50d9c84ee522ce6f61cd6a7dfce1b82a260edd3c
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbevf: Macros, data structures, useful defines and registers

and the following commits for ixgbe:

        commit 8c47eaa76600cebc4869a42abb4568925ade6c47
        Author: Shannon Nelson <shannon.nelson@intel.com>

            ixgbe: update copyright dates

        commit c1fa347f20f17f14a4a1575727fa24340e8a9117
        Author: Roel Kluin <roel.kluin@gmail.com>

            e1000/e1000e/igb/igbvf/ixgb/ixgbe: Fix tests of unsigned in *_tx_map()

        commit 1ada1b1b41b0e54291aca67eed4bd42e61e7e529
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Set the correct pool when VLANs are added in SR-IOV mode.

        commit ef291b8c71ee934d077676b891494c50e17ccc28
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Remove unused emulation MAC storage from the per VF data structure.

        commit c9205697c7527173c8f8bfa9f8c9dabdbced3c49
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Allow the VF driver to be loaded before the PF driver

        commit 767081adbd920ce93e3f1cbe797d0631637f92b3
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Improve reset coordination between the PF and the VF

        commit 8e1e8a4779cb23c1d9f51e9223795e07ec54d77a
        Author: Sridhar Samudrala <sri@us.ibm.com>

            net: Fix IPv6 GSO type checks in Intel ethernet drivers

        commit e44d38e1b72a2aa7f5e7024c5da83a879355a1cc
        Author: Anton Blanchard <anton@samba.org>

            ixgbe: Fix ixgbe_tx_map error path

        commit 10eec95569513206877769ad9336591c08015cfe
        Author: John Fastabend <john.r.fastabend@intel.com>

            ixgbe: only process one ixgbe_watchdog_task at a time.

        commit bdbec4b86ee99b020e159f9bd604003a3ae3b0ab
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Fix ethtool diag test for VT mode operation

        commit 1cdd1ec8784399eef55a60887a45f3f46a1c240a
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Add SR-IOV features to main module

        commit 096a58fdec72335d9cbee94bd10b312c5f14f8af
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Add SR-IOV feature enablement code

        commit 92ed72d536445334829af65c63516557fd50f554
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Add SR-IOV specific modules to driver Makefile

        commit 17367270b1d7f529f5d72e8f2cf76672829eb335
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Add SR-IOV specific features

        commit 7f870475c8f102469df44a86387a03cb23e79b90
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Add SR-IOV register, structure and bit defines

        commit 10ca132c41ecc1b55bc22667493ab75c4f6eec0d
        Author: Greg Rose <gregory.v.rose@intel.com>

            ixgbe: Mailbox header and code module

        commit 6837e895cbfd5ce8a717f112e927d2815f341e54
        Author: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>

            ixgbe: Fix compiler warning about variable being used uninitialized

        commit ce7194d889996fb8f724148bc85a0c2c79da7b8e
        Author: hartleys <hartleys@visionengravers.com>

            drivers/net/ixgbe/ixgbe_common.c: use %pM to show MAC address

        commit 656ab8172a49d3931e64f866c080260be638c8a9
        Author: Don Skidmore <donald.c.skidmore@intel.com>

            ixgbe: fix Need to call pci_save_state after pci_restore_state

This patch also adds skb_is_gso_v6 (sketched below):

        commit eabd7e35c0061dc250fcb8b77c472cb66d770774
        Author: Brice Goglin <brice@myri.com>

            Add skb_is_gso_v6
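
For reference, the helper is a one-line predicate that sits next to
skb_is_gso() in skbuff.h; a sketch of what that upstream commit adds
(the skbuff.h hunk itself falls outside this excerpt):

        static inline int skb_is_gso_v6(const struct sk_buff *skb)
        {
                return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
        }

ixgbe_tso() below relies on it in place of the open-coded
SKB_GSO_TCPV6 check.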

and a PCI quirk needed for ixgbevf-capable devices (the shape of the
quirk is sketched after this list):

        commit c763e7b58f71833e6cb7b05493eb1adda4811b08
        Author: Dexuan Cui <dexuan.cui@intel.com>

            PCI: add Intel 82599 Virtual Function specific reset method

        commit b9c3b266411d27f1a6466c19d146d08db576bfea
        Author: Dexuan Cui <dexuan.cui@intel.com>

            PCI: support device-specific reset methods
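
For orientation, the quirk infrastructure has roughly this shape in
drivers/pci/quirks.c (reconstructed from the upstream commits above;
the quirks.c hunks fall outside this excerpt, so treat the names as a
sketch rather than the backported code):

        struct pci_dev_reset_methods {
                u16 vendor;
                u16 device;
                int (*reset)(struct pci_dev *dev, int probe);
        };

        static struct pci_dev_reset_methods pci_dev_reset_methods[] = {
                { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SRIOV_NIC,
                  reset_intel_82599_sriov_nic },
                { 0 }
        };

This gives device assignment (e.g. KVM) a reliable way to reset an
82599 VF before handing it to a guest.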

Intel and Red Hat have both done some testing, and so far things look
good.  Any further fixes that are needed will be posted for the next
snapshot.

This will satisfy the request for RHBZ 525577.
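
For orientation before the diff: the PF/VF mailbox helpers added in
ixgbe_mbx.c below compose like this.  This is a minimal, illustrative
PF-side sketch only, not the driver's actual service loop; the two
constants come from ixgbe_mbx.h, which this excerpt is truncated
before reaching:

        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
        s32 err;

        /* read a pending request from VF 0's mailbox, then flag it
         * handled and post the reply back to that VF */
        err = ixgbe_read_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
        if (!err) {
                msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
                err = ixgbe_write_mbx(hw, msgbuf, 1, 0);
        }

The posted variants (ixgbe_write_posted_mbx and friends) wrap the same
calls in the poll-for-msg/poll-for-ack loops shown at the end of this
excerpt.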


diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
new file mode 100644
index 0000000..19015de
--- /dev/null
+++ b/Documentation/networking/ixgbevf.txt
@@ -0,0 +1,90 @@
+Linux* Base Driver for Intel(R) Network Connection
+==================================================
+
+November 24, 2009
+
+Contents
+========
+
+- In This Release
+- Identifying Your Adapter
+- Known Issues/Troubleshooting
+- Support
+
+In This Release
+===============
+
+This file describes the ixgbevf Linux* Base Driver for Intel Network
+Connection.
+
+The ixgbevf driver supports 82599-based virtual function devices that can only
+be activated on kernels with CONFIG_PCI_IOV enabled.
+
+The ixgbevf driver supports virtual functions generated by the ixgbe driver
+with a max_vfs value of 1 or greater.
+
+The guest OS loading the ixgbevf driver must support MSI-X interrupts.
+
+VLANs: There is a limit of a total of 32 shared VLANs to 1 or more VFs.
+
+Identifying Your Adapter
+========================
+
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+
+    http://support.intel.com/support/network/sb/CS-008441.htm
+
+Known Issues/Troubleshooting
+============================
+
+  Unloading Physical Function (PF) Driver Causes System Reboots When VM is
+  Running and VF is Loaded on the VM
+  ------------------------------------------------------------------------
+  Do not unload the PF driver (ixgbe) while VFs are assigned to guests.
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+    http://support.intel.com
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+    http://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related
+to the issue to e1000-devel@lists.sf.net
+
+License
+=======
+
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999 - 2009 Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+The full GNU General Public License is included in this distribution in
+the file called "COPYING".
+
+Trademarks
+==========
+
+Intel, Itanium, and Pentium are trademarks or registered trademarks of
+Intel Corporation or its subsidiaries in the United States and other
+countries.
+
+* Other names and brands may be claimed as the property of others.
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index abd18ac..3aca055 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2456,6 +2456,28 @@ config IXGBE_DCA
 	  driver.  DCA is a method for warming the CPU cache before data
 	  is used, with the intent of lessening the impact of cache misses.
 
+config IXGBEVF
+       tristate "Intel(R) 82599 Virtual Function Ethernet support"
+       depends on PCI_MSI
+       ---help---
+         This driver supports Intel(R) 82599 virtual functions.  For more
+         information on how to identify your adapter, go to the Adapter &
+         Driver ID Guide at:
+
+         <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+         For general information and support, go to the Intel support
+         website at:
+
+         <http://support.intel.com>
+
+         More specific information on configuring the driver is in
+         <file:Documentation/networking/ixgbevf.txt>.
+
+         To compile this driver as a module, choose M here. The module
+         will be called ixgbevf.  MSI-X interrupt support is required
+         for this driver to work correctly.
+
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 783bc08..5dfcccf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_E1000E) += e1000e/
 obj-$(CONFIG_IGB) += igb/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IGBVF) += igbvf/
+obj-$(CONFIG_IXGBEVF) += ixgbevf/
 obj-$(CONFIG_IXGBE) += ixgbe/
 obj-$(CONFIG_IXGB) += ixgb/
 obj-$(CONFIG_CHELSIO_T1) += chelsio/
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index c0d3b6e..cd3ddfa 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -33,4 +33,5 @@
 obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
-              ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o
+              ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+              ixgbe_mbx.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index d8320c5..71b703e 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -96,6 +96,22 @@
 
 #define IXGBE_MAX_RSC_INT_RATE          162760
 
+#define IXGBE_MAX_VF_MC_ENTRIES         30
+#define IXGBE_MAX_VF_FUNCTIONS          64
+#define IXGBE_MAX_VFTA_ENTRIES          128
+#define MAX_EMULATION_MAC_ADDRS         16
+#define VMDQ_P(p)   ((p) + adapter->num_vfs)
+
+struct vf_data_storage {
+	unsigned char vf_mac_addresses[ETH_ALEN];
+	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+	u16 num_vf_mc_hashes;
+	u16 default_vf_vlan_id;
+	u16 vlans_enabled;
+	bool clear_to_send;
+	int rar;
+};
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
@@ -169,7 +185,7 @@ struct ixgbe_ring {
 enum ixgbe_ring_f_enum {
 	RING_F_NONE = 0,
 	RING_F_DCB,
-	RING_F_VMDQ,
+	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
 	RING_F_RSS,
 	RING_F_FDIR,
 #ifdef IXGBE_FCOE
@@ -181,7 +197,7 @@ enum ixgbe_ring_f_enum {
 
 #define IXGBE_MAX_DCB_INDICES   8
 #define IXGBE_MAX_RSS_INDICES  16
-#define IXGBE_MAX_VMDQ_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 64
 #define IXGBE_MAX_FDIR_INDICES 64
 #ifdef IXGBE_FCOE
 #define IXGBE_MAX_FCOE_INDICES  8
@@ -287,6 +303,8 @@ struct ixgbe_adapter {
 	/* RX */
 	struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
 	int num_rx_queues;
+	int num_rx_pools;		/* == num_rx_queues in 82598 */
+	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
 	u64 non_eop_descs;
@@ -322,13 +340,14 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19)
 #define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
 #define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK             (u32)(1 << 24)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK              (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 26)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 27)
-#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 28)
-#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 29)
+#define IXGBE_FLAG_IN_SFP_LINK_TASK             (u32)(1 << 23)
+#define IXGBE_FLAG_IN_SFP_MOD_TASK              (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 25)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 27)
+#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 29)
+#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 30)
 
 	u32 flags2;
 #define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1)
@@ -376,6 +395,11 @@ struct ixgbe_adapter {
 	u64 rsc_total_flush;
 	u32 wol;
 	u16 eeprom_version;
+
+	/* SR-IOV */
+	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+	unsigned int num_vfs;
+	struct vf_data_storage *vfinfo;
 };
 
 enum ixbge_state_t {
@@ -440,6 +464,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
                                          u16 flex_byte);
 extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
                                       u8 l4type);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 09b69f5..bca84f3 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 334cede..884ee10 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -31,6 +31,7 @@
 
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
 
 #define IXGBE_82599_MAX_TX_QUEUES 128
 #define IXGBE_82599_MAX_RX_QUEUES 128
@@ -890,7 +891,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
 static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 {
 	s32 status = 0;
-	u32 ctrl, ctrl_ext;
+	u32 ctrl;
 	u32 i;
 	u32 autoc;
 	u32 autoc2;
@@ -945,15 +946,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 		status = IXGBE_ERR_RESET_FAILED;
 		hw_dbg(hw, "Reset polling failed to complete.\n");
 	}
-	/* Clear PF Reset Done bit so PF/VF Mail Ops can work */
-	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
-	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
 
 	msleep(50);
 
-
-
 	/*
 	 * Store the original AUTOC/AUTOC2 values if they have not been
 	 * stored off yet.  Otherwise restore the stored original
@@ -1096,9 +1091,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                                 bool vlan_on)
 {
 	u32 regindex;
+	u32 vlvf_index;
 	u32 bitindex;
 	u32 bits;
 	u32 first_empty_slot;
+	u32 vt_ctl;
 
 	if (vlan > 4095)
 		return IXGBE_ERR_PARAM;
@@ -1125,76 +1122,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 
 
 	/* Part 2
-	 * If the vind is set
+	 * If VT mode is set
 	 *   Either vlan_on
 	 *     make sure the vlan is in VLVF
 	 *     set the vind bit in the matching VLVFB
 	 *   Or !vlan_on
 	 *     clear the pool bit and possibly the vind
 	 */
-	if (vind) {
-		/* find the vlanid or the first empty slot */
-		first_empty_slot = 0;
-
-		for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
-			bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
-			if (!bits && !first_empty_slot)
-				first_empty_slot = regindex;
-			else if ((bits & 0x0FFF) == vlan)
-				break;
-		}
+	vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
+		goto out;
 
-		if (regindex >= IXGBE_VLVF_ENTRIES) {
-			if (first_empty_slot)
-				regindex = first_empty_slot;
-			else {
-				hw_dbg(hw, "No space in VLVF.\n");
-				goto out;
-			}
+	/* find the vlanid or the first empty slot */
+	first_empty_slot = 0;
+
+	for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
+		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
+		if (!bits && !first_empty_slot)
+			first_empty_slot = vlvf_index;
+		else if ((bits & 0x0FFF) == vlan)
+			break;
+	}
+
+	if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
+		if (first_empty_slot)
+			vlvf_index = first_empty_slot;
+		else {
+			hw_dbg(hw, "No space in VLVF.\n");
+			goto out;
 		}
+	}
 
-		if (vlan_on) {
-			/* set the pool bit */
-			if (vind < 32) {
-				bits = IXGBE_READ_REG(hw,
-				                    IXGBE_VLVFB(regindex * 2));
-				bits |= (1 << vind);
-				IXGBE_WRITE_REG(hw,
-				              IXGBE_VLVFB(regindex * 2), bits);
-			} else {
-				bits = IXGBE_READ_REG(hw,
-				              IXGBE_VLVFB((regindex * 2) + 1));
-				bits |= (1 << vind);
-				IXGBE_WRITE_REG(hw,
-				        IXGBE_VLVFB((regindex * 2) + 1), bits);
-			}
+	if (vlan_on) {
+		/* set the pool bit */
+		if (vind < 32) {
+			bits = IXGBE_READ_REG(hw,
+					      IXGBE_VLVFB(vlvf_index * 2));
+			bits |= (1 << vind);
+			IXGBE_WRITE_REG(hw,
+					IXGBE_VLVFB(vlvf_index * 2), bits);
 		} else {
-			/* clear the pool bit */
-			if (vind < 32) {
-				bits = IXGBE_READ_REG(hw,
-				     IXGBE_VLVFB(regindex * 2));
+			bits = IXGBE_READ_REG(hw,
+				IXGBE_VLVFB((vlvf_index * 2) + 1));
+			bits |= (1 << (vind - 32));
+			IXGBE_WRITE_REG(hw,
+				IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+		}
+	} else {
+		/* clear the pool bit */
+		if (vind < 32) {
+			bits = IXGBE_READ_REG(hw,
+					      IXGBE_VLVFB(vlvf_index * 2));
 			bits &= ~(1 << vind);
-				IXGBE_WRITE_REG(hw,
-				              IXGBE_VLVFB(regindex * 2), bits);
-				bits |= IXGBE_READ_REG(hw,
-				              IXGBE_VLVFB((regindex * 2) + 1));
-			} else {
-				bits = IXGBE_READ_REG(hw,
-				              IXGBE_VLVFB((regindex * 2) + 1));
-				bits &= ~(1 << vind);
-				IXGBE_WRITE_REG(hw,
-				        IXGBE_VLVFB((regindex * 2) + 1), bits);
-				bits |= IXGBE_READ_REG(hw,
-				                    IXGBE_VLVFB(regindex * 2));
-			}
+			IXGBE_WRITE_REG(hw,
+					IXGBE_VLVFB(vlvf_index * 2), bits);
+			bits |= IXGBE_READ_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1));
+		} else {
+			bits = IXGBE_READ_REG(hw,
+				IXGBE_VLVFB((vlvf_index * 2) + 1));
+			bits &= ~(1 << (vind - 32));
+			IXGBE_WRITE_REG(hw,
+				IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+			bits |= IXGBE_READ_REG(hw,
+					       IXGBE_VLVFB(vlvf_index * 2));
 		}
+	}
 
-		if (bits)
-			IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
-			                (IXGBE_VLVF_VIEN | vlan));
-		else
-			IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
+	if (bits) {
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+				(IXGBE_VLVF_VIEN | vlan));
+		/* if bits is non-zero then some pools/VFs are still
+		 * using this VLAN ID.  Force the VFTA entry to on */
+		bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+		bits |= (1 << bitindex);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
 	}
+	else
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
 
 out:
 	return 0;
@@ -2658,4 +2663,5 @@ struct ixgbe_info ixgbe_82599_info = {
 	.mac_ops                = &mac_ops_82599,
 	.eeprom_ops             = &eeprom_ops_82599,
 	.phy_ops                = &phy_ops_82599,
+	.mbx_ops                = &mbx_ops_82599,
 };
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index b2cb328..ba975d0 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -1272,19 +1272,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
 		/* Get the MAC address from the RAR0 for later reference */
 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
 
-		hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
-		       hw->mac.addr[0], hw->mac.addr[1],
-		       hw->mac.addr[2]);
-		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
-		       hw->mac.addr[4], hw->mac.addr[5]);
+		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
 	} else {
 		/* Setup the receive address. */
 		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
-		hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
-		       hw->mac.addr[0], hw->mac.addr[1],
-		       hw->mac.addr[2]);
-		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
-		       hw->mac.addr[4], hw->mac.addr[5]);
+		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
 
 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 	}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 4f15f04..f2c7e83 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 62dfd24..e2cae70 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3712212..9423fa6 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -394,9 +394,9 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	if (data)
-		adapter->flags2 |= IXGBE_FLAG_RX_CSUM_ENABLED;
+		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
 	else
-		adapter->flags2 &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 
 	if (netif_running(netdev))
 		ixgbe_reinit_locked(adapter);
@@ -1829,11 +1829,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
 		if (ixgbe_intr_test(adapter, &data[2]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
+		/* If SRIOV or VMDq is enabled then skip MAC
+		 * loopback diagnostic. */
+		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+				      IXGBE_FLAG_VMDQ_ENABLED)) {
+			DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
+				"mode\n");
+			data[3] = 0;
+			goto skip_loopback;
+		}
+
 		ixgbe_reset(adapter);
 		DPRINTK(HW, INFO, "loopback testing starting\n");
 		if (ixgbe_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
+skip_loopback:
 		ixgbe_reset(adapter);
 
 		clear_bit(__IXGBE_TESTING, &adapter->state);
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 053b5a9..dca8549 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 73bcdd4..1f76a12 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -44,6 +44,7 @@
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
+#include "ixgbe_sriov.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
@@ -51,7 +52,7 @@ static const char ixgbe_driver_string[] =
 
 #define DRV_VERSION "2.0.44-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
 	[board_82598] = &ixgbe_82598_info,
@@ -123,6 +124,13 @@ static struct notifier_block dca_notifier = {
 };
 #endif
 
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+                 "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
@@ -130,6 +138,41 @@ MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gcr;
+	u32 gpie;
+	u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+	/* disable iov and allow time for transactions to clear */
+	pci_disable_sriov(adapter->pdev);
+#endif
+
+	/* turn off device IOV mode */
+	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+	/* set default pool back to 0 */
+	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+	/* take a breather then clean up driver data */
+	msleep(100);
+	if (adapter->vfinfo)
+		kfree(adapter->vfinfo);
+	adapter->vfinfo = NULL;
+
+	adapter->num_vfs = 0;
+	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 {
 	u32 ctrl_ext;
@@ -220,10 +263,6 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              *tx_buffer_info)
 {
 	if (tx_buffer_info->dma) {
-		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
-		               tx_buffer_info->length, PCI_DMA_TODEVICE);
 		if (tx_buffer_info->mapped_as_page)
 			pci_unmap_page(adapter->pdev,
 				       tx_buffer_info->dma,
@@ -265,10 +304,12 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
 		int reg_idx = tx_ring->reg_idx;
 		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		switch (adapter->hw.mac.type) {
+		case ixgbe_mac_82598EB:
 			tc = reg_idx >> 2;
 			txoff = IXGBE_TFCS_TXOFF0;
-		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+			break;
+		case ixgbe_mac_82599EB:
 			tc = 0;
 			txoff = IXGBE_TFCS_TXOFF;
 			if (dcb_i == 8) {
@@ -287,6 +328,9 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
 						tc += (reg_idx - 96) >> 4;
 				}
 			}
+			break;
+		default:
+			tc = 0;
 		}
 		txoff <<= tc;
 	}
@@ -1029,7 +1073,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
 	/* set up to autoclear timer, and the vectors */
 	mask = IXGBE_EIMS_ENABLE_MASK;
-	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+	if (adapter->num_vfs)
+		mask &= ~(IXGBE_EIMS_OTHER |
+			  IXGBE_EIMS_MAILBOX |
+			  IXGBE_EIMS_LSC);
+	else
+		mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 }
 
@@ -1258,6 +1307,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data, struct pt_regs *regs)
 	if (eicr & IXGBE_EICR_LSC)
 		ixgbe_check_lsc(adapter);
 
+	if (eicr & IXGBE_EICR_MAILBOX)
+		ixgbe_msg_task(adapter);
+
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		ixgbe_check_fan_failure(adapter, eicr);
 
@@ -1795,6 +1847,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 		mask |= IXGBE_EIMS_ECC;
 		mask |= IXGBE_EIMS_GPI_SDP1;
 		mask |= IXGBE_EIMS_GPI_SDP2;
+		if (adapter->num_vfs)
+			mask |= IXGBE_EIMS_MAILBOX;
 	}
 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
 	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1803,6 +1857,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 	ixgbe_irq_enable_queues(adapter, ~0);
 	IXGBE_WRITE_FLUSH(&adapter->hw);
+
+	if (adapter->num_vfs > 32) {
+		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+	}
 }
 
 /**
@@ -1933,6 +1992,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+		if (adapter->num_vfs > 32)
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
 	}
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2017,18 +2078,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		u32 rttdcs;
+		u32 mask;
 
 		/* disable the arbiter while setting MTQC */
 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
-		/* We enable 8 traffic classes, DCB only */
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-			IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
-			                IXGBE_MTQC_8TC_8TQ));
-		else
+		/* set transmit pool layout */
+		mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+		switch (adapter->flags & mask) {
+
+		case (IXGBE_FLAG_SRIOV_ENABLED):
+			IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+					(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+			break;
+
+		case (IXGBE_FLAG_DCB_ENABLED):
+			/* We enable 8 traffic classes, DCB only */
+			IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+				      (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+			break;
+
+		default:
 			IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+			break;
+		}
 
 		/* re-eable the arbiter */
 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2087,12 +2162,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCB
 				 | IXGBE_FLAG_DCB_ENABLED
 #endif
+				 | IXGBE_FLAG_SRIOV_ENABLED
 				);
 
 	switch (mask) {
 	case (IXGBE_FLAG_RSS_ENABLED):
 		mrqc = IXGBE_MRQC_RSSEN;
 		break;
+	case (IXGBE_FLAG_SRIOV_ENABLED):
+		mrqc = IXGBE_MRQC_VMDQEN;
+		break;
 #ifdef CONFIG_IXGBE_DCB
 	case (IXGBE_FLAG_DCB_ENABLED):
 		mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2173,7 +2252,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	int rx_buf_len;
 
 	/* Decide whether to use packet split mode or not */
-	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+	/* Do not use packet split if we're in SR-IOV Mode */
+	if (!adapter->num_vfs)
+		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
 	/* Set the RX buffer length according to the mode */
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2185,7 +2266,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 			              IXGBE_PSRTYPE_IPV4HDR |
 			              IXGBE_PSRTYPE_IPV6HDR |
 			              IXGBE_PSRTYPE_L2HDR;
-			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+			IXGBE_WRITE_REG(hw,
+					IXGBE_PSRTYPE(adapter->num_vfs),
+					psrtype);
 		}
 	} else {
 		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2271,6 +2354,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
 	}
 
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		u32 vt_reg_bits;
+		u32 reg_offset, vf_shift;
+		u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+		vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+			| IXGBE_VT_CTL_REPLEN;
+		vt_reg_bits |= (adapter->num_vfs <<
+				IXGBE_VT_CTL_POOL_SHIFT);
+		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+		vf_shift = adapter->num_vfs % 32;
+		reg_offset = adapter->num_vfs / 32;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+		/* Enable only the PF's pool for Tx/Rx */
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+		ixgbe_set_vmolr(hw, adapter->num_vfs);
+	}
+
 	/* Program MRQC for the distribution of queues */
 	mrqc = ixgbe_setup_mrqc(adapter);
 
@@ -2302,6 +2409,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 
+	if (adapter->num_vfs) {
+		u32 reg;
+
+		/* Map PF MAC address in RAR Entry 0 to first pool
+		 * following VFs */
+		hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+		/* Set up VF register offsets for selected VT Mode, i.e.
+		 * 64 VFs for SR-IOV */
+		reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+		reg |= IXGBE_GCR_EXT_SRIOV;
+		IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+	}
+
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2340,15 +2461,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
+	int pool_ndx = adapter->num_vfs;
 
 	/* add VID to filter table */
-	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
 }
 
 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
+	int pool_ndx = adapter->num_vfs;
 
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		ixgbe_irq_disable(adapter);
@@ -2359,7 +2482,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 		ixgbe_irq_enable(adapter);
 
 	/* remove VID from filter table */
-	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
 }
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2442,7 +2565,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
  * responsible for configuring the hardware for proper unicast, multicast and
  * promiscuous mode.
  **/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -2490,6 +2613,8 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
 		addr_list = netdev->mc_list->dmi_addr;
 	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
 	                                ixgbe_addr_list_itr);
+	if (adapter->num_vfs)
+		ixgbe_restore_vf_multicasts(adapter);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2730,6 +2855,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	u32 txdctl, rxdctl, mhadd;
 	u32 dmatxctl;
 	u32 gpie;
+	u32 ctrl_ext;
 
 	ixgbe_get_hw_control(adapter);
 
@@ -2742,6 +2868,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 			/* MSI only */
 			gpie = 0;
 		}
+		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+			gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+			gpie |= IXGBE_GPIE_VTMODE_64;
+		}
 		/* XXX: to interrupt immediately for EICS writes, enable this */
 		/* gpie |= IXGBE_GPIE_EIMEN; */
 		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2816,6 +2946,18 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			int wait_loop = 10;
+			/* poll for Tx Enable ready */
+			do {
+				msleep(1);
+				txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+			} while (--wait_loop &&
+			         !(txdctl & IXGBE_TXDCTL_ENABLE));
+			if (!wait_loop)
+				DPRINTK(DRV, ERR, "Could not enable "
+				        "Tx Queue %d\n", j);
+		}
 	}
 
 	for (i = 0; i < num_rx_rings; i++) {
@@ -2904,6 +3046,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 	adapter->link_check_timeout = jiffies;
 	mod_timer(&adapter->watchdog_timer, jiffies);
+
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
 	return 0;
 }
 
@@ -2952,7 +3100,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	}
 
 	/* reprogram the RAR[0] in case user changed it. */
-	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+			    IXGBE_RAH_AV);
 }
 
 /**
@@ -3084,6 +3233,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
+	/* disable receive for all VFs and wait one second */
+	if (adapter->num_vfs) {
+		for (i = 0 ; i < adapter->num_vfs; i++)
+			adapter->vfinfo[i].clear_to_send = 0;
+
+		/* ping all the active vfs to let them know we are going down */
+		ixgbe_ping_all_vfs(adapter);
+		/* Disable all VFTE/VFRE TX/RX */
+		ixgbe_disable_tx_rx(adapter);
+	}
+
 	/* disable receives */
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3326,6 +3486,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 }
 
 #endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+	return false;
+}
+
 /*
  * ixgbe_set_num_queues: Allocate queues for device, feature dependant
  * @adapter: board private structure to initialize
@@ -3339,6 +3512,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
+	/* Start with base case */
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_pools = adapter->num_rx_queues;
+	adapter->num_rx_queues_per_pool = 1;
+
+	if (ixgbe_set_sriov_queues(adapter))
+		return;
+
 #ifdef IXGBE_FCOE
 	if (ixgbe_set_fcoe_queues(adapter))
 		goto done;
@@ -3606,6 +3788,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 
 #endif /* IXGBE_FCOE */
 /**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+	adapter->rx_ring[0].reg_idx = adapter->num_vfs * 2;
+	adapter->tx_ring[0].reg_idx = adapter->num_vfs * 2;
+	if (adapter->num_vfs)
+		return true;
+	else
+		return false;
+}
+
+/**
  * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
  *
@@ -3622,6 +3822,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 	adapter->rx_ring[0].reg_idx = 0;
 	adapter->tx_ring[0].reg_idx = 0;
 
+	if (ixgbe_cache_ring_sriov(adapter))
+		return;
+
 #ifdef IXGBE_FCOE
 	if (ixgbe_cache_ring_fcoe(adapter))
 		return;
@@ -3731,6 +3934,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 	adapter->atr_sample_rate = 0;
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		ixgbe_disable_sriov(adapter);
+
 	ixgbe_set_num_queues(adapter);
 
 	err = pci_enable_msi(adapter->pdev);
@@ -4415,6 +4621,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
+	/*
+	 * pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
 
 	err = pci_enable_device_mem(pdev);
 	if (err) {
@@ -4822,6 +5033,8 @@ static void ixgbe_fdir_reinit_task(struct ixgbe_adapter *adapter)
 	netif_start_queue(adapter->netdev);
 }
 
+static DEFINE_MUTEX(ixgbe_watchdog_lock);
+
 /**
  * ixgbe_watchdog_task - worker thread to bring link up
  * @work: pointer to work_struct containing our data
@@ -4830,13 +5043,16 @@ static void ixgbe_watchdog_task(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 link_speed = adapter->link_speed;
-	bool link_up = adapter->link_up;
+	u32 link_speed;
+	bool link_up;
 	int i;
 	struct ixgbe_ring *tx_ring;
 	int some_tx_pending = 0;
 
-	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+	mutex_lock(&ixgbe_watchdog_lock);
+
+	link_up = adapter->link_up;
+	link_speed = adapter->link_speed;
 
 	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
 		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -4927,7 +5143,7 @@ static void ixgbe_watchdog_task(struct ixgbe_adapter *adapter)
 	}
 
 	ixgbe_update_stats(adapter);
-	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+	mutex_unlock(&ixgbe_watchdog_lock);
 }
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
@@ -4958,7 +5174,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 			                                         iph->daddr, 0,
 			                                         IPPROTO_TCP,
 			                                         0);
-		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+		} else if (skb_is_gso_v6(skb)) {
 			ipv6_hdr(skb)->payload_len = 0;
 			tcp_hdr(skb)->check =
 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -5136,9 +5352,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 		total -= size;
 		offset += size;
 		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
+
+		if (len) {
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
 	}
 
 	for (f = 0; f < nr_frags; f++) {
@@ -5149,6 +5368,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 		offset = frag->page_offset;
 
 		while (len) {
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
@@ -5167,15 +5390,11 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			total -= size;
 			offset += size;
 			count++;
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
 		}
+		if (total == 0)
+			break;
 	}
-	if (i == 0)
-		i = tx_ring->count - 1;
-	else
-		i = i - 1;
+
 	tx_ring->tx_buffer_info[i].skb = skb;
 	tx_ring->tx_buffer_info[first].next_to_watch = i;
 
@@ -5188,19 +5407,19 @@ dma_error:
 	tx_buffer_info->dma = 0;
 	tx_buffer_info->time_stamp = 0;
 	tx_buffer_info->next_to_watch = 0;
-	count--;
+	if (count)
+		count--;
 
 	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count >= 0) {
-		count--;
-		i--;
-		if (i < 0)
+	while (count--) {
+		if (i == 0)
 			i += tx_ring->count;
+		i--;
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
 	}
 
-	return count;
+	return 0;
 }
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5489,7 +5708,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+			    IXGBE_RAH_AV);
 
 	return 0;
 }
@@ -5524,6 +5744,61 @@ static void ixgbe_netpoll(struct net_device *netdev)
 #endif
 
 
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+			   const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+	struct ixgbe_hw *hw = &adapter->hw;
+	int err;
+
+	if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+		return;
+
+	/* The 82599 supports up to 64 VFs per physical function
+	 * but this implementation limits allocation to 63 so that
+	 * basic networking resources are still available to the
+	 * physical function
+	 */
+	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+	if (err) {
+		DPRINTK(PROBE, ERR,
+			"Failed to enable PCI sriov: %d\n", err);
+		goto err_novfs;
+	}
+	/* If call to enable VFs succeeded then allocate memory
+	 * for per VF control structures.
+	 */
+	adapter->vfinfo =
+		kcalloc(adapter->num_vfs,
+			sizeof(struct vf_data_storage), GFP_KERNEL);
+	if (adapter->vfinfo) {
+		/* Now that we're sure SR-IOV is enabled
+		 * and memory allocated set up the mailbox parameters
+		 */
+		ixgbe_init_mbx_params_pf(hw);
+		memcpy(&hw->mbx.ops, ii->mbx_ops,
+		       sizeof(hw->mbx.ops));
+
+		/* Disable RSC when in SR-IOV mode */
+		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+				     IXGBE_FLAG2_RSC_ENABLED);
+		return;
+	}
+
+	/* Oh oh */
+	DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for VF "
+		"Data Storage - SRIOV disabled\n");
+	pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+	adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
+
 /**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -5703,6 +5978,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		goto err_sw_init;
 	}
 
+	ixgbe_probe_vf(adapter, ii);
+
 	netdev->features = NETIF_F_SG |
 	                   NETIF_F_HW_CSUM |
 	                   NETIF_F_HW_VLAN_TX |
@@ -5713,6 +5990,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	netdev->features |= NETIF_F_TSO6;
 	netdev->features |= NETIF_F_GRO;
 
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+				    IXGBE_FLAG_DCB_ENABLED);
 	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
 		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
 
@@ -5779,16 +6059,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	hw->mac.ops.get_bus_info(hw);
 
 	/* print bus type/speed/width info */
-	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
-	         "%02x:%02x:%02x:%02x:%02x:%02x\n",
+	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
 	        ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
 	         (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
 	        ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
 	         (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
 	         (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
 	         "Unknown"),
-	        netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-	        netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+	         netdev->dev_addr);
 	ixgbe_read_pba_num_generic(hw, &part_num);
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
 		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
@@ -5842,6 +6120,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		ixgbe_setup_dca(adapter);
 	}
 #endif
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+			adapter->num_vfs);
+		for (i = 0; i < adapter->num_vfs; i++)
+			ixgbe_vf_configuration(pdev, (i | 0x10000000));
+	}
+
 	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
 	cards_found++;
 	return 0;
@@ -5851,6 +6136,8 @@ err_register:
 	ixgbe_clear_interrupt_scheme(adapter);
 err_sw_init:
 err_eeprom:
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		ixgbe_disable_sriov(adapter);
 	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
 	del_timer_sync(&adapter->sfp_timer);
 	cancel_delayed_work(&adapter->sfp_task);
@@ -5915,6 +6202,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);
 
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		ixgbe_disable_sriov(adapter);
+
 	ixgbe_clear_interrupt_scheme(adapter);
 
 	ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
new file mode 100644
index 0000000..d75f914
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,479 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
+
+/**
+ *  ixgbe_read_mbx - Reads a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	/* limit read to size of mailbox */
+	if (size > mbx->size)
+		size = mbx->size;
+
+	if (mbx->ops.read)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_write_mbx - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = 0;
+
+	if (size > mbx->size)
+		ret_val = IXGBE_ERR_MBX;
+
+	else if (mbx->ops.write)
+		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_msg - checks to see if someone sent us mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (mbx->ops.check_for_msg)
+		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_ack - checks to see if someone sent us ACK
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (mbx->ops.check_for_ack)
+		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_rst - checks to see if other side has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (mbx->ops.check_for_rst)
+		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_msg)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->usec_delay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
+out:
+	return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_ack)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->usec_delay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
+out:
+	return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (!mbx->ops.read)
+		goto out;
+
+	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+	/* if a message notification arrived read it, otherwise we timed out */
+	if (!ret_val)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                           u16 mbx_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
+		goto out;
+
+	/* send msg */
+	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Setup the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	mbx->ops.read_posted = ixgbe_read_posted_mbx;
+	mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+	u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (mbvficr & mask) {
+		ret_val = 0;
+		IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+	u32 vf_bit = vf_number % 16;
+
+	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+	                            index)) {
+		ret_val = 0;
+		hw->mbx.stats.reqs++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+	s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+	u32 vf_bit = vf_number % 16;
+
+	if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+	                            index)) {
+		ret_val = 0;
+		hw->mbx.stats.acks++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+	u32 reg_offset = (vf_number < 32) ? 0 : 1;
+	u32 vf_shift = vf_number % 32;
+	u32 vflre = 0;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (hw->mac.type == ixgbe_mac_82599EB)
+		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+
+	if (vflre & (1 << vf_shift)) {
+		ret_val = 0;
+		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+		hw->mbx.stats.rsts++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+	u32 p2v_mailbox;
+
+	/* Take ownership of the buffer */
+	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+	/* verify the PF now owns the mailbox buffer */
+	p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+	if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+		ret_val = 0;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                              u16 vf_number)
+{
+	s32 ret_val;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
+		goto out_no_write;
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	ixgbe_check_for_msg_pf(hw, vf_number);
+	ixgbe_check_for_ack_pf(hw, vf_number);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+	/* Interrupt VF to tell it a message has been sent and release buffer */
+	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+	return ret_val;
+
+}
+
+/**
+ *  ixgbe_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                             u16 vf_number)
+{
+	s32 ret_val;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+	/* Acknowledge the message and release buffer */
+	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	if (hw->mac.type != ixgbe_mac_82599EB)
+		return;
+
+	mbx->timeout = 0;
+	mbx->usec_delay = 0;
+
+	mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+}
+
+struct ixgbe_mbx_operations mbx_ops_82599 = {
+	.read                   = ixgbe_read_mbx_pf,
+	.write                  = ixgbe_write_mbx_pf,
+	.read_posted            = ixgbe_read_posted_mbx,
+	.write_posted           = ixgbe_write_posted_mbx,
+	.check_for_msg          = ixgbe_check_for_msg_pf,
+	.check_for_ack          = ixgbe_check_for_ack_pf,
+	.check_for_rst          = ixgbe_check_for_rst_pf,
+};
+
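For reference, the posted read/write pair above layers a blocking
request/reply handshake on the raw mailbox ops.  A minimal usage sketch
follows; the function and the one-word message are hypothetical
illustrations (only ixgbe_write_posted_mbx/ixgbe_read_posted_mbx and the
constants come from this patch), and it assumes mbx->timeout and
mbx->usec_delay are non-zero, since ixgbe_write_posted_mbx refuses to
send with a zero timeout:

	/* illustrative only -- not part of this patch */
	static s32 example_mbx_transaction(struct ixgbe_hw *hw, u16 mbx_id)
	{
		u32 msg[IXGBE_VFMAILBOX_SIZE];
		s32 err;

		msg[0] = IXGBE_PF_CONTROL_MSG;	/* hypothetical request */
		err = ixgbe_write_posted_mbx(hw, msg, 1, mbx_id);
		if (err)
			return err;	/* no ack: lock failed or we timed out */

		/* wait for and copy out the other side's reply */
		return ixgbe_read_posted_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE,
		                             mbx_id);
	}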
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
new file mode 100644
index 0000000..f0d6c9d
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE        16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX               -100
+
+#define IXGBE_VFMAILBOX             0x002FC
+#define IXGBE_VFMBMEM               0x00200
+
+#define IXGBE_PFMAILBOX(x)          (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn)          (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS   0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is IXGBE_PF_*.
+ * Message ACKs are the original message value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
+                                               * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
+                                               * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
+                                                 clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT    16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET            0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD     3
+
+#define IXGBE_PF_CONTROL_MSG      0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY   500  /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+extern struct ixgbe_mbx_operations mbx_ops_82599;
+
+#endif /* _IXGBE_MBX_H_ */
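To make the message layout above concrete: the low 16 bits of word 0
carry the message type, bits 23:16 carry per-message extra info, and the
top bits carry the ACK/NACK/CTS flags.  A sketch of composing an
IXGBE_VF_SET_VLAN request (the helper is hypothetical; the field
placement mirrors how ixgbe_rcv_msg_from_vf() later in this patch
unpacks it):

	/* illustrative only -- not part of this patch */
	static void example_build_set_vlan(u32 *msgbuf, u16 vid, bool add)
	{
		msgbuf[0] = IXGBE_VF_SET_VLAN |
		            ((add ? 1 : 0) << IXGBE_VT_MSGINFO_SHIFT);
		msgbuf[1] = vid;   /* PF masks with IXGBE_VLVF_VLANID_MASK */
	}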
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 7408af0..0614916 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9b700f5..9cf5f3b 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
new file mode 100644
index 0000000..85d8b16
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,361 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#include "ixgbe.h"
+
+#include "ixgbe_sriov.h"
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+			    int entries, u16 *hash_list, u32 vf)
+{
+	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+	int i;
+
+	/* only so many hash values supported */
+	entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+	/*
+	 * salt away the number of multicast addresses assigned
+	 * to this VF for later use, to restore them when the PF
+	 * multicast list changes
+	 */
+	vfinfo->num_vf_mc_hashes = entries;
+
+	/*
+	 * VFs are limited to using the MTA hash table for their multicast
+	 * addresses
+	 */
+	for (i = 0; i < entries; i++) {
+		vfinfo->vf_mc_hashes[i] = hash_list[i];
+	}
+
+	/* Flush and reset the mta with the new values */
+	ixgbe_set_rx_mode(adapter->netdev);
+
+	return 0;
+}
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct vf_data_storage *vfinfo;
+	int i, j;
+	u32 vector_bit;
+	u32 vector_reg;
+	u32 mta_reg;
+
+	for (i = 0; i < adapter->num_vfs; i++) {
+		vfinfo = &adapter->vfinfo[i];
+		for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+			hw->addr_ctrl.mta_in_use++;
+			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
+			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
+			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+			mta_reg |= (1 << vector_bit);
+			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+		}
+	}
+}
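For reference, the arithmetic above maps a stored 12-bit multicast hash
onto the 128 32-bit MTA registers: the upper seven bits select the
register, the lower five select the bit.  A worked example with an
assumed hash value:

	hash       = 0x0ABC                    (illustrative value)
	vector_reg = (0x0ABC >> 5) & 0x7F = 85 (register IXGBE_MTA(85))
	vector_bit =  0x0ABC & 0x1F       = 28 (bit 28 of that register)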
+
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
+{
+	u32 ctrl;
+
+	/* Check if global VLAN already set, if not set it */
+	ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+	if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
+		/* enable VLAN tag insert/strip */
+		ctrl |= IXGBE_VLNCTRL_VFE;
+		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+	}
+
+	return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+}
+
+
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+{
+	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+	vmolr |= (IXGBE_VMOLR_AUPE |
+		  IXGBE_VMOLR_ROMPE |
+		  IXGBE_VMOLR_ROPE |
+		  IXGBE_VMOLR_BAM);
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+}
+
+inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	/* reset offloads to defaults */
+	ixgbe_set_vmolr(hw, vf);
+
+
+	/* reset multicast table array for vf */
+	adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+	/* Flush and reset the mta with the new values */
+	ixgbe_set_rx_mode(adapter->netdev);
+
+	if (adapter->vfinfo[vf].rar > 0) {
+		adapter->hw.mac.ops.clear_rar(&adapter->hw,
+		                              adapter->vfinfo[vf].rar);
+		adapter->vfinfo[vf].rar = -1;
+	}
+}
+
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+                          int vf, unsigned char *mac_addr)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
+	                                              vf, IXGBE_RAH_AV);
+	if (adapter->vfinfo[vf].rar < 0) {
+		DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
+		return -1;
+	}
+
+	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+
+	return 0;
+}
+
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+	unsigned char vf_mac_addr[6];
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	unsigned int vfn = (event_mask & 0x3f);
+
+	bool enable = ((event_mask & 0x10000000U) != 0);
+
+	if (enable) {
+		random_ether_addr(vf_mac_addr);
+		DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
+		       "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
+		       vfn,
+		       vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
+		       vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+		/*
+		 * Store away the VF's "permanent" MAC address; the VF
+		 * will ask for it later.
+		 */
+		memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+	}
+
+	return 0;
+}
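The event_mask encoding used above deserves a note: the low six bits
select the VF and bit 28 signals enable.  A hypothetical caller,
assuming that encoding (which is what the masks inside the function
imply):

	/* illustrative only: report that VF 3 has been enabled */
	err = ixgbe_vf_configuration(pdev, 3 | 0x10000000U);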
+
+inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 reg;
+	u32 reg_offset, vf_shift;
+
+	vf_shift = vf % 32;
+	reg_offset = vf / 32;
+
+	/* enable transmit and receive for vf */
+	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+	reg |= (1 << vf_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+	reg |= (1 << vf_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+	ixgbe_vf_reset_event(adapter, vf);
+}
+
+static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
+	u32 msgbuf[mbx_size];
+	struct ixgbe_hw *hw = &adapter->hw;
+	s32 retval;
+	int entries;
+	u16 *hash_list;
+	int add, vid;
+
+	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+
+	if (retval)
+		printk(KERN_ERR "Error receiving message from VF\n");
+
+	/* this is a message we already processed, do nothing */
+	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+		return retval;
+
+	/*
+	 * until the vf completes a virtual function reset, it should not be
+	 * allowed to start any configuration.
+	 */
+
+	if (msgbuf[0] == IXGBE_VF_RESET) {
+		unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+		u8 *addr = (u8 *)(&msgbuf[1]);
+		DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
+		adapter->vfinfo[vf].clear_to_send = false;
+		ixgbe_vf_reset_msg(adapter, vf);
+		adapter->vfinfo[vf].clear_to_send = true;
+
+		/* reply to reset with ack and vf mac address */
+		msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+		memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+		/*
+		 * Piggyback the multicast filter type so VF can compute the
+		 * correct vectors
+		 */
+		msgbuf[3] = hw->mac.mc_filter_type;
+		ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+		return retval;
+	}
+
+	if (!adapter->vfinfo[vf].clear_to_send) {
+		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+		ixgbe_write_mbx(hw, msgbuf, 1, vf);
+		return retval;
+	}
+
+	switch ((msgbuf[0] & 0xFFFF)) {
+	case IXGBE_VF_SET_MAC_ADDR:
+		{
+			u8 *new_mac = ((u8 *)(&msgbuf[1]));
+			if (is_valid_ether_addr(new_mac))
+				ixgbe_set_vf_mac(adapter, vf, new_mac);
+			else
+				retval = -1;
+		}
+		break;
+	case IXGBE_VF_SET_MULTICAST:
+		entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+		          >> IXGBE_VT_MSGINFO_SHIFT;
+		hash_list = (u16 *)&msgbuf[1];
+		retval = ixgbe_set_vf_multicasts(adapter, entries,
+		                                 hash_list, vf);
+		break;
+	case IXGBE_VF_SET_LPE:
+		WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+		break;
+	case IXGBE_VF_SET_VLAN:
+		add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+		      >> IXGBE_VT_MSGINFO_SHIFT;
+		vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+		retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+		break;
+	default:
+		DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
+		retval = IXGBE_ERR_MBX;
+		break;
+	}
+
+	/* notify the VF of the results of what it sent us */
+	if (retval)
+		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+	else
+		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+	ixgbe_write_mbx(hw, msgbuf, 1, vf);
+
+	return retval;
+}
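For clarity, the reset reply built above is the 4-word
(IXGBE_VF_PERMADDR_MSG_LEN) message the VF reads back; this just
describes what the code constructs, not new protocol:

	word 0     IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK
	words 1-2  the six bytes of the VF's permanent MAC address
	word 3     hw->mac.mc_filter_type (IXGBE_VF_MC_TYPE_WORD)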
+
+static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 msg = IXGBE_VT_MSGTYPE_NACK;
+
+	/* if device isn't clear to send it shouldn't be reading either */
+	if (!adapter->vfinfo[vf].clear_to_send)
+		ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_msg_task(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vf;
+
+	for (vf = 0; vf < adapter->num_vfs; vf++) {
+		/* process any reset requests */
+		if (!ixgbe_check_for_rst(hw, vf))
+			ixgbe_vf_reset_event(adapter, vf);
+
+		/* process any messages pending */
+		if (!ixgbe_check_for_msg(hw, vf))
+			ixgbe_rcv_msg_from_vf(adapter, vf);
+
+		/* process any acks */
+		if (!ixgbe_check_for_ack(hw, vf))
+			ixgbe_rcv_ack_from_vf(adapter, vf);
+	}
+}
+
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	/* disable transmit and receive for all vfs */
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+}
+
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 ping;
+	int i;
+
+	for (i = 0 ; i < adapter->num_vfs; i++) {
+		ping = IXGBE_PF_CONTROL_MSG;
+		if (adapter->vfinfo[i].clear_to_send)
+			ping |= IXGBE_VT_MSGTYPE_CTS;
+		ixgbe_write_mbx(hw, &ping, 1, i);
+	}
+}
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
new file mode 100644
index 0000000..51d1106
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_SRIOV_H_
+#define _IXGBE_SRIOV_H_
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+                            int entries, u16 *hash_list, u32 vf);
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_msg_task(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+                     int vf, unsigned char *mac_addr);
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_SRIOV_H_ */
+
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 4db3aba..f9d80a9 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -277,6 +277,7 @@
 #define IXGBE_DTXCTL    0x07E00
 
 #define IXGBE_DMATXCTL  0x04A80
+#define IXGBE_PFDTXGSWC     0x08220
 #define IXGBE_DTXMXSZRQ     0x08100
 #define IXGBE_DTXTCPFLGL    0x04A88
 #define IXGBE_DTXTCPFLGH    0x04A8C
@@ -287,6 +288,8 @@
 #define IXGBE_DMATXCTL_NS       0x2 /* No Snoop LSO hdr buffer */
 #define IXGBE_DMATXCTL_GDV      0x8 /* Global Double VLAN */
 #define IXGBE_DMATXCTL_VT_SHIFT 16  /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
 #define IXGBE_DCA_TXCTRL(_i)    (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
 /* Tx DCA Control register : 128 of these (0-127) */
 #define IXGBE_DCA_TXCTRL_82599(_i)  (0x0600C + ((_i) * 0x40))
@@ -497,6 +500,7 @@
 /* DCB registers */
 #define IXGBE_RTRPCS      0x02430
 #define IXGBE_RTTDCS      0x04900
+#define IXGBE_RTTDCS_ARBDIS     0x00000040 /* DCB arbiter disable */
 #define IXGBE_RTTPCS      0x0CD00
 #define IXGBE_RTRUP2TC    0x03020
 #define IXGBE_RTTUP2TC    0x0C800
@@ -730,6 +734,13 @@
 #define IXGBE_GCR_CMPL_TMOUT_RESEND     0x00010000
 #define IXGBE_GCR_CAP_VER2              0x00040000
 
+#define IXGBE_GCR_EXT_MSIX_EN           0x80000000
+#define IXGBE_GCR_EXT_VT_MODE_16        0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32        0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64        0x00000003
+#define IXGBE_GCR_EXT_SRIOV             (IXGBE_GCR_EXT_MSIX_EN | \
+                                         IXGBE_GCR_EXT_VT_MODE_64)
+
 /* Time Sync Registers */
 #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
 #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1093,6 +1104,8 @@
 /* VFRE bitmask */
 #define IXGBE_VFRE_ENABLE_ALL   0xFFFFFFFF
 
+#define IXGBE_VF_INIT_TIMEOUT   200 /* Number of retries to clear RSTI */
+
 /* RDHMPN and TDHMPN bitmasks */
 #define IXGBE_RDHMPN_RDICADDR       0x007FF800
 #define IXGBE_RDHMPN_RDICRDREQ      0x00800000
@@ -1323,6 +1336,7 @@
 /* VLAN pool filtering masks */
 #define IXGBE_VLVF_VIEN         0x80000000  /* filter is valid */
 #define IXGBE_VLVF_ENTRIES      64
+#define IXGBE_VLVF_VLANID_MASK  0x00000FFF
 
 #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.1q protocol */
 
@@ -1871,6 +1885,12 @@
 #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT  0x000D /* Priority in upper 3 of 16 */
 #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT  IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
 
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number)   (vf_number >> 4)
+#define IXGBE_MBVFICR(_i)                (0x00710 + (_i * 4))
+#define IXGBE_VFLRE(_i)                  (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i)                 (0x00700 + (_i * 4))
+
 /* Little Endian defines */
 #ifndef __le32
 #define __le32  u32
@@ -2495,6 +2515,37 @@ struct ixgbe_phy_info {
 	bool                            multispeed_fiber;
 };
 
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+	s32 (*init_params)(struct ixgbe_hw *hw);
+	s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+	s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+	s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+	s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+	s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+	s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+	s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+	struct ixgbe_mbx_operations ops;
+	struct ixgbe_mbx_stats stats;
+	u32 timeout;
+	u32 usec_delay;
+	u32 v2p_mailbox;
+	u16 size;
+};
+
 struct ixgbe_hw {
 	u8 __iomem			*hw_addr;
 	void				*back;
@@ -2504,6 +2555,7 @@ struct ixgbe_hw {
 	struct ixgbe_phy_info		phy;
 	struct ixgbe_eeprom_info	eeprom;
 	struct ixgbe_bus_info		bus;
+	struct ixgbe_mbx_info		mbx;
 	u16				device_id;
 	u16				vendor_id;
 	u16				subsystem_device_id;
@@ -2518,6 +2570,7 @@ struct ixgbe_info {
 	struct ixgbe_mac_operations	*mac_ops;
 	struct ixgbe_eeprom_operations	*eeprom_ops;
 	struct ixgbe_phy_operations	*phy_ops;
+	struct ixgbe_mbx_operations	*mbx_ops;
 };
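With the mailbox structures above in place, PF-side setup amounts to
pointing hw->mbx.ops at the 82599 table and initializing the
parameters.  A minimal sketch; where exactly the main driver performs
this hookup is outside this hunk, so treat it as illustrative:

	/* illustrative only -- the real wiring lives in the
	 * 82599 init path */
	hw->mbx.ops = mbx_ops_82599;      /* struct copy of the op table */
	ixgbe_init_mbx_params_pf(hw);     /* timeout, delay, size, stats */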
 
 
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
new file mode 100644
index 0000000..dd4e0d2
--- /dev/null
+++ b/drivers/net/ixgbevf/Makefile
@@ -0,0 +1,38 @@
+################################################################################
+#
+# Intel 82599 Virtual Function driver
+# Copyright(c) 1999 - 2009 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82599 VF ethernet driver
+#
+
+obj-$(CONFIG_IXGBEVF) += ixgbevf.o
+
+ixgbevf-objs := vf.o \
+                mbx.o \
+                ethtool.o \
+                ixgbevf_main.o
+
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
new file mode 100644
index 0000000..c44fdb0
--- /dev/null
+++ b/drivers/net/ixgbevf/defines.h
@@ -0,0 +1,292 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_DEFINES_H_
+#define _IXGBEVF_DEFINES_H_
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82599_VF           0x10ED
+
+#define IXGBE_VF_IRQ_CLEAR_MASK         7
+#define IXGBE_VF_MAX_TX_QUEUES          1
+#define IXGBE_VF_MAX_RX_QUEUES          1
+#define IXGBE_ETH_LENGTH_OF_ADDRESS     6
+
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_1GB_FULL       0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL      0x0080
+
+#define IXGBE_CTRL_RST          0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE     0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE     0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP          0x40000000
+#define IXGBE_LINKS_SPEED       0x20000000
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE  8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY   1024
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_ALLOC_VAL    0x80 /* Interrupt Allocation valid */
+
+#define IXGBE_VF_INIT_TIMEOUT   200 /* Number of retries to clear RSTI */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN       0x00000001  /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
+
+/* DCA Control */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR    0x00000010
+#define IXGBE_PSRTYPE_UDPHDR    0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR   0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR   0x00000200
+#define IXGBE_PSRTYPE_L2HDR     0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT     10     /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT        22
+#define IXGBE_SRRCTL_RDMTS_MASK         0x01C00000
+#define IXGBE_SRRCTL_DROP_EN            0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK      0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK      0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY    0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK      0x0E000000
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD         0x01    /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP        0x02    /* End of Packet */
+#define IXGBE_RXD_STAT_FLM        0x04    /* FDir Match */
+#define IXGBE_RXD_STAT_VP         0x08    /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK   0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT  0x00000004
+#define IXGBE_RXD_STAT_UDPCS      0x10    /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS       0x20    /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS       0x40    /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF        0x80    /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV       0x100   /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT       0x200   /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV       0x400   /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT     0x800   /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_TS         0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP       0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB         0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK        0x8000  /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE          0x01    /* CRC Error */
+#define IXGBE_RXD_ERR_LE          0x02    /* Length Error */
+#define IXGBE_RXD_ERR_PE          0x08    /* Packet Error */
+#define IXGBE_RXD_ERR_OSE         0x10    /* Oversize Error */
+#define IXGBE_RXD_ERR_USE         0x20    /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE        0x40    /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE         0x80    /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK     0xFFF00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT    20         /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_HBO      0x00800000 /* Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE       0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE       0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE       0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE      0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE      0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE     0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE      0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK    0x0FFF  /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK        0xE000  /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT       13
+#define IXGBE_RXD_CFI_MASK        0x1000  /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT       12
+
+#define IXGBE_RXDADV_STAT_DD            IXGBE_RXD_STAT_DD  /* Done */
+#define IXGBE_RXDADV_STAT_EOP           IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM           IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP            IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK          0x000FFFFF /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS        0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT        0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP  0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP    0x00000030 /* 11: Ctxt w/ DDP */
+
+#define IXGBE_RXDADV_RSSTYPE_MASK       0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK       0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX    0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK     0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK        0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT       17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT    5
+#define IXGBE_RXDADV_SPLITHEADER_EN     0x00001000
+#define IXGBE_RXDADV_SPH                0x8000
+
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+				      IXGBE_RXD_ERR_CE |  \
+				      IXGBE_RXD_ERR_LE |  \
+				      IXGBE_RXD_ERR_PE |  \
+				      IXGBE_RXD_ERR_OSE | \
+				      IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+					 IXGBE_RXDADV_ERR_CE |  \
+					 IXGBE_RXDADV_ERR_LE |  \
+					 IXGBE_RXDADV_ERR_PE |  \
+					 IXGBE_RXDADV_ERR_OSE | \
+					 IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS     0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+	struct {
+		__le64 buffer_addr;      /* Address of descriptor's data buf */
+		__le32 cmd_type_len;
+		__le32 olinfo_status;
+	} read;
+	struct {
+		__le64 rsvd;       /* Reserved */
+		__le32 nxtseq_seed;
+		__le32 status;
+	} wb;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+	struct {
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			union {
+				__le32 data;
+				struct {
+					__le16 pkt_info; /* RSS, Pkt type */
+					__le16 hdr_info; /* Splithdr, hdrlen */
+				} hs_rss;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				struct {
+					__le16 ip_id; /* IP id */
+					__le16 csum; /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error; /* ext status/error */
+			__le16 length; /* Packet length */
+			__le16 vlan; /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTYP_MASK  0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT  0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA  0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP   IXGBE_TXD_CMD_EOP  /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS  IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS    IXGBE_TXD_CMD_RS   /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DEXT  IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE   IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE   0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD    IXGBE_TXD_STAT_DD  /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4      0x00000400  /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6      0x00000000  /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP   0x00000000  /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP   0x00000800  /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP  0x00001000  /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT  4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_POPTS_SHIFT      8  /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+				 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+				 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT      16  /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE    0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_MAILBOX      0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EICS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+				IXGBE_EIMS_RTX_QUEUE       | \
+				IXGBE_EIMS_MAILBOX         | \
+				IXGBE_EIMS_OTHER)
+
+#define IXGBE_EITR_CNT_WDIS     0x80000000
+
+/* Error Codes */
+#define IXGBE_ERR_INVALID_MAC_ADDR              -1
+#define IXGBE_ERR_RESET_FAILED                  -2
+
+#endif /* _IXGBEVF_DEFINES_H_ */
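As a quick illustration of the advanced receive descriptor above,
software detects a completed receive by polling the DD bit in the
writeback status and then pulls the packet fields from the writeback
format.  A hedged sketch using only the types and bits defined here
plus the kernel's le*_to_cpu helpers:

	/* illustrative only -- not part of this patch */
	static bool example_rx_desc_done(union ixgbe_adv_rx_desc *rx_desc,
	                                 u16 *len, u16 *vlan)
	{
		u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

		if (!(staterr & IXGBE_RXD_STAT_DD))
			return false;		/* hardware not done yet */

		*len  = le16_to_cpu(rx_desc->wb.upper.length);
		*vlan = le16_to_cpu(rx_desc->wb.upper.vlan);
		return true;
	}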
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 0000000..4cc5cb2
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,709 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+	int base_stat_offset;
+};
+
+#define IXGBEVF_STAT(m, b)  sizeof(((struct ixgbevf_adapter *)0)->m), \
+			    offsetof(struct ixgbevf_adapter, m),      \
+			    offsetof(struct ixgbevf_adapter, b)
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)},
+	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)},
+	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)},
+	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)},
+	{"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)},
+	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)},
+	{"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)},
+	{"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)},
+	{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)},
+	{"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)},
+};
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)",
+	"Link test   (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+				struct ethtool_cmd *ecmd)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 link_speed = 0;
+	bool link_up;
+
+	ecmd->supported = SUPPORTED_10000baseT_Full;
+	ecmd->autoneg = AUTONEG_DISABLE;
+	ecmd->transceiver = XCVR_DUMMY1;
+	ecmd->port = -1;
+
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+	if (link_up) {
+		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+			       SPEED_10000 : SPEED_1000;
+		ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	return 0;
+}
+
+static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
+}
+
+static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	if (data)
+		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+	else
+		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+	if (netif_running(netdev)) {
+		if (!adapter->dev_closed)
+			ixgbevf_reinit_locked(adapter);
+	} else {
+		ixgbevf_reset(adapter);
+	}
+
+	return 0;
+}
+
+static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
+{
+	if (data) {
+		netdev->features |= NETIF_F_TSO;
+		netdev->features |= NETIF_F_TSO6;
+	} else {
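+		/* briefly stop the queue so no new frames are queued for
+		 * TSO while the feature flags change underneath the
+		 * transmit path */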
+		netif_stop_queue(netdev);
+		netdev->features &= ~NETIF_F_TSO;
+		netdev->features &= ~NETIF_F_TSO6;
+		netif_start_queue(netdev);
+	}
+	return 0;
+}
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+static char *ixgbevf_reg_names[] = {
+	"IXGBE_VFCTRL",
+	"IXGBE_VFSTATUS",
+	"IXGBE_VFLINKS",
+	"IXGBE_VFRXMEMWRAP",
+	"IXGBE_VFRTIMER",
+	"IXGBE_VTEICR",
+	"IXGBE_VTEICS",
+	"IXGBE_VTEIMS",
+	"IXGBE_VTEIMC",
+	"IXGBE_VTEIAC",
+	"IXGBE_VTEIAM",
+	"IXGBE_VTEITR",
+	"IXGBE_VTIVAR",
+	"IXGBE_VTIVAR_MISC",
+	"IXGBE_VFRDBAL0",
+	"IXGBE_VFRDBAL1",
+	"IXGBE_VFRDBAH0",
+	"IXGBE_VFRDBAH1",
+	"IXGBE_VFRDLEN0",
+	"IXGBE_VFRDLEN1",
+	"IXGBE_VFRDH0",
+	"IXGBE_VFRDH1",
+	"IXGBE_VFRDT0",
+	"IXGBE_VFRDT1",
+	"IXGBE_VFRXDCTL0",
+	"IXGBE_VFRXDCTL1",
+	"IXGBE_VFSRRCTL0",
+	"IXGBE_VFSRRCTL1",
+	"IXGBE_VFPSRTYPE",
+	"IXGBE_VFTDBAL0",
+	"IXGBE_VFTDBAL1",
+	"IXGBE_VFTDBAH0",
+	"IXGBE_VFTDBAH1",
+	"IXGBE_VFTDLEN0",
+	"IXGBE_VFTDLEN1",
+	"IXGBE_VFTDH0",
+	"IXGBE_VFTDH1",
+	"IXGBE_VFTDT0",
+	"IXGBE_VFTDT1",
+	"IXGBE_VFTXDCTL0",
+	"IXGBE_VFTXDCTL1",
+	"IXGBE_VFTDWBAL0",
+	"IXGBE_VFTDWBAL1",
+	"IXGBE_VFTDWBAH0",
+	"IXGBE_VFTDWBAH1"
+};
+
+static int ixgbevf_get_regs_len(struct net_device *netdev)
+{
+	return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+}
+
+static void ixgbevf_get_regs(struct net_device *netdev,
+			     struct ethtool_regs *regs,
+			     void *p)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u32 regs_len = ixgbevf_get_regs_len(netdev);
+	u8 i;
+
+	memset(p, 0, regs_len);
+
+	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+	/* General Registers */
+	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
+	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
+	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
+	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
+
+	/* Interrupt */
+	/* don't read EICR because it can clear interrupt causes, instead
+	 * read EICS which is a shadow but doesn't clear EICR */
+	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
+	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
+	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
+	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
+	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+
+	/* Receive DMA */
+	for (i = 0; i < 2; i++)
+		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+
+	/* Receive */
+	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
+
+	/* Transmit */
+	for (i = 0; i < 2; i++)
+		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
+	for (i = 0; i < 2; i++)
+		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
+
+	for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
+		hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
+}
+
+static void ixgbevf_get_drvinfo(struct net_device *netdev,
+				struct ethtool_drvinfo *drvinfo)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
+	strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
+
+	strlcpy(drvinfo->fw_version, "N/A", 4);
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+static void ixgbevf_get_ringparam(struct net_device *netdev,
+				  struct ethtool_ringparam *ring)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbevf_ring *tx_ring = adapter->tx_ring;
+	struct ixgbevf_ring *rx_ring = adapter->rx_ring;
+
+	ring->rx_max_pending = IXGBEVF_MAX_RXD;
+	ring->tx_max_pending = IXGBEVF_MAX_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = rx_ring->count;
+	ring->tx_pending = tx_ring->count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbevf_set_ringparam(struct net_device *netdev,
+				 struct ethtool_ringparam *ring)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
+	int i, err;
+	u32 new_rx_count, new_tx_count;
+	bool need_tx_update = false;
+	bool need_rx_update = false;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
+	new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
+	new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
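+	/*
+	 * Illustrative example: a request of 1500 Rx descriptors is within
+	 * [IXGBEVF_MIN_RXD, IXGBEVF_MAX_RXD] and is then rounded up to the
+	 * required descriptor multiple, e.g. to 1504 if that multiple is 8.
+	 */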
+
+	if ((new_tx_count == adapter->tx_ring->count) &&
+	    (new_rx_count == adapter->rx_ring->count)) {
+		/* nothing to do */
+		return 0;
+	}
+
+	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (new_tx_count != adapter->tx_ring_count) {
+		tx_ring = kcalloc(adapter->num_tx_queues,
+				  sizeof(struct ixgbevf_ring), GFP_KERNEL);
+		if (!tx_ring) {
+			err = -ENOMEM;
+			goto err_setup;
+		}
+		memcpy(tx_ring, adapter->tx_ring,
+		       adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			tx_ring[i].count = new_tx_count;
+			err = ixgbevf_setup_tx_resources(adapter,
+							 &tx_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					ixgbevf_free_tx_resources(adapter,
+								  &tx_ring[i]);
+				}
+				kfree(tx_ring);
+				goto err_setup;
+			}
+			tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+		}
+		need_tx_update = true;
+	}
+
+	if (new_rx_count != adapter->rx_ring_count) {
+		rx_ring = kcalloc(adapter->num_rx_queues,
+				  sizeof(struct ixgbevf_ring), GFP_KERNEL);
+		if (!rx_ring) {
+			/* bail out before the memcpy below dereferences a
+			 * NULL ring; err_rx_setup still applies any pending
+			 * tx update */
+			err = -ENOMEM;
+			goto err_rx_setup;
+		}
+		memcpy(rx_ring, adapter->rx_ring,
+		       adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			rx_ring[i].count = new_rx_count;
+			err = ixgbevf_setup_rx_resources(adapter,
+							 &rx_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					ixgbevf_free_rx_resources(adapter,
+								  &rx_ring[i]);
+				}
+				kfree(rx_ring);
+				goto err_rx_setup;
+			}
+			rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+		}
+		need_rx_update = true;
+	}
+
+err_rx_setup:
+	/* if rings need to be updated, here's the place to do it in one shot */
+	if (need_tx_update || need_rx_update) {
+		if (netif_running(netdev))
+			ixgbevf_down(adapter);
+	}
+
+	/* tx */
+	if (need_tx_update) {
+		kfree(adapter->tx_ring);
+		adapter->tx_ring = tx_ring;
+		tx_ring = NULL;
+		adapter->tx_ring_count = new_tx_count;
+	}
+
+	/* rx */
+	if (need_rx_update) {
+		kfree(adapter->rx_ring);
+		adapter->rx_ring = rx_ring;
+		rx_ring = NULL;
+		adapter->rx_ring_count = new_rx_count;
+	}
+
+	/* success! */
+	err = 0;
+	if (netif_running(netdev))
+		ixgbevf_up(adapter);
+
+err_setup:
+	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+	return err;
+}
+
+static int ixgbevf_get_stats_count(struct net_device *dev)
+{
+	return IXGBEVF_STATS_LEN;
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+				      struct ethtool_stats *stats, u64 *data)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	ixgbevf_update_stats(adapter);
+	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)adapter +
+			ixgbe_gstrings_stats[i].stat_offset;
+		char *b = (char *)adapter +
+			ixgbe_gstrings_stats[i].base_stat_offset;
+		data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
+			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
+			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
+			    sizeof(u64)) ? *(u64 *)b : *(u32 *)b);
+	}
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+				u8 *data)
+{
+	char *p = (char *)data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *ixgbe_gstrings_test,
+		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	bool link_up;
+	u32 link_speed = 0;
+	*data = 0;
+
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+	if (!link_up)
+		*data = 1;
+
+	return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+	u16 reg;
+	u8  array_len;
+	u8  test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables.  We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST	1
+#define SET_READ_TEST	2
+#define WRITE_NO_TEST	3
+#define TABLE32_TEST	4
+#define TABLE64_TEST_LO	5
+#define TABLE64_TEST_HI	6
+
+/* default VF register test */
+static struct ixgbevf_reg_test reg_test_vf[] = {
+	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	{ 0, 0, 0, 0 }
+};
+
+#define REG_PATTERN_TEST(R, M, W)                                             \
+{                                                                             \
+	u32 pat, val, before;                                                 \
+	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {                       \
+		before = readl(adapter->hw.hw_addr + R);                      \
+		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
+		val = readl(adapter->hw.hw_addr + R);                         \
+		if (val != (_test[pat] & W & M)) {                            \
+			hw_dbg(&adapter->hw,                                  \
+			"pattern test reg %04X failed: got "                  \
+			"0x%08X expected 0x%08X\n",                           \
+			R, val, (_test[pat] & W & M));                        \
+			*data = R;                                            \
+			writel(before, adapter->hw.hw_addr + R);              \
+			return 1;                                             \
+		}                                                             \
+		writel(before, adapter->hw.hw_addr + R);                      \
+	}                                                                     \
+}
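+/*
+ * Worked example: for the VFRDBAL row above (mask and write value both
+ * 0xFFFFFF80), writing the pattern 0x5A5A5A5A actually writes 0x5A5A5A00,
+ * and the read-back must equal pattern & write & mask = 0x5A5A5A00 or the
+ * test fails and the offending register offset is returned in *data.  The
+ * original register value is restored either way.
+ */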
+
+#define REG_SET_AND_CHECK(R, M, W)                                            \
+{                                                                             \
+	u32 val, before;                                                      \
+	before = readl(adapter->hw.hw_addr + R);                              \
+	writel((W & M), (adapter->hw.hw_addr + R));                           \
+	val = readl(adapter->hw.hw_addr + R);                                 \
+	if ((W & M) != (val & M)) {                                           \
+		printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
+				 "expected 0x%08X\n", R, (val & M), (W & M)); \
+		*data = R;                                                    \
+		writel(before, (adapter->hw.hw_addr + R));                    \
+		return 1;                                                     \
+	}                                                                     \
+	writel(before, (adapter->hw.hw_addr + R));                            \
+}
+
+static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+	struct ixgbevf_reg_test *test;
+	u32 i;
+
+	test = reg_test_vf;
+
+	/*
+	 * Perform the register test, looping through the test table
+	 * until we either fail or reach the null entry.
+	 */
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				REG_PATTERN_TEST(test->reg + (i * 0x40),
+						test->mask,
+						test->write);
+				break;
+			case SET_READ_TEST:
+				REG_SET_AND_CHECK(test->reg + (i * 0x40),
+						test->mask,
+						test->write);
+				break;
+			case WRITE_NO_TEST:
+				writel(test->write,
+				       (adapter->hw.hw_addr + test->reg)
+				       + (i * 0x40));
+				break;
+			case TABLE32_TEST:
+				REG_PATTERN_TEST(test->reg + (i * 4),
+						test->mask,
+						test->write);
+				break;
+			case TABLE64_TEST_LO:
+				REG_PATTERN_TEST(test->reg + (i * 8),
+						test->mask,
+						test->write);
+				break;
+			case TABLE64_TEST_HI:
+				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+						test->mask,
+						test->write);
+				break;
+			}
+		}
+		test++;
+	}
+
+	*data = 0;
+	return *data;
+}
+
+static void ixgbevf_diag_test(struct net_device *netdev,
+			      struct ethtool_test *eth_test, u64 *data)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	bool if_running = netif_running(netdev);
+
+	set_bit(__IXGBEVF_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		/* Offline tests */
+
+		hw_dbg(&adapter->hw, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result */
+		if (ixgbevf_link_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			dev_close(netdev);
+		else
+			ixgbevf_reset(adapter);
+
+		hw_dbg(&adapter->hw, "register testing starting\n");
+		if (ixgbevf_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		ixgbevf_reset(adapter);
+
+		clear_bit(__IXGBEVF_TESTING, &adapter->state);
+		if (if_running)
+			dev_open(netdev);
+	} else {
+		hw_dbg(&adapter->hw, "online testing starting\n");
+		/* Online tests */
+		if (ixgbevf_link_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* the register test is not run online; report pass */
+		data[0] = 0;
+
+		clear_bit(__IXGBEVF_TESTING, &adapter->state);
+	}
+	msleep_interruptible(4 * 1000);
+}
+
+static int ixgbevf_nway_reset(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev)) {
+		if (!adapter->dev_closed)
+			ixgbevf_reinit_locked(adapter);
+	}
+
+	return 0;
+}
+
+static struct ethtool_ops ixgbevf_ethtool_ops = {
+	.get_settings           = ixgbevf_get_settings,
+	.get_drvinfo            = ixgbevf_get_drvinfo,
+	.get_regs_len           = ixgbevf_get_regs_len,
+	.get_regs               = ixgbevf_get_regs,
+	.nway_reset             = ixgbevf_nway_reset,
+	.get_link               = ethtool_op_get_link,
+	.get_ringparam          = ixgbevf_get_ringparam,
+	.set_ringparam          = ixgbevf_set_ringparam,
+	.get_rx_csum            = ixgbevf_get_rx_csum,
+	.set_rx_csum            = ixgbevf_set_rx_csum,
+	.get_tx_csum            = ethtool_op_get_tx_csum,
+	.set_tx_csum            = ethtool_op_set_tx_csum,
+	.get_sg                 = ethtool_op_get_sg,
+	.set_sg                 = ethtool_op_set_sg,
+	.get_msglevel           = ixgbevf_get_msglevel,
+	.set_msglevel           = ixgbevf_set_msglevel,
+	.get_tso                = ethtool_op_get_tso,
+	.set_tso                = ixgbevf_set_tso,
+	.self_test              = ixgbevf_diag_test,
+	.get_stats_count        = ixgbevf_get_stats_count,
+	.get_strings            = ixgbevf_get_strings,
+	.get_ethtool_stats      = ixgbevf_get_ethtool_stats,
+};
+
+void ixgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 0000000..f84b7e0
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,319 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_H_
+#define _IXGBEVF_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+
+#include "vf.h"
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbevf_tx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	u16 length;
+	u16 next_to_watch;
+	u16 mapped_as_page;
+};
+
+struct ixgbevf_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct page *page;
+	dma_addr_t page_dma;
+	unsigned int page_offset;
+};
+
+struct ixgbevf_ring {
+	struct ixgbevf_adapter *adapter;  /* backlink */
+	void *desc;			/* descriptor ring memory */
+	dma_addr_t dma;			/* phys. address of descriptor ring */
+	unsigned int size;		/* length in bytes */
+	unsigned int count;		/* number of descriptors */
+	unsigned int next_to_use;
+	unsigned int next_to_clean;
+
+	int queue_index; /* needed for multiqueue queue management */
+	union {
+		struct ixgbevf_tx_buffer *tx_buffer_info;
+		struct ixgbevf_rx_buffer *rx_buffer_info;
+	};
+
+	u16 head;
+	u16 tail;
+
+	unsigned int total_bytes;
+	unsigned int total_packets;
+
+	u16 reg_idx; /* holds the special value that gets the hardware register
+		      * offset associated with this ring, which is different
+		      * for DCB and RSS modes */
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+	/* cpu for tx queue */
+	int cpu;
+#endif
+
+	u64 v_idx; /* maps directly to the index for this ring in the hardware
+		    * vector array, can also be used for finding the bit in EICR
+		    * and friends that represents the vector for this ring */
+
+	u16 work_limit;                /* max work per interrupt */
+	u16 rx_buf_len;
+};
+
+enum ixgbevf_ring_f_enum {
+	RING_F_NONE = 0,
+	RING_F_ARRAY_SIZE      /* must be last in enum set */
+};
+
+struct ixgbevf_ring_feature {
+	int indices;
+	int mask;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define MAX_RX_QUEUES 1
+#define MAX_TX_QUEUES 1
+
+#define IXGBEVF_DEFAULT_TXD   1024
+#define IXGBEVF_DEFAULT_RXD   512
+#define IXGBEVF_MAX_TXD       4096
+#define IXGBEVF_MIN_TXD       64
+#define IXGBEVF_MAX_RXD       4096
+#define IXGBEVF_MIN_RXD       64
+
+/* Supported Rx Buffer Sizes */
+#define IXGBEVF_RXBUFFER_64    64     /* Used for packet split */
+#define IXGBEVF_RXBUFFER_128   128    /* Used for packet split */
+#define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048  2048
+#define IXGBEVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
+
+#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
+#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO		(u32)(1 << 5)
+#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbevf_q_vector {
+	struct ixgbevf_adapter *adapter;
+	struct net_device *dummy_netdev;
+	struct napi_struct napi;
+	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+	u8 rxr_count;     /* Rx ring count assigned to this vector */
+	u8 txr_count;     /* Tx ring count assigned to this vector */
+	u8 tx_itr;
+	u8 rx_itr;
+	u32 eitr;
+	int v_idx;	  /* vector index in list */
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways.  The lowest value
+ * supported by all of the ixgbe hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
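+/*
+ * Worked example: 8000 ints/s maps to 1000000000 / (8000 * 256) = 488 in
+ * register units, and 488 register units map back to roughly 8004 ints/s,
+ * which is why the same macro serves both directions.
+ */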
+
+#define IXGBE_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
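+/*
+ * Illustrative example: with count = 64, next_to_clean = 4 and
+ * next_to_use = 10, IXGBE_DESC_UNUSED() yields 64 + 4 - 10 - 1 = 57;
+ * one slot is always held back so the ring never appears completely full.
+ */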
+
+#define IXGBE_RX_DESC_ADV(R, i)	    \
+	(&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i)	    \
+	(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i)	    \
+	(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT 2
+
+#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+/* board specific private data structure */
+struct ixgbevf_adapter {
+	struct timer_list watchdog_timer;
+#ifdef NETIF_F_HW_VLAN_TX
+	struct vlan_group *vlgrp;
+#endif
+	u16 bd_number;
+	struct work_struct reset_task;
+	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+	/* Interrupt Throttle Rate */
+	u32 itr_setting;
+	u16 eitr_low;
+	u16 eitr_high;
+
+	/* TX */
+	struct ixgbevf_ring *tx_ring;	/* One per active queue */
+	int num_tx_queues;
+	u64 restart_queue;
+	u64 hw_csum_tx_good;
+	u64 lsc_int;
+	u64 hw_tso_ctxt;
+	u64 hw_tso6_ctxt;
+	u32 tx_timeout_count;
+	bool detect_tx_hung;
+
+	/* RX */
+	struct ixgbevf_ring *rx_ring;	/* One per active queue */
+	int num_rx_queues;
+	int num_rx_pools;               /* == num_rx_queues in 82598 */
+	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
+	u64 hw_csum_rx_error;
+	u64 hw_rx_no_dma_resources;
+	u64 hw_csum_rx_good;
+	u64 non_eop_descs;
+	int num_msix_vectors;
+	int max_msix_q_vectors;         /* true count of q_vectors for device */
+	struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+	struct msix_entry *msix_entries;
+
+	u64 rx_hdr_split;
+	u32 alloc_rx_page_failed;
+	u32 alloc_rx_buff_failed;
+
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
+	 */
+	u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED              (u32)(1)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
+#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL                   (u32)(1 << 4)
+#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
+#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
+#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 8)
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	/* structs defined in ixgbe_vf.h */
+	struct ixgbe_hw hw;
+	u16 msg_enable;
+	struct ixgbevf_hw_stats stats;
+	u64 zero_base;
+	/* Interrupt Throttle Rate */
+	u32 eitr_param;
+
+	unsigned long state;
+	u32 *config_space;
+	u64 tx_busy;
+	unsigned int tx_ring_count;
+	unsigned int rx_ring_count;
+
+	u32 link_speed;
+	bool link_up;
+	unsigned long link_check_timeout;
+
+	struct work_struct watchdog_task;
+	bool netdev_registered;
+	bool dev_closed;
+};
+
+enum ixgbevf_state_t {
+	__IXGBEVF_TESTING,
+	__IXGBEVF_RESETTING,
+	__IXGBEVF_DOWN
+};
+
+enum ixgbevf_boards {
+	board_82599_vf,
+};
+
+extern struct ixgbevf_info ixgbevf_vf_info;
+extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_version[];
+
+extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
+				      struct ixgbevf_ring *);
+extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
+				      struct ixgbevf_ring *);
+extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
+				      struct ixgbevf_ring *);
+extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
+				      struct ixgbevf_ring *);
+extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+
+#endif
+extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+
+#ifdef DEBUG
+extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+#define hw_dbg(hw, format, arg...) \
+	printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
+#else
+#define hw_dbg(hw, format, arg...) do {} while (0)
+#endif
+
+#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 0000000..3fb2c09
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3605 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include "ixgbevf.h"
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN               4
+#endif
+
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+							int vlan_id)
+{
+	return vg->vlan_devices[vlan_id];
+}
+
+static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
+					 struct net_device *dev)
+{
+	vg->vlan_devices[vlan_id] = dev;
+}
+
+char ixgbevf_driver_name[] = "ixgbevf";
+static const char ixgbevf_driver_string[] =
+	"Intel(R) 82599 Virtual Function";
+
+#define DRV_VERSION "1.0.0-k0"
+const char ixgbevf_driver_version[] = DRV_VERSION;
+static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+
+static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+	[board_82599_vf] = &ixgbevf_vf_info,
+};
+
+/* ixgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id ixgbevf_pci_tbl[] = {
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
+	board_82599_vf},
+
+	/* required last entry */
+	{0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+/* forward decls */
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+			       u32 itr_reg);
+
+static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
+					   struct ixgbevf_ring *rx_ring,
+					   u32 val)
+{
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+}
+
+/**
+ * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ **/
+static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
+			     u8 queue, u8 msix_vector)
+{
+	u32 ivar, index;
+	struct ixgbe_hw *hw = &adapter->hw;
+	if (direction == -1) {
+		/* other causes */
+		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+		ivar &= ~0xFF;
+		ivar |= msix_vector;
+		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+	} else {
+		/* tx or rx causes */
+		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+		index = ((16 * (queue & 1)) + (8 * direction));
+		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+		ivar &= ~(0xFF << index);
+		ivar |= (msix_vector << index);
+		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+	}
+}
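+/*
+ * Layout note: each VTIVAR register carries four 8-bit entries, covering the
+ * Rx and Tx causes of two queues.  For example, queue 1's Tx cause
+ * (direction 1) lands at bit index 16 * (1 & 1) + 8 * 1 = 24, i.e. the top
+ * byte of VTIVAR(0).
+ */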
+
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+					       struct ixgbevf_tx_buffer
+					       *tx_buffer_info)
+{
+	if (tx_buffer_info->dma) {
+		if (tx_buffer_info->mapped_as_page)
+			pci_unmap_page(adapter->pdev,
+				       tx_buffer_info->dma,
+				       tx_buffer_info->length,
+				       PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(adapter->pdev,
+					 tx_buffer_info->dma,
+					 tx_buffer_info->length,
+					 PCI_DMA_TODEVICE);
+		tx_buffer_info->dma = 0;
+	}
+	if (tx_buffer_info->skb) {
+		dev_kfree_skb_any(tx_buffer_info->skb);
+		tx_buffer_info->skb = NULL;
+	}
+	tx_buffer_info->time_stamp = 0;
+	/* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
+					 struct ixgbevf_ring *tx_ring,
+					 unsigned int eop)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 head, tail;
+
+	/* Detect a transmit hang in hardware, this serializes the
+	 * check with the clearing of time_stamp and movement of eop */
+	head = readl(hw->hw_addr + tx_ring->head);
+	tail = readl(hw->hw_addr + tx_ring->tail);
+	adapter->detect_tx_hung = false;
+	if ((head != tail) &&
+	    tx_ring->tx_buffer_info[eop].time_stamp &&
+	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
+		/* detected Tx unit hang */
+		union ixgbe_adv_tx_desc *tx_desc;
+		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+		printk(KERN_ERR "Detected Tx Unit Hang\n"
+		       "  Tx Queue             <%d>\n"
+		       "  TDH, TDT             <%x>, <%x>\n"
+		       "  next_to_use          <%x>\n"
+		       "  next_to_clean        <%x>\n"
+		       "tx_buffer_info[next_to_clean]\n"
+		       "  time_stamp           <%lx>\n"
+		       "  jiffies              <%lx>\n",
+		       tx_ring->queue_index,
+		       head, tail,
+		       tx_ring->next_to_use, eop,
+		       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+		return true;
+	}
+
+	return false;
+}
+
+#define IXGBE_MAX_TXD_PWR	14
+#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
+#endif
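+/*
+ * Illustrative example: a 60000-byte chunk needs TXD_USE_COUNT(60000) =
+ * (60000 >> 14) + 1 = 4 data descriptors, since each descriptor carries at
+ * most IXGBE_MAX_DATA_PER_TXD (16KB) and 60000 is not a multiple of it.
+ */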
+
+static void ixgbevf_tx_timeout(struct net_device *netdev);
+
+/**
+ * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+				 struct ixgbevf_ring *tx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	unsigned int i, eop, count = 0;
+	unsigned int total_bytes = 0, total_packets = 0;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->tx_buffer_info[i].next_to_watch;
+	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+
+	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+	       (count < tx_ring->work_limit)) {
+		bool cleaned = false;
+		for ( ; !cleaned; count++) {
+			struct sk_buff *skb;
+			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+			cleaned = (i == eop);
+			skb = tx_buffer_info->skb;
+
+			if (cleaned && skb) {
+				unsigned int segs, bytecount;
+
+				/* gso_segs is currently only valid for tcp */
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+					    skb->len;
+				total_packets += segs;
+				total_bytes += bytecount;
+			}
+
+			ixgbevf_unmap_and_free_tx_resource(adapter,
+							   tx_buffer_info);
+
+			tx_desc->wb.status = 0;
+
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
+
+		eop = tx_ring->tx_buffer_info[i].next_to_watch;
+		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+	}
+
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
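+	/* only wake the queue once at least two worst-case frames fit, so
+	 * the queue does not bounce between stopped and started */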
+	if (unlikely(count && netif_carrier_ok(netdev) &&
+		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+#ifdef HAVE_TX_MQ
+		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+#else
+		if (netif_queue_stopped(netdev) &&
+		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+#endif
+	}
+
+	if (adapter->detect_tx_hung) {
+		if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
+			/* schedule immediate reset if we believe we hung */
+			printk(KERN_INFO
+			       "tx hang %d detected, resetting adapter\n",
+			       adapter->tx_timeout_count + 1);
+			ixgbevf_tx_timeout(adapter->netdev);
+		}
+	}
+
+	/* re-arm the interrupt */
+	if ((count >= tx_ring->work_limit) &&
+	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
+		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
+	}
+
+	tx_ring->total_bytes += total_bytes;
+	tx_ring->total_packets += total_packets;
+
+	adapter->net_stats.tx_bytes += total_bytes;
+	adapter->net_stats.tx_packets += total_packets;
+
+	return (count < tx_ring->work_limit);
+}
+
+/**
+ * ixgbevf_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
+				struct sk_buff *skb, u8 status,
+				struct ixgbevf_ring *ring,
+				union ixgbe_adv_rx_desc *rx_desc)
+{
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+	int ret;
+
+	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+		if (adapter->vlgrp && is_vlan)
+			vlan_gro_receive(&q_vector->napi,
+					 adapter->vlgrp,
+					 tag, skb);
+		else
+			napi_gro_receive(&q_vector->napi, skb);
+	} else {
+		if (adapter->vlgrp && is_vlan)
+			ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+		else
+			ret = netif_rx(skb);
+	}
+}
+
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
+static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+				       u32 status_err, struct sk_buff *skb)
+{
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Rx csum disabled */
+	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+		return;
+
+	/* if IP and error */
+	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
+		adapter->hw_csum_rx_error++;
+		return;
+	}
+
+	if (!(status_err & IXGBE_RXD_STAT_L4CS))
+		return;
+
+	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+		adapter->hw_csum_rx_error++;
+		return;
+	}
+
+	/* It must be a TCP or UDP packet with a valid checksum */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	adapter->hw_csum_rx_good++;
+}
+
+/**
+ * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *rx_ring,
+				     int cleaned_count)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	union ixgbe_adv_rx_desc *rx_desc;
+	struct ixgbevf_rx_buffer *bi;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	bi = &rx_ring->rx_buffer_info[i];
+
+	while (cleaned_count--) {
+		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+		if (!bi->page_dma &&
+		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+			if (!bi->page) {
+				bi->page = alloc_page(GFP_ATOMIC);
+				if (!bi->page) {
+					adapter->alloc_rx_page_failed++;
+					goto no_buffers;
+				}
+				bi->page_offset = 0;
+			} else {
+				/* use a half page if we're re-using */
+				bi->page_offset ^= (PAGE_SIZE / 2);
+			}
+
+			bi->page_dma = pci_map_page(pdev, bi->page,
+						    bi->page_offset,
+						    (PAGE_SIZE / 2),
+						    PCI_DMA_FROMDEVICE);
+		}
+
+		skb = bi->skb;
+		if (!skb) {
+			skb = netdev_alloc_skb(adapter->netdev, bufsz);
+
+			if (!skb) {
+				adapter->alloc_rx_buff_failed++;
+				goto no_buffers;
+			}
+
+			/*
+			 * Make buffer alignment 2 beyond a 16 byte boundary
+			 * this will result in a 16 byte aligned IP header after
+			 * the 14 byte MAC header is removed
+			 */
+			skb_reserve(skb, NET_IP_ALIGN);
+
+			bi->skb = skb;
+		}
+		if (!bi->dma) {
+			bi->dma = pci_map_single(pdev, skb->data,
+						 rx_ring->rx_buf_len,
+						 PCI_DMA_FROMDEVICE);
+		}
+		/* Refresh the desc even if buffer_addrs didn't change because
+		 * each write-back erases this info. */
+		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+		} else {
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+		}
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		bi = &rx_ring->rx_buffer_info[i];
+	}
+
+no_buffers:
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
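+		/* the hardware tail must point at the last descriptor
+		 * actually filled, i.e. one behind next_to_use, wrapping
+		 * to the end of the ring when i is 0 */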
+		if (i-- == 0)
+			i = (rx_ring->count - 1);
+
+		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
+	}
+}
+
+static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
+					     u64 qmask)
+{
+	u32 mask;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	mask = (qmask & 0xFFFFFFFF);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
+static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+				 struct ixgbevf_ring *rx_ring,
+				 int *work_done, int work_to_do)
+{
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct pci_dev *pdev = adapter->pdev;
+	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
+	struct sk_buff *skb;
+	unsigned int i;
+	u32 len, staterr;
+	u16 hdr_info;
+	bool cleaned = false;
+	int cleaned_count = 0;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+	while (staterr & IXGBE_RXD_STAT_DD) {
+		u32 upper_len = 0;
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+
+		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
+			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+			if (hdr_info & IXGBE_RXDADV_SPH)
+				adapter->rx_hdr_split++;
+			if (len > IXGBEVF_RX_HDR_SIZE)
+				len = IXGBEVF_RX_HDR_SIZE;
+			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+		} else {
+			len = le16_to_cpu(rx_desc->wb.upper.length);
+		}
+		cleaned = true;
+		skb = rx_buffer_info->skb;
+		prefetch(skb->data - NET_IP_ALIGN);
+		rx_buffer_info->skb = NULL;
+
+		if (rx_buffer_info->dma) {
+			pci_unmap_single(pdev, rx_buffer_info->dma,
+					 rx_ring->rx_buf_len,
+					 PCI_DMA_FROMDEVICE);
+			rx_buffer_info->dma = 0;
+			skb_put(skb, len);
+		}
+
+		if (upper_len) {
+			pci_unmap_page(pdev, rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+			rx_buffer_info->page_dma = 0;
+			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+					   rx_buffer_info->page,
+					   rx_buffer_info->page_offset,
+					   upper_len);
+
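+			/* keep the page for half-page reuse only if the
+			 * buffer fits in half a page and nobody else still
+			 * holds a reference; otherwise drop our pointer */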
+			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+			    (page_count(rx_buffer_info->page) != 1))
+				rx_buffer_info->page = NULL;
+			else
+				get_page(rx_buffer_info->page);
+
+			skb->len += upper_len;
+			skb->data_len += upper_len;
+			skb->truesize += upper_len;
+		}
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+
+		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		prefetch(next_rxd);
+		cleaned_count++;
+
+		next_buffer = &rx_ring->rx_buffer_info[i];
+
+		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+				rx_buffer_info->skb = next_buffer->skb;
+				rx_buffer_info->dma = next_buffer->dma;
+				next_buffer->skb = skb;
+				next_buffer->dma = 0;
+			} else {
+				skb->next = next_buffer->skb;
+				skb->next->prev = skb;
+			}
+			adapter->non_eop_descs++;
+			goto next_desc;
+		}
+
+		/* ERR_MASK will only have valid bits if EOP set */
+		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		ixgbevf_rx_checksum(adapter, staterr, skb);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		/*
+		 * Work around issue of some types of VM to VM loop back
+		 * packets not getting split correctly
+		 */
+		if (staterr & IXGBE_RXD_STAT_LB) {
+			u32 header_fixup_len = skb->len - skb->data_len;
+			if (header_fixup_len < 14)
+				skb_push(skb, header_fixup_len);
+		}
+		skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+		adapter->netdev->last_rx = jiffies;
+
+next_desc:
+		rx_desc->wb.upper.status_error = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
+						 cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	napi_gro_flush(&q_vector->napi);
+
+	rx_ring->next_to_clean = i;
+	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+
+	if (cleaned_count)
+		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+	rx_ring->total_packets += total_rx_packets;
+	rx_ring->total_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+
+	return cleaned;
+}
+
+/**
+ * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * @netdev: dummy net_device whose private data is our q_vector
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbevf_clean_rxonly(struct net_device *netdev, int *budget)
+{
+	struct ixgbevf_q_vector *q_vector = netdev->priv;
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct ixgbevf_ring *rx_ring = NULL;
+	int work_to_do = min(*budget, netdev->quota);
+	int work_done = 0;
+	long r_idx;
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = &(adapter->rx_ring[r_idx]);
+
+	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, work_to_do);
+
+	*budget -= work_done;
+	netdev->quota -= work_done;
+
+	/* If all Rx work done, exit the polling mode */
+	if (work_done < work_to_do) {
+		netif_rx_complete(netdev);
+		if (adapter->itr_setting & 1)
+			ixgbevf_set_itr_msix(q_vector);
+		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @netdev: dummy net_device whose private data is our q_vector
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly_many(struct net_device *netdev, int *budget)
+{
+	struct ixgbevf_q_vector *q_vector = netdev->priv;
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct ixgbevf_ring *rx_ring = NULL;
+	int work_to_do = min(*budget, netdev->quota);
+	int work_done = 0, i;
+	long r_idx;
+	u64 enable_mask = 0;
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling */
+	work_to_do /= (q_vector->rxr_count ?: 1);
+	work_to_do = max(work_to_do, 1);
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, work_to_do);
+		enable_mask |= rx_ring->v_idx;
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+				      r_idx + 1);
+	}
+
+	*budget -= work_done;
+	netdev->quota -= work_done;
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+	if (!netif_running(adapter->netdev))
+		work_done = 0;
+
+#endif
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = &(adapter->rx_ring[r_idx]);
+
+	/* If all Rx work done, exit the polling mode */
+	if (work_done < work_to_do) {
+		netif_rx_complete(netdev);
+		if (adapter->itr_setting & 1)
+			ixgbevf_set_itr_msix(q_vector);
+		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+			ixgbevf_irq_enable_queues(adapter, enable_mask);
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * ixgbevf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbevf_q_vector *q_vector;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, j, q_vectors, v_idx, r_idx;
+	u32 mask;
+
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/*
+	 * Populate the IVAR table and set the ITR values to the
+	 * corresponding register.
+	 */
+	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+		q_vector = adapter->q_vector[v_idx];
+		/* XXX for_each_bit(...) */
+		r_idx = find_first_bit(q_vector->rxr_idx,
+				       adapter->num_rx_queues);
+
+		for (i = 0; i < q_vector->rxr_count; i++) {
+			j = adapter->rx_ring[r_idx].reg_idx;
+			ixgbevf_set_ivar(adapter, 0, j, v_idx);
+			r_idx = find_next_bit(q_vector->rxr_idx,
+					      adapter->num_rx_queues,
+					      r_idx + 1);
+		}
+		r_idx = find_first_bit(q_vector->txr_idx,
+				       adapter->num_tx_queues);
+
+		for (i = 0; i < q_vector->txr_count; i++) {
+			j = adapter->tx_ring[r_idx].reg_idx;
+			ixgbevf_set_ivar(adapter, 1, j, v_idx);
+			r_idx = find_next_bit(q_vector->txr_idx,
+					      adapter->num_tx_queues,
+					      r_idx + 1);
+		}
+
+		/* if this is a tx only vector halve the interrupt rate */
+		if (q_vector->txr_count && !q_vector->rxr_count)
+			q_vector->eitr = (adapter->eitr_param >> 1);
+		else if (q_vector->rxr_count)
+			/* rx only */
+			q_vector->eitr = adapter->eitr_param;
+
+		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+	}
+
+	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
+
+	/* set up to autoclear timer, and the vectors */
+	mask = IXGBE_EIMS_ENABLE_MASK;
+	mask &= ~IXGBE_EIMS_OTHER;
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * ixgbevf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints per sec) to give last timeslice
+ * @itr_setting: current throttle rate in ints/second
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ **/
+static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
+			     u32 eitr, u8 itr_setting,
+			     int packets, int bytes)
+{
+	unsigned int retval = itr_setting;
+	u32 timepassed_us;
+	u64 bytes_perint;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+	/* simple throttlerate management
+	 *    0-20MB/s lowest (100000 ints/s)
+	 *   20-100MB/s low   (20000 ints/s)
+	 *  100-1249MB/s bulk (8000 ints/s)
+	 */
+	/* what was last interrupt timeslice? */
+	timepassed_us = 1000000/eitr;
+	bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+	switch (itr_setting) {
+	case lowest_latency:
+		if (bytes_perint > adapter->eitr_low)
+			retval = low_latency;
+		break;
+	case low_latency:
+		if (bytes_perint > adapter->eitr_high)
+			retval = bulk_latency;
+		else if (bytes_perint <= adapter->eitr_low)
+			retval = lowest_latency;
+		break;
+	case bulk_latency:
+		if (bytes_perint <= adapter->eitr_high)
+			retval = low_latency;
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
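+/*
+ * Illustrative example, assuming the driver's default thresholds of
+ * eitr_low = 10 and eitr_high = 20 bytes/usec: at 20000 ints/s the last
+ * timeslice was 1000000 / 20000 = 50 usec, so 3000 bytes received in it
+ * give 60 bytes/usec, pushing a vector in low_latency up to bulk_latency.
+ */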
+
+/**
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @adapter: pointer to adapter struct
+ * @v_idx: vector index into q_vector array
+ * @itr_reg: new value to be written in *register* format, not ints/s
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update VTEITR registers at runtime.  Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+			       u32 itr_reg)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+
+	/*
+	 * set the WDIS bit to not clear the timer bits and cause an
+	 * immediate assertion of the interrupt
+	 */
+	itr_reg |= IXGBE_EITR_CNT_WDIS;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
+}
+
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+{
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	u32 new_itr;
+	u8 current_itr, ret_itr;
+	int i, r_idx, v_idx = q_vector->v_idx;
+	struct ixgbevf_ring *rx_ring, *tx_ring;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		tx_ring = &(adapter->tx_ring[r_idx]);
+		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+					     q_vector->tx_itr,
+					     tx_ring->total_packets,
+					     tx_ring->total_bytes);
+		/* if the result for this queue would decrease interrupt
+		 * rate for this vector then use that result */
+		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+				    q_vector->tx_itr - 1 : ret_itr);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+				      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+					     q_vector->rx_itr,
+					     rx_ring->total_packets,
+					     rx_ring->total_bytes);
+		/* if the result for this queue would decrease interrupt
+		 * rate for this vector then use that result */
+		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+				    q_vector->rx_itr - 1 : ret_itr);
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+				      r_idx + 1);
+	}
+
+	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = 100000;
+		break;
+	case low_latency:
+		new_itr = 20000; /* aka hwitr = ~200 */
+		break;
+	case bulk_latency:
+	default:
+		new_itr = 8000;
+		break;
+	}
+
+	if (new_itr != q_vector->eitr) {
+		u32 smoothed;
+
+		/* exponentially smooth against the previous value */
+		smoothed = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+		/* save the raw algorithm value, not the smoothed one */
+		q_vector->eitr = new_itr;
+		/* ixgbevf_write_eitr() expects ints/s and performs the
+		 * register-format conversion itself, so don't convert here */
+		ixgbevf_write_eitr(adapter, v_idx, smoothed);
+	}
+}
+
+static irqreturn_t ixgbevf_msix_mbx(int irq, void *data, struct pt_regs *regs)
+{
+	struct net_device *netdev = data;
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 eicr;
+	u32 msg;
+
+	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+
+	hw->mbx.ops.read(hw, &msg, 1);
+
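+	/* a control message means the PF is resetting or coming back up;
+	 * kick the watchdog so link state is re-evaluated right away */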
+	if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
+		mod_timer(&adapter->watchdog_timer,
+			  round_jiffies(jiffies + 10));
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data, struct pt_regs *regs)
+{
+	struct ixgbevf_q_vector *q_vector = data;
+	struct ixgbevf_adapter  *adapter = q_vector->adapter;
+	struct ixgbevf_ring     *tx_ring;
+	int i, r_idx;
+
+	if (!q_vector->txr_count)
+		return IRQ_HANDLED;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		tx_ring = &(adapter->tx_ring[r_idx]);
+		tx_ring->total_bytes = 0;
+		tx_ring->total_packets = 0;
+		ixgbevf_clean_tx_irq(adapter, tx_ring);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+				      r_idx + 1);
+	}
+
+	if (adapter->itr_setting & 1)
+		ixgbevf_set_itr_msix(q_vector);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
+static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data, struct pt_regs *regs)
+{
+	struct ixgbevf_q_vector *q_vector = data;
+	struct ixgbevf_adapter  *adapter = q_vector->adapter;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbevf_ring  *rx_ring;
+	int r_idx;
+	int i;
+
+	if (!q_vector->rxr_count)
+		return IRQ_HANDLED;
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		rx_ring = &(adapter->rx_ring[r_idx]);
+		rx_ring->total_bytes = 0;
+		rx_ring->total_packets = 0;
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+				      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = &(adapter->rx_ring[r_idx]);
+	/* disable interrupts on this vector only */
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
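+	/* the actual Rx clean is deferred to the poll routine of the
+	 * per-vector dummy netdev (old-style NAPI) */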
+	netif_rx_schedule(q_vector->dummy_netdev);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data, struct pt_regs *regs)
+{
+	ixgbevf_msix_clean_rx(irq, data, regs);
+	ixgbevf_msix_clean_tx(irq, data, regs);
+
+	return IRQ_HANDLED;
+}
+
+static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
+				     int r_idx)
+{
+	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+	set_bit(r_idx, q_vector->rxr_idx);
+	q_vector->rxr_count++;
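+	/* v_idx is kept as a bit mask so it can be written directly
+	 * into VTEIMS/VTEIMC to (un)mask this ring's vector */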
+	a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
+				     int t_idx)
+{
+	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+	set_bit(t_idx, q_vector->txr_idx);
+	q_vector->txr_count++;
+	a->tx_ring[t_idx].v_idx = 1 << v_idx;
+}
+
+/**
+ * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code.  Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible.  You would add new
+ * mapping configurations in here.
+ **/
+static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
+{
+	int q_vectors;
+	int v_start = 0;
+	int rxr_idx = 0, txr_idx = 0;
+	int rxr_remaining = adapter->num_rx_queues;
+	int txr_remaining = adapter->num_tx_queues;
+	int i, j;
+	int rqpv, tqpv;
+	int err = 0;
+
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/*
+	 * The ideal configuration...
+	 * We have enough vectors to map one per queue.
+	 */
+	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+			map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+			map_vector_to_txq(adapter, v_start, txr_idx);
+		goto out;
+	}
+
+	/*
+	 * If we don't have enough vectors for a 1-to-1
+	 * mapping, we'll have to group them so there are
+	 * multiple queues per vector.
+	 */
+	/* Re-adjusting *qpv takes care of the remainder. */
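+	/* e.g. 3 Rx queues on 2 vectors: vector 0 gets
+	 * DIV_ROUND_UP(3, 2) = 2 queues, vector 1 gets the remaining one */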
+	for (i = v_start; i < q_vectors; i++) {
+		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+		for (j = 0; j < rqpv; j++) {
+			map_vector_to_rxq(adapter, i, rxr_idx);
+			rxr_idx++;
+			rxr_remaining--;
+		}
+	}
+	for (i = v_start; i < q_vectors; i++) {
+		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+		for (j = 0; j < tqpv; j++) {
+			map_vector_to_txq(adapter, i, txr_idx);
+			txr_idx++;
+			txr_remaining--;
+		}
+	}
+
+out:
+	return err;
+}
+
+/**
+ * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	irqreturn_t (*handler)(int, void *, struct pt_regs *);
+	int i, vector, q_vectors, err;
+	int ri = 0, ti = 0;
+
+	/* Decrement for Other and TCP Timer vectors */
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
+					  ? &ixgbevf_msix_clean_many : \
+			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
+			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
+			  NULL)
+	for (vector = 0; vector < q_vectors; vector++) {
+		handler = SET_HANDLER(adapter->q_vector[vector]);
+
+		if (handler == &ixgbevf_msix_clean_rx) {
+			sprintf(adapter->name[vector], "%s-%s-%d",
+				netdev->name, "rx", ri++);
+		} else if (handler == &ixgbevf_msix_clean_tx) {
+			sprintf(adapter->name[vector], "%s-%s-%d",
+				netdev->name, "tx", ti++);
+		} else if (handler == &ixgbevf_msix_clean_many) {
+			sprintf(adapter->name[vector], "%s-%s-%d",
+				netdev->name, "TxRx", vector);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(adapter->msix_entries[vector].vector,
+				  handler, 0, adapter->name[vector],
+				  adapter->q_vector[vector]);
+		if (err) {
+			hw_dbg(&adapter->hw,
+			       "request_irq failed for MSIX interrupt "
+			       "Error: %d\n", err);
+			goto free_queue_irqs;
+		}
+	}
+
+	sprintf(adapter->name[vector], "%s:mbx", netdev->name);
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+	if (err) {
+		hw_dbg(&adapter->hw,
+		       "request_irq for msix_mbx failed: %d\n", err);
+		goto free_queue_irqs;
+	}
+
+	return 0;
+
+free_queue_irqs:
+	/* dev_id must match what was handed to request_irq() above */
+	for (i = vector - 1; i >= 0; i--)
+		free_irq(adapter->msix_entries[i].vector,
+			 adapter->q_vector[i]);
+	pci_disable_msix(adapter->pdev);
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+	return err;
+}
+
+static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
+{
+	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	for (i = 0; i < q_vectors; i++) {
+		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+		q_vector->rxr_count = 0;
+		q_vector->txr_count = 0;
+		q_vector->eitr = adapter->eitr_param;
+	}
+}
+
+/**
+ * ixgbevf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
+{
+	int err = 0;
+
+	err = ixgbevf_request_msix_irqs(adapter);
+
+	if (err)
+		hw_dbg(&adapter->hw,
+		       "request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i, q_vectors;
+
+	q_vectors = adapter->num_msix_vectors;
+
+	i = q_vectors - 1;
+
+	free_irq(adapter->msix_entries[i].vector, netdev);
+	i--;
+
+	for (; i >= 0; i--) {
+		free_irq(adapter->msix_entries[i].vector,
+			 adapter->q_vector[i]);
+	}
+
+	ixgbevf_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
+{
+	int i;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+
+	IXGBE_WRITE_FLUSH(hw);
+
+	for (i = 0; i < adapter->num_msix_vectors; i++)
+		synchronize_irq(adapter->msix_entries[i].vector);
+}
+
+/**
+ * ixgbevf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
+				      bool queues, bool flush)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 mask;
+	u64 qmask;
+
+	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+	qmask = ~0;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+	if (queues)
+		ixgbevf_irq_enable_queues(adapter, qmask);
+
+	if (flush)
+		IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+{
+	u64 tdba;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 i, j, tdlen, txctrl;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
+		j = ring->reg_idx;
+		tdba = ring->dma;
+		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
+				(tdba & DMA_BIT_MASK(32)));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
+		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
+		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+		/* Disable Tx Head Writeback RO bit, since this hoses
+		 * bookkeeping if things aren't delivered in order.
+		 */
+		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
+		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
+	}
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
+
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+{
+	struct ixgbevf_ring *rx_ring;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 srrctl;
+
+	rx_ring = &adapter->rx_ring[index];
+
+	srrctl = IXGBE_SRRCTL_DROP_EN;
+
+	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		u16 bufsz = IXGBEVF_RXBUFFER_2048;
+		/* grow the amount we can receive on large page machines */
+		if (bufsz < (PAGE_SIZE / 2))
+			bufsz = (PAGE_SIZE / 2);
+		/* cap the bufsz at our largest descriptor size */
+		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
+
+		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
+			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+			   IXGBE_SRRCTL_BSIZEHDR_MASK);
+	} else {
+		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+			srrctl |= IXGBEVF_RXBUFFER_2048 >>
+				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+		else
+			srrctl |= rx_ring->rx_buf_len >>
+				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
+}
+
+/**
+ * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
+{
+	u64 rdba;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	int i, j;
+	u32 rdlen;
+	int rx_buf_len;
+
+	/* Decide whether to use packet split mode or not */
+	if (netdev->mtu > ETH_DATA_LEN) {
+		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
+			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+		else
+			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+	} else {
+		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
+			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+		else
+			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+	}
+
+	/* Set the RX buffer length according to the mode */
+	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		/* PSRTYPE must be initialized in 82599 */
+		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+			IXGBE_PSRTYPE_UDPHDR |
+			IXGBE_PSRTYPE_IPV4HDR |
+			IXGBE_PSRTYPE_IPV6HDR |
+			IXGBE_PSRTYPE_L2HDR;
+		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+		rx_buf_len = IXGBEVF_RX_HDR_SIZE;
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+		if (netdev->mtu <= ETH_DATA_LEN)
+			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		else
+			rx_buf_len = ALIGN(max_frame, 1024);
+	}
+
+	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rdba = adapter->rx_ring[i].dma;
+		j = adapter->rx_ring[i].reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
+				(rdba & DMA_BIT_MASK(32)));
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
+		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
+		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+
+		ixgbevf_configure_srrctl(adapter, j);
+	}
+}
+
+static void ixgbevf_vlan_rx_register(struct net_device *netdev,
+				     struct vlan_group *grp)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, j;
+	u32 ctrl;
+
+	adapter->vlgrp = grp;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		j = adapter->rx_ring[i].reg_idx;
+		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+		ctrl |= IXGBE_RXDCTL_VME;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
+	}
+}
+
+static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *v_netdev;
+
+	/* add VID to filter table */
+	if (hw->mac.ops.set_vfta)
+		hw->mac.ops.set_vfta(hw, vid, 0, true);
+	/*
+	 * Copy feature flags from netdev to the vlan netdev for this vid.
+	 * This allows things like TSO to bubble down to our vlan device.
+	 */
+	v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+	v_netdev->features |= adapter->netdev->features;
+	vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
+
+static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+		ixgbevf_irq_disable(adapter);
+
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+		ixgbevf_irq_enable(adapter, true, true);
+
+	/* remove VID from filter table */
+	if (hw->mac.ops.set_vfta)
+		hw->mac.ops.set_vfta(hw, vid, 0, false);
+}
+
+static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
+{
+	ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+			ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
+		}
+	}
+}
+
+static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+				 u32 *vmdq)
+{
+	struct dev_mc_list *mc_ptr;
+	u8 *addr = *mc_addr_ptr;
+	*vmdq = 0;
+
+	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+	if (mc_ptr->next)
+		*mc_addr_ptr = mc_ptr->next->dmi_addr;
+	else
+		*mc_addr_ptr = NULL;
+
+	return addr;
+}
+
+/**
+ * ixgbevf_set_rx_mode - Multicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_method entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast mode.
+ **/
+static void ixgbevf_set_rx_mode(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u8 *addr_list = NULL;
+	int addr_count = 0;
+
+	/* reprogram multicast list */
+	addr_count = netdev->mc_count;
+	if (addr_count)
+		addr_list = netdev->mc_list->dmi_addr;
+	if (hw->mac.ops.update_mc_addr_list)
+		hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+						ixgbevf_addr_list_itr);
+}
+
+static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
+{
+	int q_idx;
+	struct ixgbevf_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		struct net_device *netdev;
+		q_vector = adapter->q_vector[q_idx];
+		if (!q_vector->rxr_count)
+			continue;
+		netdev = q_vector->dummy_netdev;
+		if (q_vector->rxr_count > 1)
+			netdev->poll = &ixgbevf_clean_rxonly_many;
+
+		netif_poll_enable(netdev);
+	}
+}
+
+static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
+{
+	int q_idx;
+	struct ixgbevf_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		q_vector = adapter->q_vector[q_idx];
+		if (!q_vector->rxr_count)
+			continue;
+		netif_poll_disable(q_vector->dummy_netdev);
+	}
+}
+
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	ixgbevf_set_rx_mode(netdev);
+
+	ixgbevf_restore_vlan(adapter);
+
+	ixgbevf_configure_tx(adapter);
+	ixgbevf_configure_rx(adapter);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
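+		/* leave one descriptor unused so the tail never catches
+		 * up with the head */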
+		ring->next_to_use = ring->count - 1;
+		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+	}
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+						int rxr)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int j = adapter->rx_ring[rxr].reg_idx;
+	int k;
+
+	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
+		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+			break;
+		else
+			msleep(1);
+	}
+	if (k >= IXGBE_MAX_RX_DESC_POLL) {
+		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
+		       "not set within the polling period\n", rxr);
+	}
+
+	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+				(adapter->rx_ring[rxr].count - 1));
+}
+
+static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	int i, j = 0;
+	int num_rx_rings = adapter->num_rx_queues;
+	u32 txdctl, rxdctl;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
+		txdctl |= (8 << 16);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+		txdctl |= IXGBE_TXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+	}
+
+	for (i = 0; i < num_rx_rings; i++) {
+		j = adapter->rx_ring[i].reg_idx;
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+		rxdctl |= IXGBE_RXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+		ixgbevf_rx_desc_queue_enable(adapter, i);
+	}
+
+	ixgbevf_configure_msix(adapter);
+
+	if (hw->mac.ops.set_rar) {
+		if (is_valid_ether_addr(hw->mac.addr))
+			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+		else
+			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+	}
+
+	clear_bit(__IXGBEVF_DOWN, &adapter->state);
+	ixgbevf_napi_enable_all(adapter);
+
+	/* enable transmits */
+	netif_start_queue(netdev);
+
+	/* bring the link up in the watchdog; this could race with our first
+	 * link-up interrupt but shouldn't be a problem */
+	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+	mod_timer(&adapter->watchdog_timer, jiffies);
+	return 0;
+}
+
+int ixgbevf_up(struct ixgbevf_adapter *adapter)
+{
+	int err;
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	ixgbevf_configure(adapter);
+
+	err = ixgbevf_up_complete(adapter);
+
+	/* clear any pending interrupts, may auto mask */
+	IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+	ixgbevf_irq_enable(adapter, true, true);
+
+	return err;
+}
+
+/**
+ * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
+				  struct ixgbevf_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	if (!rx_ring->rx_buffer_info)
+		return;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		struct ixgbevf_rx_buffer *rx_buffer_info;
+
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
+		if (rx_buffer_info->dma) {
+			pci_unmap_single(pdev, rx_buffer_info->dma,
+					 rx_ring->rx_buf_len,
+					 PCI_DMA_FROMDEVICE);
+			rx_buffer_info->dma = 0;
+		}
+		if (rx_buffer_info->skb) {
+			struct sk_buff *skb = rx_buffer_info->skb;
+			rx_buffer_info->skb = NULL;
+			do {
+				struct sk_buff *this = skb;
+				skb = skb->prev;
+				dev_kfree_skb(this);
+			} while (skb);
+		}
+		if (!rx_buffer_info->page)
+			continue;
+		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
+			       PCI_DMA_FROMDEVICE);
+		rx_buffer_info->page_dma = 0;
+		put_page(rx_buffer_info->page);
+		rx_buffer_info->page = NULL;
+		rx_buffer_info->page_offset = 0;
+	}
+
+	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	if (rx_ring->head)
+		writel(0, adapter->hw.hw_addr + rx_ring->head);
+	if (rx_ring->tail)
+		writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
+				  struct ixgbevf_ring *tx_ring)
+{
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	if (!tx_ring->tx_buffer_info)
+		return;
+
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+	}
+
+	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	if (tx_ring->head)
+		writel(0, adapter->hw.hw_addr + tx_ring->head);
+	if (tx_ring->tail)
+		writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+void ixgbevf_down(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 txdctl;
+	int i, j;
+
+	/* signal that we are down to the interrupt handler */
+	set_bit(__IXGBEVF_DOWN, &adapter->state);
+	/* disable receives */
+
+	netif_tx_disable(netdev);
+
+	msleep(10);
+
+	netif_stop_queue(netdev);
+
+	ixgbevf_irq_disable(adapter);
+
+	ixgbevf_napi_disable_all(adapter);
+
+	del_timer_sync(&adapter->watchdog_timer);
+	/* can't call flush scheduled work here because it can deadlock
+	 * if linkwatch_event tries to acquire the rtnl_lock which we are
+	 * holding */
+	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
+		msleep(1);
+
+	/* disable transmits in the hardware now that interrupts are off */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
+				(txdctl & ~IXGBE_TXDCTL_ENABLE));
+	}
+
+	netif_carrier_off(netdev);
+
+	if (!pci_channel_offline(adapter->pdev))
+		ixgbevf_reset(adapter);
+
+	ixgbevf_clean_all_tx_rings(adapter);
+	ixgbevf_clean_all_rx_rings(adapter);
+}
+
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	WARN_ON(in_interrupt());
+
+	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+		msleep(1);
+
+	/*
+	 * Check if PF is up before re-init.  If not then skip until
+	 * later when the PF is up and ready to service requests from
+	 * the VF via mailbox.  If the VF is up and running then the
+	 * watchdog task will continue to schedule reset tasks until
+	 * the PF is up and running.
+	 */
+	if (!hw->mac.ops.reset_hw(hw)) {
+		ixgbevf_down(adapter);
+		ixgbevf_up(adapter);
+	}
+
+	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+}
+
+void ixgbevf_reset(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+
+	if (hw->mac.ops.reset_hw(hw))
+		hw_dbg(hw, "PF still resetting\n");
+	else
+		hw->mac.ops.init_hw(hw);
+
+	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+		       netdev->addr_len);
+		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+		       netdev->addr_len);
+	}
+}
+
+static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+					 int vectors)
+{
+	int err, vector_threshold;
+
+	/* We'll want at least 3 (vector_threshold):
+	 * 1) TxQ[0] Cleanup
+	 * 2) RxQ[0] Cleanup
+	 * 3) Other (Link Status Change, etc.)
+	 */
+	vector_threshold = MIN_MSIX_COUNT;
+
+	/* The more we get, the more we will assign to Tx/Rx Cleanup
+	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+	 * Right now, we simply care about how many we'll get; we'll
+	 * set them up later while requesting irq's.
+	 */
+	while (vectors >= vector_threshold) {
+		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+				      vectors);
+		if (!err) /* Success in acquiring all requested vectors. */
+			break;
+		else if (err < 0)
+			vectors = 0; /* Nasty failure, quit now */
+		else /* err == number of vectors we should try again with */
+			vectors = err;
+	}
+
+	if (vectors < vector_threshold) {
+		/* Can't allocate enough MSI-X interrupts?  Oh well.
+		 * This just means we'll go with either a single MSI
+		 * vector or fall back to legacy interrupts.
+		 */
+		hw_dbg(&adapter->hw,
+		       "Unable to allocate MSI-X interrupts\n");
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else {
+		/*
+		 * Adjust for only the vectors we'll use, which is minimum
+		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+		 * vectors we were allocated.
+		 */
+		adapter->num_msix_vectors = vectors;
+	}
+}
+
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine.  The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features.  This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
+{
+	/* Start with base case */
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_pools = adapter->num_rx_queues;
+	adapter->num_rx_queues_per_pool = 1;
+}
+
+/**
+ * ixgbevf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		goto err_tx_ring_allocation;
+
+	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!adapter->rx_ring)
+		goto err_rx_ring_allocation;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		adapter->tx_ring[i].count = adapter->tx_ring_count;
+		adapter->tx_ring[i].queue_index = i;
+		adapter->tx_ring[i].reg_idx = i;
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->rx_ring[i].count = adapter->rx_ring_count;
+		adapter->rx_ring[i].queue_index = i;
+		adapter->rx_ring[i].reg_idx = i;
+	}
+
+	return 0;
+
+err_rx_ring_allocation:
+	kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+	return -ENOMEM;
+}
+
+/**
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+	int err = 0;
+	int vector, v_budget;
+
+	/*
+	 * It's easy to be greedy for MSI-X vectors, but it really
+	 * doesn't do us much good if we have a lot more vectors
+	 * than CPUs.  So let's be conservative and only ask for
+	 * (roughly) twice the number of vectors as there are CPUs.
+	 */
+	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
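+	/* with the single Rx/Tx queue pair used here this works out to
+	 * min(2, 2 * ncpus) + NON_Q_VECTORS, i.e. 3 including the one
+	 * mailbox vector */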
+
+	/* A failure in MSI-X entry allocation isn't fatal, but it does
+	 * mean we disable MSI-X capabilities of the adapter. */
+	adapter->msix_entries = kcalloc(v_budget,
+					sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adapter->msix_entries) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (vector = 0; vector < v_budget; vector++)
+		adapter->msix_entries[vector].entry = vector;
+
+	ixgbevf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+	return err;
+}
+
+/**
+ * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
+{
+	int q_idx, num_q_vectors;
+	struct ixgbevf_q_vector *q_vector;
+	int napi_vectors;
+	int (*poll)(struct net_device *, int *);
+
+	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	napi_vectors = adapter->num_rx_queues;
+	poll = &ixgbevf_clean_rxonly;
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
+		if (!q_vector)
+			goto err_out;
+		q_vector->adapter = adapter;
+		q_vector->v_idx = q_idx;
+		q_vector->eitr = adapter->eitr_param;
+		if (q_idx < napi_vectors) {
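+			/* old-style NAPI polls through a netdev, so each Rx
+			 * vector gets a private, never-registered dummy
+			 * netdev to hang its poll routine off */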
+			q_vector->dummy_netdev = alloc_netdev(0, "", ether_setup);
+			if (!q_vector->dummy_netdev) {
+				/* this q_vector was never published in
+				 * adapter->q_vector[], so free it here */
+				kfree(q_vector);
+				goto err_out;
+			}
+			q_vector->dummy_netdev->priv = q_vector;
+			q_vector->dummy_netdev->poll = poll;
+			q_vector->dummy_netdev->weight = 64;
+			set_bit(__LINK_STATE_START, &q_vector->dummy_netdev->state);
+		}
+		adapter->q_vector[q_idx] = q_vector;
+	}
+
+	return 0;
+
+err_out:
+	while (q_idx) {
+		q_idx--;
+		q_vector = adapter->q_vector[q_idx];
+		if (q_vector->dummy_netdev) {
+			free_netdev(q_vector->dummy_netdev);
+			q_vector->dummy_netdev = NULL;
+		}
+		kfree(q_vector);
+		adapter->q_vector[q_idx] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/**
+ * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
+{
+	int q_idx, num_q_vectors;
+	int napi_vectors;
+
+	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	napi_vectors = adapter->num_rx_queues;
+
+	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+
+		adapter->q_vector[q_idx] = NULL;
+		if (q_idx < napi_vectors) {
+			free_netdev(q_vector->dummy_netdev);
+			q_vector->dummy_netdev = NULL;
+		}
+		kfree(q_vector);
+	}
+}
+
+/**
+ * ixgbevf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+	pci_disable_msix(adapter->pdev);
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+}
+
+/**
+ * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+	int err;
+
+	/* Number of supported queues */
+	ixgbevf_set_num_queues(adapter);
+
+	err = ixgbevf_set_interrupt_capability(adapter);
+	if (err) {
+		hw_dbg(&adapter->hw,
+		       "Unable to setup interrupt capabilities\n");
+		goto err_set_interrupt;
+	}
+
+	err = ixgbevf_alloc_q_vectors(adapter);
+	if (err) {
+		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
+		       "vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	err = ixgbevf_alloc_queues(adapter);
+	if (err) {
+		printk(KERN_ERR "Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
+	       "Tx Queue count = %u\n",
+	       (adapter->num_rx_queues > 1) ? "Enabled" :
+	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+	set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+	return 0;
+err_alloc_queues:
+	ixgbevf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+	ixgbevf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+	return err;
+}
+
+/**
+ * ixgbevf_sw_init - Initialize general software structures
+ * (struct ixgbevf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbevf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	hw->mbx.ops.init_params(hw);
+	hw->mac.max_tx_queues = MAX_TX_QUEUES;
+	hw->mac.max_rx_queues = MAX_RX_QUEUES;
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		dev_info(&pdev->dev,
+		         "PF still in reset state, assigning a random MAC address\n");
+		random_ether_addr(hw->mac.addr);
+	} else {
+		err = hw->mac.ops.init_hw(hw);
+		if (err) {
+			printk(KERN_ERR "init_hw failed: %d\n", err);
+			goto out;
+		}
+	}
+
+	/* Enable dynamic interrupt throttling rates */
+	adapter->eitr_param = 20000;
+	adapter->itr_setting = 1;
+
+	/* default eitr thresholds, in bytes/usec (roughly MB/s) */
+	adapter->eitr_low = 10;
+	adapter->eitr_high = 20;
+
+	/* set default ring sizes */
+	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
+	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+
+	/* enable rx csum by default */
+	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+	set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+out:
+	return err;
+}
+
+static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+	adapter->stats.last_vfgorc |=
+		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+	adapter->stats.last_vfgotc |=
+		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
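+/*
+ * The VF statistics registers are only 32 (or 36) bits wide; when the
+ * current reading is smaller than the last one the hardware counter has
+ * wrapped, so carry one into the upper bits of the 64-bit software
+ * counter before merging in the fresh low bits.
+ */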
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
+	{							\
+		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
+		if (current_counter < last_counter)		\
+			counter += 0x100000000LL;		\
+		last_counter = current_counter;			\
+		counter &= 0xFFFFFFFF00000000LL;		\
+		counter |= current_counter;			\
+	}
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+	{								 \
+		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
+		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
+		u64 current_counter = (current_counter_msb << 32) |      \
+			current_counter_lsb;                             \
+		if (current_counter < last_counter)			 \
+			counter += 0x1000000000LL;			 \
+		last_counter = current_counter;				 \
+		counter &= 0xFFFFFFF000000000LL;			 \
+		counter |= current_counter;				 \
+	}
+
+/**
+ * ixgbevf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+				adapter->stats.vfgprc);
+	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+				adapter->stats.vfgptc);
+	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+				adapter->stats.last_vfgorc,
+				adapter->stats.vfgorc);
+	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+				adapter->stats.last_vfgotc,
+				adapter->stats.vfgotc);
+	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+				adapter->stats.vfmprc);
+
+	/* Fill out the OS statistics structure */
+	adapter->net_stats.multicast = adapter->stats.vfmprc -
+		adapter->stats.base_vfmprc;
+}
+
+/**
+ * ixgbevf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbevf_watchdog(unsigned long data)
+{
+	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 eics = 0;
+	int i;
+
+	/*
+	 * Do the watchdog outside of interrupt context due to the lovely
+	 * delays that some of the newer hardware requires
+	 */
+
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+		goto watchdog_short_circuit;
+
+	/* get one bit for every active tx/rx interrupt vector */
+	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+		if (qv->rxr_count || qv->txr_count)
+			eics |= (1 << i);
+	}
+
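+	/* setting the cause bits raises a mock interrupt on every active
+	 * vector so the rings get serviced even if an interrupt was lost */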
+	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+
+watchdog_short_circuit:
+	schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	schedule_work(&adapter->reset_task);
+}
+
+static void ixgbevf_reset_task(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
+		return;
+
+	adapter->tx_timeout_count++;
+
+	ixgbevf_reinit_locked(adapter);
+}
+
+/**
+ * ixgbevf_watchdog_task - worker thread to bring link up
+ * @adapter: board private structure
+ **/
+static void ixgbevf_watchdog_task(struct ixgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 link_speed = adapter->link_speed;
+	bool link_up = adapter->link_up;
+
+	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+	/*
+	 * Always check the link on the watchdog because we have
+	 * no LSC interrupt
+	 */
+	if (hw->mac.ops.check_link) {
+		if ((hw->mac.ops.check_link(hw, &link_speed,
+					    &link_up, false)) != 0) {
+			adapter->link_up = link_up;
+			adapter->link_speed = link_speed;
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+			schedule_work(&adapter->reset_task);
+			goto pf_has_reset;
+		}
+	} else {
+		/* always assume the link is up if there is no
+		 * check_link function */
+		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+		link_up = true;
+	}
+	adapter->link_up = link_up;
+	adapter->link_speed = link_speed;
+
+	if (link_up) {
+		if (!netif_carrier_ok(netdev)) {
+			hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
+			       ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+				"10 Gbps" : "1 Gbps"));
+			netif_carrier_on(netdev);
+			netif_wake_queue(netdev);
+		} else {
+			/* Force detection of hung controller */
+			adapter->detect_tx_hung = true;
+		}
+	} else {
+		adapter->link_up = false;
+		adapter->link_speed = 0;
+		if (netif_carrier_ok(netdev)) {
+			hw_dbg(&adapter->hw, "NIC Link is Down\n");
+			netif_carrier_off(netdev);
+			netif_stop_queue(netdev);
+		}
+	}
+
+pf_has_reset:
+	ixgbevf_update_stats(adapter);
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = true;
+
+	/* Reset the timer */
+	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer,
+			  round_jiffies(jiffies + (2 * HZ)));
+
+	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * ixgbevf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
+			       struct ixgbevf_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	ixgbevf_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		if (adapter->tx_ring[i].desc)
+			ixgbevf_free_tx_resources(adapter,
+						  &adapter->tx_ring[i]);
+
+}
+
+/**
+ * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
+			       struct ixgbevf_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+	tx_ring->tx_buffer_info = vmalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+					     &tx_ring->dma);
+	if (!tx_ring->desc)
+		goto err;
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->work_limit = tx_ring->count;
+	return 0;
+
+err:
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
+	       "descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (!err)
+			continue;
+		hw_dbg(&adapter->hw,
+		       "Allocation for Tx Queue %u failed\n", i);
+		break;
+	}
+
+	return err;
+}
+
+/**
+ * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+			       struct ixgbevf_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+	rx_ring->rx_buffer_info = vmalloc(size);
+	if (!rx_ring->rx_buffer_info) {
+		hw_dbg(&adapter->hw,
+		       "Unable to vmalloc buffer memory for "
+		       "the receive descriptor ring\n");
+		goto alloc_failed;
+	}
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+					     &rx_ring->dma);
+
+	if (!rx_ring->desc) {
+		hw_dbg(&adapter->hw,
+		       "Unable to allocate memory for "
+		       "the receive descriptor ring\n");
+		vfree(rx_ring->rx_buffer_info);
+		rx_ring->rx_buffer_info = NULL;
+		goto alloc_failed;
+	}
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	return 0;
+alloc_failed:
+	return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (!err)
+			continue;
+		hw_dbg(&adapter->hw,
+		       "Allocation for Rx Queue %u failed\n", i);
+		break;
+	}
+	return err;
+}
+
+/**
+ * ixgbevf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
+			       struct ixgbevf_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	ixgbevf_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		if (adapter->rx_ring[i].desc)
+			ixgbevf_free_rx_resources(adapter,
+						  &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbevf_open(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+		return -EBUSY;
+
+	if (hw->adapter_stopped) {
+		ixgbevf_reset(adapter);
+		/* if the adapter is still stopped then the PF isn't up
+		 * and the VF can't start */
+		if (hw->adapter_stopped) {
+			err = IXGBE_ERR_MBX;
+			printk(KERN_ERR "Unable to start - perhaps the PF "
+			       "driver isn't up yet\n");
+			goto err_setup_reset;
+		}
+	}
+
+	/* allocate transmit descriptors */
+	err = ixgbevf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = ixgbevf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	ixgbevf_configure(adapter);
+
+	/*
+	 * Map the Tx/Rx rings to the vectors we were allotted.
+	 * if request_irq will be called in this function map_rings
+	 * must be called *before* up_complete
+	 */
+	ixgbevf_map_rings_to_vectors(adapter);
+
+	err = ixgbevf_up_complete(adapter);
+	if (err)
+		goto err_up;
+
+	/* clear any pending interrupts, may auto mask */
+	IXGBE_READ_REG(hw, IXGBE_VTEICR);
+	err = ixgbevf_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	ixgbevf_irq_enable(adapter, true, true);
+
+	return 0;
+
+err_req_irq:
+	ixgbevf_down(adapter);
+err_up:
+	/* IRQs were either never requested or were already released by
+	 * ixgbevf_request_msix_irqs() on its failure path, so there is
+	 * nothing to free here */
+err_setup_rx:
+	ixgbevf_free_all_rx_resources(adapter);
+err_setup_tx:
+	ixgbevf_free_all_tx_resources(adapter);
+	ixgbevf_reset(adapter);
+
+err_setup_reset:
+
+	return err;
+}
+
+/**
+ * ixgbevf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbevf_close(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	ixgbevf_down(adapter);
+	ixgbevf_free_irq(adapter);
+
+	ixgbevf_free_all_tx_resources(adapter);
+	ixgbevf_free_all_rx_resources(adapter);
+
+	return 0;
+}
+
+static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
+		       struct ixgbevf_ring *tx_ring,
+		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+	struct ixgbe_adv_tx_context_desc *context_desc;
+	unsigned int i;
+	int err;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+	u32 mss_l4len_idx, l4len;
+
+	if (skb_is_gso(skb)) {
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+		l4len = tcp_hdrlen(skb);
+		*hdr_len += l4len;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->tot_len = 0;
+			iph->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+			adapter->hw_tso_ctxt++;
+		} else if (skb_is_gso_v6(skb)) {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+					     &ipv6_hdr(skb)->daddr,
+					     0, IPPROTO_TCP, 0);
+			adapter->hw_tso6_ctxt++;
+		}
+
+		i = tx_ring->next_to_use;
+
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+		/* VLAN MACLEN IPLEN */
+		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+			vlan_macip_lens |=
+				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+		vlan_macip_lens |= ((skb_network_offset(skb)) <<
+				    IXGBE_ADVTXD_MACLEN_SHIFT);
+		*hdr_len += skb_network_offset(skb);
+		vlan_macip_lens |=
+			(skb_transport_header(skb) - skb_network_header(skb));
+		*hdr_len +=
+			(skb_transport_header(skb) - skb_network_header(skb));
+		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+		context_desc->seqnum_seed = 0;
+
+		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+				    IXGBE_ADVTXD_DTYP_CTXT);
+
+		if (skb->protocol == htons(ETH_P_IP))
+			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+		/* MSS L4LEN IDX */
+		mss_l4len_idx =
+			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+		/* use index 1 for TSO */
+		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+		tx_buffer_info->time_stamp = jiffies;
+		tx_buffer_info->next_to_watch = i;
+
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+		tx_ring->next_to_use = i;
+
+		return true;
+	}
+
+	return false;
+}
+
+static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
+			    struct ixgbevf_ring *tx_ring,
+			    struct sk_buff *skb, u32 tx_flags)
+{
+	struct ixgbe_adv_tx_context_desc *context_desc;
+	unsigned int i;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL ||
+	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
+		i = tx_ring->next_to_use;
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+			vlan_macip_lens |= (tx_flags &
+					    IXGBE_TX_FLAGS_VLAN_MASK);
+		vlan_macip_lens |= (skb_network_offset(skb) <<
+				    IXGBE_ADVTXD_MACLEN_SHIFT);
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			vlan_macip_lens |= (skb_transport_header(skb) -
+					    skb_network_header(skb));
+
+		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+		context_desc->seqnum_seed = 0;
+
+		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+				    IXGBE_ADVTXD_DTYP_CTXT);
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			switch (skb->protocol) {
+			case __constant_htons(ETH_P_IP):
+				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+					type_tucmd_mlhl |=
+					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
+				break;
+			case __constant_htons(ETH_P_IPV6):
+				/* XXX what about other V6 headers?? */
+				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+					type_tucmd_mlhl |=
+						IXGBE_ADVTXD_TUCMD_L4T_TCP;
+				break;
+			default:
+				if (unlikely(net_ratelimit())) {
+					printk(KERN_WARNING
+					       "partial checksum but "
+					       "proto=%x!\n",
+					       skb->protocol);
+				}
+				break;
+			}
+		}
+
+		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+		/* use index zero for tx checksum offload */
+		context_desc->mss_l4len_idx = 0;
+
+		tx_buffer_info->time_stamp = jiffies;
+		tx_buffer_info->next_to_watch = i;
+
+		adapter->hw_csum_tx_good++;
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+		tx_ring->next_to_use = i;
+
+		return true;
+	}
+
+	return false;
+}
+
+static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
+			  struct ixgbevf_ring *tx_ring,
+			  struct sk_buff *skb, u32 tx_flags,
+			  unsigned int first)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	unsigned int len;
+	unsigned int total = skb->len;
+	unsigned int offset = 0, size, count = 0, i;
+	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+	unsigned int f;
+
+	i = tx_ring->next_to_use;
+
+	len = min(skb_headlen(skb), total);
+	while (len) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+		tx_buffer_info->length = size;
+		tx_buffer_info->mapped_as_page = false;
+		tx_buffer_info->dma = pci_map_single(adapter->pdev,
+						     skb->data + offset,
+						     size, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(tx_buffer_info->dma))
+			goto dma_error;
+		tx_buffer_info->time_stamp = jiffies;
+		tx_buffer_info->next_to_watch = i;
+
+		len -= size;
+		total -= size;
+		offset += size;
+		count++;
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+	}
+
+	for (f = 0; f < nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+		len = min((unsigned int)frag->size, total);
+		offset = frag->page_offset;
+
+		while (len) {
+			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+			tx_buffer_info->length = size;
+			tx_buffer_info->dma = pci_map_page(adapter->pdev,
+							   frag->page,
+							   offset,
+							   size,
+							   PCI_DMA_TODEVICE);
+			tx_buffer_info->mapped_as_page = true;
+			if (pci_dma_mapping_error(tx_buffer_info->dma))
+				goto dma_error;
+			tx_buffer_info->time_stamp = jiffies;
+			tx_buffer_info->next_to_watch = i;
+
+			len -= size;
+			total -= size;
+			offset += size;
+			count++;
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
+		if (total == 0)
+			break;
+	}
+
+	if (i == 0)
+		i = tx_ring->count - 1;
+	else
+		i = i - 1;
+	tx_ring->tx_buffer_info[i].skb = skb;
+	tx_ring->tx_buffer_info[first].next_to_watch = i;
+
+	return count;
+
+dma_error:
+	dev_err(&pdev->dev, "TX DMA map failed\n");
+
+	/* clear timestamp and dma mappings for failed tx_buffer_info map */
+	tx_buffer_info->dma = 0;
+	tx_buffer_info->time_stamp = 0;
+	tx_buffer_info->next_to_watch = 0;
+	/*
+	 * clear timestamp and dma mappings for the remaining portion of
+	 * the packet; count and i are unsigned, so test them before
+	 * decrementing instead of comparing against zero
+	 */
+	while (count--) {
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+	}
+
+	return 0;
+}
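+
+/*
+ * Descriptor cost sketch: each mapped chunk is capped at
+ * IXGBE_MAX_DATA_PER_TXD bytes, so a buffer of len bytes consumes
+ * DIV_ROUND_UP(len, IXGBE_MAX_DATA_PER_TXD) descriptors -- with the
+ * usual 16KB cap, a 20000 byte fragment costs two descriptors.
+ */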
+
+static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
+			     struct ixgbevf_ring *tx_ring, int tx_flags,
+			     int count, u32 paylen, u8 hdr_len)
+{
+	union ixgbe_adv_tx_desc *tx_desc = NULL;
+	struct ixgbevf_tx_buffer *tx_buffer_info;
+	u32 olinfo_status = 0, cmd_type_len = 0;
+	unsigned int i;
+
+	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+
+	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+			IXGBE_ADVTXD_POPTS_SHIFT;
+
+		/* use index 1 context for tso */
+		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+				IXGBE_ADVTXD_POPTS_SHIFT;
+
+	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+			IXGBE_ADVTXD_POPTS_SHIFT;
+
+	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	i = tx_ring->next_to_use;
+	while (count--) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+		tx_desc->read.cmd_type_len =
+			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+	}
+
+	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
+				   struct ixgbevf_ring *tx_ring, int size)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in case another CPU has just
+	 * made room available. */
+	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
+				 struct ixgbevf_ring *tx_ring, int size)
+{
+	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+}
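+
+/*
+ * The Tx completion path (outside this hunk) runs the mirror image of
+ * this handshake, along the lines of (threshold name illustrative):
+ *
+ *	if (netif_queue_stopped(netdev) &&
+ *	    IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)
+ *		netif_wake_queue(netdev);
+ *
+ * The smp_mb() above pairs with that side, closing the race where a
+ * completion frees descriptors between the ring-space check and
+ * netif_stop_queue().
+ */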
+
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbevf_ring *tx_ring;
+	unsigned int first;
+	unsigned int tx_flags = 0;
+	u8 hdr_len = 0;
+	int r_idx = 0, tso;
+	int count = 0;
+
+	unsigned int f;
+
+	tx_ring = &adapter->tx_ring[r_idx];
+
+	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+		tx_flags |= vlan_tx_tag_get(skb);
+		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= IXGBE_TX_FLAGS_VLAN;
+	}
+
+	/* three things can cause us to need a context descriptor */
+	if (skb_is_gso(skb) ||
+	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
+	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
+		count++;
+
+	count += TXD_USE_COUNT(skb_headlen(skb));
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
+		adapter->tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	first = tx_ring->next_to_use;
+
+	if (skb->protocol == htons(ETH_P_IP))
+		tx_flags |= IXGBE_TX_FLAGS_IPV4;
+	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+	if (tso < 0) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (tso)
+		tx_flags |= IXGBE_TX_FLAGS_TSO;
+	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+		 (skb->ip_summed == CHECKSUM_PARTIAL))
+		tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+	count = ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first);
+	if (!count) {
+		/* DMA mapping failed and has been unwound; drop the frame */
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	ixgbevf_tx_queue(adapter, tx_ring, tx_flags, count,
+			 skb->len, hdr_len);
+
+	netdev->trans_start = jiffies;
+
+	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * ixgbevf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * ixgbevf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_set_mac(struct net_device *netdev, void *p)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+	if (hw->mac.ops.set_rar)
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+
+	return 0;
+}
+
+/**
+ * ixgbevf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	/* MTU < 68 is an error and causes problems on some kernels */
+	if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+		return -EINVAL;
+
+	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+	       netdev->mtu, new_mtu);
+	/* must set new MTU before calling down or up */
+	netdev->mtu = new_mtu;
+
+	if (netif_running(netdev))
+		ixgbevf_reinit_locked(adapter);
+
+	return 0;
+}
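+
+/*
+ * For example, the default MTU of 1500 yields a max_frame of
+ * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518 bytes, the classic
+ * maximum Ethernet frame; MAXIMUM_ETHERNET_VLAN_SIZE leaves room for
+ * four more bytes of 802.1Q tag.
+ */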
+
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		ixgbevf_down(adapter);
+		ixgbevf_free_irq(adapter);
+		ixgbevf_free_all_tx_resources(adapter);
+		ixgbevf_free_all_rx_resources(adapter);
+	}
+
+#ifdef CONFIG_PM
+	pci_save_state(pdev);
+#endif
+
+	pci_disable_device(pdev);
+}
+
+static void ixgbevf_assign_netdev_ops(struct net_device *dev)
+{
+	struct ixgbevf_adapter *adapter;
+	adapter = netdev_priv(dev);
+
+	dev->open = &ixgbevf_open;
+	dev->stop = &ixgbevf_close;
+
+	dev->hard_start_xmit = &ixgbevf_xmit_frame;
+
+	dev->get_stats = &ixgbevf_get_stats;
+	dev->set_multicast_list = &ixgbevf_set_rx_mode;
+	dev->set_mac_address = &ixgbevf_set_mac;
+	dev->change_mtu = &ixgbevf_change_mtu;
+	dev->tx_timeout = &ixgbevf_tx_timeout;
+	dev->vlan_rx_register = &ixgbevf_vlan_rx_register;
+	dev->vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid;
+	dev->vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid;
+
+	ixgbevf_set_ethtool_ops(dev);
+	dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * ixgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbevf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbevf_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct ixgbevf_adapter *adapter = NULL;
+	struct ixgbe_hw *hw = NULL;
+	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
+	static int cards_found;
+	int err, pci_using_dac;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		pci_using_dac = 1;
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			err = pci_set_consistent_dma_mask(pdev,
+							  DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev, "No usable DMA "
+					"configuration, aborting\n");
+				goto err_dma;
+			}
+		}
+		pci_using_dac = 0;
+	}
+
+	err = pci_request_regions(pdev, ixgbevf_driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+
+#ifdef HAVE_TX_MQ
+	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
+				   MAX_TX_QUEUES);
+#else
+	netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
+#endif
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	hw = &adapter->hw;
+	hw->back = adapter;
+	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+	/*
+	 * call save state here in standalone driver because it relies on
+	 * adapter struct to exist, and needs to call netdev_priv
+	 */
+	pci_save_state(pdev);
+
+	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+			      pci_resource_len(pdev, 0));
+	if (!hw->hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	ixgbevf_assign_netdev_ops(netdev);
+
+	adapter->bd_number = cards_found;
+
+	/* Setup hw api */
+	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+	hw->mac.type  = ii->mac;
+
+	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+	       sizeof(struct ixgbe_mbx_operations));
+
+	adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
+	adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+	adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+
+	/* setup the private structure */
+	err = ixgbevf_sw_init(adapter);
+
+	ixgbevf_init_last_counter_stats(adapter);
+
+#ifdef MAX_SKB_FRAGS
+	netdev->features = NETIF_F_SG |
+			   NETIF_F_IP_CSUM |
+			   NETIF_F_HW_VLAN_TX |
+			   NETIF_F_HW_VLAN_RX |
+			   NETIF_F_HW_VLAN_FILTER;
+
+	netdev->features |= NETIF_F_TSO;
+	netdev->features |= NETIF_F_TSO6;
+
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+#endif /* MAX_SKB_FRAGS */
+
+	/* The HW MAC address was set and/or determined in sw_init */
+	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		printk(KERN_ERR "invalid MAC address\n");
+		err = -EIO;
+		goto err_sw_init;
+	}
+
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = &ixgbevf_watchdog;
+	adapter->watchdog_timer.data = (unsigned long)adapter;
+
+	INIT_WORK(&adapter->reset_task,
+		  (void (*)(void *))ixgbevf_reset_task,
+		  netdev);
+	INIT_WORK(&adapter->watchdog_task,
+		  (void (*)(void *))ixgbevf_watchdog_task,
+		  adapter);
+
+	err = ixgbevf_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+
+	/* pick up the PCI bus settings for reporting later */
+	if (hw->mac.ops.get_bus_info)
+		hw->mac.ops.get_bus_info(hw);
+
+
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
+	strcpy(netdev->name, "eth%d");
+
+	err = register_netdev(netdev);
+	if (err)
+		goto err_register;
+
+	adapter->netdev_registered = true;
+
+	/* print the MAC address */
+	hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+	       netdev->dev_addr[0],
+	       netdev->dev_addr[1],
+	       netdev->dev_addr[2],
+	       netdev->dev_addr[3],
+	       netdev->dev_addr[4],
+	       netdev->dev_addr[5]);
+
+	hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+
+	hw_dbg(hw, "LRO is disabled \n");
+
+	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
+	cards_found++;
+	return 0;
+
+err_register:
+err_sw_init:
+	ixgbevf_reset_interrupt_capability(adapter);
+	iounmap(hw->hw_addr);
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * ixgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+	set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+	del_timer_sync(&adapter->watchdog_timer);
+
+	cancel_delayed_work(&adapter->watchdog_task);
+
+	flush_scheduled_work();
+
+	if (adapter->netdev_registered) {
+		unregister_netdev(netdev);
+		adapter->netdev_registered = false;
+	}
+
+	ixgbevf_reset_interrupt_capability(adapter);
+
+	iounmap(adapter->hw.hw_addr);
+	pci_release_regions(pdev);
+
+	hw_dbg(&adapter->hw, "Remove complete\n");
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	free_netdev(netdev);
+
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver ixgbevf_driver = {
+	.name     = ixgbevf_driver_name,
+	.id_table = ixgbevf_pci_tbl,
+	.probe    = ixgbevf_probe,
+	.remove   = __devexit_p(ixgbevf_remove),
+	.shutdown = ixgbevf_shutdown,
+};
+
+/**
+ * ixgbevf_init_module - Driver Registration Routine
+ *
+ * ixgbevf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbevf_init_module(void)
+{
+	int ret;
+	printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
+	       ixgbevf_driver_version);
+
+	printk(KERN_INFO "%s\n", ixgbevf_copyright);
+
+	ret = pci_register_driver(&ixgbevf_driver);
+	return ret;
+}
+
+module_init(ixgbevf_init_module);
+
+/**
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbevf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbevf_exit_module(void)
+{
+	pci_unregister_driver(&ixgbevf_driver);
+}
+
+#ifdef DEBUG
+/**
+ * ixgbevf_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+	struct ixgbevf_adapter *adapter = hw->back;
+	return adapter->netdev->name;
+}
+
+#endif
+module_exit(ixgbevf_exit_module);
+
+/* ixgbevf_main.c */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
new file mode 100644
index 0000000..b814350
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ *  ixgbevf_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *
+ *  returns 0 if it successfully received a message notification
+ **/
+static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	while (countdown && mbx->ops.check_for_msg(hw)) {
+		countdown--;
+		udelay(mbx->udelay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
+
+	return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbevf_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *
+ *  returns 0 if it successfully received a message acknowledgement
+ **/
+static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	while (countdown && mbx->ops.check_for_ack(hw)) {
+		countdown--;
+		udelay(mbx->udelay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
+
+	return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbevf_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *
+ *  returns 0 if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	ret_val = ixgbevf_poll_for_msg(hw);
+
+	/* if a message notification arrived read it, otherwise we timed out */
+	if (!ret_val)
+		ret_val = mbx->ops.read(hw, msg, size);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *
+ *  returns 0 if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val;
+
+	/* send msg */
+	ret_val = mbx->ops.write(hw, msg, size);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = ixgbevf_poll_for_ack(hw);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_read_v2p_mailbox - read v2p mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  This function is used to read the v2p mailbox without losing the read to
+ *  clear status bits.
+ **/
+static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+	u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+	v2p_mailbox |= hw->mbx.v2p_mailbox;
+	hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+	return v2p_mailbox;
+}
+
+/**
+ *  ixgbevf_check_for_bit_vf - Determine if a status bit was set
+ *  @hw: pointer to the HW structure
+ *  @mask: bitmask for bits to be tested and cleared
+ *
+ *  This function is used to check for the read to clear bits within
+ *  the V2P mailbox.
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+	u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (v2p_mailbox & mask)
+		ret_val = 0;
+
+	hw->mbx.v2p_mailbox &= ~mask;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ *  @hw: pointer to the HW structure
+ *
+ *  returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+		ret_val = 0;
+		hw->mbx.stats.reqs++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ *  @hw: pointer to the HW structure
+ *
+ *  returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+		ret_val = 0;
+		hw->mbx.stats.acks++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ *  @hw: pointer to the HW structure
+ *
+ *  returns 0 if the PF has asserted a reset bit or else IXGBE_ERR_MBX
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+					 IXGBE_VFMAILBOX_RSTI))) {
+		ret_val = 0;
+		hw->mbx.stats.rsts++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *
+ *  return 0 if we obtained the mailbox lock
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_MBX;
+
+	/* Take ownership of the buffer */
+	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+	/* reserve mailbox for vf use */
+	if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+		ret_val = 0;
+
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_write_mbx_vf - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *
+ *  returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+	s32 ret_val;
+	u16 i;
+
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+	if (ret_val)
+		goto out_no_write;
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	ixgbevf_check_for_msg_vf(hw);
+	ixgbevf_check_for_ack_vf(hw);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+
+	/* Drop VFU and interrupt the PF to tell it a message has been sent */
+	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *
+ *  returns 0 if it successfully read the message from the buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+	s32 ret_val = 0;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+	/* Acknowledge receipt and release mailbox, then we're done */
+	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	/* start mailbox as timed out and let the reset_hw call set the timeout
+	 * value to begin communications */
+	mbx->timeout = 0;
+	mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
+
+	mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+
+	return 0;
+}
+
+struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+	.init_params   = ixgbevf_init_mbx_params_vf,
+	.read          = ixgbevf_read_mbx_vf,
+	.write         = ixgbevf_write_mbx_vf,
+	.read_posted   = ixgbevf_read_posted_mbx,
+	.write_posted  = ixgbevf_write_posted_mbx,
+	.check_for_msg = ixgbevf_check_for_msg_vf,
+	.check_for_ack = ixgbevf_check_for_ack_vf,
+	.check_for_rst = ixgbevf_check_for_rst_vf,
+};
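+
+/*
+ * Usage sketch: with these ops in place a VF request/response exchange
+ * looks like (IXGBE_VF_SET_LPE payload shown for illustration):
+ *
+ *	msgbuf[0] = IXGBE_VF_SET_LPE;
+ *	msgbuf[1] = max_frame;
+ *	err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
+ *	if (!err)
+ *		err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ *	if (!err && (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ *		...the PF refused the request...
+ */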
+
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
new file mode 100644
index 0000000..1b0e0bf
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "vf.h"
+
+#define IXGBE_VFMAILBOX_SIZE        16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX               -100
+
+#define IXGBE_VFMAILBOX             0x002FC
+#define IXGBE_VFMBMEM               0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ      0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK      0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU      0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU      0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS    0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK    0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI     0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD     0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX(x)          (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn)          (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS   0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is IXGBE_PF_*.
+ * Message results are the original value or'd with one of the status
+ * bits in the top nibble (ACK/NACK/CTS, defined below).
+ */
+#define IXGBE_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
+					       * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
+					       * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
+					       *  clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT    16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET            0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
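+
+/* Example encoding: the PF acks a VF reset request by echoing the type
+ * with the ACK bit set, so the VF reads back
+ * (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) == 0x80000001; a refusal
+ * would read (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK) instead.
+ */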
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD     3
+
+#define IXGBE_PF_CONTROL_MSG      0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY   500  /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct ixgbe_hw;
+
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
new file mode 100644
index 0000000..12f7596
--- /dev/null
+++ b/drivers/net/ixgbevf/regs.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_REGS_H_
+#define _IXGBEVF_REGS_H_
+
+#define IXGBE_VFCTRL           0x00000
+#define IXGBE_VFSTATUS         0x00008
+#define IXGBE_VFLINKS          0x00010
+#define IXGBE_VFRTIMER         0x00048
+#define IXGBE_VFRXMEMWRAP      0x03190
+#define IXGBE_VTEICR           0x00100
+#define IXGBE_VTEICS           0x00104
+#define IXGBE_VTEIMS           0x00108
+#define IXGBE_VTEIMC           0x0010C
+#define IXGBE_VTEIAC           0x00110
+#define IXGBE_VTEIAM           0x00114
+#define IXGBE_VTEITR(x)        (0x00820 + (4 * x))
+#define IXGBE_VTIVAR(x)        (0x00120 + (4 * x))
+#define IXGBE_VTIVAR_MISC      0x00140
+#define IXGBE_VTRSCINT(x)      (0x00180 + (4 * x))
+#define IXGBE_VFRDBAL(x)       (0x01000 + (0x40 * x))
+#define IXGBE_VFRDBAH(x)       (0x01004 + (0x40 * x))
+#define IXGBE_VFRDLEN(x)       (0x01008 + (0x40 * x))
+#define IXGBE_VFRDH(x)         (0x01010 + (0x40 * x))
+#define IXGBE_VFRDT(x)         (0x01018 + (0x40 * x))
+#define IXGBE_VFRXDCTL(x)      (0x01028 + (0x40 * x))
+#define IXGBE_VFSRRCTL(x)      (0x01014 + (0x40 * x))
+#define IXGBE_VFRSCCTL(x)      (0x0102C + (0x40 * x))
+#define IXGBE_VFPSRTYPE        0x00300
+#define IXGBE_VFTDBAL(x)       (0x02000 + (0x40 * x))
+#define IXGBE_VFTDBAH(x)       (0x02004 + (0x40 * x))
+#define IXGBE_VFTDLEN(x)       (0x02008 + (0x40 * x))
+#define IXGBE_VFTDH(x)         (0x02010 + (0x40 * x))
+#define IXGBE_VFTDT(x)         (0x02018 + (0x40 * x))
+#define IXGBE_VFTXDCTL(x)      (0x02028 + (0x40 * x))
+#define IXGBE_VFTDWBAL(x)      (0x02038 + (0x40 * x))
+#define IXGBE_VFTDWBAH(x)      (0x0203C + (0x40 * x))
+#define IXGBE_VFDCA_RXCTRL(x)  (0x0100C + (0x40 * x))
+#define IXGBE_VFDCA_TXCTRL(x)  (0x0200c + (0x40 * x))
+#define IXGBE_VFGPRC           0x0101C
+#define IXGBE_VFGPTC           0x0201C
+#define IXGBE_VFGORC_LSB       0x01020
+#define IXGBE_VFGORC_MSB       0x01024
+#define IXGBE_VFGOTC_LSB       0x02020
+#define IXGBE_VFGOTC_MSB       0x02024
+#define IXGBE_VFMPRC           0x01034
+
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+    writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+    readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
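+
+/*
+ * Usage sketch: enabling Tx queue 0 is a read-modify-write of the queue
+ * control register followed by a posting flush, e.g.
+ *
+ *	txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(0));
+ *	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(0), txdctl | IXGBE_TXDCTL_ENABLE);
+ *	IXGBE_WRITE_FLUSH(hw);
+ */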
+
+#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
new file mode 100644
index 0000000..4b5dec0
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.c
@@ -0,0 +1,387 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "vf.h"
+
+/**
+ *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware by filling the bus info structure and media type, clears
+ *  all on chip counters, initializes receive address registers, multicast
+ *  table, VLAN filter table, calls routine to set up link and flow control
+ *  settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
+{
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = false;
+
+	return 0;
+}
+
+/**
+ *  ixgbevf_init_hw_vf - virtual function hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by resetting the hardware and then starting
+ *  the hardware
+ **/
+static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
+{
+	s32 status = hw->mac.ops.start_hw(hw);
+
+	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+	return status;
+}
+
+/**
+ *  ixgbevf_reset_hw_vf - Performs hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, and
+ *  masks and clears all interrupts.
+ **/
+static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+	u8 *addr = (u8 *)(&msgbuf[1]);
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	hw->mac.ops.stop_adapter(hw);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* we cannot reset while the RSTI / RSTD bits are asserted */
+	while (!mbx->ops.check_for_rst(hw) && timeout) {
+		timeout--;
+		udelay(5);
+	}
+
+	if (!timeout)
+		return IXGBE_ERR_RESET_FAILED;
+
+	/* mailbox timeout can now become active */
+	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+	msgbuf[0] = IXGBE_VF_RESET;
+	mbx->ops.write_posted(hw, msgbuf, 1);
+
+	msleep(10);
+
+	/* set our "perm_addr" based on info provided by PF */
+	/* also set up the mc_filter_type which is piggy backed
+	 * on the mac address in word 3 */
+	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+	if (ret_val)
+		return ret_val;
+
+	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+		return IXGBE_ERR_INVALID_MAC_ADDR;
+
+	memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+	return 0;
+}
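+
+/*
+ * The reset handshake above is the canonical mailbox exchange: the VF
+ * posts a one word IXGBE_VF_RESET, and the PF answers with
+ * IXGBE_VF_PERMADDR_MSG_LEN words -- the echoed type or'd with
+ * IXGBE_VT_MSGTYPE_ACK, the 6 byte permanent MAC starting in word 1,
+ * and mc_filter_type in word IXGBE_VF_MC_TYPE_WORD.
+ */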
+
+/**
+ *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
+{
+	u32 number_of_queues;
+	u32 reg_val;
+	u16 i;
+
+	/*
+	 * Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = true;
+
+	/* Disable the receive unit by stopping each queue */
+	number_of_queues = hw->mac.max_rx_queues;
+	for (i = 0; i < number_of_queues; i++) {
+		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+		if (reg_val & IXGBE_RXDCTL_ENABLE) {
+			reg_val &= ~IXGBE_RXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+		}
+	}
+
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Clear the interrupt mask to stop interrupts from being generated */
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+	/* Clear any pending interrupts */
+	IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+	/* Disable the transmit unit.  Each queue must be disabled. */
+	number_of_queues = hw->mac.max_tx_queues;
+	for (i = 0; i < number_of_queues; i++) {
+		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+		if (reg_val & IXGBE_TXDCTL_ENABLE) {
+			reg_val &= ~IXGBE_TXDCTL_ENABLE;
+			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts the 12 bits from a multicast address that determine which
+ *  bit-vector to set in the multicast table. The hardware uses 12 bits of
+ *  incoming rx multicast addresses to determine the bit-vector to check in
+ *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ *  by the MO field of the MCSTCTRL. The MO field is set during initialization
+ *  to mc_filter_type.
+ **/
+static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0:   /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+		break;
+	case 1:   /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+		break;
+	case 2:   /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+		break;
+	case 3:   /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+		break;
+	default:  /* Invalid mc_filter_type */
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
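+
+/*
+ * Worked example: for the IPv4 all-hosts group 01:00:5e:00:00:01 with
+ * mc_filter_type 0, mc_addr[4] is 0x00 and mc_addr[5] is 0x01, so
+ * vector = (0x00 >> 4) | (0x01 << 4) = 0x010, i.e. bit 16 of the MTA.
+ */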
+
+/**
+ *  ixgbevf_get_mac_addr_vf - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *  @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+	memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+	return 0;
+}
+
+/**
+ *  ixgbevf_set_rar_vf - set device MAC address
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: Unused in this implementation
+ **/
+static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+			      u32 vmdq)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+	memcpy(msg_addr, addr, 6);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	/* if nacked the address was rejected, use "perm_addr" */
+	if (!ret_val &&
+	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @next: caller supplied function to return next address in list
+ *
+ *  Updates the Multicast Table Array.
+ **/
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+					u32 mc_addr_count,
+					ixgbe_mc_addr_itr next)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+	u16 *vector_list = (u16 *)&msgbuf[1];
+	u32 vector;
+	u32 cnt, i;
+	u32 vmdq;
+
+	/* Each entry in the list uses one 16 bit word.  The 16 word HW
+	 * msg buffer loses one 32 bit word to the msg type, leaving 30
+	 * 16 bit slots.  That's 30 hash values if we pack 'em right.  If
+	 * there are more than 30 MC addresses to add then punt the
+	 * extras for now and then add code to handle more than 30 later.
+	 * It would be unusual for a server to request that many multi-cast
+	 * addresses except for in large enterprise network environments.
+	 */
+
+	cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+	for (i = 0; i < cnt; i++) {
+		vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+		vector_list[i] = vector;
+	}
+
+	mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+
+	return 0;
+}
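+
+/*
+ * Resulting message layout (on a little endian host): msgbuf[0] holds
+ * IXGBE_VF_SET_MULTICAST | (cnt << IXGBE_VT_MSGINFO_SHIFT), and each
+ * following 32 bit word carries two 12 bit hash vectors, e.g.
+ * msgbuf[1] == (vector_list[1] << 16) | vector_list[0].
+ */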
+
+/**
+ *  ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ *  @hw: pointer to the HW structure
+ *  @vlan: 12 bit VLAN ID
+ *  @vind: unused by VF drivers
+ *  @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+			       bool vlan_on)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+
+	msgbuf[0] = IXGBE_VF_SET_VLAN;
+	msgbuf[1] = vlan;
+	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+	return mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
+ *  @hw: pointer to hardware structure
+ *  @speed: Unused in this implementation
+ *  @autoneg: Unused in this implementation
+ *  @autoneg_wait_to_complete: Unused in this implementation
+ *
+ *  Do nothing and return success.  VF drivers are not allowed to change
+ *  global settings.  Maintained for driver compatibility.
+ **/
+static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
+				     ixgbe_link_speed speed, bool autoneg,
+				     bool autoneg_wait_to_complete)
+{
+	return 0;
+}
+
+/**
+ *  ixgbevf_check_mac_link_vf - Get link/speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true if link is up, false otherwise
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+				     ixgbe_link_speed *speed,
+				     bool *link_up,
+				     bool autoneg_wait_to_complete)
+{
+	u32 links_reg;
+
+	if (!(hw->mbx.ops.check_for_rst(hw))) {
+		*link_up = false;
+		*speed = 0;
+		return -1;
+	}
+
+	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+	if (links_reg & IXGBE_LINKS_UP)
+		*link_up = true;
+	else
+		*link_up = false;
+
+	if (links_reg & IXGBE_LINKS_SPEED)
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+	else
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+	return 0;
+}
+
+struct ixgbe_mac_operations ixgbevf_mac_ops = {
+	.init_hw             = ixgbevf_init_hw_vf,
+	.reset_hw            = ixgbevf_reset_hw_vf,
+	.start_hw            = ixgbevf_start_hw_vf,
+	.get_mac_addr        = ixgbevf_get_mac_addr_vf,
+	.stop_adapter        = ixgbevf_stop_hw_vf,
+	.setup_link          = ixgbevf_setup_mac_link_vf,
+	.check_link          = ixgbevf_check_mac_link_vf,
+	.set_rar             = ixgbevf_set_rar_vf,
+	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+	.set_vfta            = ixgbevf_set_vfta_vf,
+};
+
+struct ixgbevf_info ixgbevf_vf_info = {
+	.mac = ixgbe_mac_82599_vf,
+	.mac_ops = &ixgbevf_mac_ops,
+};
+
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
new file mode 100644
index 0000000..799600e
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+  Intel 82599 Virtual Function driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+				  u32 *vmdq);
+struct ixgbe_mac_operations {
+	s32 (*init_hw)(struct ixgbe_hw *);
+	s32 (*reset_hw)(struct ixgbe_hw *);
+	s32 (*start_hw)(struct ixgbe_hw *);
+	s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+	enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+	u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+	s32 (*stop_adapter)(struct ixgbe_hw *);
+	s32 (*get_bus_info)(struct ixgbe_hw *);
+
+	/* Link */
+	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+				     bool *);
+
+	/* RAR, Multicast, VLAN */
+	s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+	s32 (*init_rx_addrs)(struct ixgbe_hw *);
+	s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+				   ixgbe_mc_addr_itr);
+	s32 (*enable_mc)(struct ixgbe_hw *);
+	s32 (*disable_mc)(struct ixgbe_hw *);
+	s32 (*clear_vfta)(struct ixgbe_hw *);
+	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+};
+
+enum ixgbe_mac_type {
+	ixgbe_mac_unknown = 0,
+	ixgbe_mac_82599_vf,
+	ixgbe_num_macs
+};
+
+struct ixgbe_mac_info {
+	struct ixgbe_mac_operations ops;
+	u8 addr[6];
+	u8 perm_addr[6];
+
+	enum ixgbe_mac_type type;
+
+	s32  mc_filter_type;
+
+	bool get_link_status;
+	u32  max_tx_queues;
+	u32  max_rx_queues;
+	u32  max_msix_vectors;
+};
+
+struct ixgbe_mbx_operations {
+	s32 (*init_params)(struct ixgbe_hw *hw);
+	s32 (*read)(struct ixgbe_hw *, u32 *, u16);
+	s32 (*write)(struct ixgbe_hw *, u32 *, u16);
+	s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
+	s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
+	s32 (*check_for_msg)(struct ixgbe_hw *);
+	s32 (*check_for_ack)(struct ixgbe_hw *);
+	s32 (*check_for_rst)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+	struct ixgbe_mbx_operations ops;
+	struct ixgbe_mbx_stats stats;
+	u32 timeout;
+	u32 udelay;
+	u32 v2p_mailbox;
+	u16 size;
+};
+
+struct ixgbe_hw {
+	void *back;
+
+	u8 __iomem *hw_addr;
+	u8 *flash_address;
+	unsigned long io_base;
+
+	struct ixgbe_mac_info mac;
+	struct ixgbe_mbx_info mbx;
+
+	u16 device_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 vendor_id;
+
+	u8  revision_id;
+	bool adapter_stopped;
+};
+
+struct ixgbevf_hw_stats {
+	u64 base_vfgprc;
+	u64 base_vfgptc;
+	u64 base_vfgorc;
+	u64 base_vfgotc;
+	u64 base_vfmprc;
+
+	u64 last_vfgprc;
+	u64 last_vfgptc;
+	u64 last_vfgorc;
+	u64 last_vfgotc;
+	u64 last_vfmprc;
+
+	u64 vfgprc;
+	u64 vfgptc;
+	u64 vfgorc;
+	u64 vfgotc;
+	u64 vfmprc;
+};
+
+struct ixgbevf_info {
+	enum ixgbe_mac_type		mac;
+	struct ixgbe_mac_operations	*mac_ops;
+};
+
+#endif /* __IXGBE_VF_H__ */
+
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cf4f719..e378aef 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1477,6 +1477,21 @@ static int __pcie_flr(struct pci_dev *dev, int probe)
 	return 0;
 }
 
+static int __pci_dev_specific_reset(struct pci_dev *dev, int probe)
+{
+	struct pci_dev_reset_methods *i;
+
+	for (i = pci_dev_reset_methods; i->reset; i++) {
+		if ((i->vendor == dev->vendor ||
+		     i->vendor == (u16)PCI_ANY_ID) &&
+		    (i->device == dev->device ||
+		     i->device == (u16)PCI_ANY_ID))
+			return i->reset(dev, probe);
+	}
+
+	return -ENOTTY;
+}
+
 static int __pci_af_flr(struct pci_dev *dev, int probe)
 {
 	int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
@@ -1518,6 +1533,10 @@ static int __pci_reset_function(struct pci_dev *pdev, int probe)
 {
 	int res;
 
+	res = __pci_dev_specific_reset(pdev, probe);
+	if (res != -ENOTTY)
+		return res;
+
 	res = __pcie_flr(pdev, probe);
 	if (res != -ENOTTY)
 		return res;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index dbf7048..7619a2c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -211,3 +211,11 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
 struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
 
 extern void pci_enable_acs(struct pci_dev *dev);
+
+struct pci_dev_reset_methods {
+	u16 vendor;
+	u16 device;
+	int (*reset)(struct pci_dev *dev, int probe);
+};
+
+extern struct pci_dev_reset_methods pci_dev_reset_methods[];
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 1364cca..cb719ad 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1703,6 +1703,36 @@ extern struct pci_fixup __end_pci_fixups_final[];
 extern struct pci_fixup __start_pci_fixups_enable[];
 extern struct pci_fixup __end_pci_fixups_enable[];
 
+/*
+ * The following are device-specific reset methods, which can be used to
+ * reset a single function if other methods (e.g. FLR, PM D0->D3) are
+ * not available.
+ */
+static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
+{
+	int pos;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!pos)
+		return -ENOTTY;
+
+	if (probe)
+		return 0;
+
+	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
+				PCI_EXP_DEVCTL_BCR_FLR);
+	msleep(100);
+
+	return 0;
+}
+
+#define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
+
+struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
+		 reset_intel_82599_sfp_virtfn },
+	{ 0 }
+};
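+
+/*
+ * Further device-specific resets hook in by adding rows above the
+ * terminating { 0 }; __pci_dev_specific_reset honors PCI_ANY_ID
+ * wildcards, so a hypothetical catch-all entry would look like
+ * { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, some_intel_reset }.
+ */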
 
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
 {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 8f8f35a..547e568 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1092,6 +1092,11 @@ static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
 	return skb->h.raw;
 }
 
+static inline int skb_is_gso_v6(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+
 static inline void skb_reset_transport_header(struct sk_buff *skb)
 {
 	skb->h.raw = skb->data;