kernel-2.6.18-238.el5.src.rpm

From: Andy Gospodarek <gospo@redhat.com>
Date: Tue, 4 Mar 2008 15:31:14 -0500
Subject: [net] igb: more 5.2 fixes and backports
Message-id: 20080304203114.GH564@gospo.usersys.redhat.com
O-Subject: Re: [RHEL5.2 PATCH] igb: update to latest upstream and fix backport issues
Bugzilla: 252004

On Thu, Feb 14, 2008 at 11:54:48AM -0500, Andy Gospodarek wrote:
>
> I know this is a *large* update for something so late in the game, but
> the goal was to ship an igb driver that was as close to upstream as
> possible for 5.2 and this is it.  We knew a late update would happen, so
> here it is.
>
> Not only does this fix whitespace and naming conventions, but it fixes
> several important issues that caused panics with the existing driver as
> well as a fix that I've posted upstream:
>
> http://marc.info/?l=linux-netdev&m=120295921815357&w=2
>
> to make legacy interrupts work (which would be nice if you want to run
> a Xen kernel on a box equipped with one of these).
>
> It has been tested by me and also by Intel and Red Hat QA folks.
>
> This will further resolve the issues complained about in BZ 252004
> (which was used originally for the igb update).
>

This is a small update to my recent igb post that adds two upstream
fixes necessary for these systems to function properly.  One resolves an
ARP issue and the other fixes a fiber PHY problem.  Both of these are in
Jeff's netdev-2.6 tree and I expect they will be included in 2.6.25.

As you can see, this patch is only slightly different from the previous
one, so I'm replacing that post with this version.
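
For anyone reviewing the register-access conversion below: the
rd32/wr32/wrfl/array_wr32 helpers that the patch switches to are,
roughly, thin macros over readl()/writel() that pick up the local 'hw'
pointer implicitly.  This is only a sketch of the upstream igb
convention (see e1000_regs.h in the upstream tree for the authoritative
definitions), not code added by this patch:

    /* MMIO register accessors; assume a struct e1000_hw *hw in scope
     * with hw_addr pointing at the mapped BAR.  readl()/writel() come
     * from <linux/io.h>. */
    #define wr32(reg, value) (writel(value, hw->hw_addr + reg))
    #define rd32(reg)        (readl(hw->hw_addr + reg))

    /* Flush posted writes by reading back a harmless register. */
    #define wrfl() ((void)rd32(E1000_STATUS))

    /* Array variants index into a block of 32-bit registers. */
    #define array_wr32(reg, offset, value) \
            (writel(value, hw->hw_addr + reg + ((offset) << 2)))
    #define array_rd32(reg, offset) \
            (readl(hw->hw_addr + reg + ((offset) << 2)))

So wr32(E1000_CTRL, ctrl) is equivalent to the old
E1000_WRITE_REG(hw, E1000_CTRL, ctrl); the macros just assume a
'struct e1000_hw *hw' is in scope in the calling function.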

diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index c2594b2..ceb1bac 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -35,39 +35,39 @@
 #include "e1000_mac.h"
 #include "e1000_82575.h"
 
-static s32  e1000_get_invariants_82575(struct e1000_hw *);
-static s32  e1000_acquire_phy_82575(struct e1000_hw *);
-static void e1000_release_phy_82575(struct e1000_hw *);
-static s32  e1000_acquire_nvm_82575(struct e1000_hw *);
-static void e1000_release_nvm_82575(struct e1000_hw *);
-static s32  e1000_check_for_link_82575(struct e1000_hw *);
-static s32  e1000_get_cfg_done_82575(struct e1000_hw *);
-static s32  e1000_init_hw_82575(struct e1000_hw *);
-static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *);
-static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
-static void e1000_rar_set_82575(struct e1000_hw *, u8 *, u32);
-static s32  e1000_reset_hw_82575(struct e1000_hw *);
-static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *, bool);
-static s32  e1000_setup_copper_link_82575(struct e1000_hw *);
-static s32  e1000_setup_fiber_serdes_link_82575(struct e1000_hw *);
-static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
-static void e1000_clear_hw_cntrs_82575(struct e1000_hw *);
-static s32  e1000_acquire_swfw_sync_82575(struct e1000_hw *, u16);
-static s32  e1000_configure_pcs_link_82575(struct e1000_hw *);
-static s32  e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
+static s32  igb_get_invariants_82575(struct e1000_hw *);
+static s32  igb_acquire_phy_82575(struct e1000_hw *);
+static void igb_release_phy_82575(struct e1000_hw *);
+static s32  igb_acquire_nvm_82575(struct e1000_hw *);
+static void igb_release_nvm_82575(struct e1000_hw *);
+static s32  igb_check_for_link_82575(struct e1000_hw *);
+static s32  igb_get_cfg_done_82575(struct e1000_hw *);
+static s32  igb_init_hw_82575(struct e1000_hw *);
+static s32  igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
+static s32  igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
+static void igb_rar_set_82575(struct e1000_hw *, u8 *, u32);
+static s32  igb_reset_hw_82575(struct e1000_hw *);
+static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
+static s32  igb_setup_copper_link_82575(struct e1000_hw *);
+static s32  igb_setup_fiber_serdes_link_82575(struct e1000_hw *);
+static s32  igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
+static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
+static s32  igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
+static s32  igb_configure_pcs_link_82575(struct e1000_hw *);
+static s32  igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
 						 u16 *);
-static s32  e1000_get_phy_id_82575(struct e1000_hw *);
-static void e1000_release_swfw_sync_82575(struct e1000_hw *, u16);
-static bool e1000_sgmii_active_82575(struct e1000_hw *);
-static s32  e1000_reset_init_script_82575(struct e1000_hw *);
-static s32  e1000_read_mac_addr_82575(struct e1000_hw *);
+static s32  igb_get_phy_id_82575(struct e1000_hw *);
+static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
+static bool igb_sgmii_active_82575(struct e1000_hw *);
+static s32  igb_reset_init_script_82575(struct e1000_hw *);
+static s32  igb_read_mac_addr_82575(struct e1000_hw *);
 
 
 struct e1000_dev_spec_82575 {
 	bool sgmii_active;
 };
 
-static s32 e1000_get_invariants_82575(struct e1000_hw *hw)
+static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	struct e1000_nvm_info *nvm = &hw->nvm;
@@ -106,43 +106,43 @@ static s32 e1000_get_invariants_82575(struct e1000_hw *hw)
 	 * based on the EEPROM. We cannot rely upon device ID. There
 	 * is no distinguishable difference between fiber and internal
 	 * SerDes mode on the 82575. There can be an external PHY attached
-	 * on the SGMII interface. For this, we'll set sgmii_active to 1.
+	 * on the SGMII interface. For this, we'll set sgmii_active to true.
 	 */
 	phy->media_type = e1000_media_type_copper;
-	dev_spec->sgmii_active = 0;
+	dev_spec->sgmii_active = false;
 
-	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext = rd32(E1000_CTRL_EXT);
 	if ((ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) ==
 	    E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES) {
 		hw->phy.media_type = e1000_media_type_internal_serdes;
 		ctrl_ext |= E1000_CTRL_I2C_ENA;
 	} else if (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII) {
-		dev_spec->sgmii_active = 1;
+		dev_spec->sgmii_active = true;
 		ctrl_ext |= E1000_CTRL_I2C_ENA;
 	} else {
 		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
 	}
-	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	wr32(E1000_CTRL_EXT, ctrl_ext);
 
 	/* Set mta register count */
 	mac->mta_reg_count = 128;
 	/* Set rar entry count */
 	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
 	/* Set if part includes ASF firmware */
-	mac->asf_firmware_present = 1;
+	mac->asf_firmware_present = true;
 	/* Set if manageability features are enabled. */
 	mac->arc_subsystem_valid =
-		(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
-			? 1 : 0;
+		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+			? true : false;
 
 	/* physical interface link setup */
 	mac->ops.setup_physical_interface =
 		(hw->phy.media_type == e1000_media_type_copper)
-			? e1000_setup_copper_link_82575
-			: e1000_setup_fiber_serdes_link_82575;
+			? igb_setup_copper_link_82575
+			: igb_setup_fiber_serdes_link_82575;
 
 	/* NVM initialization */
-	eecd = E1000_READ_REG(hw, E1000_EECD);
+	eecd = rd32(E1000_EECD);
 
 	nvm->opcode_bits        = 8;
 	nvm->delay_usec         = 1;
@@ -183,18 +183,18 @@ static s32 e1000_get_invariants_82575(struct e1000_hw *hw)
 	phy->reset_delay_us      = 100;
 
 	/* PHY function pointers */
-	if (e1000_sgmii_active_82575(hw)) {
-		phy->ops.reset_phy          = e1000_phy_hw_reset_sgmii_82575;
-		phy->ops.read_phy_reg       = e1000_read_phy_reg_sgmii_82575;
-		phy->ops.write_phy_reg      = e1000_write_phy_reg_sgmii_82575;
+	if (igb_sgmii_active_82575(hw)) {
+		phy->ops.reset_phy          = igb_phy_hw_reset_sgmii_82575;
+		phy->ops.read_phy_reg       = igb_read_phy_reg_sgmii_82575;
+		phy->ops.write_phy_reg      = igb_write_phy_reg_sgmii_82575;
 	} else {
-		phy->ops.reset_phy          = e1000_phy_hw_reset;
-		phy->ops.read_phy_reg       = e1000_read_phy_reg_igp;
-		phy->ops.write_phy_reg      = e1000_write_phy_reg_igp;
+		phy->ops.reset_phy          = igb_phy_hw_reset;
+		phy->ops.read_phy_reg       = igb_read_phy_reg_igp;
+		phy->ops.write_phy_reg      = igb_write_phy_reg_igp;
 	}
 
 	/* Set phy->phy_addr and phy->id. */
-	ret_val = e1000_get_phy_id_82575(hw);
+	ret_val = igb_get_phy_id_82575(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -202,17 +202,17 @@ static s32 e1000_get_invariants_82575(struct e1000_hw *hw)
 	switch (phy->id) {
 	case M88E1111_I_PHY_ID:
 		phy->type                   = e1000_phy_m88;
-		phy->ops.get_phy_info       = e1000_get_phy_info_m88;
-		phy->ops.get_cable_length   = e1000_get_cable_length_m88;
-		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+		phy->ops.get_phy_info       = igb_get_phy_info_m88;
+		phy->ops.get_cable_length   = igb_get_cable_length_m88;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
 		break;
 	case IGP03E1000_E_PHY_ID:
 		phy->type                   = e1000_phy_igp_3;
-		phy->ops.get_phy_info       = e1000_get_phy_info_igp;
-		phy->ops.get_cable_length   = e1000_get_cable_length_igp_2;
-		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
-		phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82575;
-		phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state;
+		phy->ops.get_phy_info       = igb_get_phy_info_igp;
+		phy->ops.get_cable_length   = igb_get_cable_length_igp_2;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+		phy->ops.set_d0_lplu_state  = igb_set_d0_lplu_state_82575;
+		phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state;
 		break;
 	default:
 		return -E1000_ERR_PHY;
@@ -228,13 +228,13 @@ static s32 e1000_get_invariants_82575(struct e1000_hw *hw)
  *  Acquire access rights to the correct PHY.  This is a
  *  function pointer entry point called by the api module.
  **/
-static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
+static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
 {
 	u16 mask;
 
 	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
 
-	return e1000_acquire_swfw_sync_82575(hw, mask);
+	return igb_acquire_swfw_sync_82575(hw, mask);
 }
 
 /**
@@ -244,12 +244,12 @@ static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
  *  A wrapper to release access rights to the correct PHY.  This is a
  *  function pointer entry point called by the api module.
  **/
-static void e1000_release_phy_82575(struct e1000_hw *hw)
+static void igb_release_phy_82575(struct e1000_hw *hw)
 {
 	u16 mask;
 
 	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
-	e1000_release_swfw_sync_82575(hw, mask);
+	igb_release_swfw_sync_82575(hw, mask);
 }
 
 /**
@@ -261,7 +261,7 @@ static void e1000_release_phy_82575(struct e1000_hw *hw)
  *  Reads the PHY register at offset using the serial gigabit media independent
  *  interface and stores the retrieved information in data.
  **/
-static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 					  u16 *data)
 {
 	struct e1000_phy_info *phy = &hw->phy;
@@ -281,12 +281,12 @@ static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
 		  (E1000_I2CCMD_OPCODE_READ));
 
-	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+	wr32(E1000_I2CCMD, i2ccmd);
 
 	/* Poll the ready bit to see if the I2C read completed */
 	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
 		udelay(50);
-		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+		i2ccmd = rd32(E1000_I2CCMD);
 		if (i2ccmd & E1000_I2CCMD_READY)
 			break;
 	}
@@ -314,7 +314,7 @@ static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
  *  Writes the data to PHY register at the offset using the serial gigabit
  *  media independent interface.
  **/
-static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 					   u16 data)
 {
 	struct e1000_phy_info *phy = &hw->phy;
@@ -339,12 +339,12 @@ static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
 		  E1000_I2CCMD_OPCODE_WRITE |
 		  phy_data_swapped);
 
-	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+	wr32(E1000_I2CCMD, i2ccmd);
 
 	/* Poll the ready bit to see if the I2C read completed */
 	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
 		udelay(50);
-		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+		i2ccmd = rd32(E1000_I2CCMD);
 		if (i2ccmd & E1000_I2CCMD_READY)
 			break;
 	}
@@ -367,7 +367,7 @@ static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
  *  Retreives the PHY address and ID for both PHY's which do and do not use
  *  sgmi interface.
  **/
-static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
+static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32  ret_val = 0;
@@ -380,9 +380,9 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
 	 * work.  The result of this function should mean phy->phy_addr
 	 * and phy->id are set correctly.
 	 */
-	if (!(e1000_sgmii_active_82575(hw))) {
+	if (!(igb_sgmii_active_82575(hw))) {
 		phy->addr = 1;
-		ret_val = e1000_get_phy_id(hw);
+		ret_val = igb_get_phy_id(hw);
 		goto out;
 	}
 
@@ -391,7 +391,7 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
 	 * Therefore, we need to test 1-7
 	 */
 	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
-		ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
 		if (ret_val == 0) {
 			hw_dbg(hw, "Vendor ID 0x%08X read at address %u\n",
 				  phy_id,
@@ -415,7 +415,7 @@ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
 		goto out;
 	}
 
-	ret_val = e1000_get_phy_id(hw);
+	ret_val = igb_get_phy_id(hw);
 
 out:
 	return ret_val;
@@ -427,12 +427,12 @@ out:
  *
  *  Resets the PHY using the serial gigabit media independent interface.
  **/
-static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
 {
 	s32 ret_val;
 
 	/*
-	 * This isn't a 1 "hard" reset, but is the only reset
+	 * This isn't a true "hard" reset, but is the only reset
 	 * available to us at this time.
 	*/
 
@@ -446,7 +446,7 @@ static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	ret_val = e1000_phy_sw_reset(hw);
+	ret_val = igb_phy_sw_reset(hw);
 
 out:
 	return ret_val;
@@ -455,7 +455,7 @@ out:
 /**
  *  e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
  *  @hw: pointer to the HW structure
- *  @active: 1 to enable LPLU, 0 to disable
+ *  @active: true to enable LPLU, false to disable
  *
  *  Sets the LPLU D0 state according to the active flag.  When
  *  activating LPLU this function also disables smart speed
@@ -465,7 +465,7 @@ out:
  *  This is a function pointer entry point only called by
  *  PHY setup routines.
  **/
-static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -547,18 +547,18 @@ out:
  *  Return successful if access grant bit set, else clear the request for
  *  EEPROM access and return -E1000_ERR_NVM (-1).
  **/
-static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
+static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
 {
 	s32 ret_val;
 
-	ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+	ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
 	if (ret_val)
 		goto out;
 
-	ret_val = e1000_acquire_nvm(hw);
+	ret_val = igb_acquire_nvm(hw);
 
 	if (ret_val)
-		e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+		igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
 
 out:
 	return ret_val;
@@ -571,10 +571,10 @@ out:
  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
  *  then release the semaphores acquired.
  **/
-static void e1000_release_nvm_82575(struct e1000_hw *hw)
+static void igb_release_nvm_82575(struct e1000_hw *hw)
 {
-	e1000_release_nvm(hw);
-	e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+	igb_release_nvm(hw);
+	igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
 }
 
 /**
@@ -585,7 +585,7 @@ static void e1000_release_nvm_82575(struct e1000_hw *hw)
  *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
  *  will also specify which port we're acquiring the lock for.
  **/
-static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
 {
 	u32 swfw_sync;
 	u32 swmask = mask;
@@ -594,12 +594,12 @@ static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
 	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
 
 	while (i < timeout) {
-		if (e1000_get_hw_semaphore(hw)) {
+		if (igb_get_hw_semaphore(hw)) {
 			ret_val = -E1000_ERR_SWFW_SYNC;
 			goto out;
 		}
 
-		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+		swfw_sync = rd32(E1000_SW_FW_SYNC);
 		if (!(swfw_sync & (fwmask | swmask)))
 			break;
 
@@ -607,7 +607,7 @@ static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
 		 * Firmware currently using resource (fwmask)
 		 * or other software thread using resource (swmask)
 		 */
-		e1000_put_hw_semaphore(hw);
+		igb_put_hw_semaphore(hw);
 		mdelay(5);
 		i++;
 	}
@@ -619,9 +619,9 @@ static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
 	}
 
 	swfw_sync |= swmask;
-	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
 
-	e1000_put_hw_semaphore(hw);
+	igb_put_hw_semaphore(hw);
 
 out:
 	return ret_val;
@@ -635,18 +635,18 @@ out:
  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
  *  will also specify which port we're releasing the lock for.
  **/
-static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
 {
 	u32 swfw_sync;
 
-	while (e1000_get_hw_semaphore(hw) != 0);
+	while (igb_get_hw_semaphore(hw) != 0);
 	/* Empty */
 
-	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+	swfw_sync = rd32(E1000_SW_FW_SYNC);
 	swfw_sync &= ~mask;
-	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
 
-	e1000_put_hw_semaphore(hw);
+	igb_put_hw_semaphore(hw);
 }
 
 /**
@@ -659,7 +659,7 @@ static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
  *  0.  If we were to return with error, EEPROM-less silicon
  *  would not be able to be reset or change link.
  **/
-static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
+static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
 {
 	s32 timeout = PHY_CFG_TIMEOUT;
 	s32 ret_val = 0;
@@ -669,7 +669,7 @@ static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
 		mask = E1000_NVM_CFG_DONE_PORT_1;
 
 	while (timeout) {
-		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+		if (rd32(E1000_EEMNGCTL) & mask)
 			break;
 		msleep(1);
 		timeout--;
@@ -678,9 +678,9 @@ static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
 		hw_dbg(hw, "MNG configuration cycle has not completed.\n");
 
 	/* If EEPROM is not marked present, init the PHY manually */
-	if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
+	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
 	    (hw->phy.type == e1000_phy_igp_3))
-		e1000_phy_init_script_igp3(hw);
+		igb_phy_init_script_igp3(hw);
 
 	return ret_val;
 }
@@ -692,18 +692,18 @@ static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
  *  If sgmii is enabled, then use the pcs register to determine link, otherwise
  *  use the generic interface for determining link.
  **/
-static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
+static s32 igb_check_for_link_82575(struct e1000_hw *hw)
 {
 	s32 ret_val;
 	u16 speed, duplex;
 
 	/* SGMII link check is done through the PCS register. */
 	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (e1000_sgmii_active_82575(hw)))
-		ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
+	    (igb_sgmii_active_82575(hw)))
+		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
 							       &duplex);
 	else
-		ret_val = e1000_check_for_copper_link(hw);
+		ret_val = igb_check_for_copper_link(hw);
 
 	return ret_val;
 }
@@ -717,14 +717,14 @@ static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
  *  Using the physical coding sub-layer (PCS), retreive the current speed and
  *  duplex, then store the values in the pointers provided.
  **/
-static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
+static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
 						u16 *duplex)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 	u32 pcs;
 
 	/* Set up defaults for the return values of this function */
-	mac->serdes_has_link = 0;
+	mac->serdes_has_link = false;
 	*speed = 0;
 	*duplex = 0;
 
@@ -733,7 +733,7 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
 	 * the status register is not accurate. The PCS status register is
 	 * used instead.
 	 */
-	pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+	pcs = rd32(E1000_PCS_LSTAT);
 
 	/*
 	 * The link up bit determines when link is up on autoneg. The sync ok
@@ -741,7 +741,7 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
 	 * can be determined by checking for both link up and link sync ok
 	 */
 	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
-		mac->serdes_has_link = 1;
+		mac->serdes_has_link = true;
 
 		/* Detect and store PCS speed */
 		if (pcs & E1000_PCS_LSTS_SPEED_1000) {
@@ -772,10 +772,10 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
  *  Sets the receive address array register at index to the address passed
  *  in by addr.
  **/
-static void e1000_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index)
+static void igb_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	if (index < E1000_RAR_ENTRIES_82575)
-		e1000_rar_set(hw, addr, index);
+		igb_rar_set(hw, addr, index);
 
 	return;
 }
@@ -787,7 +787,7 @@ static void e1000_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index)
  *  This resets the hardware into a known state.  This is a
  *  function pointer entry point called by the api module.
  **/
-static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
+static s32 igb_reset_hw_82575(struct e1000_hw *hw)
 {
 	u32 ctrl, icr;
 	s32 ret_val;
@@ -796,25 +796,25 @@ static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
 	 * on the last TLP read/write transaction when MAC is reset.
 	 */
-	ret_val = e1000_disable_pcie_master(hw);
+	ret_val = igb_disable_pcie_master(hw);
 	if (ret_val)
 		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
 
 	hw_dbg(hw, "Masking off all interrupts\n");
-	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	wr32(E1000_IMC, 0xffffffff);
 
-	E1000_WRITE_REG(hw, E1000_RCTL, 0);
-	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
-	E1000_WRITE_FLUSH(hw);
+	wr32(E1000_RCTL, 0);
+	wr32(E1000_TCTL, E1000_TCTL_PSP);
+	wrfl();
 
 	msleep(10);
 
-	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl = rd32(E1000_CTRL);
 
 	hw_dbg(hw, "Issuing a global reset to MAC\n");
-	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
 
-	ret_val = e1000_get_auto_rd_done(hw);
+	ret_val = igb_get_auto_rd_done(hw);
 	if (ret_val) {
 		/*
 		 * When auto config read does not complete, do not
@@ -825,14 +825,14 @@ static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
 	}
 
 	/* If EEPROM is not present, run manual init scripts */
-	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
-		e1000_reset_init_script_82575(hw);
+	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
+		igb_reset_init_script_82575(hw);
 
 	/* Clear any pending interrupt events. */
-	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
-	icr = E1000_READ_REG(hw, E1000_ICR);
+	wr32(E1000_IMC, 0xffffffff);
+	icr = rd32(E1000_ICR);
 
-	e1000_check_alt_mac_addr(hw);
+	igb_check_alt_mac_addr(hw);
 
 	return ret_val;
 }
@@ -843,14 +843,14 @@ static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
  *
  *  This inits the hardware readying it for operation.
  **/
-static s32 e1000_init_hw_82575(struct e1000_hw *hw)
+static s32 igb_init_hw_82575(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 	s32 ret_val;
 	u16 i, rar_count = mac->rar_entry_count;
 
 	/* Initialize identification LED */
-	ret_val = e1000_id_led_init(hw);
+	ret_val = igb_id_led_init(hw);
 	if (ret_val) {
 		hw_dbg(hw, "Error initializing identification LED\n");
 		/* This is not fatal and we should not stop init due to this */
@@ -858,17 +858,17 @@ static s32 e1000_init_hw_82575(struct e1000_hw *hw)
 
 	/* Disabling VLAN filtering */
 	hw_dbg(hw, "Initializing the IEEE VLAN\n");
-	e1000_clear_vfta(hw);
+	igb_clear_vfta(hw);
 
 	/* Setup the receive address */
-	e1000_init_rx_addrs(hw, rar_count);
+	igb_init_rx_addrs(hw, rar_count);
 	/* Zero out the Multicast HASH table */
 	hw_dbg(hw, "Zeroing the MTA\n");
 	for (i = 0; i < mac->mta_reg_count; i++)
-		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		array_wr32(E1000_MTA, i, 0);
 
 	/* Setup link and flow control */
-	ret_val = e1000_setup_link(hw);
+	ret_val = igb_setup_link(hw);
 
 	/*
 	 * Clear all of the statistics registers (clear on read).  It is
@@ -876,7 +876,7 @@ static s32 e1000_init_hw_82575(struct e1000_hw *hw)
 	 * because the symbol error count will increment wildly if there
 	 * is no link.
 	 */
-	e1000_clear_hw_cntrs_82575(hw);
+	igb_clear_hw_cntrs_82575(hw);
 
 	return ret_val;
 }
@@ -889,28 +889,28 @@ static s32 e1000_init_hw_82575(struct e1000_hw *hw)
  *  for link, once link is established calls to configure collision distance
  *  and flow control are called.
  **/
-static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
+static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 {
 	u32 ctrl, led_ctrl;
 	s32  ret_val;
 	bool link;
 
-	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl = rd32(E1000_CTRL);
 	ctrl |= E1000_CTRL_SLU;
 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
-	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	wr32(E1000_CTRL, ctrl);
 
 	switch (hw->phy.type) {
 	case e1000_phy_m88:
-		ret_val = e1000_copper_link_setup_m88(hw);
+		ret_val = igb_copper_link_setup_m88(hw);
 		break;
 	case e1000_phy_igp_3:
-		ret_val = e1000_copper_link_setup_igp(hw);
+		ret_val = igb_copper_link_setup_igp(hw);
 		/* Setup activity LED */
-		led_ctrl = E1000_READ_REG(hw, E1000_LEDCTL);
+		led_ctrl = rd32(E1000_LEDCTL);
 		led_ctrl &= IGP_ACTIVITY_LED_MASK;
 		led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
-		E1000_WRITE_REG(hw, E1000_LEDCTL, led_ctrl);
+		wr32(E1000_LEDCTL, led_ctrl);
 		break;
 	default:
 		ret_val = -E1000_ERR_PHY;
@@ -925,7 +925,7 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
 		 * Setup autoneg and flow control advertisement
 		 * and perform autonegotiation.
 		 */
-		ret_val = e1000_copper_link_autoneg(hw);
+		ret_val = igb_copper_link_autoneg(hw);
 		if (ret_val)
 			goto out;
 	} else {
@@ -934,14 +934,14 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
 		 * depending on user settings.
 		 */
 		hw_dbg(hw, "Forcing Speed and Duplex\n");
-		ret_val = e1000_phy_force_speed_duplex(hw);
+		ret_val = igb_phy_force_speed_duplex(hw);
 		if (ret_val) {
 			hw_dbg(hw, "Error Forcing Speed and Duplex\n");
 			goto out;
 		}
 	}
 
-	ret_val = e1000_configure_pcs_link_82575(hw);
+	ret_val = igb_configure_pcs_link_82575(hw);
 	if (ret_val)
 		goto out;
 
@@ -949,7 +949,7 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
 	 * Check link status. Wait up to 100 microseconds for link to become
 	 * valid.
 	 */
-	ret_val = e1000_phy_has_link(hw,
+	ret_val = igb_phy_has_link(hw,
 					     COPPER_LINK_UP_LIMIT,
 					     10,
 					     &link);
@@ -959,8 +959,8 @@ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
 	if (link) {
 		hw_dbg(hw, "Valid link established!!!\n");
 		/* Config the MAC and PHY after link is up */
-		e1000_config_collision_dist(hw);
-		ret_val = e1000_config_fc_after_link_up(hw);
+		igb_config_collision_dist(hw);
+		ret_val = igb_config_fc_after_link_up(hw);
 	} else {
 		hw_dbg(hw, "Unable to establish link!!!\n");
 	}
@@ -975,7 +975,7 @@ out:
  *
  *  Configures speed and duplex for fiber and serdes links.
  **/
-static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
+static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
 {
 	u32 reg;
 
@@ -985,21 +985,21 @@ static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
 	 * the register does not indicate its status.  Therefore, we ensure
 	 * loopback mode is disabled during initialization.
 	 */
-	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
 
 	/* Force link up, set 1gb, set both sw defined pins */
-	reg = E1000_READ_REG(hw, E1000_CTRL);
+	reg = rd32(E1000_CTRL);
 	reg |= E1000_CTRL_SLU |
 	       E1000_CTRL_SPD_1000 |
 	       E1000_CTRL_FRCSPD |
 	       E1000_CTRL_SWDPIN0 |
 	       E1000_CTRL_SWDPIN1;
-	E1000_WRITE_REG(hw, E1000_CTRL, reg);
+	wr32(E1000_CTRL, reg);
 
 	/* Set switch control to serdes energy detect */
-	reg = E1000_READ_REG(hw, E1000_CONNSW);
+	reg = rd32(E1000_CONNSW);
 	reg |= E1000_CONNSW_ENRGSRC;
-	E1000_WRITE_REG(hw, E1000_CONNSW, reg);
+	wr32(E1000_CONNSW, reg);
 
 	/*
 	 * New SerDes mode allows for forcing speed or autonegotiating speed
@@ -1007,7 +1007,7 @@ static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
 	 * mode that will be compatible with older link partners and switches.
 	 * However, both are supported by the hardware and some drivers/tools.
 	 */
-	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+	reg = rd32(E1000_PCS_LCTL);
 
 	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
 		E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
@@ -1028,7 +1028,7 @@ static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
 		       E1000_PCS_LCTL_FORCE_LINK;     /* Force Link */
 		hw_dbg(hw, "Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
 	}
-	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+	wr32(E1000_PCS_LCTL, reg);
 
 	return 0;
 }
@@ -1042,17 +1042,17 @@ static s32 e1000_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
  *  independent interface (sgmii) is being used.  Configures the link
  *  for auto-negotiation or forces speed/duplex.
  **/
-static s32 e1000_configure_pcs_link_82575(struct e1000_hw *hw)
+static s32 igb_configure_pcs_link_82575(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 	u32 reg = 0;
 
 	if (hw->phy.media_type != e1000_media_type_copper ||
-	    !(e1000_sgmii_active_82575(hw)))
+	    !(igb_sgmii_active_82575(hw)))
 		goto out;
 
 	/* For SGMII, we need to issue a PCS autoneg restart */
-	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+	reg = rd32(E1000_PCS_LCTL);
 
 	/* AN time out should be disabled for SGMII mode */
 	reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
@@ -1092,7 +1092,7 @@ static s32 e1000_configure_pcs_link_82575(struct e1000_hw *hw)
 		       "Wrote 0x%08X to PCS_LCTL to configure forced link\n",
 		       reg);
 	}
-	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+	wr32(E1000_PCS_LCTL, reg);
 
 out:
 	return 0;
@@ -1106,13 +1106,13 @@ out:
  *  which can be enabled for use in the embedded applications.  Simply
  *  return the current state of the sgmii interface.
  **/
-static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
+static bool igb_sgmii_active_82575(struct e1000_hw *hw)
 {
 	struct e1000_dev_spec_82575 *dev_spec;
 	bool ret_val;
 
 	if (hw->mac.type != e1000_82575) {
-		ret_val = 0;
+		ret_val = false;
 		goto out;
 	}
 
@@ -1131,30 +1131,30 @@ out:
  *  Inits recommended HW defaults after a reset when there is no EEPROM
  *  detected. This is only for the 82575.
  **/
-static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
+static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
 {
 	if (hw->mac.type == e1000_82575) {
 		hw_dbg(hw, "Running reset init script for 82575\n");
 		/* SerDes configuration via SERDESCTRL */
-		e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
-		e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
-		e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
-		e1000_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
 
 		/* CCM configuration via CCMCTL register */
-		e1000_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
-		e1000_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
+		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
+		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
 
 		/* PCIe lanes configuration */
-		e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
-		e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
-		e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
-		e1000_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
 
 		/* PCIe PLL Configuration */
-		e1000_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
-		e1000_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
-		e1000_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
 	}
 
 	return 0;
@@ -1164,12 +1164,12 @@ static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
  *  e1000_read_mac_addr_82575 - Read device MAC address
  *  @hw: pointer to the HW structure
  **/
-static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
+static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
 
-	if (e1000_check_alt_mac_addr(hw))
-		ret_val = e1000_read_mac_addr(hw);
+	if (igb_check_alt_mac_addr(hw))
+		ret_val = igb_read_mac_addr(hw);
 
 	return ret_val;
 }
@@ -1180,88 +1180,88 @@ static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
  *
  *  Clears the hardware counters by reading the counter registers.
  **/
-static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
+static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
 {
 	u32 temp;
 
-	e1000_clear_hw_cntrs_base(hw);
-
-	temp = E1000_READ_REG(hw, E1000_PRC64);
-	temp = E1000_READ_REG(hw, E1000_PRC127);
-	temp = E1000_READ_REG(hw, E1000_PRC255);
-	temp = E1000_READ_REG(hw, E1000_PRC511);
-	temp = E1000_READ_REG(hw, E1000_PRC1023);
-	temp = E1000_READ_REG(hw, E1000_PRC1522);
-	temp = E1000_READ_REG(hw, E1000_PTC64);
-	temp = E1000_READ_REG(hw, E1000_PTC127);
-	temp = E1000_READ_REG(hw, E1000_PTC255);
-	temp = E1000_READ_REG(hw, E1000_PTC511);
-	temp = E1000_READ_REG(hw, E1000_PTC1023);
-	temp = E1000_READ_REG(hw, E1000_PTC1522);
-
-	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
-	temp = E1000_READ_REG(hw, E1000_RXERRC);
-	temp = E1000_READ_REG(hw, E1000_TNCRS);
-	temp = E1000_READ_REG(hw, E1000_CEXTERR);
-	temp = E1000_READ_REG(hw, E1000_TSCTC);
-	temp = E1000_READ_REG(hw, E1000_TSCTFC);
-
-	temp = E1000_READ_REG(hw, E1000_MGTPRC);
-	temp = E1000_READ_REG(hw, E1000_MGTPDC);
-	temp = E1000_READ_REG(hw, E1000_MGTPTC);
-
-	temp = E1000_READ_REG(hw, E1000_IAC);
-	temp = E1000_READ_REG(hw, E1000_ICRXOC);
-
-	temp = E1000_READ_REG(hw, E1000_ICRXPTC);
-	temp = E1000_READ_REG(hw, E1000_ICRXATC);
-	temp = E1000_READ_REG(hw, E1000_ICTXPTC);
-	temp = E1000_READ_REG(hw, E1000_ICTXATC);
-	temp = E1000_READ_REG(hw, E1000_ICTXQEC);
-	temp = E1000_READ_REG(hw, E1000_ICTXQMTC);
-	temp = E1000_READ_REG(hw, E1000_ICRXDMTC);
-
-	temp = E1000_READ_REG(hw, E1000_CBTMPC);
-	temp = E1000_READ_REG(hw, E1000_HTDPMC);
-	temp = E1000_READ_REG(hw, E1000_CBRMPC);
-	temp = E1000_READ_REG(hw, E1000_RPTHC);
-	temp = E1000_READ_REG(hw, E1000_HGPTC);
-	temp = E1000_READ_REG(hw, E1000_HTCBDPC);
-	temp = E1000_READ_REG(hw, E1000_HGORCL);
-	temp = E1000_READ_REG(hw, E1000_HGORCH);
-	temp = E1000_READ_REG(hw, E1000_HGOTCL);
-	temp = E1000_READ_REG(hw, E1000_HGOTCH);
-	temp = E1000_READ_REG(hw, E1000_LENERRS);
+	igb_clear_hw_cntrs_base(hw);
+
+	temp = rd32(E1000_PRC64);
+	temp = rd32(E1000_PRC127);
+	temp = rd32(E1000_PRC255);
+	temp = rd32(E1000_PRC511);
+	temp = rd32(E1000_PRC1023);
+	temp = rd32(E1000_PRC1522);
+	temp = rd32(E1000_PTC64);
+	temp = rd32(E1000_PTC127);
+	temp = rd32(E1000_PTC255);
+	temp = rd32(E1000_PTC511);
+	temp = rd32(E1000_PTC1023);
+	temp = rd32(E1000_PTC1522);
+
+	temp = rd32(E1000_ALGNERRC);
+	temp = rd32(E1000_RXERRC);
+	temp = rd32(E1000_TNCRS);
+	temp = rd32(E1000_CEXTERR);
+	temp = rd32(E1000_TSCTC);
+	temp = rd32(E1000_TSCTFC);
+
+	temp = rd32(E1000_MGTPRC);
+	temp = rd32(E1000_MGTPDC);
+	temp = rd32(E1000_MGTPTC);
+
+	temp = rd32(E1000_IAC);
+	temp = rd32(E1000_ICRXOC);
+
+	temp = rd32(E1000_ICRXPTC);
+	temp = rd32(E1000_ICRXATC);
+	temp = rd32(E1000_ICTXPTC);
+	temp = rd32(E1000_ICTXATC);
+	temp = rd32(E1000_ICTXQEC);
+	temp = rd32(E1000_ICTXQMTC);
+	temp = rd32(E1000_ICRXDMTC);
+
+	temp = rd32(E1000_CBTMPC);
+	temp = rd32(E1000_HTDPMC);
+	temp = rd32(E1000_CBRMPC);
+	temp = rd32(E1000_RPTHC);
+	temp = rd32(E1000_HGPTC);
+	temp = rd32(E1000_HTCBDPC);
+	temp = rd32(E1000_HGORCL);
+	temp = rd32(E1000_HGORCH);
+	temp = rd32(E1000_HGOTCL);
+	temp = rd32(E1000_HGOTCH);
+	temp = rd32(E1000_LENERRS);
 
 	/* This register should not be read in copper configurations */
 	if (hw->phy.media_type == e1000_media_type_internal_serdes)
-		temp = E1000_READ_REG(hw, E1000_SCVPC);
+		temp = rd32(E1000_SCVPC);
 }
 
 static struct e1000_mac_operations e1000_mac_ops_82575 = {
-	.reset_hw             = e1000_reset_hw_82575,
-	.init_hw              = e1000_init_hw_82575,
-	.check_for_link       = e1000_check_for_link_82575,
-	.rar_set              = e1000_rar_set_82575,
-	.read_mac_addr        = e1000_read_mac_addr_82575,
-	.get_speed_and_duplex = e1000_get_speed_and_duplex_copper,
+	.reset_hw             = igb_reset_hw_82575,
+	.init_hw              = igb_init_hw_82575,
+	.check_for_link       = igb_check_for_link_82575,
+	.rar_set              = igb_rar_set_82575,
+	.read_mac_addr        = igb_read_mac_addr_82575,
+	.get_speed_and_duplex = igb_get_speed_and_duplex_copper,
 };
 
 static struct e1000_phy_operations e1000_phy_ops_82575 = {
-	.acquire_phy          = e1000_acquire_phy_82575,
-	.get_cfg_done         = e1000_get_cfg_done_82575,
-	.release_phy          = e1000_release_phy_82575,
+	.acquire_phy          = igb_acquire_phy_82575,
+	.get_cfg_done         = igb_get_cfg_done_82575,
+	.release_phy          = igb_release_phy_82575,
 };
 
 static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
-	.acquire_nvm          = e1000_acquire_nvm_82575,
-	.read_nvm             = e1000_read_nvm_eerd,
-	.release_nvm          = e1000_release_nvm_82575,
-	.write_nvm            = e1000_write_nvm_spi,
+	.acquire_nvm          = igb_acquire_nvm_82575,
+	.read_nvm             = igb_read_nvm_eerd,
+	.release_nvm          = igb_release_nvm_82575,
+	.write_nvm            = igb_write_nvm_spi,
 };
 
 const struct e1000_info e1000_82575_info = {
-	.get_invariants = e1000_get_invariants_82575,
+	.get_invariants = igb_get_invariants_82575,
 	.mac_ops = &e1000_mac_ops_82575,
 	.phy_ops = &e1000_phy_ops_82575,
 	.nvm_ops = &e1000_nvm_ops_82575,
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 66e9651..6604d96 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -30,54 +30,6 @@
 
 #define E1000_RAR_ENTRIES_82575   16
 
-struct e1000_adv_data_desc {
-	u64 buffer_addr;    /* Address of the descriptor's data buffer */
-	union {
-		u32 data;
-		struct {
-			u32 datalen :16; /* Data buffer length */
-			u32 rsvd    :4;
-			u32 dtyp    :4;  /* Descriptor type */
-			u32 dcmd    :8;  /* Descriptor command */
-		} config;
-	} lower;
-	union {
-		u32 data;
-		struct {
-			u32 status  :4;  /* Descriptor status */
-			u32 idx     :4;
-			u32 popts   :6;  /* Packet Options */
-			u32 paylen  :18; /* Payload length */
-		} options;
-	} upper;
-};
-
-
-struct e1000_adv_context_desc {
-	union {
-		u32 ip_config;
-		struct {
-			u32 iplen    :9;
-			u32 maclen   :7;
-			u32 vlan_tag :16;
-		} fields;
-	} ip_setup;
-	u32 seq_num;
-	union {
-		u64 l4_config;
-		struct {
-			u32 mkrloc :9;
-			u32 tucmd  :11;
-			u32 dtyp   :4;
-			u32 adv    :8;
-			u32 rsvd   :4;
-			u32 idx    :4;
-			u32 l4len  :8;
-			u32 mss    :16;
-		} fields;
-	} l4_setup;
-};
-
 /* SRRCTL bit definitions */
 #define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
 #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index d8b2624..3e84a3f 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -34,9 +34,9 @@
 
 #include "igb.h"
 
-static s32 e1000_set_default_fc(struct e1000_hw *hw);
-static s32 e1000_set_fc_watermarks(struct e1000_hw *hw);
-static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+static s32 igb_set_default_fc(struct e1000_hw *hw);
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
+static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
 
 /**
  *  e1000_remove_device - Free device specific structure
@@ -45,20 +45,20 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
  *  If a device specific structure was allocated, this function will
  *  free it.
  **/
-void e1000_remove_device(struct e1000_hw *hw)
+void igb_remove_device(struct e1000_hw *hw)
 {
 	/* Freeing the dev_spec member of e1000_hw structure */
 	kfree(hw->dev_spec);
 }
 
-static void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+static void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
 {
 	struct igb_adapter *adapter = hw->back;
 
 	pci_read_config_word(adapter->pdev, reg, value);
 }
 
-static s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
 {
 	struct igb_adapter *adapter = hw->back;
 	u16 cap_offset;
@@ -80,7 +80,7 @@ static s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
  *  network interface.  The following bus information is determined and stored:
  *  bus speed, bus width, type (PCIe), and PCIe function.
  **/
-s32 e1000_get_bus_info_pcie(struct e1000_hw *hw)
+s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
 {
 	struct e1000_bus_info *bus = &hw->bus;
 	s32 ret_val;
@@ -90,7 +90,7 @@ s32 e1000_get_bus_info_pcie(struct e1000_hw *hw)
 	bus->type = e1000_bus_type_pci_express;
 	bus->speed = e1000_bus_speed_2500;
 
-	ret_val = e1000_read_pcie_cap_reg(hw,
+	ret_val = igb_read_pcie_cap_reg(hw,
 					  PCIE_LINK_STATUS,
 					  &pcie_link_status);
 	if (ret_val)
@@ -100,9 +100,9 @@ s32 e1000_get_bus_info_pcie(struct e1000_hw *hw)
 						     PCIE_LINK_WIDTH_MASK) >>
 						     PCIE_LINK_WIDTH_SHIFT);
 
-	e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+	igb_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
 	if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
-		status = E1000_READ_REG(hw, E1000_STATUS);
+		status = rd32(E1000_STATUS);
 		bus->func = (status & E1000_STATUS_FUNC_MASK)
 			    >> E1000_STATUS_FUNC_SHIFT;
 	} else {
@@ -119,13 +119,13 @@ s32 e1000_get_bus_info_pcie(struct e1000_hw *hw)
  *  Clears the register array which contains the VLAN filter table by
  *  setting all the values to 0.
  **/
-void e1000_clear_vfta(struct e1000_hw *hw)
+void igb_clear_vfta(struct e1000_hw *hw)
 {
 	u32 offset;
 
 	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
-		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
-		E1000_WRITE_FLUSH(hw);
+		array_wr32(E1000_VFTA, offset, 0);
+		wrfl();
 	}
 }
 
@@ -138,10 +138,10 @@ void e1000_clear_vfta(struct e1000_hw *hw)
  *  Writes value at the given offset in the register array which stores
  *  the VLAN filter table.
  **/
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
 {
-	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
-	E1000_WRITE_FLUSH(hw);
+	array_wr32(E1000_VFTA, offset, value);
+	wrfl();
 }
 
 /**
@@ -153,7 +153,7 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
  *  register to the devices MAC address and clearing all the other receive
  *  address registers to 0.
  **/
-void e1000_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
 {
 	u32 i;
 
@@ -165,10 +165,10 @@ void e1000_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
 	/* Zero out the other (rar_entry_count - 1) receive addresses */
 	hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
 	for (i = 1; i < rar_count; i++) {
-		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
-		E1000_WRITE_FLUSH(hw);
-		E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
-		E1000_WRITE_FLUSH(hw);
+		array_wr32(E1000_RA, (i << 1), 0);
+		wrfl();
+		array_wr32(E1000_RA, ((i << 1) + 1), 0);
+		wrfl();
 	}
 }
 
@@ -183,7 +183,7 @@ void e1000_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
  *  prgrammed into RAR0 and the cuntion returns success, otherwise the
  *  fucntion returns an error.
  **/
-s32 e1000_check_alt_mac_addr(struct e1000_hw *hw)
+s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
 {
 	u32 i;
 	s32 ret_val = 0;
@@ -241,7 +241,7 @@ out:
  *  Sets the receive address array register at index to the address passed
  *  in by addr.
  **/
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	u32 rar_low, rar_high;
 
@@ -258,8 +258,8 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
 	if (!hw->mac.disable_av)
 		rar_high |= E1000_RAH_AV;
 
-	E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
-	E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+	array_wr32(E1000_RA, (index << 1), rar_low);
+	array_wr32(E1000_RA, ((index << 1) + 1), rar_high);
 }
 
 /**
@@ -272,7 +272,7 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
  *  current value is read, the new bit is OR'd in and the new value is
  *  written back into the register.
  **/
-static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
+static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
 {
 	u32 hash_bit, hash_reg, mta;
 
@@ -289,12 +289,12 @@ static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
 	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
 	hash_bit = hash_value & 0x1F;
 
-	mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
+	mta = array_rd32(E1000_MTA, hash_reg);
 
 	mta |= (1 << hash_bit);
 
-	E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
-	E1000_WRITE_FLUSH(hw);
+	array_wr32(E1000_MTA, hash_reg, mta);
+	wrfl();
 }
 
 /**
@@ -310,7 +310,7 @@ static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
  *  The parameter rar_count will usually be hw->mac.rar_entry_count
  *  unless there are workarounds that change this.
  **/
-void e1000_update_mc_addr_list(struct e1000_hw *hw,
+void igb_update_mc_addr_list(struct e1000_hw *hw,
 			       u8 *mc_addr_list, u32 mc_addr_count,
 			       u32 rar_used_count, u32 rar_count)
 {
@@ -328,25 +328,25 @@ void e1000_update_mc_addr_list(struct e1000_hw *hw,
 			mc_addr_count--;
 			mc_addr_list += ETH_ALEN;
 		} else {
-			E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
-			E1000_WRITE_FLUSH(hw);
-			E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
-			E1000_WRITE_FLUSH(hw);
+			array_wr32(E1000_RA, i << 1, 0);
+			wrfl();
+			array_wr32(E1000_RA, (i << 1) + 1, 0);
+			wrfl();
 		}
 	}
 
 	/* Clear the old settings from the MTA */
 	hw_dbg(hw, "Clearing MTA\n");
 	for (i = 0; i < hw->mac.mta_reg_count; i++) {
-		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
-		E1000_WRITE_FLUSH(hw);
+		array_wr32(E1000_MTA, i, 0);
+		wrfl();
 	}
 
 	/* Load any remaining multicast addresses into the hash table. */
 	for (; mc_addr_count > 0; mc_addr_count--) {
-		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+		hash_value = igb_hash_mc_addr(hw, mc_addr_list);
 		hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
-		e1000_mta_set(hw, hash_value);
+		igb_mta_set(hw, hash_value);
 		mc_addr_list += ETH_ALEN;
 	}
 }
@@ -358,9 +358,9 @@ void e1000_update_mc_addr_list(struct e1000_hw *hw,
  *
  *  Generates a multicast address hash value which is used to determine
  *  the multicast filter table array address and new table value.  See
- *  e1000_mta_set()
+ *  igb_mta_set()
  **/
-static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
 {
 	u32 hash_value, hash_mask;
 	u8 bit_shift = 0;
@@ -428,47 +428,47 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
  *
  *  Clears the base hardware counters by reading the counter registers.
  **/
-void e1000_clear_hw_cntrs_base(struct e1000_hw *hw)
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
 {
 	u32 temp;
 
-	temp = E1000_READ_REG(hw, E1000_CRCERRS);
-	temp = E1000_READ_REG(hw, E1000_SYMERRS);
-	temp = E1000_READ_REG(hw, E1000_MPC);
-	temp = E1000_READ_REG(hw, E1000_SCC);
-	temp = E1000_READ_REG(hw, E1000_ECOL);
-	temp = E1000_READ_REG(hw, E1000_MCC);
-	temp = E1000_READ_REG(hw, E1000_LATECOL);
-	temp = E1000_READ_REG(hw, E1000_COLC);
-	temp = E1000_READ_REG(hw, E1000_DC);
-	temp = E1000_READ_REG(hw, E1000_SEC);
-	temp = E1000_READ_REG(hw, E1000_RLEC);
-	temp = E1000_READ_REG(hw, E1000_XONRXC);
-	temp = E1000_READ_REG(hw, E1000_XONTXC);
-	temp = E1000_READ_REG(hw, E1000_XOFFRXC);
-	temp = E1000_READ_REG(hw, E1000_XOFFTXC);
-	temp = E1000_READ_REG(hw, E1000_FCRUC);
-	temp = E1000_READ_REG(hw, E1000_GPRC);
-	temp = E1000_READ_REG(hw, E1000_BPRC);
-	temp = E1000_READ_REG(hw, E1000_MPRC);
-	temp = E1000_READ_REG(hw, E1000_GPTC);
-	temp = E1000_READ_REG(hw, E1000_GORCL);
-	temp = E1000_READ_REG(hw, E1000_GORCH);
-	temp = E1000_READ_REG(hw, E1000_GOTCL);
-	temp = E1000_READ_REG(hw, E1000_GOTCH);
-	temp = E1000_READ_REG(hw, E1000_RNBC);
-	temp = E1000_READ_REG(hw, E1000_RUC);
-	temp = E1000_READ_REG(hw, E1000_RFC);
-	temp = E1000_READ_REG(hw, E1000_ROC);
-	temp = E1000_READ_REG(hw, E1000_RJC);
-	temp = E1000_READ_REG(hw, E1000_TORL);
-	temp = E1000_READ_REG(hw, E1000_TORH);
-	temp = E1000_READ_REG(hw, E1000_TOTL);
-	temp = E1000_READ_REG(hw, E1000_TOTH);
-	temp = E1000_READ_REG(hw, E1000_TPR);
-	temp = E1000_READ_REG(hw, E1000_TPT);
-	temp = E1000_READ_REG(hw, E1000_MPTC);
-	temp = E1000_READ_REG(hw, E1000_BPTC);
+	temp = rd32(E1000_CRCERRS);
+	temp = rd32(E1000_SYMERRS);
+	temp = rd32(E1000_MPC);
+	temp = rd32(E1000_SCC);
+	temp = rd32(E1000_ECOL);
+	temp = rd32(E1000_MCC);
+	temp = rd32(E1000_LATECOL);
+	temp = rd32(E1000_COLC);
+	temp = rd32(E1000_DC);
+	temp = rd32(E1000_SEC);
+	temp = rd32(E1000_RLEC);
+	temp = rd32(E1000_XONRXC);
+	temp = rd32(E1000_XONTXC);
+	temp = rd32(E1000_XOFFRXC);
+	temp = rd32(E1000_XOFFTXC);
+	temp = rd32(E1000_FCRUC);
+	temp = rd32(E1000_GPRC);
+	temp = rd32(E1000_BPRC);
+	temp = rd32(E1000_MPRC);
+	temp = rd32(E1000_GPTC);
+	temp = rd32(E1000_GORCL);
+	temp = rd32(E1000_GORCH);
+	temp = rd32(E1000_GOTCL);
+	temp = rd32(E1000_GOTCH);
+	temp = rd32(E1000_RNBC);
+	temp = rd32(E1000_RUC);
+	temp = rd32(E1000_RFC);
+	temp = rd32(E1000_ROC);
+	temp = rd32(E1000_RJC);
+	temp = rd32(E1000_TORL);
+	temp = rd32(E1000_TORH);
+	temp = rd32(E1000_TOTL);
+	temp = rd32(E1000_TOTH);
+	temp = rd32(E1000_TPR);
+	temp = rd32(E1000_TPT);
+	temp = rd32(E1000_MPTC);
+	temp = rd32(E1000_BPTC);
 }
 
 /**
@@ -479,7 +479,7 @@ void e1000_clear_hw_cntrs_base(struct e1000_hw *hw)
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
  **/
-s32 e1000_check_for_copper_link(struct e1000_hw *hw)
+s32 igb_check_for_copper_link(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 	s32 ret_val;
@@ -501,20 +501,20 @@ s32 e1000_check_for_copper_link(struct e1000_hw *hw)
 	 * link.  If so, then we want to get the current speed/duplex
 	 * of the PHY.
 	 */
-	ret_val = e1000_phy_has_link(hw, 1, 0, &link);
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
 	if (ret_val)
 		goto out;
 
 	if (!link)
 		goto out; /* No link detected */
 
-	mac->get_link_status = 0;
+	mac->get_link_status = false;
 
 	/*
 	 * Check if there was DownShift, must be checked
 	 * immediately after link-up
 	 */
-	e1000_check_downshift(hw);
+	igb_check_downshift(hw);
 
 	/*
 	 * If we are forcing speed/duplex, then we simply return since
@@ -530,7 +530,7 @@ s32 e1000_check_for_copper_link(struct e1000_hw *hw)
 	 * of MAC speed/duplex configuration.  So we only need to
 	 * configure Collision Distance in the MAC.
 	 */
-	e1000_config_collision_dist(hw);
+	igb_config_collision_dist(hw);
 
 	/*
 	 * Configure Flow Control now that Auto-Neg has completed.
@@ -538,7 +538,7 @@ s32 e1000_check_for_copper_link(struct e1000_hw *hw)
 	 * settings because we may have had to re-autoneg with a
 	 * different link partner.
 	 */
-	ret_val = e1000_config_fc_after_link_up(hw);
+	ret_val = igb_config_fc_after_link_up(hw);
 	if (ret_val)
 		hw_dbg(hw, "Error configuring flow control\n");
 
@@ -556,7 +556,7 @@ out:
  *  should be established.  Assumes the hardware has previously been reset
  *  and the transmitter and receiver are not enabled.
  **/
-s32 e1000_setup_link(struct e1000_hw *hw)
+s32 igb_setup_link(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
 
@@ -564,10 +564,10 @@ s32 e1000_setup_link(struct e1000_hw *hw)
 	 * In the case of the phy reset being blocked, we already have a link.
 	 * We do not need to set it up again.
 	 */
-	if (e1000_check_reset_block(hw))
+	if (igb_check_reset_block(hw))
 		goto out;
 
-	ret_val = e1000_set_default_fc(hw);
+	ret_val = igb_set_default_fc(hw);
 	if (ret_val)
 		goto out;
 
@@ -593,13 +593,13 @@ s32 e1000_setup_link(struct e1000_hw *hw)
 	 */
 	hw_dbg(hw,
 	       "Initializing the Flow Control address, type and timer regs\n");
-	E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
-	E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
-	E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
+	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
 
-	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+	wr32(E1000_FCTTV, hw->fc.pause_time);
 
-	ret_val = e1000_set_fc_watermarks(hw);
+	ret_val = igb_set_fc_watermarks(hw);
 
 out:
 	return ret_val;
@@ -613,17 +613,17 @@ out:
  *  during link setup. Currently no func pointer exists and all
  *  implementations are handled in the generic version of this function.
  **/
-void e1000_config_collision_dist(struct e1000_hw *hw)
+void igb_config_collision_dist(struct e1000_hw *hw)
 {
 	u32 tctl;
 
-	tctl = E1000_READ_REG(hw, E1000_TCTL);
+	tctl = rd32(E1000_TCTL);
 
 	tctl &= ~E1000_TCTL_COLD;
 	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
 
-	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
-	E1000_WRITE_FLUSH(hw);
+	wr32(E1000_TCTL, tctl);
+	wrfl();
 }
 
 /**
@@ -634,7 +634,7 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
  *  flow control XON frame transmission is enabled, then set XON frame
  *  tansmission as well.
  **/
-static s32 e1000_set_fc_watermarks(struct e1000_hw *hw)
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
 	u32 fcrtl = 0, fcrth = 0;
@@ -658,8 +658,8 @@ static s32 e1000_set_fc_watermarks(struct e1000_hw *hw)
 
 		fcrth = hw->fc.high_water;
 	}
-	E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
-	E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+	wr32(E1000_FCRTL, fcrtl);
+	wr32(E1000_FCRTH, fcrth);
 
 	return ret_val;
 }
@@ -671,7 +671,7 @@ static s32 e1000_set_fc_watermarks(struct e1000_hw *hw)
  *  Read the EEPROM for the default values for flow control and store the
  *  values.
  **/
-static s32 e1000_set_default_fc(struct e1000_hw *hw)
+static s32 igb_set_default_fc(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
 	u16 nvm_data;
@@ -715,12 +715,12 @@ out:
  *  autonegotiation is managed by the PHY rather than the MAC.  Software must
  *  also configure these bits when link is forced on a fiber connection.
  **/
-s32 e1000_force_mac_fc(struct e1000_hw *hw)
+s32 igb_force_mac_fc(struct e1000_hw *hw)
 {
 	u32 ctrl;
 	s32 ret_val = 0;
 
-	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl = rd32(E1000_CTRL);
 
 	/*
 	 * Because we didn't get link via the internal auto-negotiation
@@ -763,7 +763,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
 		goto out;
 	}
 
-	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	wr32(E1000_CTRL, ctrl);
 
 out:
 	return ret_val;
@@ -779,7 +779,7 @@ out:
  *  and did not fail, then we configure flow control based on our link
  *  partner.
  **/
-s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 	s32 ret_val = 0;
@@ -794,10 +794,10 @@ s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
 	if (mac->autoneg_failed) {
 		if (hw->phy.media_type == e1000_media_type_fiber ||
 		    hw->phy.media_type == e1000_media_type_internal_serdes)
-			ret_val = e1000_force_mac_fc(hw);
+			ret_val = igb_force_mac_fc(hw);
 	} else {
 		if (hw->phy.media_type == e1000_media_type_copper)
-			ret_val = e1000_force_mac_fc(hw);
+			ret_val = igb_force_mac_fc(hw);
 	}
 
 	if (ret_val) {
@@ -979,7 +979,7 @@ s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
 		 * Now we call a subroutine to actually force the MAC
 		 * controller to use the correct flow control settings.
 		 */
-		ret_val = e1000_force_mac_fc(hw);
+		ret_val = igb_force_mac_fc(hw);
 		if (ret_val) {
 			hw_dbg(hw, "Error forcing flow control settings\n");
 			goto out;
@@ -999,12 +999,12 @@ out:
  *  Read the status register for the current speed/duplex and store the current
  *  speed and duplex for copper connections.
  **/
-s32 e1000_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
 				      u16 *duplex)
 {
 	u32 status;
 
-	status = E1000_READ_REG(hw, E1000_STATUS);
+	status = rd32(E1000_STATUS);
 	if (status & E1000_STATUS_SPEED_1000) {
 		*speed = SPEED_1000;
 		hw_dbg(hw, "1000 Mbs, ");
@@ -1033,7 +1033,7 @@ s32 e1000_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
  *
  *  Acquire the HW semaphore to access the PHY or NVM
  **/
-s32 e1000_get_hw_semaphore(struct e1000_hw *hw)
+s32 igb_get_hw_semaphore(struct e1000_hw *hw)
 {
 	u32 swsm;
 	s32 ret_val = 0;
@@ -1042,7 +1042,7 @@ s32 e1000_get_hw_semaphore(struct e1000_hw *hw)
 
 	/* Get the SW semaphore */
 	while (i < timeout) {
-		swsm = E1000_READ_REG(hw, E1000_SWSM);
+		swsm = rd32(E1000_SWSM);
 		if (!(swsm & E1000_SWSM_SMBI))
 			break;
 
@@ -1058,11 +1058,11 @@ s32 e1000_get_hw_semaphore(struct e1000_hw *hw)
 
 	/* Get the FW semaphore. */
 	for (i = 0; i < timeout; i++) {
-		swsm = E1000_READ_REG(hw, E1000_SWSM);
-		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+		swsm = rd32(E1000_SWSM);
+		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
 
 		/* Semaphore acquired if bit latched */
-		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
 			break;
 
 		udelay(50);
@@ -1070,7 +1070,7 @@ s32 e1000_get_hw_semaphore(struct e1000_hw *hw)
 
 	if (i == timeout) {
 		/* Release semaphores */
-		e1000_put_hw_semaphore(hw);
+		igb_put_hw_semaphore(hw);
 		hw_dbg(hw, "Driver can't access the NVM\n");
 		ret_val = -E1000_ERR_NVM;
 		goto out;
@@ -1086,15 +1086,15 @@ out:
  *
  *  Release hardware semaphore used to access the PHY or NVM
  **/
-void e1000_put_hw_semaphore(struct e1000_hw *hw)
+void igb_put_hw_semaphore(struct e1000_hw *hw)
 {
 	u32 swsm;
 
-	swsm = E1000_READ_REG(hw, E1000_SWSM);
+	swsm = rd32(E1000_SWSM);
 
 	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
 
-	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+	wr32(E1000_SWSM, swsm);
 }
 
 /**
@@ -1103,14 +1103,14 @@ void e1000_put_hw_semaphore(struct e1000_hw *hw)
  *
  *  Check EEPROM for Auto Read done bit.
  **/
-s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+s32 igb_get_auto_rd_done(struct e1000_hw *hw)
 {
 	s32 i = 0;
 	s32 ret_val = 0;
 
 
 	while (i < AUTO_READ_DONE_TIMEOUT) {
-		if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
 			break;
 		msleep(1);
 		i++;
@@ -1134,7 +1134,7 @@ out:
  *  Read the EEPROM for the current default LED configuration.  If the
  *  LED configuration is not valid, set to a valid LED configuration.
  **/
-static s32 e1000_valid_led_default(struct e1000_hw *hw, u16 *data)
+static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
 {
 	s32 ret_val;
 
@@ -1156,7 +1156,7 @@ out:
  *  @hw: pointer to the HW structure
  *
  **/
-s32 e1000_id_led_init(struct e1000_hw *hw)
+s32 igb_id_led_init(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 	s32 ret_val;
@@ -1166,11 +1166,11 @@ s32 e1000_id_led_init(struct e1000_hw *hw)
 	u16 data, i, temp;
 	const u16 led_mask = 0x0F;
 
-	ret_val = e1000_valid_led_default(hw, &data);
+	ret_val = igb_valid_led_default(hw, &data);
 	if (ret_val)
 		goto out;
 
-	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+	mac->ledctl_default = rd32(E1000_LEDCTL);
 	mac->ledctl_mode1 = mac->ledctl_default;
 	mac->ledctl_mode2 = mac->ledctl_default;
 
@@ -1223,9 +1223,9 @@ out:
  *  Remove the current LED configuration and set the LED configuration
  *  to the default value, saved from the EEPROM.
  **/
-s32 e1000_cleanup_led(struct e1000_hw *hw)
+s32 igb_cleanup_led(struct e1000_hw *hw)
 {
-	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
 	return 0;
 }
 
@@ -1235,7 +1235,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
  *
  *  Blink the LEDs which are set to be on.
  **/
-s32 e1000_blink_led(struct e1000_hw *hw)
+s32 igb_blink_led(struct e1000_hw *hw)
 {
 	u32 ledctl_blink = 0;
 	u32 i;
@@ -1257,7 +1257,7 @@ s32 e1000_blink_led(struct e1000_hw *hw)
 						 (i * 8));
 	}
 
-	E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+	wr32(E1000_LEDCTL, ledctl_blink);
 
 	return 0;
 }
@@ -1268,19 +1268,19 @@ s32 e1000_blink_led(struct e1000_hw *hw)
  *
  *  Turn LED off.
  **/
-s32 e1000_led_off(struct e1000_hw *hw)
+s32 igb_led_off(struct e1000_hw *hw)
 {
 	u32 ctrl;
 
 	switch (hw->phy.media_type) {
 	case e1000_media_type_fiber:
-		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl = rd32(E1000_CTRL);
 		ctrl |= E1000_CTRL_SWDPIN0;
 		ctrl |= E1000_CTRL_SWDPIO0;
-		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		wr32(E1000_CTRL, ctrl);
 		break;
 	case e1000_media_type_copper:
-		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
 		break;
 	default:
 		break;
@@ -1300,7 +1300,7 @@ s32 e1000_led_off(struct e1000_hw *hw)
  *  Disables PCI-Express master access and verifies there are no pending
  *  requests.
  **/
-s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+s32 igb_disable_pcie_master(struct e1000_hw *hw)
 {
 	u32 ctrl;
 	s32 timeout = MASTER_DISABLE_TIMEOUT;
@@ -1309,12 +1309,12 @@ s32 e1000_disable_pcie_master(struct e1000_hw *hw)
 	if (hw->bus.type != e1000_bus_type_pci_express)
 		goto out;
 
-	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl = rd32(E1000_CTRL);
 	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
-	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	wr32(E1000_CTRL, ctrl);
 
 	while (timeout) {
-		if (!(E1000_READ_REG(hw, E1000_STATUS) &
+		if (!(rd32(E1000_STATUS) &
 		      E1000_STATUS_GIO_MASTER_ENABLE))
 			break;
 		udelay(100);
@@ -1337,7 +1337,7 @@ out:
  *
  *  Reset the Adaptive Interframe Spacing throttle to default values.
  **/
-void e1000_reset_adaptive(struct e1000_hw *hw)
+void igb_reset_adaptive(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 
@@ -1354,8 +1354,8 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
 		mac->ifs_ratio = IFS_RATIO;
 	}
 
-	mac->in_ifs_mode = 0;
-	E1000_WRITE_REG(hw, E1000_AIT, 0);
+	mac->in_ifs_mode = false;
+	wr32(E1000_AIT, 0);
 out:
 	return;
 }
@@ -1367,7 +1367,7 @@ out:
  *  Update the Adaptive Interframe Spacing Throttle value based on the
  *  time between transmitted packets and time between collisions.
  **/
-void e1000_update_adaptive(struct e1000_hw *hw)
+void igb_update_adaptive(struct e1000_hw *hw)
 {
 	struct e1000_mac_info *mac = &hw->mac;
 
@@ -1378,14 +1378,14 @@ void e1000_update_adaptive(struct e1000_hw *hw)
 
 	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
 		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
-			mac->in_ifs_mode = 1;
+			mac->in_ifs_mode = true;
 			if (mac->current_ifs_val < mac->ifs_max_val) {
 				if (!mac->current_ifs_val)
 					mac->current_ifs_val = mac->ifs_min_val;
 				else
 					mac->current_ifs_val +=
 						mac->ifs_step_size;
-				E1000_WRITE_REG(hw, E1000_AIT,
+				wr32(E1000_AIT,
 						mac->current_ifs_val);
 			}
 		}
@@ -1393,8 +1393,8 @@ void e1000_update_adaptive(struct e1000_hw *hw)
 		if (mac->in_ifs_mode &&
 		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
 			mac->current_ifs_val = 0;
-			mac->in_ifs_mode = 0;
-			E1000_WRITE_REG(hw, E1000_AIT, 0);
+			mac->in_ifs_mode = false;
+			wr32(E1000_AIT, 0);
 		}
 	}
 out:
@@ -1408,7 +1408,7 @@ out:
  *  Verify that when not using auto-negotiation, MDI/MDIx is correctly
  *  set, which is forced to MDI mode only.
  **/
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+s32 igb_validate_mdi_setting(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
 
@@ -1434,7 +1434,7 @@ out:
  *  and they all have the format address << 8 | data and bit 31 is polled for
  *  completion.
  **/
-s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
 			      u32 offset, u8 data)
 {
 	u32 i, regvalue = 0;
@@ -1442,12 +1442,12 @@ s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
 
 	/* Set up the address and data */
 	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
-	E1000_WRITE_REG(hw, reg, regvalue);
+	wr32(reg, regvalue);
 
 	/* Poll the ready bit to see if the MDI read completed */
 	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
 		udelay(5);
-		regvalue = E1000_READ_REG(hw, reg);
+		regvalue = rd32(reg);
 		if (regvalue & E1000_GEN_CTL_READY)
 			break;
 	}
@@ -1467,35 +1467,35 @@ out:
  *
  *  Verifies the hardware needs to allow ARPs to be processed by the host.
  **/
-bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
 {
 	u32 manc;
 	u32 fwsm, factps;
-	bool ret_val = 0;
+	bool ret_val = false;
 
 	if (!hw->mac.asf_firmware_present)
 		goto out;
 
-	manc = E1000_READ_REG(hw, E1000_MANC);
+	manc = rd32(E1000_MANC);
 
 	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
 	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
 		goto out;
 
 	if (hw->mac.arc_subsystem_valid) {
-		fwsm = E1000_READ_REG(hw, E1000_FWSM);
-		factps = E1000_READ_REG(hw, E1000_FACTPS);
+		fwsm = rd32(E1000_FWSM);
+		factps = rd32(E1000_FACTPS);
 
 		if (!(factps & E1000_FACTPS_MNGCG) &&
 		    ((fwsm & E1000_FWSM_MODE_MASK) ==
 		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
-			ret_val = 1;
+			ret_val = true;
 			goto out;
 		}
 	} else {
 		if ((manc & E1000_MANC_SMBUS_EN) &&
 		    !(manc & E1000_MANC_ASF_EN)) {
-			ret_val = 1;
+			ret_val = true;
 			goto out;
 		}
 	}
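
For what it's worth, the renamed igb_get_hw_semaphore()/igb_put_hw_semaphore()
pair above is the usual bracket around NVM/PHY access: acquire grabs the SW
semaphore (SMBI) and then the SW/FW semaphore (SWESMBI), and put clears both
bits.  A minimal sketch of a caller -- the function name below is made up for
illustration and is not part of the patch:

	static s32 igb_example_nvm_access(struct e1000_hw *hw)
	{
		s32 ret_val;

		/* illustration only -- not part of the patch */
		ret_val = igb_get_hw_semaphore(hw);
		if (ret_val)
			return ret_val;	/* -E1000_ERR_NVM on timeout */

		/* ... access the NVM or PHY here ... */

		igb_put_hw_semaphore(hw);	/* clears SMBI and SWESMBI */
		return 0;
	}
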
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index c4ba3a4..326b659 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -38,40 +38,40 @@
  * Functions that should not be called directly from drivers but can be used
  * by other files in this 'shared code'
  */
-s32  e1000_blink_led(struct e1000_hw *hw);
-s32  e1000_check_for_copper_link(struct e1000_hw *hw);
-s32  e1000_cleanup_led(struct e1000_hw *hw);
-s32  e1000_config_fc_after_link_up(struct e1000_hw *hw);
-s32  e1000_disable_pcie_master(struct e1000_hw *hw);
-s32  e1000_force_mac_fc(struct e1000_hw *hw);
-s32  e1000_get_auto_rd_done(struct e1000_hw *hw);
-s32  e1000_get_bus_info_pcie(struct e1000_hw *hw);
-s32  e1000_get_hw_semaphore(struct e1000_hw *hw);
-s32  e1000_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+s32  igb_blink_led(struct e1000_hw *hw);
+s32  igb_check_for_copper_link(struct e1000_hw *hw);
+s32  igb_cleanup_led(struct e1000_hw *hw);
+s32  igb_config_fc_after_link_up(struct e1000_hw *hw);
+s32  igb_disable_pcie_master(struct e1000_hw *hw);
+s32  igb_force_mac_fc(struct e1000_hw *hw);
+s32  igb_get_auto_rd_done(struct e1000_hw *hw);
+s32  igb_get_bus_info_pcie(struct e1000_hw *hw);
+s32  igb_get_hw_semaphore(struct e1000_hw *hw);
+s32  igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
 				       u16 *duplex);
-s32  e1000_id_led_init(struct e1000_hw *hw);
-s32  e1000_led_off(struct e1000_hw *hw);
-void e1000_update_mc_addr_list(struct e1000_hw *hw,
+s32  igb_id_led_init(struct e1000_hw *hw);
+s32  igb_led_off(struct e1000_hw *hw);
+void igb_update_mc_addr_list(struct e1000_hw *hw,
 			       u8 *mc_addr_list, u32 mc_addr_count,
 			       u32 rar_used_count, u32 rar_count);
-s32  e1000_setup_link(struct e1000_hw *hw);
-s32  e1000_validate_mdi_setting(struct e1000_hw *hw);
-s32  e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+s32  igb_setup_link(struct e1000_hw *hw);
+s32  igb_validate_mdi_setting(struct e1000_hw *hw);
+s32  igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
 			       u32 offset, u8 data);
 
-void e1000_clear_hw_cntrs_base(struct e1000_hw *hw);
-void e1000_clear_vfta(struct e1000_hw *hw);
-void e1000_config_collision_dist(struct e1000_hw *hw);
-void e1000_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
-void e1000_put_hw_semaphore(struct e1000_hw *hw);
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
-s32  e1000_check_alt_mac_addr(struct e1000_hw *hw);
-void e1000_remove_device(struct e1000_hw *hw);
-void e1000_reset_adaptive(struct e1000_hw *hw);
-void e1000_update_adaptive(struct e1000_hw *hw);
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
-
-bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
+void igb_clear_vfta(struct e1000_hw *hw);
+void igb_config_collision_dist(struct e1000_hw *hw);
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void igb_put_hw_semaphore(struct e1000_hw *hw);
+void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+s32  igb_check_alt_mac_addr(struct e1000_hw *hw);
+void igb_remove_device(struct e1000_hw *hw);
+void igb_reset_adaptive(struct e1000_hw *hw);
+void igb_update_adaptive(struct e1000_hw *hw);
+void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
 
 enum e1000_mng_mode {
 	e1000_mng_mode_none = 0,
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index 3dbaf9c..2897106 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -38,11 +38,11 @@
  *
  *  Enable/Raise the EEPROM clock bit.
  **/
-static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
 {
 	*eecd = *eecd | E1000_EECD_SK;
-	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
-	E1000_WRITE_FLUSH(hw);
+	wr32(E1000_EECD, *eecd);
+	wrfl();
 	udelay(hw->nvm.delay_usec);
 }
 
@@ -53,11 +53,11 @@ static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
  *
  *  Clear/Lower the EEPROM clock bit.
  **/
-static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
 {
 	*eecd = *eecd & ~E1000_EECD_SK;
-	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
-	E1000_WRITE_FLUSH(hw);
+	wr32(E1000_EECD, *eecd);
+	wrfl();
 	udelay(hw->nvm.delay_usec);
 }
 
@@ -71,10 +71,10 @@ static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
  *  "data" parameter will be shifted out to the EEPROM one bit at a time.
  *  In order to do this, "data" must be broken down into bits.
  **/
-static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
 {
 	struct e1000_nvm_info *nvm = &hw->nvm;
-	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u32 eecd = rd32(E1000_EECD);
 	u32 mask;
 
 	mask = 0x01 << (count - 1);
@@ -89,19 +89,19 @@ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
 		if (data & mask)
 			eecd |= E1000_EECD_DI;
 
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
-		E1000_WRITE_FLUSH(hw);
+		wr32(E1000_EECD, eecd);
+		wrfl();
 
 		udelay(nvm->delay_usec);
 
-		e1000_raise_eec_clk(hw, &eecd);
-		e1000_lower_eec_clk(hw, &eecd);
+		igb_raise_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
 
 		mask >>= 1;
 	} while (mask);
 
 	eecd &= ~E1000_EECD_DI;
-	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+	wr32(E1000_EECD, eecd);
 }
 
 /**
@@ -115,28 +115,28 @@ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
  *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
  *  always be clear.
  **/
-static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
 {
 	u32 eecd;
 	u32 i;
 	u16 data;
 
-	eecd = E1000_READ_REG(hw, E1000_EECD);
+	eecd = rd32(E1000_EECD);
 
 	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
 	data = 0;
 
 	for (i = 0; i < count; i++) {
 		data <<= 1;
-		e1000_raise_eec_clk(hw, &eecd);
+		igb_raise_eec_clk(hw, &eecd);
 
-		eecd = E1000_READ_REG(hw, E1000_EECD);
+		eecd = rd32(E1000_EECD);
 
 		eecd &= ~E1000_EECD_DI;
 		if (eecd & E1000_EECD_DO)
 			data |= 1;
 
-		e1000_lower_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
 	}
 
 	return data;
@@ -150,7 +150,7 @@ static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
  *  Polls the EEPROM status bit for either read or write completion based
  *  upon the value of 'ee_reg'.
  **/
-static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
 {
 	u32 attempts = 100000;
 	u32 i, reg = 0;
@@ -158,9 +158,9 @@ static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
 
 	for (i = 0; i < attempts; i++) {
 		if (ee_reg == E1000_NVM_POLL_READ)
-			reg = E1000_READ_REG(hw, E1000_EERD);
+			reg = rd32(E1000_EERD);
 		else
-			reg = E1000_READ_REG(hw, E1000_EEWR);
+			reg = rd32(E1000_EEWR);
 
 		if (reg & E1000_NVM_RW_REG_DONE) {
 			ret_val = 0;
@@ -181,27 +181,27 @@ static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
  *  Return successful if access grant bit set, else clear the request for
  *  EEPROM access and return -E1000_ERR_NVM (-1).
  **/
-s32 e1000_acquire_nvm(struct e1000_hw *hw)
+s32 igb_acquire_nvm(struct e1000_hw *hw)
 {
-	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u32 eecd = rd32(E1000_EECD);
 	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
 	s32 ret_val = 0;
 
 
-	E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
-	eecd = E1000_READ_REG(hw, E1000_EECD);
+	wr32(E1000_EECD, eecd | E1000_EECD_REQ);
+	eecd = rd32(E1000_EECD);
 
 	while (timeout) {
 		if (eecd & E1000_EECD_GNT)
 			break;
 		udelay(5);
-		eecd = E1000_READ_REG(hw, E1000_EECD);
+		eecd = rd32(E1000_EECD);
 		timeout--;
 	}
 
 	if (!timeout) {
 		eecd &= ~E1000_EECD_REQ;
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		wr32(E1000_EECD, eecd);
 		hw_dbg(hw, "Could not acquire NVM grant\n");
 		ret_val = -E1000_ERR_NVM;
 	}
@@ -215,35 +215,35 @@ s32 e1000_acquire_nvm(struct e1000_hw *hw)
  *
  *  Return the EEPROM to a standby state.
  **/
-static void e1000_standby_nvm(struct e1000_hw *hw)
+static void igb_standby_nvm(struct e1000_hw *hw)
 {
 	struct e1000_nvm_info *nvm = &hw->nvm;
-	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u32 eecd = rd32(E1000_EECD);
 
 	if (nvm->type == e1000_nvm_eeprom_microwire) {
 		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
-		E1000_WRITE_FLUSH(hw);
+		wr32(E1000_EECD, eecd);
+		wrfl();
 		udelay(nvm->delay_usec);
 
-		e1000_raise_eec_clk(hw, &eecd);
+		igb_raise_eec_clk(hw, &eecd);
 
 		/* Select EEPROM */
 		eecd |= E1000_EECD_CS;
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
-		E1000_WRITE_FLUSH(hw);
+		wr32(E1000_EECD, eecd);
+		wrfl();
 		udelay(nvm->delay_usec);
 
-		e1000_lower_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
 	} else if (nvm->type == e1000_nvm_eeprom_spi) {
 		/* Toggle CS to flush commands */
 		eecd |= E1000_EECD_CS;
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
-		E1000_WRITE_FLUSH(hw);
+		wr32(E1000_EECD, eecd);
+		wrfl();
 		udelay(nvm->delay_usec);
 		eecd &= ~E1000_EECD_CS;
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
-		E1000_WRITE_FLUSH(hw);
+		wr32(E1000_EECD, eecd);
+		wrfl();
 		udelay(nvm->delay_usec);
 	}
 }
@@ -258,17 +258,17 @@ static void e1000_stop_nvm(struct e1000_hw *hw)
 {
 	u32 eecd;
 
-	eecd = E1000_READ_REG(hw, E1000_EECD);
+	eecd = rd32(E1000_EECD);
 	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
 		/* Pull CS high */
 		eecd |= E1000_EECD_CS;
-		e1000_lower_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
 	} else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
 		/* CS on Microwire is active-high */
 		eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
-		e1000_raise_eec_clk(hw, &eecd);
-		e1000_lower_eec_clk(hw, &eecd);
+		wr32(E1000_EECD, eecd);
+		igb_raise_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
 	}
 }
 
@@ -278,15 +278,15 @@ static void e1000_stop_nvm(struct e1000_hw *hw)
  *
  *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
  **/
-void e1000_release_nvm(struct e1000_hw *hw)
+void igb_release_nvm(struct e1000_hw *hw)
 {
 	u32 eecd;
 
 	e1000_stop_nvm(hw);
 
-	eecd = E1000_READ_REG(hw, E1000_EECD);
+	eecd = rd32(E1000_EECD);
 	eecd &= ~E1000_EECD_REQ;
-	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+	wr32(E1000_EECD, eecd);
 }
 
 /**
@@ -295,10 +295,10 @@ void e1000_release_nvm(struct e1000_hw *hw)
  *
  *  Sets up the EEPROM for reading and writing.
  **/
-static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
 {
 	struct e1000_nvm_info *nvm = &hw->nvm;
-	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u32 eecd = rd32(E1000_EECD);
 	s32 ret_val = 0;
 	u16 timeout = 0;
 	u8 spi_stat_reg;
@@ -307,14 +307,14 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
 	if (nvm->type == e1000_nvm_eeprom_microwire) {
 		/* Clear SK and DI */
 		eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		wr32(E1000_EECD, eecd);
 		/* Set CS */
 		eecd |= E1000_EECD_CS;
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		wr32(E1000_EECD, eecd);
 	} else if (nvm->type == e1000_nvm_eeprom_spi) {
 		/* Clear SK and CS */
 		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
-		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		wr32(E1000_EECD, eecd);
 		udelay(1);
 		timeout = NVM_MAX_RETRY_SPI;
 
@@ -325,14 +325,14 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
 		 * not cleared within 'timeout', then error out.
 		 */
 		while (timeout) {
-			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+			igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
 						 hw->nvm.opcode_bits);
-			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+			spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
 			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
 				break;
 
 			udelay(5);
-			e1000_standby_nvm(hw);
+			igb_standby_nvm(hw);
 			timeout--;
 		}
 
@@ -356,7 +356,7 @@ out:
  *
  *  Reads a 16 bit word from the EEPROM using the EERD register.
  **/
-s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
 	struct e1000_nvm_info *nvm = &hw->nvm;
 	u32 i, eerd = 0;
@@ -377,12 +377,12 @@ s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
 		       E1000_NVM_RW_REG_START;
 
-		E1000_WRITE_REG(hw, E1000_EERD, eerd);
-		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		wr32(E1000_EERD, eerd);
+		ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
 		if (ret_val)
 			break;
 
-		data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+		data[i] = (rd32(E1000_EERD) >>
 			   E1000_NVM_RW_REG_DATA);
 	}
 
@@ -402,7 +402,7 @@ out:
  *  If e1000_update_nvm_checksum is not called after this function, the
  *  EEPROM will most likely contain an invalid checksum.
  **/
-s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
 	struct e1000_nvm_info *nvm = &hw->nvm;
 	s32 ret_val;
@@ -428,17 +428,17 @@ s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 	while (widx < words) {
 		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
 
-		ret_val = e1000_ready_nvm_eeprom(hw);
+		ret_val = igb_ready_nvm_eeprom(hw);
 		if (ret_val)
 			goto release;
 
-		e1000_standby_nvm(hw);
+		igb_standby_nvm(hw);
 
 		/* Send the WRITE ENABLE command (8 bit opcode) */
-		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+		igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
 					 nvm->opcode_bits);
 
-		e1000_standby_nvm(hw);
+		igb_standby_nvm(hw);
 
 		/*
 		 * Some SPI eeproms use the 8th address bit embedded in the
@@ -448,19 +448,19 @@ s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 			write_opcode |= NVM_A8_OPCODE_SPI;
 
 		/* Send the Write command (8-bit opcode + addr) */
-		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
-		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+		igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
 					 nvm->address_bits);
 
 		/* Loop to allow for up to whole page write of eeprom */
 		while (widx < words) {
 			u16 word_out = data[widx];
 			word_out = (word_out >> 8) | (word_out << 8);
-			e1000_shift_out_eec_bits(hw, word_out, 16);
+			igb_shift_out_eec_bits(hw, word_out, 16);
 			widx++;
 
 			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
-				e1000_standby_nvm(hw);
+				igb_standby_nvm(hw);
 				break;
 			}
 		}
@@ -482,7 +482,7 @@ out:
  *  Reads the product board assembly (PBA) number from the EEPROM and stores
  *  the value in part_num.
  **/
-s32 e1000_read_part_num(struct e1000_hw *hw, u32 *part_num)
+s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num)
 {
 	s32  ret_val;
 	u16 nvm_data;
@@ -513,7 +513,7 @@ out:
  *  Since devices with two ports use the same EEPROM, we increment the
  *  last bit in the MAC address for the second port.
  **/
-s32 e1000_read_mac_addr(struct e1000_hw *hw)
+s32 igb_read_mac_addr(struct e1000_hw *hw)
 {
 	s32  ret_val = 0;
 	u16 offset, nvm_data, i;
@@ -547,7 +547,7 @@ out:
  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
  **/
-s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
 	u16 checksum = 0;
@@ -580,7 +580,7 @@ out:
  *  up to the checksum.  Then calculates the EEPROM checksum and writes the
  *  value to the EEPROM.
  **/
-s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+s32 igb_update_nvm_checksum(struct e1000_hw *hw)
 {
 	s32  ret_val;
 	u16 checksum = 0;
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
index 0b0e546..1041c34 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/igb/e1000_nvm.h
@@ -28,13 +28,13 @@
 #ifndef _E1000_NVM_H_
 #define _E1000_NVM_H_
 
-s32  e1000_acquire_nvm(struct e1000_hw *hw);
-void e1000_release_nvm(struct e1000_hw *hw);
-s32  e1000_read_mac_addr(struct e1000_hw *hw);
-s32  e1000_read_part_num(struct e1000_hw *hw, u32 *part_num);
-s32  e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32  e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32  e1000_validate_nvm_checksum(struct e1000_hw *hw);
-s32  e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32  igb_acquire_nvm(struct e1000_hw *hw);
+void igb_release_nvm(struct e1000_hw *hw);
+s32  igb_read_mac_addr(struct e1000_hw *hw);
+s32  igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32  igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  igb_validate_nvm_checksum(struct e1000_hw *hw);
+s32  igb_update_nvm_checksum(struct e1000_hw *hw);
 
 #endif
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 6ca05fe..08a86b1 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -31,14 +31,14 @@
 #include "e1000_mac.h"
 #include "e1000_phy.h"
 
-static s32  e1000_get_phy_cfg_done(struct e1000_hw *hw);
-static void e1000_release_phy(struct e1000_hw *hw);
-static s32  e1000_acquire_phy(struct e1000_hw *hw);
-static s32  e1000_phy_reset_dsp(struct e1000_hw *hw);
-static s32  e1000_phy_setup_autoneg(struct e1000_hw *hw);
-static void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+static s32  igb_get_phy_cfg_done(struct e1000_hw *hw);
+static void igb_release_phy(struct e1000_hw *hw);
+static s32  igb_acquire_phy(struct e1000_hw *hw);
+static s32  igb_phy_reset_dsp(struct e1000_hw *hw);
+static s32  igb_phy_setup_autoneg(struct e1000_hw *hw);
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
 					       u16 *phy_ctrl);
-static s32  e1000_wait_autoneg(struct e1000_hw *hw);
+static s32  igb_wait_autoneg(struct e1000_hw *hw);
 
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] =
@@ -68,11 +68,11 @@ static const u16 e1000_igp_2_cable_length_table[] =
  *  is blocked.  If a reset is not blocked return 0, otherwise
  *  return E1000_BLK_PHY_RESET (12).
  **/
-s32 e1000_check_reset_block(struct e1000_hw *hw)
+s32 igb_check_reset_block(struct e1000_hw *hw)
 {
 	u32 manc;
 
-	manc = E1000_READ_REG(hw, E1000_MANC);
+	manc = rd32(E1000_MANC);
 
 	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
 	       E1000_BLK_PHY_RESET : 0;
@@ -85,7 +85,7 @@ s32 e1000_check_reset_block(struct e1000_hw *hw)
  *  Reads the PHY registers and stores the PHY ID and possibly the PHY
  *  revision in the hardware structure.
  **/
-s32 e1000_get_phy_id(struct e1000_hw *hw)
+s32 igb_get_phy_id(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val = 0;
@@ -114,7 +114,7 @@ out:
  *
  *  Reset the digital signal processor.
  **/
-static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
+static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
 {
 	s32 ret_val;
 
@@ -137,7 +137,7 @@ out:
  *  Reads the MDI control register in the PHY at offset and stores the
  *  information read to data.
  **/
-static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	u32 i, mdic = 0;
@@ -158,7 +158,7 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 		(phy->addr << E1000_MDIC_PHY_SHIFT) |
 		(E1000_MDIC_OP_READ));
 
-	E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+	wr32(E1000_MDIC, mdic);
 
 	/*
 	 * Poll the ready bit to see if the MDI read completed
@@ -167,7 +167,7 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
 	 */
 	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
 		udelay(50);
-		mdic = E1000_READ_REG(hw, E1000_MDIC);
+		mdic = rd32(E1000_MDIC);
 		if (mdic & E1000_MDIC_READY)
 			break;
 	}
@@ -195,7 +195,7 @@ out:
  *
  *  Writes data to MDI control register in the PHY at offset.
  **/
-static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	u32 i, mdic = 0;
@@ -217,7 +217,7 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 		(phy->addr << E1000_MDIC_PHY_SHIFT) |
 		(E1000_MDIC_OP_WRITE));
 
-	E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+	wr32(E1000_MDIC, mdic);
 
 	/*
 	 * Poll the ready bit to see if the MDI write completed
@@ -226,7 +226,7 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
 	 */
 	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
 		udelay(50);
-		mdic = E1000_READ_REG(hw, E1000_MDIC);
+		mdic = rd32(E1000_MDIC);
 		if (mdic & E1000_MDIC_READY)
 			break;
 	}
@@ -255,29 +255,29 @@ out:
  *  and storing the retrieved information in data.  Release any acquired
  *  semaphores before exiting.
  **/
-s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
 {
 	s32 ret_val;
 
-	ret_val = e1000_acquire_phy(hw);
+	ret_val = igb_acquire_phy(hw);
 	if (ret_val)
 		goto out;
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
-		ret_val = e1000_write_phy_reg_mdic(hw,
+		ret_val = igb_write_phy_reg_mdic(hw,
 						   IGP01E1000_PHY_PAGE_SELECT,
 						   (u16)offset);
 		if (ret_val) {
-			e1000_release_phy(hw);
+			igb_release_phy(hw);
 			goto out;
 		}
 	}
 
-	ret_val = e1000_read_phy_reg_mdic(hw,
+	ret_val = igb_read_phy_reg_mdic(hw,
 					  MAX_PHY_REG_ADDRESS & offset,
 					  data);
 
-	e1000_release_phy(hw);
+	igb_release_phy(hw);
 
 out:
 	return ret_val;
@@ -292,29 +292,29 @@ out:
  *  Acquires semaphore, if necessary, then writes the data to PHY register
  *  at the offset.  Release any acquired semaphores before exiting.
  **/
-s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
 {
 	s32 ret_val;
 
-	ret_val = e1000_acquire_phy(hw);
+	ret_val = igb_acquire_phy(hw);
 	if (ret_val)
 		goto out;
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
-		ret_val = e1000_write_phy_reg_mdic(hw,
+		ret_val = igb_write_phy_reg_mdic(hw,
 						   IGP01E1000_PHY_PAGE_SELECT,
 						   (u16)offset);
 		if (ret_val) {
-			e1000_release_phy(hw);
+			igb_release_phy(hw);
 			goto out;
 		}
 	}
 
-	ret_val = e1000_write_phy_reg_mdic(hw,
+	ret_val = igb_write_phy_reg_mdic(hw,
 					   MAX_PHY_REG_ADDRESS & offset,
 					   data);
 
-	e1000_release_phy(hw);
+	igb_release_phy(hw);
 
 out:
 	return ret_val;
@@ -327,7 +327,7 @@ out:
  *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit clock
  *  and downshift values are set also.
  **/
-s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -421,7 +421,7 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
 	}
 
 	/* Commit the changes. */
-	ret_val = e1000_phy_sw_reset(hw);
+	ret_val = igb_phy_sw_reset(hw);
 	if (ret_val) {
 		hw_dbg(hw, "Error committing the PHY changes\n");
 		goto out;
@@ -438,7 +438,7 @@ out:
  *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
  *  igp PHYs.
  **/
-s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -465,7 +465,7 @@ s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
 	if (phy->type == e1000_phy_igp) {
 		/* disable lplu d3 during driver init */
 		if (hw->phy.ops.set_d3_lplu_state)
-			ret_val = hw->phy.ops.set_d3_lplu_state(hw, 0);
+			ret_val = hw->phy.ops.set_d3_lplu_state(hw, false);
 		if (ret_val) {
 			hw_dbg(hw, "Error Disabling LPLU D3\n");
 			goto out;
@@ -473,7 +473,7 @@ s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
 	}
 
 	/* disable lplu d0 during driver init */
-	ret_val = hw->phy.ops.set_d0_lplu_state(hw, 0);
+	ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
 	if (ret_val) {
 		hw_dbg(hw, "Error Disabling LPLU D0\n");
 		goto out;
@@ -578,7 +578,7 @@ out:
  *  and restart the negotiation process with the link partner.  If
  *  autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
  **/
-s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+s32 igb_copper_link_autoneg(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -598,7 +598,7 @@ s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 		phy->autoneg_advertised = phy->autoneg_mask;
 
 	hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n");
-	ret_val = e1000_phy_setup_autoneg(hw);
+	ret_val = igb_phy_setup_autoneg(hw);
 	if (ret_val) {
 		hw_dbg(hw, "Error Setting up Auto-Negotiation\n");
 		goto out;
@@ -623,7 +623,7 @@ s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 	 * check at a later time (for example, callback routine).
 	 */
 	if (phy->autoneg_wait_to_complete) {
-		ret_val = e1000_wait_autoneg(hw);
+		ret_val = igb_wait_autoneg(hw);
 		if (ret_val) {
 			hw_dbg(hw, "Error while waiting for "
 				 "autoneg to complete\n");
@@ -631,7 +631,7 @@ s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 		}
 	}
 
-	hw->mac.get_link_status = 1;
+	hw->mac.get_link_status = true;
 
 out:
 	return ret_val;
@@ -646,7 +646,7 @@ out:
  *  return successful.  Otherwise, set up advertisement and flow control to
  *  the appropriate values for the wanted auto-negotiation.
  **/
-static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -812,7 +812,7 @@ out:
  *  auto-crossover to force MDI manually.  Waits for link and returns
  *  successful if link up is successful, else -E1000_ERR_PHY (-2).
  **/
-s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -823,7 +823,7 @@ s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
 
 	ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_data);
 	if (ret_val)
@@ -854,7 +854,7 @@ s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
 		hw_dbg(hw,
 		       "Waiting for forced speed/duplex link on IGP phy.\n");
 
-		ret_val = e1000_phy_has_link(hw,
+		ret_val = igb_phy_has_link(hw,
 						     PHY_FORCE_LIMIT,
 						     100000,
 						     &link);
@@ -865,7 +865,7 @@ s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
 			hw_dbg(hw, "Link taking longer than expected.\n");
 
 		/* Try once more */
-		ret_val = e1000_phy_has_link(hw,
+		ret_val = igb_phy_has_link(hw,
 						     PHY_FORCE_LIMIT,
 						     100000,
 						     &link);
@@ -887,7 +887,7 @@ out:
  *  After reset, TX_CLK and CRS on TX must be set.  Return successful upon
  *  successful completion, else return corresponding error code.
  **/
-s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -915,7 +915,7 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
 
 	/* Reset the phy to commit changes. */
 	phy_data |= MII_CR_RESET;
@@ -930,7 +930,7 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 		hw_dbg(hw,
 		       "Waiting for forced speed/duplex link on M88 phy.\n");
 
-		ret_val = e1000_phy_has_link(hw,
+		ret_val = igb_phy_has_link(hw,
 						     PHY_FORCE_LIMIT,
 						     100000,
 						     &link);
@@ -947,13 +947,13 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 						      0x001d);
 			if (ret_val)
 				goto out;
-			ret_val = e1000_phy_reset_dsp(hw);
+			ret_val = igb_phy_reset_dsp(hw);
 			if (ret_val)
 				goto out;
 		}
 
 		/* Try once more */
-		ret_val = e1000_phy_has_link(hw, PHY_FORCE_LIMIT,
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
 					     100000, &link);
 		if (ret_val)
 			goto out;
@@ -1004,7 +1004,7 @@ out:
  *  caller must write to the PHY_CONTROL register for these settings to
  *  take effect.
  **/
-static void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
 					       u16 *phy_ctrl)
 {
 	struct e1000_mac_info *mac = &hw->mac;
@@ -1014,7 +1014,7 @@ static void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw,
 	hw->fc.type = e1000_fc_none;
 
 	/* Force speed/duplex on the mac */
-	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl = rd32(E1000_CTRL);
 	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	ctrl &= ~E1000_CTRL_SPD_SEL;
 
@@ -1048,9 +1048,9 @@ static void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw,
 		hw_dbg(hw, "Forcing 10mb\n");
 	}
 
-	e1000_config_collision_dist(hw);
+	igb_config_collision_dist(hw);
 
-	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	wr32(E1000_CTRL, ctrl);
 }
 
 /**
@@ -1067,7 +1067,7 @@ static void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw,
  *  During driver activity, SmartSpeed should be enabled so performance is
  *  maintained.
  **/
-s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -1153,7 +1153,7 @@ out:
  *
  *  A downshift is detected by querying the PHY link health.
  **/
-s32 e1000_check_downshift(struct e1000_hw *hw)
+s32 igb_check_downshift(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -1173,7 +1173,7 @@ s32 e1000_check_downshift(struct e1000_hw *hw)
 		break;
 	default:
 		/* speed downshift not supported */
-		phy->speed_downgraded = 0;
+		phy->speed_downgraded = false;
 		ret_val = 0;
 		goto out;
 	}
@@ -1181,7 +1181,7 @@ s32 e1000_check_downshift(struct e1000_hw *hw)
 	ret_val = hw->phy.ops.read_phy_reg(hw, offset, &phy_data);
 
 	if (!ret_val)
-		phy->speed_downgraded = (phy_data & mask) ? 1 : 0;
+		phy->speed_downgraded = (phy_data & mask) ? true : false;
 
 out:
 	return ret_val;
@@ -1195,7 +1195,7 @@ out:
  *
  *  Polarity is determined based on the PHY specific status register.
  **/
-static s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+static s32 igb_check_polarity_m88(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -1220,7 +1220,7 @@ static s32 e1000_check_polarity_m88(struct e1000_hw *hw)
  *  Polarity is determined based on the PHY port status register, and the
  *  current speed (since there is no polarity at 100Mbps).
  **/
-static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+static s32 igb_check_polarity_igp(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -1266,7 +1266,7 @@ out:
  *  Waits for auto-negotiation to complete or for the auto-negotiation time
  *  limit to expire, whichever happens first.
  **/
-static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+static s32 igb_wait_autoneg(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
 	u16 i, phy_status;
@@ -1300,7 +1300,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
  *
  *  Polls the PHY status register for link, 'iterations' number of times.
  **/
-s32 e1000_phy_has_link(struct e1000_hw *hw, u32 iterations,
+s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
 			       u32 usec_interval, bool *success)
 {
 	s32 ret_val = 0;
@@ -1326,7 +1326,7 @@ s32 e1000_phy_has_link(struct e1000_hw *hw, u32 iterations,
 			udelay(usec_interval);
 	}
 
-	*success = (i < iterations) ? 1 : 0;
+	*success = (i < iterations) ? true : false;
 
 	return ret_val;
 }
@@ -1346,7 +1346,7 @@ s32 e1000_phy_has_link(struct e1000_hw *hw, u32 iterations,
  *	3			110 - 140 meters
  *	4			> 140 meters
  **/
-s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+s32 igb_get_cable_length_m88(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -1379,7 +1379,7 @@ out:
  *  into a lookup table to obtain the approximate cable length
  *  for each channel.
  **/
-s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val = 0;
@@ -1451,7 +1451,7 @@ out:
  *  special status register to determine MDI/MDIx and current speed.  If
  *  speed is 1000, then determine cable length, local and remote receiver.
  **/
-s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+s32 igb_get_phy_info_m88(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32  ret_val;
@@ -1464,7 +1464,7 @@ s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
 		goto out;
 	}
 
-	ret_val = e1000_phy_has_link(hw, 1, 0, &link);
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
 	if (ret_val)
 		goto out;
 
@@ -1480,10 +1480,10 @@ s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
 		goto out;
 
 	phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
-				   ? 1
-				   : 0;
+				   ? true
+				   : false;
 
-	ret_val = e1000_check_polarity_m88(hw);
+	ret_val = igb_check_polarity_m88(hw);
 	if (ret_val)
 		goto out;
 
@@ -1492,7 +1492,7 @@ s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? 1 : 0;
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
 
 	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
 		ret_val = hw->phy.ops.get_cable_length(hw);
@@ -1531,14 +1531,14 @@ out:
  *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
  *  determine the cable length, local and remote receiver.
  **/
-s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+s32 igb_get_phy_info_igp(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 	u16 data;
 	bool link;
 
-	ret_val = e1000_phy_has_link(hw, 1, 0, &link);
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
 	if (ret_val)
 		goto out;
 
@@ -1548,9 +1548,9 @@ s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
 		goto out;
 	}
 
-	phy->polarity_correction = 1;
+	phy->polarity_correction = true;
 
-	ret_val = e1000_check_polarity_igp(hw);
+	ret_val = igb_check_polarity_igp(hw);
 	if (ret_val)
 		goto out;
 
@@ -1559,7 +1559,7 @@ s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? 1 : 0;
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
 
 	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
 	    IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -1596,7 +1596,7 @@ out:
  *  Does a software reset of the PHY by reading the PHY control register and
  *  setting/writing the control register reset bit to the PHY.
  **/
-s32 e1000_phy_sw_reset(struct e1000_hw *hw)
+s32 igb_phy_sw_reset(struct e1000_hw *hw)
 {
 	s32 ret_val;
 	u16 phy_ctrl;
@@ -1625,36 +1625,36 @@ out:
  *  bit in the PHY.  Wait the appropriate delay time for the device to
  *  reset and release the semaphore (if necessary).
  **/
-s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+s32 igb_phy_hw_reset(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32  ret_val;
 	u32 ctrl;
 
-	ret_val = e1000_check_reset_block(hw);
+	ret_val = igb_check_reset_block(hw);
 	if (ret_val) {
 		ret_val = 0;
 		goto out;
 	}
 
-	ret_val = e1000_acquire_phy(hw);
+	ret_val = igb_acquire_phy(hw);
 	if (ret_val)
 		goto out;
 
-	ctrl = E1000_READ_REG(hw, E1000_CTRL);
-	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
-	E1000_WRITE_FLUSH(hw);
+	ctrl = rd32(E1000_CTRL);
+	wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	wrfl();
 
 	udelay(phy->reset_delay_us);
 
-	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-	E1000_WRITE_FLUSH(hw);
+	wr32(E1000_CTRL, ctrl);
+	wrfl();
 
 	udelay(150);
 
-	e1000_release_phy(hw);
+	igb_release_phy(hw);
 
-	ret_val = e1000_get_phy_cfg_done(hw);
+	ret_val = igb_get_phy_cfg_done(hw);
 
 out:
 	return ret_val;
@@ -1669,7 +1669,7 @@ out:
  *  Return success if silicon family did not implement a family specific
  *  get_cfg_done function.
  **/
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+static s32 igb_get_phy_cfg_done(struct e1000_hw *hw)
 {
 	if (hw->phy.ops.get_cfg_done)
 		return hw->phy.ops.get_cfg_done(hw);
@@ -1684,7 +1684,7 @@ static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
  *  Return if silicon family does not require a semaphore when accessing the
  *  PHY.
  **/
-static void e1000_release_phy(struct e1000_hw *hw)
+static void igb_release_phy(struct e1000_hw *hw)
 {
 	if (hw->phy.ops.release_phy)
 		hw->phy.ops.release_phy(hw);
@@ -1697,7 +1697,7 @@ static void e1000_release_phy(struct e1000_hw *hw)
  *  Return success if silicon family does not require a semaphore when
  *  accessing the PHY.
  **/
-static s32 e1000_acquire_phy(struct e1000_hw *hw)
+static s32 igb_acquire_phy(struct e1000_hw *hw)
 {
 	if (hw->phy.ops.acquire_phy)
 		return hw->phy.ops.acquire_phy(hw);
@@ -1712,7 +1712,7 @@ static s32 e1000_acquire_phy(struct e1000_hw *hw)
  *  When the silicon family has not implemented a forced speed/duplex
  *  function for the PHY, simply return 0.
  **/
-s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+s32 igb_phy_force_speed_duplex(struct e1000_hw *hw)
 {
 	if (hw->phy.ops.force_speed_duplex)
 		return hw->phy.ops.force_speed_duplex(hw);
@@ -1726,7 +1726,7 @@ s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
  *
  *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
  **/
-s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
 {
 	hw_dbg(hw, "Running IGP 3 PHY init script\n");
 
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 02a6335..7e75dab 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -43,27 +43,27 @@ enum e1000_smart_speed {
 	e1000_smart_speed_off
 };
 
-s32  e1000_check_downshift(struct e1000_hw *hw);
-s32  e1000_check_reset_block(struct e1000_hw *hw);
-s32  e1000_copper_link_autoneg(struct e1000_hw *hw);
-s32  e1000_phy_force_speed_duplex(struct e1000_hw *hw);
-s32  e1000_copper_link_setup_igp(struct e1000_hw *hw);
-s32  e1000_copper_link_setup_m88(struct e1000_hw *hw);
-s32  e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-s32  e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-s32  e1000_get_cable_length_m88(struct e1000_hw *hw);
-s32  e1000_get_cable_length_igp_2(struct e1000_hw *hw);
-s32  e1000_get_phy_id(struct e1000_hw *hw);
-s32  e1000_get_phy_info_igp(struct e1000_hw *hw);
-s32  e1000_get_phy_info_m88(struct e1000_hw *hw);
-s32  e1000_phy_sw_reset(struct e1000_hw *hw);
-s32  e1000_phy_hw_reset(struct e1000_hw *hw);
-s32  e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-s32  e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-s32  e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-s32  e1000_phy_has_link(struct e1000_hw *hw, u32 iterations,
+s32  igb_check_downshift(struct e1000_hw *hw);
+s32  igb_check_reset_block(struct e1000_hw *hw);
+s32  igb_copper_link_autoneg(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex(struct e1000_hw *hw);
+s32  igb_copper_link_setup_igp(struct e1000_hw *hw);
+s32  igb_copper_link_setup_m88(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32  igb_get_cable_length_m88(struct e1000_hw *hw);
+s32  igb_get_cable_length_igp_2(struct e1000_hw *hw);
+s32  igb_get_phy_id(struct e1000_hw *hw);
+s32  igb_get_phy_info_igp(struct e1000_hw *hw);
+s32  igb_get_phy_info_m88(struct e1000_hw *hw);
+s32  igb_phy_sw_reset(struct e1000_hw *hw);
+s32  igb_phy_hw_reset(struct e1000_hw *hw);
+s32  igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32  igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
 				u32 usec_interval, bool *success);
-s32  e1000_phy_init_script_igp3(struct e1000_hw *hw);
+s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
 
 /* IGP01E1000 Specific Registers */
 #define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 5b1abe9..ff187b7 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -258,20 +258,13 @@
 
 #define E1000_REGISTER(a, reg) reg
 
-#define E1000_WRITE_REG(a, reg, value) ( \
-    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))
+#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
+#define rd32(reg) (readl(hw->hw_addr + reg))
+#define wrfl() ((void)rd32(E1000_STATUS))
 
-#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))
-
-#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
-    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))
-
-#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
-    readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))
-
-#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
-#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
-
-#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+#define array_wr32(reg, offset, value) \
+	(writel(value, hw->hw_addr + reg + ((offset) << 2)))
+#define array_rd32(reg, offset) \
+	(readl(hw->hw_addr + reg + ((offset) << 2)))
 
 #endif
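
A note on the accessor change above: rd32()/wr32()/wrfl() and the array_*
variants drop the explicit hw argument the old E1000_*_REG macros took and
instead expect a 'struct e1000_hw *hw' to be in scope at the call site, which
is why every converted caller in this patch keeps a local hw pointer.  A
minimal sketch of the pattern -- the function name below is made up for
illustration and is not part of the patch:

	static void igb_example_toggle_phy_reset(struct e1000_hw *hw)
	{
		/* old style took hw explicitly:
		 *	ctrl = E1000_READ_REG(hw, E1000_CTRL);
		 *	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
		 *	E1000_WRITE_FLUSH(hw);
		 * the replacements pick up the local 'hw' implicitly:
		 */
		u32 ctrl = rd32(E1000_CTRL);

		wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
		wrfl();		/* posted-write flush via a read of E1000_STATUS */
	}
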
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index cb7a30f..7d5d711 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -61,7 +61,7 @@ struct igb_adapter;
 #define IGB_MIN_ITR_USECS                 10
 
 /* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES                  4
+#define IGB_MAX_RX_QUEUES                  1
 
 /* RX descriptor control thresholds.
  * PTHRESH - MAC will consider prefetch if it has fewer than this number of
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 345cbb5..94ccbbf 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -43,7 +43,7 @@ struct igb_stats {
 	int stat_offset;
 };
 
-#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \
+#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \
 		      offsetof(struct igb_adapter, m)
 static const struct igb_stats igb_gstrings_stats[] = {
 	{ "rx_packets", IGB_STAT(stats.gprc) },
@@ -149,7 +149,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	ecmd->transceiver = XCVR_INTERNAL;
 
-	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU) {
+	if (rd32(E1000_STATUS) & E1000_STATUS_LU) {
 
 		adapter->hw.mac.ops.get_speed_and_duplex(hw,
 					&adapter->link_speed,
@@ -180,7 +180,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	/* When SoL/IDER sessions are active, autoneg/speed/duplex
 	 * cannot be changed */
-	if (e1000_check_reset_block(hw)) {
+	if (igb_check_reset_block(hw)) {
 		dev_err(&adapter->pdev->dev, "Cannot change link "
 			"characteristics when SoL/IDER is active.\n");
 		return -EINVAL;
@@ -268,7 +268,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
 			igb_reset(adapter);
 	} else
 		retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
-			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+			  igb_setup_link(hw) : igb_force_mac_fc(hw));
 
 	clear_bit(__IGB_RESETTING, &adapter->state);
 	return retval;
@@ -353,74 +353,74 @@ static void igb_get_regs(struct net_device *netdev,
 	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
 
 	/* General Registers */
-	regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
-	regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
-	regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
-	regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
-	regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
-	regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
-	regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
-	regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
-	regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
-	regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
-	regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
-	regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
+	regs_buff[0] = rd32(E1000_CTRL);
+	regs_buff[1] = rd32(E1000_STATUS);
+	regs_buff[2] = rd32(E1000_CTRL_EXT);
+	regs_buff[3] = rd32(E1000_MDIC);
+	regs_buff[4] = rd32(E1000_SCTL);
+	regs_buff[5] = rd32(E1000_CONNSW);
+	regs_buff[6] = rd32(E1000_VET);
+	regs_buff[7] = rd32(E1000_LEDCTL);
+	regs_buff[8] = rd32(E1000_PBA);
+	regs_buff[9] = rd32(E1000_PBS);
+	regs_buff[10] = rd32(E1000_FRTIMER);
+	regs_buff[11] = rd32(E1000_TCPTIMER);
 
 	/* NVM Register */
-	regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
+	regs_buff[12] = rd32(E1000_EECD);
 
 	/* Interrupt */
-	regs_buff[13] = E1000_READ_REG(hw, E1000_EICR);
-	regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
-	regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
-	regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
-	regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
-	regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
-	regs_buff[19] = E1000_READ_REG(hw, E1000_ICR);
-	regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
-	regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
-	regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
-	regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
-	regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
-	regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
+	regs_buff[13] = rd32(E1000_EICR);
+	regs_buff[14] = rd32(E1000_EICS);
+	regs_buff[15] = rd32(E1000_EIMS);
+	regs_buff[16] = rd32(E1000_EIMC);
+	regs_buff[17] = rd32(E1000_EIAC);
+	regs_buff[18] = rd32(E1000_EIAM);
+	regs_buff[19] = rd32(E1000_ICR);
+	regs_buff[20] = rd32(E1000_ICS);
+	regs_buff[21] = rd32(E1000_IMS);
+	regs_buff[22] = rd32(E1000_IMC);
+	regs_buff[23] = rd32(E1000_IAC);
+	regs_buff[24] = rd32(E1000_IAM);
+	regs_buff[25] = rd32(E1000_IMIRVP);
 
 	/* Flow Control */
-	regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
-	regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
-	regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
-	regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
-	regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
-	regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
+	regs_buff[26] = rd32(E1000_FCAL);
+	regs_buff[27] = rd32(E1000_FCAH);
+	regs_buff[28] = rd32(E1000_FCTTV);
+	regs_buff[29] = rd32(E1000_FCRTL);
+	regs_buff[30] = rd32(E1000_FCRTH);
+	regs_buff[31] = rd32(E1000_FCRTV);
 
 	/* Receive */
-	regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
-	regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
-	regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
-	regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
-	regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
-	regs_buff[37] = E1000_READ_REG(hw, E1000_VMD_CTL);
+	regs_buff[32] = rd32(E1000_RCTL);
+	regs_buff[33] = rd32(E1000_RXCSUM);
+	regs_buff[34] = rd32(E1000_RLPML);
+	regs_buff[35] = rd32(E1000_RFCTL);
+	regs_buff[36] = rd32(E1000_MRQC);
+	regs_buff[37] = rd32(E1000_VMD_CTL);
 
 	/* Transmit */
-	regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
-	regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
-	regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
-	regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
+	regs_buff[38] = rd32(E1000_TCTL);
+	regs_buff[39] = rd32(E1000_TCTL_EXT);
+	regs_buff[40] = rd32(E1000_TIPG);
+	regs_buff[41] = rd32(E1000_DTXCTL);
 
 	/* Wake Up */
-	regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
-	regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
-	regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
-	regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
-	regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
+	regs_buff[42] = rd32(E1000_WUC);
+	regs_buff[43] = rd32(E1000_WUFC);
+	regs_buff[44] = rd32(E1000_WUS);
+	regs_buff[45] = rd32(E1000_IPAV);
+	regs_buff[46] = rd32(E1000_WUPL);
 
 	/* MAC */
-	regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
-	regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
-	regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
-	regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
-	regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
-	regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
-	regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
+	regs_buff[47] = rd32(E1000_PCS_CFG0);
+	regs_buff[48] = rd32(E1000_PCS_LCTL);
+	regs_buff[49] = rd32(E1000_PCS_LSTAT);
+	regs_buff[50] = rd32(E1000_PCS_ANADV);
+	regs_buff[51] = rd32(E1000_PCS_LPAB);
+	regs_buff[52] = rd32(E1000_PCS_NPTX);
+	regs_buff[53] = rd32(E1000_PCS_LPABNP);
 
 	/* Statistics */
 	regs_buff[54] = adapter->stats.crcerrs;
@@ -497,69 +497,69 @@ static void igb_get_regs(struct net_device *netdev,
 	#define E1000_FFLT_REG(_i)    (0x05F00 + ((_i) * 8))
 
 	for (i = 0; i < 4; i++)
-		regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
+		regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE_REG(i));
+		regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
+		regs_buff[129 + i] = rd32(E1000_RDBAL(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
+		regs_buff[133 + i] = rd32(E1000_RDBAH(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
+		regs_buff[137 + i] = rd32(E1000_RDLEN(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
+		regs_buff[141 + i] = rd32(E1000_RDH(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
+		regs_buff[145 + i] = rd32(E1000_RDT(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
+		regs_buff[149 + i] = rd32(E1000_RXDCTL(i));
 
 	for (i = 0; i < 10; i++)
-		regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
+		regs_buff[153 + i] = rd32(E1000_EITR(i));
 	for (i = 0; i < 8; i++)
-		regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
+		regs_buff[163 + i] = rd32(E1000_IMIR(i));
 	for (i = 0; i < 8; i++)
-		regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
+		regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
 	for (i = 0; i < 16; i++)
-		regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
+		regs_buff[179 + i] = rd32(E1000_RAL(i));
 	for (i = 0; i < 16; i++)
-		regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
+		regs_buff[195 + i] = rd32(E1000_RAH(i));
 
 	for (i = 0; i < 4; i++)
-		regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
+		regs_buff[211 + i] = rd32(E1000_TDBAL(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
+		regs_buff[215 + i] = rd32(E1000_TDBAH(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
+		regs_buff[219 + i] = rd32(E1000_TDLEN(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
+		regs_buff[223 + i] = rd32(E1000_TDH(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
+		regs_buff[227 + i] = rd32(E1000_TDT(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
+		regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
+		regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
+		regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
+		regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));
 
 	for (i = 0; i < 4; i++)
-		regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
+		regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
+		regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
 	for (i = 0; i < 32; i++)
-		regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
+		regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
 	for (i = 0; i < 128; i++)
-		regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
+		regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
 	for (i = 0; i < 128; i++)
-		regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
+		regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
+		regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));
 
-	regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
-	regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
-	regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
-	regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
+	regs_buff[547] = rd32(E1000_TDFH);
+	regs_buff[548] = rd32(E1000_TDFT);
+	regs_buff[549] = rd32(E1000_TDFHS);
+	regs_buff[550] = rd32(E1000_TDFPC);
 
 }
 
@@ -671,7 +671,7 @@ static int igb_set_eeprom(struct net_device *netdev,
 	/* Update the checksum over the first part of the EEPROM if needed
 	 * and flush shadow RAM for 82573 controllers */
 	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
-		e1000_update_nvm_checksum(hw);
+		igb_update_nvm_checksum(hw);
 
 	kfree(eeprom_buff);
 	return ret_val;
@@ -933,6 +933,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 
 static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	struct igb_reg_test *test;
 	u32 value, before, after;
 	u32 i, toggle;
@@ -945,10 +946,10 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 	 * tests.  Some bits are read-only, some toggle, and some
 	 * are writable on newer MACs.
 	 */
-	before = E1000_READ_REG(&adapter->hw, E1000_STATUS);
-	value = (E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle);
-	E1000_WRITE_REG(&adapter->hw, E1000_STATUS, toggle);
-	after = E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle;
+	before = rd32(E1000_STATUS);
+	value = (rd32(E1000_STATUS) & toggle);
+	wr32(E1000_STATUS, toggle);
+	after = rd32(E1000_STATUS) & toggle;
 	if (value != after) {
 		dev_err(&adapter->pdev->dev, "failed STATUS register test "
 			"got: 0x%08X expected: 0x%08X\n", after, value);
@@ -956,7 +957,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 		return 1;
 	}
 	/* restore previous status */
-	E1000_WRITE_REG(&adapter->hw, E1000_STATUS, before);
+	wr32(E1000_STATUS, before);
 
 	/* Perform the remainder of the register test, looping through
 	 * the test table until we either fail or reach the null entry.
@@ -1031,16 +1032,18 @@ static irqreturn_t igb_test_intr(int irq, void *data, struct pt_regs *regs)
 {
 	struct net_device *netdev = (struct net_device *) data;
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 
-	adapter->test_icr |= E1000_READ_REG(&adapter->hw, E1000_ICR);
+	adapter->test_icr |= rd32(E1000_ICR);
 
 	return IRQ_HANDLED;
 }
 
 static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
-	u32 mask, i = 0, shared_int = 1;
+	u32 mask, i = 0, shared_int = true;
 	u32 irq = adapter->pdev->irq;
 
 	*data = 0;
@@ -1050,14 +1053,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 		/* NOTE: we don't test MSI-X interrupts here, yet */
 		return 0;
 	} else if (adapter->msi_enabled) {
-		shared_int = 0;
+		shared_int = false;
 		if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
 			*data = 1;
 			return -1;
 		}
 	} else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
 				netdev->name, netdev)) {
-		shared_int = 0;
+		shared_int = false;
 	} else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
 		 netdev->name, netdev)) {
 		*data = 1;
@@ -1067,7 +1070,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 		(shared_int ? "shared" : "unshared"));
 
 	/* Disable all the interrupts */
-	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
+	wr32(E1000_IMC, 0xFFFFFFFF);
 	msleep(10);
 
 	/* Test each interrupt */
@@ -1083,10 +1086,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 			 * test failed.
 			 */
 			adapter->test_icr = 0;
-			E1000_WRITE_REG(&adapter->hw, E1000_IMC,
-					~mask & 0x00007FFF);
-			E1000_WRITE_REG(&adapter->hw, E1000_ICS,
-					~mask & 0x00007FFF);
+			wr32(E1000_IMC, ~mask & 0x00007FFF);
+			wr32(E1000_ICS, ~mask & 0x00007FFF);
 			msleep(10);
 
 			if (adapter->test_icr & mask) {
@@ -1102,8 +1103,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 		 * test failed.
 		 */
 		adapter->test_icr = 0;
-		E1000_WRITE_REG(&adapter->hw, E1000_IMS, mask);
-		E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask);
+		wr32(E1000_IMS, mask);
+		wr32(E1000_ICS, mask);
 		msleep(10);
 
 		if (!(adapter->test_icr & mask)) {
@@ -1119,10 +1120,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 			 * test failed.
 			 */
 			adapter->test_icr = 0;
-			E1000_WRITE_REG(&adapter->hw, E1000_IMC,
-					~mask & 0x00007FFF);
-			E1000_WRITE_REG(&adapter->hw, E1000_ICS,
-					~mask & 0x00007FFF);
+			wr32(E1000_IMC, ~mask & 0x00007FFF);
+			wr32(E1000_ICS, ~mask & 0x00007FFF);
 			msleep(10);
 
 			if (adapter->test_icr) {
@@ -1133,7 +1132,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	}
 
 	/* Disable all the interrupts */
-	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
+	wr32(E1000_IMC, 0xFFFFFFFF);
 	msleep(10);
 
 	/* Unhook test interrupt handler */
@@ -1193,6 +1192,7 @@ static void igb_free_desc_rings(struct igb_adapter *adapter)
 
 static int igb_setup_desc_rings(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
@@ -1222,15 +1222,14 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
 	}
 	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
 
-	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
+	wr32(E1000_TDBAL(0),
 			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
-			((u64) tx_ring->dma >> 32));
-	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
+	wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
+	wr32(E1000_TDLEN(0),
 			tx_ring->count * sizeof(struct e1000_tx_desc));
-	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
-	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
-	E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
+	wr32(E1000_TDH(0), 0);
+	wr32(E1000_TDT(0), 0);
+	wr32(E1000_TCTL,
 			E1000_TCTL_PSP | E1000_TCTL_EN |
 			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
 			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
@@ -1281,20 +1280,20 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
 	}
 	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
 
-	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
-	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
+	rctl = rd32(E1000_RCTL);
+	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	wr32(E1000_RDBAL(0),
 			((u64) rx_ring->dma & 0xFFFFFFFF));
-	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
+	wr32(E1000_RDBAH(0),
 			((u64) rx_ring->dma >> 32));
-	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0), rx_ring->size);
-	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
-	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), 0);
+	wr32(E1000_RDLEN(0), rx_ring->size);
+	wr32(E1000_RDH(0), 0);
+	wr32(E1000_RDT(0), 0);
 	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
 		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
 		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
-	E1000_WRITE_REG(&adapter->hw, E1000_SRRCTL(0), 0);
+	wr32(E1000_RCTL, rctl);
+	wr32(E1000_SRRCTL(0), 0);
 
 	for (i = 0; i < rx_ring->count; i++) {
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -1339,7 +1338,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 	u32 ctrl_reg = 0;
 	u32 stat_reg = 0;
 
-	hw->mac.autoneg = 0;
+	hw->mac.autoneg = false;
 
 	if (hw->phy.type == e1000_phy_m88) {
 		/* Auto-MDI/MDIX Off */
@@ -1350,13 +1349,13 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 		hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x8140);
 	}
 
-	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl_reg = rd32(E1000_CTRL);
 
 	/* force 1000, set loopback */
 	hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x4140);
 
 	/* Now set up the MAC to the same speed/duplex as the PHY. */
-	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl_reg = rd32(E1000_CTRL);
 	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
 	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
 		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
@@ -1369,12 +1368,12 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 	else {
 		/* Set the ILOS bit on the fiber Nic if half duplex link is
 		 * detected. */
-		stat_reg = E1000_READ_REG(hw, E1000_STATUS);
+		stat_reg = rd32(E1000_STATUS);
 		if ((stat_reg & E1000_STATUS_FD) == 0)
 			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
 	}
 
-	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+	wr32(E1000_CTRL, ctrl_reg);
 
 	/* Disable the receiver on the PHY so when a cable is plugged in, the
 	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
@@ -1399,9 +1398,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 
 	if (hw->phy.media_type == e1000_media_type_fiber ||
 	    hw->phy.media_type == e1000_media_type_internal_serdes) {
-		rctl = E1000_READ_REG(hw, E1000_RCTL);
+		rctl = rd32(E1000_RCTL);
 		rctl |= E1000_RCTL_LBM_TCVR;
-		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+		wr32(E1000_RCTL, rctl);
 		return 0;
 	} else if (hw->phy.media_type == e1000_media_type_copper) {
 		return igb_set_phy_loopback(adapter);
@@ -1416,16 +1415,16 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
 	u32 rctl;
 	u16 phy_reg;
 
-	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	rctl = rd32(E1000_RCTL);
 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+	wr32(E1000_RCTL, rctl);
 
-	hw->mac.autoneg = 1;
+	hw->mac.autoneg = true;
 	hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_reg);
 	if (phy_reg & MII_CR_LOOPBACK) {
 		phy_reg &= ~MII_CR_LOOPBACK;
 		hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_reg);
-		e1000_phy_sw_reset(hw);
+		igb_phy_sw_reset(hw);
 	}
 }
 
@@ -1451,6 +1450,7 @@ static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 
 static int igb_run_loopback_test(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
@@ -1458,7 +1458,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 	int ret_val = 0;
 	unsigned long time;
 
-	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rx_ring->count - 1);
+	wr32(E1000_RDT(0), rx_ring->count - 1);
 
 	/* Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
@@ -1483,7 +1483,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 			if (k == tx_ring->count)
 				k = 0;
 		}
-		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), k);
+		wr32(E1000_TDT(0), k);
 		msleep(200);
 		time = jiffies; /* set the start time for the receive */
 		good_cnt = 0;
@@ -1521,7 +1521,7 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
 {
 	/* PHY loopback cannot be performed if SoL/IDER
 	 * sessions are active */
-	if (e1000_check_reset_block(&adapter->hw)) {
+	if (igb_check_reset_block(&adapter->hw)) {
 		dev_err(&adapter->pdev->dev,
 			"Cannot do PHY loopback test "
 			"when SoL/IDER is active.\n");
@@ -1545,27 +1545,28 @@ out:
 
 static int igb_link_test(struct igb_adapter *adapter, u64 *data)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	*data = 0;
-	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
-		adapter->hw.mac.serdes_has_link = 0;
+		hw->mac.serdes_has_link = false;
 
 		/* On some blade server designs, link establishment
 		 * could take as long as 2-3 minutes */
 		do {
-			adapter->hw.mac.ops.check_for_link(&adapter->hw);
-			if (adapter->hw.mac.serdes_has_link)
+			hw->mac.ops.check_for_link(&adapter->hw);
+			if (hw->mac.serdes_has_link)
 				return *data;
 			msleep(20);
 		} while (i++ < 3750);
 
 		*data = 1;
 	} else {
-		adapter->hw.mac.ops.check_for_link(&adapter->hw);
-		if (adapter->hw.mac.autoneg)
+		hw->mac.ops.check_for_link(&adapter->hw);
+		if (hw->mac.autoneg)
 			msleep(4000);
 
-		if (!(E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+		if (!(rd32(E1000_STATUS) &
 		      E1000_STATUS_LU))
 			*data = 1;
 	}
@@ -1623,9 +1624,9 @@ static void igb_diag_test(struct net_device *netdev,
 		adapter->hw.mac.autoneg = autoneg;
 
 		/* force this routine to wait until autoneg complete/timeout */
-		adapter->hw.phy.autoneg_wait_to_complete = 1;
+		adapter->hw.phy.autoneg_wait_to_complete = true;
 		igb_reset(adapter);
-		adapter->hw.phy.autoneg_wait_to_complete = 0;
+		adapter->hw.phy.autoneg_wait_to_complete = false;
 
 		clear_bit(__IGB_TESTING, &adapter->state);
 		if (if_running)
@@ -1660,7 +1661,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
 		break;
 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
 		/* Wake events not supported on port B */
-		if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
+		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
 			wol->supported = 0;
 			break;
 		}
@@ -1671,7 +1672,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
 		/* dual port cards only support WoL on port A from now on
 		 * unless it was enabled in the eeprom for port B
 		 * so exclude FUNC_1 ports from having WoL enabled */
-		if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
 		    !adapter->eeprom_wol) {
 			wol->supported = 0;
 			break;
@@ -1759,12 +1760,12 @@ static int igb_phys_id(struct net_device *netdev, u32 data)
 	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
 		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
 
-	e1000_blink_led(hw);
+	igb_blink_led(hw);
 	msleep_interruptible(data * 1000);
 
-	e1000_led_off(hw);
+	igb_led_off(hw);
 	clear_bit(IGB_LED_ON, &adapter->led_status);
-	e1000_cleanup_led(hw);
+	igb_cleanup_led(hw);
 
 	return 0;
 }
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 3859962..02b9634 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -90,7 +90,7 @@ static void igb_clean_rx_ring(struct igb_adapter *, struct igb_ring *);
 static void igb_set_multi(struct net_device *);
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
-static void igb_watchdog_task(struct work_struct *);
+static void igb_watchdog_task(struct igb_adapter *);
 static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
 				  struct igb_ring *);
 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
@@ -111,7 +111,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_adapter *,
 				     struct igb_ring *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
-static void igb_reset_task(struct work_struct *);
+static void igb_reset_task(struct igb_adapter *);
 static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
 static void igb_vlan_rx_add_vid(struct net_device *, u16);
 static void igb_vlan_rx_kill_vid(struct net_device *, u16);
@@ -213,6 +213,8 @@ module_exit(igb_exit_module);
  **/
 static int igb_alloc_queues(struct igb_adapter *adapter)
 {
+	int i;
+
 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
 				   sizeof(struct igb_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
@@ -225,7 +227,34 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		return -ENOMEM;
 	}
 
-	return 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		ring->adapter = adapter;
+		ring->itr_register = E1000_ITR;
+
+		ring->netdev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
+		if (!ring->netdev)
+			goto err;
+
+		ring->netdev->priv = (void *)ring;
+		ring->netdev->poll = igb_clean;
+		ring->netdev->weight = adapter->netdev->weight /
+				       adapter->num_rx_queues;
+		dev_hold(ring->netdev);
+		set_bit(__LINK_STATE_START, &ring->netdev->state);
+	}
+	return 0;
+
+err:
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		kfree(ring->netdev);
+	}
+	kfree(adapter->rx_ring);
+	kfree(adapter->tx_ring);
+	return -ENOMEM;
+
 }
 
 #define IGB_N0_QUEUE -1
@@ -247,7 +276,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 			adapter->tx_ring[tx_queue].eims_value =
 				  E1000_EICR_TX_QUEUE0 << tx_queue;
 		}
-		E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm);
+		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
 }
 
 /**
@@ -288,13 +317,13 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 
 
 	/* set vector for other causes, i.e. link changes */
-		E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++,
+		array_wr32(E1000_MSIXBM(0), vector++,
 				      E1000_EIMS_OTHER);
 
 		/* disable IAM for ICR interrupt bits */
-		E1000_WRITE_REG(hw, E1000_IAM, 0);
+		wr32(E1000_IAM, 0);
 
-		tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
+		tmp = rd32(E1000_CTRL_EXT);
 		/* enable MSI-X PBA support*/
 		tmp |= E1000_CTRL_EXT_PBA_CLR;
 
@@ -302,10 +331,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 		tmp |= E1000_CTRL_EXT_EIAME;
 		tmp |= E1000_CTRL_EXT_IRCA;
 
-		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
+		wr32(E1000_CTRL_EXT, tmp);
 		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
 
-	E1000_WRITE_FLUSH(hw);
+	wrfl();
 }
 
 /**
@@ -336,11 +365,11 @@ static int igb_request_msix(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		if (strlen(netdev->name) < (IFNAMSIZ - 5))
-			sprintf(ring->name, "%s-rx%d", netdev->name, i);
+			sprintf(ring->netdev->name, "%s-rx%d", netdev->name, i);
 		else
-			memcpy(ring->name, netdev->name, IFNAMSIZ);
+			memcpy(ring->netdev->name, netdev->name, IFNAMSIZ);
 		err = request_irq(adapter->msix_entries[vector].vector,
-				  &igb_msix_rx, 0, ring->name,
+				  &igb_msix_rx, 0, ring->netdev->name,
 				  &(adapter->rx_ring[i]));
 		if (err)
 			goto out;
@@ -356,7 +385,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
 
 	adapter->netdev->poll = igb_clean_rx_ring_msix;
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		adapter->rx_ring[i].netdev->poll = adapter->netdev->poll;
+		adapter->rx_ring[i].netdev->poll = igb_clean_rx_ring_msix;
 	igb_configure_msix(adapter);
 	return 0;
 out:
@@ -420,15 +449,15 @@ msi_only:
 static int igb_request_irq(struct igb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	int err = 0;
 
 	if (adapter->msix_entries) {
 		err = igb_request_msix(adapter);
 		if (!err) {
-			struct e1000_hw *hw = &adapter->hw;
 			/* enable IAM, auto-mask,
-			 * DO NOT USE EIAME or IAME in legacy mode */
-			E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK);
+			 * DO NOT USE EIAM or IAM in legacy mode */
+			wr32(E1000_IAM, IMS_ENABLE_MASK);
 			goto request_done;
 		}
 		/* fall back to MSI */
@@ -453,14 +482,9 @@ static int igb_request_irq(struct igb_adapter *adapter)
 	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
 			  netdev->name, netdev);
 
-	if (err) {
+	if (err)
 		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
 			err);
-		goto request_done;
-	}
-
-	/* enable IAM, auto-mask */
-	E1000_WRITE_REG(&adapter->hw, E1000_IAM, IMS_ENABLE_MASK);
 
 request_done:
 	return err;
@@ -493,12 +517,14 @@ static void igb_free_irq(struct igb_adapter *adapter)
  **/
 static void igb_irq_disable(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
+
 	if (adapter->msix_entries) {
-		E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
-		E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
+		wr32(E1000_EIMC, ~0);
+		wr32(E1000_EIAC, 0);
 	}
-	E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
-	E1000_WRITE_FLUSH(&adapter->hw);
+	wr32(E1000_IMC, ~0);
+	wrfl();
 	synchronize_irq(adapter->pdev->irq);
 }
 
@@ -508,14 +534,16 @@ static void igb_irq_disable(struct igb_adapter *adapter)
  **/
 static void igb_irq_enable(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
+
 	if (adapter->msix_entries) {
-		E1000_WRITE_REG(&adapter->hw, E1000_EIMS,
+		wr32(E1000_EIMS,
 				adapter->eims_enable_mask);
-		E1000_WRITE_REG(&adapter->hw, E1000_EIAC,
+		wr32(E1000_EIAC,
 				adapter->eims_enable_mask);
-		E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC);
+		wr32(E1000_IMS, E1000_IMS_LSC);
 	} else
-	E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
+	wr32(E1000_IMS, IMS_ENABLE_MASK);
 }
 
 static void igb_update_mng_vlan(struct igb_adapter *adapter)
@@ -552,11 +580,12 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
  **/
 static void igb_release_hw_control(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
 
 	/* Let firmware take over control of h/w */
-	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
-	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	wr32(E1000_CTRL_EXT,
 			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 }
 
@@ -572,22 +601,22 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
  **/
 static void igb_get_hw_control(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
 
 	/* Let firmware know the driver has taken over */
-	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
-	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	wr32(E1000_CTRL_EXT,
 			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
 }
 
 static void igb_init_manageability(struct igb_adapter *adapter)
 {
-	if (adapter->en_mng_pt) {
-		u32 manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
-		u32 manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
+	struct e1000_hw *hw = &adapter->hw;
 
-		/* disable hardware interception of ARP */
-		manc &= ~(E1000_MANC_ARP_EN);
+	if (adapter->en_mng_pt) {
+		u32 manc2h = rd32(E1000_MANC2H);
+		u32 manc = rd32(E1000_MANC);
 
 		/* enable receiving management packets to the host */
 		/* this will probably generate destination unreachable messages
@@ -597,26 +626,9 @@ static void igb_init_manageability(struct igb_adapter *adapter)
 #define E1000_MNG2HOST_PORT_664 (1 << 6)
 		manc2h |= E1000_MNG2HOST_PORT_623;
 		manc2h |= E1000_MNG2HOST_PORT_664;
-		E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
-
-		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
-	}
-}
-
-static void igb_release_manageability(struct igb_adapter *adapter)
-{
-	if (adapter->en_mng_pt) {
-		u32 manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
-
-		/* re-enable hardware interception of ARP */
-		manc |= E1000_MANC_ARP_EN;
-		manc &= ~E1000_MANC_EN_MNG2HOST;
+		wr32(E1000_MANC2H, manc2h);
 
-		/* don't explicitly have to mess with MANC2H since
-		 * MANC has an enable disable that gates MANC2H */
-
-		/* XXX stop the hardware watchdog ? */
-		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
+		wr32(E1000_MANC, manc);
 	}
 }
 
@@ -658,6 +670,8 @@ static void igb_configure(struct igb_adapter *adapter)
 
 int igb_up(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
+
 	/* hardware has been reset, we need to reload some things */
 	igb_configure(adapter);
 
@@ -670,16 +684,17 @@ int igb_up(struct igb_adapter *adapter)
 	}
 
 	/* Clear any pending interrupts. */
-	E1000_READ_REG(&adapter->hw, E1000_ICR);
+	rd32(E1000_ICR);
 	igb_irq_enable(adapter);
 
 	/* Fire a link change interrupt to start the watchdog. */
-	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+	wr32(E1000_ICS, E1000_ICS_LSC);
 	return 0;
 }
 
 void igb_down(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	u32 tctl, rctl;
 
@@ -688,18 +703,18 @@ void igb_down(struct igb_adapter *adapter)
 	set_bit(__IGB_DOWN, &adapter->state);
 
 	/* disable receives in the hardware */
-	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
-	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	rctl = rd32(E1000_RCTL);
+	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
 	/* flush and sleep below */
 
-	netif_tx_disable(netdev);
+	netif_stop_queue(netdev);
 
 	/* disable transmits in the hardware */
-	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+	tctl = rd32(E1000_TCTL);
 	tctl &= ~E1000_TCTL_EN;
-	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
+	wr32(E1000_TCTL, tctl);
 	/* flush both disables and wait for them to finish */
-	E1000_WRITE_FLUSH(&adapter->hw);
+	wrfl();
 	msleep(10);
 
 	netif_poll_disable(netdev);
@@ -730,6 +745,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 
 void igb_reset(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_fc_info *fc = &adapter->hw.fc;
 	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 	u16 hwm;
@@ -741,7 +757,7 @@ void igb_reset(struct igb_adapter *adapter)
 
 	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
 		/* adjust PBA for jumbo frames */
-		E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
+		wr32(E1000_PBA, pba);
 
 		/* To maintain wire speed transmits, the Tx FIFO should be
 		 * large enough to accommodate two full transmit packets,
@@ -749,7 +765,7 @@ void igb_reset(struct igb_adapter *adapter)
 		 * the Rx FIFO should be large enough to accommodate at least
 		 * one full receive packet and is similarly rounded up and
 		 * expressed in KB. */
-		pba = E1000_READ_REG(&adapter->hw, E1000_PBA);
+		pba = rd32(E1000_PBA);
 		/* upper 16 bits has Tx packet buffer allocation size in KB */
 		tx_space = pba >> 16;
 		/* lower 16 bits has Rx packet buffer allocation size in KB */
@@ -779,7 +795,7 @@ void igb_reset(struct igb_adapter *adapter)
 				pba = min_rx_space;
 		}
 	}
-	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
+	wr32(E1000_PBA, pba);
 
 	/* flow control settings */
 	/* The high water mark must be low enough to fit one full frame
@@ -798,7 +814,7 @@ void igb_reset(struct igb_adapter *adapter)
 
 	/* Allow time for pending master requests to run */
 	adapter->hw.mac.ops.reset_hw(&adapter->hw);
-	E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
+	wr32(E1000_WUC, 0);
 
 	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
 		dev_err(&adapter->pdev->dev, "Hardware Error\n");
@@ -806,11 +822,11 @@ void igb_reset(struct igb_adapter *adapter)
 	igb_update_mng_vlan(adapter);
 
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
-	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
-	e1000_reset_adaptive(&adapter->hw);
-	adapter->hw.phy.ops.get_phy_info(&adapter->hw);
-	igb_release_manageability(adapter);
+	igb_reset_adaptive(&adapter->hw);
+	if (adapter->hw.phy.ops.get_phy_info)
+		adapter->hw.phy.ops.get_phy_info(&adapter->hw);
 }
 
 /**
@@ -832,7 +848,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	struct e1000_hw *hw;
 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
 	unsigned long mmio_start, mmio_len;
-	static int cards_found;
+	static int cards_found = 0;
 	int i, err, pci_using_dac;
 	u16 eeprom_data = 0;
 	u16 eeprom_apme_mask = IGB_EEPROM_APME;
@@ -899,11 +915,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	igb_set_ethtool_ops(netdev);
 	netdev->tx_timeout = &igb_tx_timeout;
 	netdev->watchdog_timeo = 5 * HZ;
+	netdev->poll = igb_clean;
+	netdev->weight = 64;
 	netdev->vlan_rx_register = igb_vlan_rx_register;
 	netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
 	netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
-	netdev->poll = igb_clean;
-	netdev->weight = 64;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	netdev->poll_controller = igb_netpoll;
 #endif
@@ -939,19 +955,19 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_sw_init;
 
-	e1000_get_bus_info_pcie(hw);
+	igb_get_bus_info_pcie(hw);
 
-	hw->phy.autoneg_wait_to_complete = 0;
-	hw->mac.adaptive_ifs = 1;
+	hw->phy.autoneg_wait_to_complete = false;
+	hw->mac.adaptive_ifs = true;
 
 	/* Copper options */
 	if (hw->phy.media_type == e1000_media_type_copper) {
 		hw->phy.mdix = AUTO_ALL_MODES;
-		hw->phy.disable_polarity_correction = 0;
+		hw->phy.disable_polarity_correction = false;
 		hw->phy.ms_type = e1000_ms_hw_default;
 	}
 
-	if (e1000_check_reset_block(hw))
+	if (igb_check_reset_block(hw))
 		dev_info(&pdev->dev,
 			"PHY reset is blocked due to SOL/IDER session.\n");
 
@@ -968,14 +984,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		netdev->features |= NETIF_F_HIGHDMA;
 
 	netdev->features |= NETIF_F_LLTX;
-	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
 
 	/* before reading the NVM, reset the controller to put the device in a
 	 * known good starting state */
 	hw->mac.ops.reset_hw(hw);
 
 	/* make sure the NVM is good */
-	if (e1000_validate_nvm_checksum(hw) < 0) {
+	if (igb_validate_nvm_checksum(hw) < 0) {
 		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
 		err = -EIO;
 		goto err_eeprom;
@@ -1003,9 +1019,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	adapter->phy_info_timer.data = (unsigned long) adapter;
 
 	INIT_WORK(&adapter->reset_task,
-			(void (*)(void *))igb_reset_task, netdev);
+			(void (*)(void *))igb_reset_task, adapter);
 	INIT_WORK(&adapter->watchdog_task,
-			(void (*)(void *))igb_watchdog_task, netdev);
+			(void (*)(void *))igb_watchdog_task, adapter);
 	/* Initialize link & ring properties that are user-changeable */
 	adapter->tx_ring->count = 256;
 	for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1014,8 +1030,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		adapter->rx_ring[i].count = adapter->rx_ring->count;
 
-	adapter->fc_autoneg = 1;
-	hw->mac.autoneg = 1;
+	adapter->fc_autoneg = true;
+	hw->mac.autoneg = true;
 	hw->phy.autoneg_advertised = 0x2f;
 
 	hw->fc.original_type = e1000_fc_default;
@@ -1024,7 +1040,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	adapter->itr_setting = 3;
 	adapter->itr = IGB_START_ITR;
 
-	e1000_validate_mdi_setting(hw);
+	igb_validate_mdi_setting(hw);
 
 	adapter->rx_csum = 1;
 
@@ -1050,7 +1066,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
 		/* Wake events only supported on port A for dual fiber
 		 * regardless of eeprom setting */
-		if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
+		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
 			adapter->eeprom_wol = 0;
 		break;
 	}
@@ -1068,6 +1084,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	/* tell the stack to leave us alone until igb_open() is called */
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
+	netif_poll_disable(netdev);
 
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
@@ -1087,7 +1104,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
 		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
 
-	e1000_read_part_num(hw, &part_num);
+	igb_read_part_num(hw, &part_num);
 	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
 		(part_num >> 8), (part_num & 0xff));
 
@@ -1103,13 +1120,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 err_register:
 	igb_release_hw_control(adapter);
 err_eeprom:
-	if (!e1000_check_reset_block(hw))
+	if (!igb_check_reset_block(hw))
 		hw->phy.ops.reset_phy(hw);
 
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
 
-	e1000_remove_device(hw);
+	igb_remove_device(hw);
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 err_sw_init:
@@ -1147,19 +1164,16 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
 	flush_scheduled_work();
 
-
-	igb_release_manageability(adapter);
-
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant. */
 	igb_release_hw_control(adapter);
 
 	unregister_netdev(netdev);
 
-	if (!e1000_check_reset_block(&adapter->hw))
+	if (!igb_check_reset_block(&adapter->hw))
 		adapter->hw.phy.ops.reset_phy(&adapter->hw);
 
-	e1000_remove_device(&adapter->hw);
+	igb_remove_device(&adapter->hw);
 	igb_reset_interrupt_capability(adapter);
 
 	kfree(adapter->tx_ring);
@@ -1230,6 +1244,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 static int igb_open(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	int err;
 
 	/* disallow open during test */
@@ -1271,9 +1286,9 @@ static int igb_open(struct net_device *netdev)
 	igb_irq_enable(adapter);
 
 	/* Clear any pending interrupts. */
-	E1000_READ_REG(&adapter->hw, E1000_ICR);
+	rd32(E1000_ICR);
 	/* Fire a link status change interrupt to start the watchdog. */
-	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+	wr32(E1000_ICS, E1000_ICS_LSC);
 
 	return 0;
 
@@ -1411,34 +1426,34 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = &(adapter->tx_ring[i]);
 
-		E1000_WRITE_REG(hw, E1000_TDLEN(i),
+		wr32(E1000_TDLEN(i),
 				ring->count * sizeof(struct e1000_tx_desc));
 		tdba = ring->dma;
-		E1000_WRITE_REG(hw, E1000_TDBAL(i),
+		wr32(E1000_TDBAL(i),
 				tdba & 0x00000000ffffffffULL);
-		E1000_WRITE_REG(hw, E1000_TDBAH(i), tdba >> 32);
+		wr32(E1000_TDBAH(i), tdba >> 32);
 
 		tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
 		tdwba |= 1; /* enable head wb */
-		E1000_WRITE_REG(hw, E1000_TDWBAL(i),
+		wr32(E1000_TDWBAL(i),
 				tdwba & 0x00000000ffffffffULL);
-		E1000_WRITE_REG(hw, E1000_TDWBAH(i), tdwba >> 32);
+		wr32(E1000_TDWBAH(i), tdwba >> 32);
 
 		ring->head = E1000_TDH(i);
 		ring->tail = E1000_TDT(i);
 		writel(0, hw->hw_addr + ring->tail);
 		writel(0, hw->hw_addr + ring->head);
-		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
+		txdctl = rd32(E1000_TXDCTL(i));
 		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+		wr32(E1000_TXDCTL(i), txdctl);
 
 		/* Turn off Relaxed Ordering on head write-backs.  The
 		 * writebacks MUST be delivered in order or it will
 		 * completely screw up our bookeeping.
 		 */
-		txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
+		txctrl = rd32(E1000_DCA_TXCTRL(i));
 		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), txctrl);
+		wr32(E1000_DCA_TXCTRL(i), txctrl);
 	}
 
 
@@ -1447,12 +1462,12 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
 	/* Program the Transmit Control Register */
 
-	tctl = E1000_READ_REG(hw, E1000_TCTL);
+	tctl = rd32(E1000_TCTL);
 	tctl &= ~E1000_TCTL_CT;
 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 
-	e1000_config_collision_dist(hw);
+	igb_config_collision_dist(hw);
 
 	/* Setup Transmit Descriptor Settings for eop descriptor */
 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
@@ -1460,7 +1475,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 	/* Enable transmits */
 	tctl |= E1000_TCTL_EN;
 
-	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+	wr32(E1000_TCTL, tctl);
 }
 
 /**
@@ -1500,8 +1515,10 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 	rx_ring->pending_skb = NULL;
 
 	rx_ring->adapter = adapter;
-	/* FIXME: do we want to setup ring->napi->poll here? */
+	rx_ring->netdev->priv = rx_ring;
 	rx_ring->netdev->poll = adapter->netdev->poll;
+	rx_ring->netdev->weight = 64;
+	set_bit(__LINK_STATE_START, &rx_ring->netdev->state);
 
 	return 0;
 
@@ -1544,11 +1561,12 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_setup_rctl(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
 	u32 srrctl = 0;
 	int i;
 
-	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	rctl = rd32(E1000_RCTL);
 
 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
 
@@ -1626,9 +1644,9 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	}
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		E1000_WRITE_REG(&adapter->hw, E1000_SRRCTL(i), srrctl);
+		wr32(E1000_SRRCTL(i), srrctl);
 
-	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+	wr32(E1000_RCTL, rctl);
 }
 
 /**
@@ -1646,13 +1664,13 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	int i;
 
 	/* disable receives while setting up the descriptors */
-	rctl = E1000_READ_REG(hw, E1000_RCTL);
-	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	E1000_WRITE_FLUSH(hw);
+	rctl = rd32(E1000_RCTL);
+	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	wrfl();
 	mdelay(10);
 
 	if (adapter->itr_setting > 3)
-		E1000_WRITE_REG(hw, E1000_ITR,
+		wr32(E1000_ITR,
 				1000000000 / (adapter->itr * 256));
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1660,10 +1678,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		rdba = ring->dma;
-		E1000_WRITE_REG(hw, E1000_RDBAL(i),
+		wr32(E1000_RDBAL(i),
 				rdba & 0x00000000ffffffffULL);
-		E1000_WRITE_REG(hw, E1000_RDBAH(i), rdba >> 32);
-		E1000_WRITE_REG(hw, E1000_RDLEN(i),
+		wr32(E1000_RDBAH(i), rdba >> 32);
+		wr32(E1000_RDLEN(i),
 			       ring->count * sizeof(union e1000_adv_rx_desc));
 
 		ring->head = E1000_RDH(i);
@@ -1671,13 +1689,13 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 		writel(0, hw->hw_addr + ring->tail);
 		writel(0, hw->hw_addr + ring->head);
 
-		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
+		rxdctl = rd32(E1000_RXDCTL(i));
 		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
 		rxdctl &= 0xFFF00000;
 		rxdctl |= IGB_RX_PTHRESH;
 		rxdctl |= IGB_RX_HTHRESH << 8;
 		rxdctl |= IGB_RX_WTHRESH << 16;
-		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+		wr32(E1000_RXDCTL(i), rxdctl);
 	}
 
 	if (adapter->num_rx_queues > 1) {
@@ -1703,7 +1721,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
 		/* Fill out hash function seeds */
 		for (j = 0; j < 10; j++)
-			E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), j, random[j]);
+			array_wr32(E1000_RSSRK(0), j, random[j]);
 
 		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
 			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
@@ -1715,17 +1733,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 			 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
 
 
-		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+		wr32(E1000_MRQC, mrqc);
 
 		/* Multiqueue and raw packet checksumming are mutually
 		 * exclusive.  Note that this not the same as TCP/IP
 		 * checksumming, which works fine. */
-		rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+		rxcsum = rd32(E1000_RXCSUM);
 		rxcsum |= E1000_RXCSUM_PCSD;
-		E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+		wr32(E1000_RXCSUM, rxcsum);
 	} else {
 		/* Enable Receive Checksum Offload for TCP and UDP */
-		rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+		rxcsum = rd32(E1000_RXCSUM);
 		if (adapter->rx_csum) {
 			rxcsum |= E1000_RXCSUM_TUOFL;
 
@@ -1737,17 +1755,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 			rxcsum &= ~E1000_RXCSUM_TUOFL;
 			/* don't need to clear IPPCSE as it defaults to 0 */
 		}
-		E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+		wr32(E1000_RXCSUM, rxcsum);
 	}
 
 	if (adapter->vlgrp)
-		E1000_WRITE_REG(hw, E1000_RLPML,
+		wr32(E1000_RLPML,
 				adapter->max_frame_size + VLAN_TAG_SIZE);
 	else
-		E1000_WRITE_REG(hw, E1000_RLPML, adapter->max_frame_size);
+		wr32(E1000_RLPML, adapter->max_frame_size);
 
 	/* Enable Receives */
-	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+	wr32(E1000_RCTL, rctl);
 }
 
 /**
@@ -1883,8 +1901,10 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
 {
 	int i;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
+	for (i = 0; i < adapter->num_rx_queues; i++) {
 		igb_free_rx_resources(adapter, &adapter->rx_ring[i]);
+		dev_put(adapter->rx_ring[i].netdev);
+	}
 }
 
 /**
@@ -2005,7 +2025,7 @@ static void igb_set_multi(struct net_device *netdev)
 
 	/* Check for Promiscuous and All Multicast modes */
 
-	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	rctl = rd32(E1000_RCTL);
 
 	if (netdev->flags & IFF_PROMISC)
 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
@@ -2015,11 +2035,11 @@ static void igb_set_multi(struct net_device *netdev)
 	} else
 		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
 
-	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+	wr32(E1000_RCTL, rctl);
 
 	if (!netdev->mc_count) {
 		/* nothing to program, so clear mc list */
-		e1000_update_mc_addr_list(hw, NULL, 0, 1,
+		igb_update_mc_addr_list(hw, NULL, 0, 1,
 					  mac->rar_entry_count);
 		return;
 	}
@@ -2037,7 +2057,7 @@ static void igb_set_multi(struct net_device *netdev)
 		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
 		mc_ptr = mc_ptr->next;
 	}
-	e1000_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
+	igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
 	kfree(mta_list);
 }
 
@@ -2046,7 +2066,8 @@ static void igb_set_multi(struct net_device *netdev)
 static void igb_update_phy_info(unsigned long data)
 {
 	struct igb_adapter *adapter = (struct igb_adapter *) data;
-	adapter->hw.phy.ops.get_phy_info(&adapter->hw);
+	if (adapter->hw.phy.ops.get_phy_info)
+		adapter->hw.phy.ops.get_phy_info(&adapter->hw);
 }
 
 /**
@@ -2060,11 +2081,9 @@ static void igb_watchdog(unsigned long data)
 	schedule_work(&adapter->watchdog_task);
 }
 
-static void igb_watchdog_task(struct work_struct *work)
+static void igb_watchdog_task(struct igb_adapter *adapter)
 {
-	struct igb_adapter *adapter = container_of(work,
-					struct igb_adapter, watchdog_task);
-
+	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	struct igb_ring *tx_ring = adapter->tx_ring;
 	struct e1000_mac_info *mac = &adapter->hw.mac;
@@ -2072,32 +2091,32 @@ static void igb_watchdog_task(struct work_struct *work)
 	s32 ret_val;
 
 	if ((netif_carrier_ok(netdev)) &&
-	    (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU))
+	    (rd32(E1000_STATUS) & E1000_STATUS_LU))
 		goto link_up;
 
-	ret_val = adapter->hw.mac.ops.check_for_link(&adapter->hw);
+	ret_val = hw->mac.ops.check_for_link(&adapter->hw);
 	if ((ret_val == E1000_ERR_PHY) &&
-	    (adapter->hw.phy.type == e1000_phy_igp_3) &&
-	    (E1000_READ_REG(&adapter->hw, E1000_CTRL) &
+	    (hw->phy.type == e1000_phy_igp_3) &&
+	    (rd32(E1000_CTRL) &
 	     E1000_PHY_CTRL_GBE_DISABLE))
 		dev_info(&adapter->pdev->dev,
 			 "Gigabit has been disabled, downgrading speed\n");
 
-	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
-	    !(E1000_READ_REG(&adapter->hw, E1000_TXCW) & E1000_TXCW_ANE))
+	if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+	    !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
 		link = mac->serdes_has_link;
 	else
-		link = E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+		link = rd32(E1000_STATUS) &
 				      E1000_STATUS_LU;
 
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
 			u32 ctrl;
-			adapter->hw.mac.ops.get_speed_and_duplex(&adapter->hw,
+			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
 						   &adapter->link_speed,
 						   &adapter->link_duplex);
 
-			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+			ctrl = rd32(E1000_CTRL);
 			dev_info(&adapter->pdev->dev,
 				 "NIC Link is Up %d Mbps %s, "
 				 "Flow Control: %s\n",
@@ -2157,7 +2176,7 @@ link_up:
 	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
 	adapter->gotc_old = adapter->stats.gotc;
 
-	e1000_update_adaptive(&adapter->hw);
+	igb_update_adaptive(&adapter->hw);
 
 	if (!netif_carrier_ok(netdev)) {
 		if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
@@ -2171,10 +2190,10 @@ link_up:
 	}
 
 	/* Cause software interrupt to ensure rx ring is cleaned */
-	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_RXDMT0);
+	wr32(E1000_ICS, E1000_ICS_RXDMT0);
 
 	/* Force detection of hung controller every watchdog period */
-	tx_ring->detect_tx_hung = 1;
+	tx_ring->detect_tx_hung = true;
 
 	/* Reset the timer */
 	if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -2193,6 +2212,7 @@ enum latency_range {
 static void igb_lower_rx_eitr(struct igb_adapter *adapter,
 			      struct igb_ring *rx_ring)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	int new_val;
 
 	new_val = rx_ring->itr_val / 2;
@@ -2201,7 +2221,7 @@ static void igb_lower_rx_eitr(struct igb_adapter *adapter,
 
 	if (new_val != rx_ring->itr_val) {
 		rx_ring->itr_val = new_val;
-		E1000_WRITE_REG(&adapter->hw, rx_ring->itr_register,
+		wr32(rx_ring->itr_register,
 				1000000000 / (new_val * 256));
 	}
 }
@@ -2209,6 +2229,7 @@ static void igb_lower_rx_eitr(struct igb_adapter *adapter,
 static void igb_raise_rx_eitr(struct igb_adapter *adapter,
 			      struct igb_ring *rx_ring)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	int new_val;
 
 	new_val = rx_ring->itr_val * 2;
@@ -2217,7 +2238,7 @@ static void igb_raise_rx_eitr(struct igb_adapter *adapter,
 
 	if (new_val != rx_ring->itr_val) {
 		rx_ring->itr_val = new_val;
-		E1000_WRITE_REG(&adapter->hw, rx_ring->itr_register,
+		wr32(rx_ring->itr_register,
 				1000000000 / (new_val * 256));
 	}
 }
@@ -2444,7 +2465,7 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
 
 	tx_ring->next_to_use = i;
 
-	return 1;
+	return true;
 }
 
 static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
@@ -2492,11 +2513,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 			i = 0;
 		tx_ring->next_to_use = i;
 
-		return 1;
+		return true;
 	}
 
 
-	return 0;
+	return false;
 }
 
 #define IGB_MAX_TXD_PWR	16
@@ -2734,8 +2755,6 @@ static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
 	return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
 }
 
-
-
 /**
  * igb_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
@@ -2743,19 +2762,17 @@ static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
 static void igb_tx_timeout(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 
 	/* Do the reset outside of interrupt context */
 	adapter->tx_timeout_count++;
 	schedule_work(&adapter->reset_task);
-	E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->eims_enable_mask &
+	wr32(E1000_EICS, adapter->eims_enable_mask &
 		~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
 }
 
-static void igb_reset_task(struct work_struct *work)
+static void igb_reset_task(struct igb_adapter *adapter)
 {
-	struct igb_adapter *adapter;
-	adapter = container_of(work, struct igb_adapter, reset_task);
-
 	igb_reinit_locked(adapter);
 }
 
@@ -2863,78 +2880,78 @@ void igb_update_stats(struct igb_adapter *adapter)
 	if (pdev->error_state != pci_channel_io_normal)
 		return;
 
-	adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
-	adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
-	adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL);
-	E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */
-	adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC);
-	adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC);
-	adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC);
-
-	adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64);
-	adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127);
-	adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255);
-	adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511);
-	adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
-	adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
-	adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
-	adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
-
-	adapter->stats.mpc += E1000_READ_REG(hw, E1000_MPC);
-	adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC);
-	adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL);
-	adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC);
-	adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL);
-	adapter->stats.dc += E1000_READ_REG(hw, E1000_DC);
-	adapter->stats.rlec += E1000_READ_REG(hw, E1000_RLEC);
-	adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
-	adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC);
-	adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
-	adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
-	adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC);
-	adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC);
-	adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL);
-	E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */
-	adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC);
-	adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC);
-	adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC);
-	adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC);
-	adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH);
-	adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH);
-	adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR);
-
-	adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64);
-	adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127);
-	adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255);
-	adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511);
-	adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
-	adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
-
-	adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC);
-	adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC);
+	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
+	adapter->stats.gprc += rd32(E1000_GPRC);
+	adapter->stats.gorc += rd32(E1000_GORCL);
+	rd32(E1000_GORCH); /* clear GORCL */
+	adapter->stats.bprc += rd32(E1000_BPRC);
+	adapter->stats.mprc += rd32(E1000_MPRC);
+	adapter->stats.roc += rd32(E1000_ROC);
+
+	adapter->stats.prc64 += rd32(E1000_PRC64);
+	adapter->stats.prc127 += rd32(E1000_PRC127);
+	adapter->stats.prc255 += rd32(E1000_PRC255);
+	adapter->stats.prc511 += rd32(E1000_PRC511);
+	adapter->stats.prc1023 += rd32(E1000_PRC1023);
+	adapter->stats.prc1522 += rd32(E1000_PRC1522);
+	adapter->stats.symerrs += rd32(E1000_SYMERRS);
+	adapter->stats.sec += rd32(E1000_SEC);
+
+	adapter->stats.mpc += rd32(E1000_MPC);
+	adapter->stats.scc += rd32(E1000_SCC);
+	adapter->stats.ecol += rd32(E1000_ECOL);
+	adapter->stats.mcc += rd32(E1000_MCC);
+	adapter->stats.latecol += rd32(E1000_LATECOL);
+	adapter->stats.dc += rd32(E1000_DC);
+	adapter->stats.rlec += rd32(E1000_RLEC);
+	adapter->stats.xonrxc += rd32(E1000_XONRXC);
+	adapter->stats.xontxc += rd32(E1000_XONTXC);
+	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
+	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
+	adapter->stats.fcruc += rd32(E1000_FCRUC);
+	adapter->stats.gptc += rd32(E1000_GPTC);
+	adapter->stats.gotc += rd32(E1000_GOTCL);
+	rd32(E1000_GOTCH); /* clear GOTCL */
+	adapter->stats.rnbc += rd32(E1000_RNBC);
+	adapter->stats.ruc += rd32(E1000_RUC);
+	adapter->stats.rfc += rd32(E1000_RFC);
+	adapter->stats.rjc += rd32(E1000_RJC);
+	adapter->stats.tor += rd32(E1000_TORH);
+	adapter->stats.tot += rd32(E1000_TOTH);
+	adapter->stats.tpr += rd32(E1000_TPR);
+
+	adapter->stats.ptc64 += rd32(E1000_PTC64);
+	adapter->stats.ptc127 += rd32(E1000_PTC127);
+	adapter->stats.ptc255 += rd32(E1000_PTC255);
+	adapter->stats.ptc511 += rd32(E1000_PTC511);
+	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
+	adapter->stats.ptc1522 += rd32(E1000_PTC1522);
+
+	adapter->stats.mptc += rd32(E1000_MPTC);
+	adapter->stats.bptc += rd32(E1000_BPTC);
 
 	/* used for adaptive IFS */
 
-	hw->mac.tx_packet_delta = E1000_READ_REG(hw, E1000_TPT);
+	hw->mac.tx_packet_delta = rd32(E1000_TPT);
 	adapter->stats.tpt += hw->mac.tx_packet_delta;
-	hw->mac.collision_delta = E1000_READ_REG(hw, E1000_COLC);
+	hw->mac.collision_delta = rd32(E1000_COLC);
 	adapter->stats.colc += hw->mac.collision_delta;
 
-	adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
-	adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
-	adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
-	adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
-	adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
-
-	adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC);
-	adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
-	adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
-	adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
-	adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
-	adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
-	adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
-	adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
-	adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
+	adapter->stats.rxerrc += rd32(E1000_RXERRC);
+	adapter->stats.tncrs += rd32(E1000_TNCRS);
+	adapter->stats.tsctc += rd32(E1000_TSCTC);
+	adapter->stats.tsctfc += rd32(E1000_TSCTFC);
+
+	adapter->stats.iac += rd32(E1000_IAC);
+	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
+	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
+	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
+	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
+	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
+	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
+	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
+	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
 
 	/* Fill out the OS statistics structure */
 	adapter->net_stats.multicast = adapter->stats.mprc;
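
The long block above is a mechanical conversion of the statistics reads from
E1000_READ_REG(hw, ...) to rd32(...), and the same pattern repeats below for
the writes (wr32) and the indexed VFTA accesses (array_rd32); that is also why
the converted functions each gain a local "struct e1000_hw *hw = &adapter->hw;".
The helpers live in the driver headers rather than in this file, and presumably
reduce to plain MMIO accessors along these lines (rough sketch for reference
only, with hw assumed to be in scope):

	#define rd32(reg)		readl(hw->hw_addr + (reg))
	#define wr32(reg, val)		writel((val), hw->hw_addr + (reg))
	/* indexed variant: 32-bit registers laid out back to back */
	#define array_rd32(reg, off)	readl(hw->hw_addr + (reg) + ((off) << 2))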
@@ -2974,11 +2991,12 @@ void igb_update_stats(struct igb_adapter *adapter)
 	}
 
 	/* Management Stats */
-	adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
-	adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
-	adapter->stats.mgpdc += E1000_READ_REG(hw, E1000_MGTPDC);
+	adapter->stats.mgptc += rd32(E1000_MGTPTC);
+	adapter->stats.mgprc += rd32(E1000_MGTPRC);
+	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
 }
 
+
 static irqreturn_t igb_msix_other(int irq, void *data, struct pt_regs *regs)
 {
 	struct net_device *netdev = data;
@@ -2986,12 +3004,12 @@ static irqreturn_t igb_msix_other(int irq, void *data, struct pt_regs *regs)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 eicr;
 	/* disable interrupts from the "other" bit, avoid re-entry */
-	E1000_WRITE_REG(hw, E1000_EIMC, E1000_EIMS_OTHER);
+	wr32(E1000_EIMC, E1000_EIMS_OTHER);
 
-	eicr = E1000_READ_REG(hw, E1000_EICR);
+	eicr = rd32(E1000_EICR);
 
 	if (eicr & E1000_EIMS_OTHER) {
-		u32 icr = E1000_READ_REG(hw, E1000_ICR);
+		u32 icr = rd32(E1000_ICR);
 		/* reading ICR causes bit 31 of EICR to be cleared */
 		if (!(icr & E1000_ICR_LSC))
 			goto no_link_interrupt;
@@ -3002,8 +3020,8 @@ static irqreturn_t igb_msix_other(int irq, void *data, struct pt_regs *regs)
 	}
 
 no_link_interrupt:
-	E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC);
-	E1000_WRITE_REG(hw, E1000_EIMS, E1000_EIMS_OTHER);
+	wr32(E1000_IMS, E1000_IMS_LSC);
+	wr32(E1000_EIMS, E1000_EIMS_OTHER);
 
 	return IRQ_HANDLED;
 }
@@ -3012,19 +3030,19 @@ static irqreturn_t igb_msix_tx(int irq, void *data, struct pt_regs *regs)
 {
 	struct igb_ring *tx_ring = data;
 	struct igb_adapter *adapter = tx_ring->adapter;
+	struct e1000_hw *hw = &adapter->hw;
 
 	if (!tx_ring->itr_val)
-		E1000_WRITE_REG(&adapter->hw, E1000_EIMC, tx_ring->eims_value);
+		wr32(E1000_EIMC, tx_ring->eims_value);
 	
 	tx_ring->total_bytes = 0;
 	tx_ring->total_packets = 0;
-
 	if (!igb_clean_tx_irq(adapter, tx_ring))
 		/* Ring was not completely cleaned, so fire another interrupt */
-		E1000_WRITE_REG(&adapter->hw, E1000_EICS, tx_ring->eims_value);
+		wr32(E1000_EICS, tx_ring->eims_value);
 
 	if (!tx_ring->itr_val)
-		E1000_WRITE_REG(&adapter->hw, E1000_EIMS, tx_ring->eims_value);
+		wr32(E1000_EIMS, tx_ring->eims_value);
 	return IRQ_HANDLED;
 }
 
@@ -3032,14 +3050,16 @@ static irqreturn_t igb_msix_rx(int irq, void *data, struct pt_regs *regs)
 {
 	struct igb_ring *rx_ring = data;
 	struct igb_adapter *adapter = rx_ring->adapter;
+	struct e1000_hw *hw = &adapter->hw;
+
 	if (!rx_ring->itr_val)
-		E1000_WRITE_REG(&adapter->hw, E1000_EIMC, rx_ring->eims_value);
+		wr32(E1000_EIMC, rx_ring->eims_value);
 
-	if (netif_rx_schedule_prep(adapter->netdev)) {
+	if (netif_rx_schedule_prep(rx_ring->netdev)) {
 		rx_ring->total_bytes = 0;
 		rx_ring->total_packets = 0;
 		rx_ring->no_itr_adjust = 0;
-		__netif_rx_schedule(adapter->netdev);
+		__netif_rx_schedule(rx_ring->netdev);
 	} else {
 		if (!rx_ring->no_itr_adjust) {
 			igb_lower_rx_eitr(adapter, rx_ring);
@@ -3059,13 +3079,14 @@ static irqreturn_t igb_intr_msi(int irq, void *data, struct pt_regs *regs)
 	struct net_device *netdev = data;
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 icr = E1000_READ_REG(hw, E1000_ICR);
+	/* read ICR disables interrupts using IAM */
+	u32 icr = rd32(E1000_ICR);
 
 	/* Write the ITR value calculated at the end of the
 	 * previous interrupt.
 	 */
 	if (adapter->set_itr) {
-		E1000_WRITE_REG(&adapter->hw, E1000_ITR,
+		wr32(E1000_ITR,
 			1000000000 / (adapter->itr * 256));
 		adapter->set_itr = 0;
 	}
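
The E1000_ITR write above is a unit conversion: adapter->itr is a target rate
in interrupts per second, while the register is programmed as a minimum
inter-interrupt gap counted in 256 ns increments, hence
1000000000 / (itr * 256).  For example itr = 8000 gives roughly 488, i.e. a
gap of about 125 us.  A minimal sketch of the same conversion (illustrative
only; the helper name is made up):

	/* Same arithmetic as the wr32(E1000_ITR, ...) above. */
	static inline u32 itr_rate_to_reg(u32 ints_per_sec)
	{
		return 1000000000 / (ints_per_sec * 256);  /* 8000 -> ~488 */
	}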
@@ -3098,7 +3119,9 @@ static irqreturn_t igb_intr(int irq, void *data, struct pt_regs *regs)
 	struct net_device *netdev = data;
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 icr = E1000_READ_REG(hw, E1000_ICR);
+	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
+	 * need for the IMC write */
+	u32 icr = rd32(E1000_ICR);
 	u32 eicr = 0;
 	if (!icr)
 		return IRQ_NONE;  /* Not our interrupt */
@@ -3107,7 +3130,7 @@ static irqreturn_t igb_intr(int irq, void *data, struct pt_regs *regs)
 	 * previous interrupt.
 	 */
 	if (adapter->set_itr) {
-		E1000_WRITE_REG(&adapter->hw, E1000_ITR,
+		wr32(E1000_ITR,
 			1000000000 / (adapter->itr * 256));
 		adapter->set_itr = 0;
 	}
@@ -3117,9 +3140,7 @@ static irqreturn_t igb_intr(int irq, void *data, struct pt_regs *regs)
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
-	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
-	 * need for the IMC write */
-	eicr = E1000_READ_REG(hw, E1000_EICR);
+	eicr = rd32(E1000_EICR);
 
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
@@ -3171,12 +3192,12 @@ static int igb_clean(struct net_device *poll_dev, int *budget)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		igb_clean_rx_irq_adv(adapter, &adapter->rx_ring[i], &work_done,
 		                     work_to_do / adapter->num_rx_queues);
-		budget -= work_done;
+		*budget -= work_done;
 		poll_dev->quota -= work_done;
 	}
 
 	/* If no Tx and not enough Rx work done, exit the polling mode */
-	if ((tx_clean_complete && (work_done < work_to_do)) ||
+	if ((tx_clean_complete && (work_done == 0)) ||
 	    !netif_running(poll_dev)) {
 quit_polling:
 		if (adapter->itr_setting & 3)
@@ -3192,10 +3213,10 @@ quit_polling:
 
 static int igb_clean_rx_ring_msix(struct net_device *netdev, int *budget)
 {
-	struct igb_ring *rxr = netdev->priv;
-	struct igb_adapter *adapter = rxr->adapter;
+	struct igb_ring *rx_ring = netdev->priv;
+	struct igb_adapter *adapter = rx_ring->adapter;
+	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *real_netdev = adapter->netdev;
-	int tx_clean_complete = 1;
 	int work_to_do = min(*budget, netdev->quota);
 	int work_done = 0;
 
@@ -3203,27 +3224,25 @@ static int igb_clean_rx_ring_msix(struct net_device *netdev, int *budget)
 	if (!netif_carrier_ok(real_netdev))
 		goto quit_polling;
 
-	igb_clean_rx_irq_adv(adapter, rxr, &work_done, work_to_do);
+	igb_clean_rx_irq_adv(adapter, rx_ring, &work_done, work_to_do);
 
 	*budget -= work_done;
 	netdev->quota -= work_done;
 
 	/* If not enough Rx work done, exit the polling mode */
-	if ((tx_clean_complete && (work_done == 0)) ||
-	    !netif_running(real_netdev)) {
+	if ((work_done == 0) || !netif_running(real_netdev)) {
 quit_polling:
 		netif_rx_complete(netdev);
 
-		E1000_WRITE_REG(&adapter->hw, E1000_EIMS, rxr->eims_value);
-
-		if ((adapter->itr_setting & 3) && !rxr->no_itr_adjust &&
-		    (rxr->total_packets > IGB_DYN_ITR_PACKET_THRESHOLD)) {
-			int mean_size = rxr->total_bytes /
-					rxr->total_packets;
+		wr32(E1000_EIMS, rx_ring->eims_value);
+		if ((adapter->itr_setting & 3) && !rx_ring->no_itr_adjust &&
+		    (rx_ring->total_packets > IGB_DYN_ITR_PACKET_THRESHOLD)) {
+			int mean_size = rx_ring->total_bytes /
+					rx_ring->total_packets;
 			if (mean_size < IGB_DYN_ITR_LENGTH_LOW)
-				igb_raise_rx_eitr(adapter, rxr);
+				igb_raise_rx_eitr(adapter, rx_ring);
 			else if (mean_size > IGB_DYN_ITR_LENGTH_HIGH)
-				igb_lower_rx_eitr(adapter, rxr);
+				igb_lower_rx_eitr(adapter, rx_ring);
 		}
 		return 0;
 	}
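
Both poll routines above follow the pre-2.6.24 NAPI contract this 2.6.18
backport has to use: the stack passes in a *budget, the driver subtracts the
work it did from both *budget and the device quota (which is what the
"*budget -= work_done" fix in igb_clean() is about), then either returns 1 to
keep being polled or calls netif_rx_complete() and returns 0.  A bare-bones
sketch of that shape, with do_rx_work() as a made-up placeholder:

	static int example_poll(struct net_device *poll_dev, int *budget)
	{
		int work_to_do = min(*budget, poll_dev->quota);
		int work_done = do_rx_work(work_to_do);	/* placeholder */

		*budget -= work_done;		/* note: *budget, not budget */
		poll_dev->quota -= work_done;

		if (work_done < work_to_do || !netif_running(poll_dev)) {
			netif_rx_complete(poll_dev);	/* done for now */
			return 0;
		}
		return 1;			/* ask to be polled again */
	}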
@@ -3233,20 +3252,21 @@ quit_polling:
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
  * @adapter: board private structure
- * returns 1 if ring is completely cleaned
+ * returns true if ring is completely cleaned
  **/
 static bool igb_clean_tx_irq(struct igb_adapter *adapter,
 				  struct igb_ring *tx_ring)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_tx_desc *tx_desc;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
 	unsigned int i;
 	u32 head, oldhead;
 	unsigned int count = 0;
-	bool cleaned = 0;
-	bool retval = 1;
+	bool cleaned = false;
+	bool retval = true;
 	unsigned int total_bytes = 0, total_packets = 0;
 
 	rmb();
@@ -3256,7 +3276,7 @@ static bool igb_clean_tx_irq(struct igb_adapter *adapter,
 	i = tx_ring->next_to_clean;
 	while (1) {
 		while (i != head) {
-			cleaned = 1;
+			cleaned = true;
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
 			skb = buffer_info->skb;
@@ -3281,7 +3301,7 @@ static bool igb_clean_tx_irq(struct igb_adapter *adapter,
 
 			count++;
 			if (count == IGB_MAX_TX_CLEAN) {
-				retval = 0;
+				retval = false;
 				goto done_cleaning;
 			}
 		}
@@ -3314,11 +3334,11 @@ done_cleaning:
 	if (tx_ring->detect_tx_hung) {
 		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
-		tx_ring->detect_tx_hung = 0;
+		tx_ring->detect_tx_hung = false;
 		if (tx_ring->buffer_info[i].time_stamp &&
 		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
 			       (adapter->tx_timeout_factor * HZ))
-		    && !(E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+		    && !(rd32(E1000_STATUS) &
 			 E1000_STATUS_TXOFF)) {
 
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -3408,7 +3428,7 @@ static bool igb_clean_rx_irq_adv(struct igb_adapter *adapter,
 	struct sk_buff *skb;
 	unsigned int i, j;
 	u32 length, hlen, staterr;
-	bool cleaned = 0;
+	bool cleaned = false;
 	int cleaned_count = 0;
 	unsigned int total_bytes = 0, total_packets = 0;
 
@@ -3433,7 +3453,7 @@ static bool igb_clean_rx_irq_adv(struct igb_adapter *adapter,
 			hlen = adapter->rx_ps_hdr_size;
 
 		length = le16_to_cpu(rx_desc->wb.upper.length);
-		cleaned = 1;
+		cleaned = true;
 		cleaned_count++;
 
 		if (rx_ring->pending_skb != NULL) {
@@ -3702,6 +3722,7 @@ static void igb_vlan_rx_register(struct net_device *netdev,
 				 struct vlan_group *grp)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, rctl;
 
 	igb_irq_disable(adapter);
@@ -3709,33 +3730,33 @@ static void igb_vlan_rx_register(struct net_device *netdev,
 
 	if (grp) {
 		/* enable VLAN tag insert/strip */
-		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl = rd32(E1000_CTRL);
 		ctrl |= E1000_CTRL_VME;
-		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+		wr32(E1000_CTRL, ctrl);
 
 		/* enable VLAN receive filtering */
-		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+		rctl = rd32(E1000_RCTL);
 		rctl |= E1000_RCTL_VFE;
 		rctl &= ~E1000_RCTL_CFIEN;
-		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+		wr32(E1000_RCTL, rctl);
 		igb_update_mng_vlan(adapter);
-		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
+		wr32(E1000_RLPML,
 				adapter->max_frame_size + VLAN_TAG_SIZE);
 	} else {
 		/* disable VLAN tag insert/strip */
-		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl = rd32(E1000_CTRL);
 		ctrl &= ~E1000_CTRL_VME;
-		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+		wr32(E1000_CTRL, ctrl);
 
 		/* disable VLAN filtering */
-		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+		rctl = rd32(E1000_RCTL);
 		rctl &= ~E1000_RCTL_VFE;
-		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+		wr32(E1000_RCTL, rctl);
 		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
 			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
 		}
-		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
+		wr32(E1000_RLPML,
 				adapter->max_frame_size);
 	}
 
@@ -3746,6 +3767,7 @@ static void igb_vlan_rx_register(struct net_device *netdev,
 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	u32 vfta, index;
 
 	if ((adapter->hw.mng_cookie.status &
@@ -3754,14 +3776,15 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 		return;
 	/* add VID to filter table */
 	index = (vid >> 5) & 0x7F;
-	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
+	vfta = array_rd32(E1000_VFTA, index);
 	vfta |= (1 << (vid & 0x1F));
-	e1000_write_vfta(&adapter->hw, index, vfta);
+	igb_write_vfta(&adapter->hw, index, vfta);
 }
 
 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	u32 vfta, index;
 
 	igb_irq_disable(adapter);
@@ -3780,9 +3803,9 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 
 	/* remove VID from filter table */
 	index = (vid >> 5) & 0x7F;
-	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
+	vfta = array_rd32(E1000_VFTA, index);
 	vfta &= ~(1 << (vid & 0x1F));
-	e1000_write_vfta(&adapter->hw, index, vfta);
+	igb_write_vfta(&adapter->hw, index, vfta);
 }
 
 static void igb_restore_vlan(struct igb_adapter *adapter)
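
The VFTA updates above pack all 4096 possible VLAN IDs into a 128-entry array
of 32-bit words: bits 5..11 of the VID select the word and bits 0..4 select
the bit within it.  A small standalone illustration of the same arithmetic
(not part of the patch):

	/* e.g. vid = 100: index = 100 >> 5 = 3, bit = 100 & 0x1F = 4,
	 * so the filter touches bit 4 of VFTA word 3 (mask 0x10).
	 */
	static void vfta_position(u16 vid, u32 *index, u32 *bit)
	{
		*index = (vid >> 5) & 0x7F;	/* which 32-bit VFTA word */
		*bit = vid & 0x1F;		/* which bit within it */
	}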
@@ -3844,6 +3867,7 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, ctrl_ext, rctl, status;
 	u32 wufc = adapter->wol;
 #ifdef CONFIG_PM
@@ -3864,7 +3888,7 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 		return retval;
 #endif
 
-	status = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+	status = rd32(E1000_STATUS);
 	if (status & E1000_STATUS_LU)
 		wufc &= ~E1000_WUFC_LNKC;
 
@@ -3874,44 +3898,42 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 
 		/* turn on all-multi mode if wake on multicast is enabled */
 		if (wufc & E1000_WUFC_MC) {
-			rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+			rctl = rd32(E1000_RCTL);
 			rctl |= E1000_RCTL_MPE;
-			E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+			wr32(E1000_RCTL, rctl);
 		}
 
-		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl = rd32(E1000_CTRL);
 		/* advertise wake from D3Cold */
 		#define E1000_CTRL_ADVD3WUC 0x00100000
 		/* phy power management enable */
 		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
 		ctrl |= E1000_CTRL_ADVD3WUC;
-		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+		wr32(E1000_CTRL, ctrl);
 
 		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
 		   adapter->hw.phy.media_type ==
 					e1000_media_type_internal_serdes) {
 			/* keep the laser running in D3 */
-			ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+			ctrl_ext = rd32(E1000_CTRL_EXT);
 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
-			E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
+			wr32(E1000_CTRL_EXT, ctrl_ext);
 		}
 
 		/* Allow time for pending master requests to run */
-		e1000_disable_pcie_master(&adapter->hw);
+		igb_disable_pcie_master(&adapter->hw);
 
-		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
-		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, wufc);
+		wr32(E1000_WUC, E1000_WUC_PME_EN);
+		wr32(E1000_WUFC, wufc);
 		pci_enable_wake(pdev, PCI_D3hot, 1);
 		pci_enable_wake(pdev, PCI_D3cold, 1);
 	} else {
-		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
-		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, 0);
+		wr32(E1000_WUC, 0);
+		wr32(E1000_WUFC, 0);
 		pci_enable_wake(pdev, PCI_D3hot, 0);
 		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
-	igb_release_manageability(adapter);
-
 	/* make sure adapter isn't asleep if manageability is enabled */
 	if (adapter->en_mng_pt) {
 		pci_enable_wake(pdev, PCI_D3hot, 1);
@@ -3934,6 +3956,7 @@ static int igb_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	u32 err;
 
 	pci_set_power_state(pdev, PCI_D0);
@@ -3958,7 +3981,7 @@ static int igb_resume(struct pci_dev *pdev)
 	/* e1000_power_up_phy(adapter); */
 
 	igb_reset(adapter);
-	E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0);
+	wr32(E1000_WUS, ~0);
 
 	igb_init_manageability(adapter);
 
@@ -4039,6 +4062,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 
 	if (pci_enable_device(pdev)) {
 		dev_err(&pdev->dev,
@@ -4051,7 +4075,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
 	igb_reset(adapter);
-	E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0);
+	wr32(E1000_WUS, ~0);
 
 	return PCI_ERS_RESULT_RECOVERED;
 }