From: Brad Peters <bpeters@redhat.com>
Date: Tue, 29 Jul 2008 14:53:27 -0400
Subject: [ppc] eHEA: update from version 0076-05 to 0091-00
Message-id: 20080729185327.8726.84140.sendpatchset@squad5-lp1.lab.bos.redhat.com
O-Subject: [PATCH RHEL 5.3 5/5 v2] eHEA device driver update from version 0076-05 to 0091-00
Bugzilla: 442409
RH-Acked-by: David Howells <dhowells@redhat.com>

RHBZ#:
======
https://bugzilla.redhat.com/show_bug.cgi?id=442409

Description:
============
Updates the eHEA device driver to the current upstream version, 0091-00, and
folds in the subsequent version-0092 fixes listed below (hence DRV_VERSION
"EHEA_0092-00" in the diff).

Upstream Status:
================
Ported from upstream to RHEL kernel 2.6.18-94.el5.

Mainline kernel postings:
* kexec/kdump support:
    http://lkml.org/lkml/2007/10/26/192
    applied: http://lkml.org/lkml/2007/10/29/92
    http://lkml.org/lkml/2008/2/13/276
    applied: http://lkml.org/lkml/2008/2/20/337

* LRO support:
    http://www.spinics.net/lists/netdev/msg37453.html
    applied: http://www.spinics.net/lists/netdev/msg37897.html

* ehea_treat_poll_error fix
    http://lkml.org/lkml/2007/11/21/171
    applied: http://lkml.org/lkml/2007/11/23/181

* Fix missing Kconfig dependency
    http://lkml.org/lkml/2008/3/3/144
    applied: http://lkml.org/lkml/2008/3/5/116

* Remove unnecessary memset after kzalloc
    http://lkml.org/lkml/2008/5/12/318
    applied: http://lkml.org/lkml/2008/5/22/129

* Fix use after free on reboot (Patch not sent by ehea dev team)
    http://www.spinics.net/lists/netdev/msg63167.html
    applied: http://www.spinics.net/lists/netdev/msg63933.html

* Access iph->tot_len with correct endianness (Patch not sent by ehea dev team;
  see the short illustration after this list)
    http://www.spinics.net/lists/netdev/msg67519.html
    applied: http://www.spinics.net/lists/netdev/msg67870.html

* EHEA driver version 0092
    Patch 1/3: Fix might sleep problem http://lkml.org/lkml/2008/7/3/241
    Patch 2/3: Add MODULE_DEVICE_TABLE http://lkml.org/lkml/2008/7/3/240
    Patch 3/3: Fix race condition http://lkml.org/lkml/2008/7/3/238
    applied: http://lkml.org/lkml/2008/7/4/89
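
On the iph->tot_len fix above: tot_len is a big-endian (__be16) field taken
straight from the IP header, so it must pass through ntohs() before being
compared against a host-order length; on big-endian ppc the raw compare
happens to give the same answer, which is how the bug survived. A minimal
stand-alone illustration (userspace C with made-up values, not the driver
code itself):

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
            /* tot_len as it sits in the IP header: network byte order */
            unsigned short tot_len_be = htons(1500);

            /* "Is the packet shorter than 2000 bytes?" */
            /* Wrong on little-endian hosts: big-endian vs. host order */
            printf("raw compare:   %d\n", tot_len_be < 2000);

            /* Correct everywhere: convert to host order first.  On
             * big-endian machines both lines print 1, which is how the
             * missing ntohs() went unnoticed on ppc. */
            printf("ntohs compare: %d\n", ntohs(tot_len_be) < 2000);
            return 0;
    }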

===============================================================

Brad Peters 1-978-392-1000 x 23183
IBM on-site partner.

Proposed Patch:
===============
This patch is based on 2.6.18-94.el5
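
Background on the kdump/kexec pieces below: the driver now maintains flat
snapshot arrays of its firmware handles (ehea_fw_handles) and BCMC
(broadcast/multicast) registrations (ehea_bcmc_regs), rebuilt under a lock
whenever adapters or ports change state, so that ehea_crash_handler() can
simply walk them at crash time without taking sleeping locks or traversing
live driver state. A minimal sketch of that snapshot-and-replay pattern,
written as stand-alone userspace C with illustrative names (not the driver
code itself):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct fw_handle { unsigned long long adh, fwh; };

    /* Snapshot rebuilt under a lock on every resource change. */
    static struct fw_handle *snapshot;
    static int snapshot_entries;

    static void update_snapshot(const struct fw_handle *live, int n)
    {
            struct fw_handle *copy = n ? calloc(n, sizeof(*copy)) : NULL;

            if (n && !copy)
                    return;        /* allocation failed: keep old snapshot */
            if (n)
                    memcpy(copy, live, n * sizeof(*copy));
            free(snapshot);
            snapshot = copy;
            snapshot_entries = n;
    }

    /* Stand-in for the hypercall that force-frees one firmware resource. */
    static void force_free(unsigned long long adh, unsigned long long fwh)
    {
            printf("free fwh 0x%llx on adapter 0x%llx\n", fwh, adh);
    }

    /* Crash path: no locks, no allocation, just replay the snapshot. */
    static void crash_handler(void)
    {
            int i;

            for (i = 0; i < snapshot_entries; i++)
                    force_free(snapshot[i].adh, snapshot[i].fwh);
    }

    int main(void)
    {
            struct fw_handle live[] = { { 0x1, 0xa0 }, { 0x1, 0xb0 } };

            update_snapshot(live, 2);
            crash_handler();
            free(snapshot);
            return 0;
    }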

diff --git a/drivers/net/ehea/Makefile b/drivers/net/ehea/Makefile
index d62b347..fa2fe82 100644
--- a/drivers/net/ehea/Makefile
+++ b/drivers/net/ehea/Makefile
@@ -1,6 +1,5 @@
 #
 # Makefile for the eHEA ethernet device driver for IBM eServer System p
 #
-ehea-objs = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o ehea_phyp.o inet_lro.o
+ehea-objs = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o ehea_phyp.o
 obj-$(CONFIG_EHEA) += ehea.o
-
diff --git a/drivers/net/ehea/backlevel_kernel.h b/drivers/net/ehea/backlevel_kernel.h
index 167933c..2cddf1e 100644
--- a/drivers/net/ehea/backlevel_kernel.h
+++ b/drivers/net/ehea/backlevel_kernel.h
@@ -414,13 +414,4 @@ static inline long plpar_hcall9(u64 opcode, u64 *outs, u64 arg1, u64 arg2, u64 a
 };
 #endif
 
-/* inet_lro */
-typedef __u16 __bitwise __sum16;
-typedef __u32 __bitwise __wsum;
-
-static inline __wsum csum_unfold(__sum16 n)
-{
-	return (__force __wsum)n;
-}
-
 #endif	/* __BACKLEVEL_KERNEL_H__ */
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1ff9023..eda20da 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -33,15 +33,14 @@
 #include <linux/ethtool.h>
 #include <linux/vmalloc.h>
 #include <linux/if_vlan.h>
+#include <linux/inet_lro.h>
 
 #include <asm/ibmebus.h>
 #include <asm/abs_addr.h>
 #include <asm/io.h>
 
-#include "inet_lro.h"
-
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0076-05"
+#define DRV_VERSION	"EHEA_0092-00"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -388,12 +387,18 @@ struct ehea_port_res {
 
 
 #define EHEA_MAX_PORTS 16
+
+#define EHEA_NUM_PORTRES_FW_HANDLES    6  /* QP handle, SendCQ handle,
+					     RecvCQ handle, EQ handle,
+					     SendMR handle, RecvMR handle */
+#define EHEA_NUM_PORT_FW_HANDLES       1  /* EQ handle */
+#define EHEA_NUM_ADAPTER_FW_HANDLES    2  /* MR handle, NEQ handle */
+
 struct ehea_adapter {
 	u64 handle;
 	struct ibmebus_dev *ebus_dev;
 	struct ehea_port *port[EHEA_MAX_PORTS];
 	struct ehea_eq *neq;       /* notification event queue */
-	struct workqueue_struct *ehea_wq;
 	struct tasklet_struct neq_tasklet;
 	struct ehea_mr mr;
 	u32 pd;                    /* protection domain */
@@ -408,6 +413,31 @@ struct ehea_mc_list {
 	u64 macaddr;
 };
 
+/* kdump support */
+struct ehea_fw_handle_entry {
+	u64 adh;               /* Adapter Handle */
+	u64 fwh;               /* Firmware Handle */
+};
+
+struct ehea_fw_handle_array {
+	struct ehea_fw_handle_entry *arr;
+	int num_entries;
+	struct mutex lock;
+};
+
+struct ehea_bcmc_reg_entry {
+	u64 adh;               /* Adapter Handle */
+	u32 port_id;           /* Logical Port Id */
+	u8 reg_type;           /* Registration Type */
+	u64 macaddr;
+};
+
+struct ehea_bcmc_reg_array {
+	struct ehea_bcmc_reg_entry *arr;
+	int num_entries;
+	spinlock_t lock;
+};
+
 #define EHEA_PORT_UP 1
 #define EHEA_PORT_DOWN 0
 #define EHEA_PHY_LINK_UP 1
@@ -423,7 +453,7 @@ struct ehea_port {
 	struct vlan_group *vgrp;
 	struct ehea_eq *qp_eq;
 	struct work_struct reset_task;
-	struct semaphore port_lock;
+	struct mutex port_lock;
 	char int_aff_name[EHEA_IRQ_NAME_SIZE];
 	int allmulti;			 /* Indicates IFF_ALLMULTI state */
 	int promisc;		 	 /* Indicates IFF_PROMISC state */
@@ -431,14 +461,15 @@ struct ehea_port {
 	int num_add_tx_qps;
 	int num_mcs;
 	int resets;
+	u64 flags;
 	u64 mac_addr;
 	u32 logical_port_id;
 	u32 port_speed;
 	u32 msg_enable;
 	u32 sig_comp_iv;
 	u32 state;
-	u8 phy_link;
 	u32 lro_max_aggr;
+	u8 phy_link;
 	u8 full_duplex;
 	u8 autoneg;
 	u8 num_def_qps;
@@ -454,11 +485,15 @@ struct port_res_cfg {
 };
 
 enum ehea_flag_bits {
-	__EHEA_STOP_XFER
+	__EHEA_STOP_XFER,
+	__EHEA_DISABLE_PORT_RESET
 };
 
 void ehea_set_ethtool_ops(struct net_device *netdev);
 int ehea_sense_port_attr(struct ehea_port *port);
 int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
 
+extern u64 ehea_driver_flags;
+extern struct work_struct ehea_rereg_mr_task;
+
 #endif	/* __EHEA_H__ */
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 29ef7a9..fef8bdc 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -40,7 +40,7 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		return ret;
 
 	if (netif_carrier_ok(dev)) {
-		switch(port->port_speed) {
+		switch (port->port_speed) {
 		case EHEA_SPEED_10M: cmd->speed = SPEED_10; break;
 		case EHEA_SPEED_100M: cmd->speed = SPEED_100; break;
 		case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break;
@@ -78,7 +78,7 @@ static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		goto doit;
 	}
 
-	switch(cmd->speed) {
+	switch (cmd->speed) {
 	case SPEED_10:
 		if (cmd->duplex == DUPLEX_FULL)
 			sp = H_SPEED_10M_F;
@@ -262,9 +262,6 @@ const struct ethtool_ops ehea_ethtool_ops = {
 	.get_msglevel = ehea_get_msglevel,
 	.set_msglevel = ehea_set_msglevel,
 	.get_link = ethtool_op_get_link,
-	.get_tx_csum = ethtool_op_get_tx_csum,
-	.get_sg = ethtool_op_get_sg,
-	.get_tso = ethtool_op_get_tso,
 	.set_tso = ethtool_op_set_tso,
 	.get_strings = ehea_get_strings,
 	.get_stats_count = ehea_get_stats_count,
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
index 1af7ca4..567981b 100644
--- a/drivers/net/ehea/ehea_hw.h
+++ b/drivers/net/ehea/ehea_hw.h
@@ -29,10 +29,10 @@
 #ifndef __EHEA_HW_H__
 #define __EHEA_HW_H__
 
-#define QPX_SQA_VALUE   EHEA_BMASK_IBM(48,63)
-#define QPX_RQ1A_VALUE  EHEA_BMASK_IBM(48,63)
-#define QPX_RQ2A_VALUE  EHEA_BMASK_IBM(48,63)
-#define QPX_RQ3A_VALUE  EHEA_BMASK_IBM(48,63)
+#define QPX_SQA_VALUE   EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ1A_VALUE  EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ2A_VALUE  EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ3A_VALUE  EHEA_BMASK_IBM(48, 63)
 
 #define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)
 
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index eeba0ef..cb99ef2 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -6,9 +6,9 @@
  *  (C) Copyright IBM Corp. 2006
  *
  *  Authors:
- *       Christoph Raisch <raisch@de.ibm.com>
- *       Jan-Bernd Themann <themann@de.ibm.com>
- *       Thomas Klein <tklein@de.ibm.com>
+ *	 Christoph Raisch <raisch@de.ibm.com>
+ *	 Jan-Bernd Themann <themann@de.ibm.com>
+ *	 Thomas Klein <tklein@de.ibm.com>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -33,6 +33,11 @@
 #include <linux/if.h>
 #include <linux/list.h>
 #include <linux/if_ether.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <asm/kexec.h>
+#include <linux/mutex.h>
+
 #include <net/ip.h>
 
 #include "ehea.h"
@@ -51,11 +56,11 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
 static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
 static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
 static int sq_entries = EHEA_DEF_ENTRIES_SQ;
-static int use_mcs = 0;
-static int use_lro = 0;
+static int use_mcs;
+static int use_lro;
 static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
 static int num_tx_qps = EHEA_NUM_TX_QP;
-static int prop_carrier_state = 0;
+static int prop_carrier_state;
 
 module_param(msg_level, int, 0);
 module_param(rq1_entries, int, 0);
@@ -85,18 +90,20 @@ MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue  "
 		 "[2^x - 1], x = [6..14]. Default = "
 		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
 MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
+
 MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
 		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
 MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
-                 "Default = 0");
+		 "Default = 0");
 
-static int port_name_cnt = 0;
+static int port_name_cnt;
 static LIST_HEAD(adapter_list);
-u64 ehea_driver_flags = 0;
-struct workqueue_struct *ehea_driver_wq;
+u64 ehea_driver_flags;
 struct work_struct ehea_rereg_mr_task;
+static DEFINE_MUTEX(dlpar_mem_lock);
+struct ehea_fw_handle_array ehea_fw_handles;
+struct ehea_bcmc_reg_array ehea_bcmc_regs;
 
-struct semaphore dlpar_mem_lock;
 
 static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
 					const struct of_device_id *id);
@@ -110,6 +117,7 @@ static struct of_device_id ehea_device_table[] = {
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, ehea_device_table);
 
 static struct ibmebus_driver ehea_driver = {
 	.name = "ehea",
@@ -118,27 +126,188 @@ static struct ibmebus_driver ehea_driver = {
 	.remove = ehea_remove,
 };
 
-void ehea_dump(void *adr, int len, char *msg) {
+void ehea_dump(void *adr, int len, char *msg)
+{
 	int x;
 	unsigned char *deb = adr;
 	for (x = 0; x < len; x += 16) {
 		printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
-			  deb, x, *((u64*)&deb[0]), *((u64*)&deb[8]));
+			  deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
 		deb += 16;
 	}
 }
 
+void ehea_schedule_port_reset(struct ehea_port *port)
+{
+	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
+		schedule_work(&port->reset_task);
+}
+
+static void ehea_update_firmware_handles(void)
+{
+	struct ehea_fw_handle_entry *arr = NULL;
+	struct ehea_adapter *adapter;
+	int num_adapters = 0;
+	int num_ports = 0;
+	int num_portres = 0;
+	int i = 0;
+	int num_fw_handles, k, l;
+
+	/* Determine number of handles */
+	list_for_each_entry(adapter, &adapter_list, list) {
+		num_adapters++;
+
+		for (k = 0; k < EHEA_MAX_PORTS; k++) {
+			struct ehea_port *port = adapter->port[k];
+
+			if (!port || (port->state != EHEA_PORT_UP))
+				continue;
+
+			num_ports++;
+			num_portres += port->num_def_qps + port->num_add_tx_qps;
+		}
+	}
+
+	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
+			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
+			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
+
+	if (num_fw_handles) {
+		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
+		if (!arr)
+			return;  /* Keep the existing array */
+	} else
+		goto out_update;
+
+	list_for_each_entry(adapter, &adapter_list, list) {
+		for (k = 0; k < EHEA_MAX_PORTS; k++) {
+			struct ehea_port *port = adapter->port[k];
+
+			if (!port || (port->state != EHEA_PORT_UP))
+				continue;
+
+			for (l = 0;
+			     l < port->num_def_qps + port->num_add_tx_qps;
+			     l++) {
+				struct ehea_port_res *pr = &port->port_res[l];
+
+				arr[i].adh = adapter->handle;
+				arr[i++].fwh = pr->qp->fw_handle;
+				arr[i].adh = adapter->handle;
+				arr[i++].fwh = pr->send_cq->fw_handle;
+				arr[i].adh = adapter->handle;
+				arr[i++].fwh = pr->recv_cq->fw_handle;
+				arr[i].adh = adapter->handle;
+				arr[i++].fwh = pr->eq->fw_handle;
+				arr[i].adh = adapter->handle;
+				arr[i++].fwh = pr->send_mr.handle;
+				arr[i].adh = adapter->handle;
+				arr[i++].fwh = pr->recv_mr.handle;
+			}
+			arr[i].adh = adapter->handle;
+			arr[i++].fwh = port->qp_eq->fw_handle;
+		}
+
+		arr[i].adh = adapter->handle;
+		arr[i++].fwh = adapter->neq->fw_handle;
+
+		if (adapter->mr.handle) {
+			arr[i].adh = adapter->handle;
+			arr[i++].fwh = adapter->mr.handle;
+		}
+	}
+
+out_update:
+	kfree(ehea_fw_handles.arr);
+	ehea_fw_handles.arr = arr;
+	ehea_fw_handles.num_entries = i;
+}
+
+static void ehea_update_bcmc_registrations(void)
+{
+	struct ehea_bcmc_reg_entry *arr = NULL;
+	struct ehea_adapter *adapter;
+	struct ehea_mc_list *mc_entry;
+	int num_registrations = 0;
+	int i = 0;
+	int k;
+
+	/* Determine number of registrations */
+	list_for_each_entry(adapter, &adapter_list, list)
+		for (k = 0; k < EHEA_MAX_PORTS; k++) {
+			struct ehea_port *port = adapter->port[k];
+
+			if (!port || (port->state != EHEA_PORT_UP))
+				continue;
+
+			num_registrations += 2;	/* Broadcast registrations */
+
+			list_for_each_entry(mc_entry, &port->mc_list->list, list)
+				num_registrations += 2;
+		}
+
+	if (num_registrations) {
+		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
+		if (!arr)
+			return;  /* Keep the existing array */
+	} else
+		goto out_update;
+
+	list_for_each_entry(adapter, &adapter_list, list) {
+		for (k = 0; k < EHEA_MAX_PORTS; k++) {
+			struct ehea_port *port = adapter->port[k];
+
+			if (!port || (port->state != EHEA_PORT_UP))
+				continue;
+
+			arr[i].adh = adapter->handle;
+			arr[i].port_id = port->logical_port_id;
+			arr[i].reg_type = EHEA_BCMC_BROADCAST |
+					  EHEA_BCMC_UNTAGGED;
+			arr[i++].macaddr = port->mac_addr;
+
+			arr[i].adh = adapter->handle;
+			arr[i].port_id = port->logical_port_id;
+			arr[i].reg_type = EHEA_BCMC_BROADCAST |
+					  EHEA_BCMC_VLANID_ALL;
+			arr[i++].macaddr = port->mac_addr;
+
+			list_for_each_entry(mc_entry,
+					    &port->mc_list->list, list) {
+				arr[i].adh = adapter->handle;
+				arr[i].port_id = port->logical_port_id;
+				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
+						  EHEA_BCMC_MULTICAST |
+						  EHEA_BCMC_UNTAGGED;
+				arr[i++].macaddr = mc_entry->macaddr;
+
+				arr[i].adh = adapter->handle;
+				arr[i].port_id = port->logical_port_id;
+				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
+						  EHEA_BCMC_MULTICAST |
+						  EHEA_BCMC_VLANID_ALL;
+				arr[i++].macaddr = mc_entry->macaddr;
+			}
+		}
+	}
+
+out_update:
+	kfree(ehea_bcmc_regs.arr);
+	ehea_bcmc_regs.arr = arr;
+	ehea_bcmc_regs.num_entries = i;
+}
+
 static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
 	struct net_device_stats *stats = &port->stats;
 	struct hcp_ehea_port_cb2 *cb2;
-	u64 hret, rx_packets;
+	u64 hret, rx_packets, tx_packets;
 	int i;
 
 	memset(stats, 0, sizeof(*stats));
 
-	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
 	if (!cb2) {
 		ehea_error("no mem for cb2");
 		goto out;
@@ -159,7 +328,11 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 	for (i = 0; i < port->num_def_qps; i++)
 		rx_packets += port->port_res[i].rx_packets;
 
-	stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
+	tx_packets = 0;
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+		tx_packets += port->port_res[i].tx_packets;
+
+	stats->tx_packets = tx_packets;
 	stats->multicast = cb2->rxmcp;
 	stats->rx_errors = cb2->rxuerr;
 	stats->rx_bytes = cb2->rxo;
@@ -408,11 +581,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
 		pr->p_stats.err_frame_crc++;
 
-	if (netif_msg_rx_err(pr->port)) {
-		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
-		ehea_dump(cqe, sizeof(*cqe), "CQE");
-	}
-
 	if (rq == 2) {
 		*processed_rq2 += 1;
 		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
@@ -424,8 +592,12 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 	}
 
 	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
-		ehea_error("Critical receive error. Resetting port.");
-		queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
+		if (netif_msg_rx_err(pr->port)) {
+			ehea_error("Critical receive error for QP %d. "
+				   "Resetting port.", pr->qp->init_attr.qp_nr);
+			ehea_dump(cqe, sizeof(*cqe), "CQE");
+		}
+		ehea_schedule_port_reset(pr->port);
 		return 1;
 	}
 
@@ -453,13 +625,13 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
         tcp_len = ((struct tcphdr *)(*tcph))->doff * 4;
 
         /* check if ip header and tcp header are complete */
-        if (iph->tot_len < ip_len + tcp_len)
+        if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
                 return -1;
 
         *hdr_flags = LRO_IPV4 | LRO_TCP;
         *iphdr = iph;
 
-        return 0;
+	return 0;
 }
 
 static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
@@ -594,7 +766,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 	unsigned long flags;
 
 	cqe = ehea_poll_cq(send_cq);
-	while(cqe && (quota > 0)) {
+	while (cqe && (quota > 0)) {
 		ehea_inc_cq(send_cq);
 
 		cqe_counter++;
@@ -603,8 +775,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 			ehea_error("Send Completion Error: Resetting port");
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
-			queue_work(pr->port->adapter->ehea_wq,
-				   &pr->port->reset_task);
+			ehea_schedule_port_reset(pr->port);
 			break;
 		}
 
@@ -685,8 +856,10 @@ static int ehea_poll(struct net_device *dev, int *budget)
 static void ehea_netpoll(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
+	int i;
 
-	netif_rx_schedule(port->port_res[0].d_netdev);
+	for (i = 0; i < port->num_def_qps; i++)
+		netif_rx_schedule(port->port_res[i].d_netdev);
 }
 #endif
 
@@ -728,7 +901,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param,
 		eqe = ehea_poll_eq(port->qp_eq);
 	}
 
-	queue_work(port->adapter->ehea_wq, &port->reset_task);
+	ehea_schedule_port_reset(port);
 
 	return IRQ_HANDLED;
 }
@@ -751,8 +924,9 @@ int ehea_sense_port_attr(struct ehea_port *port)
 	u64 hret;
 	struct hcp_ehea_port_cb0 *cb0;
 
-	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);   /* May be called via */
-	if (!cb0) {                             /* ehea_neq_tasklet() */
+	/* may be called via ehea_neq_tasklet() */
+	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+	if (!cb0) {
 		ehea_error("no mem for cb0");
 		ret = -ENOMEM;
 		goto out;
@@ -770,7 +944,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
 	/* MAC address */
 	port->mac_addr = cb0->port_mac_addr << 16;
 
-	if (!is_valid_ether_addr((u8*)&port->mac_addr)) {
+	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
 		ret = -EADDRNOTAVAIL;
 		goto out_free;
 	}
@@ -1002,7 +1176,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 
 static void ehea_neq_tasklet(unsigned long data)
 {
-	struct ehea_adapter *adapter = (struct ehea_adapter*)data;
+	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
 	struct ehea_eqe *eqe;
 	u64 event_mask;
 
@@ -1213,7 +1387,7 @@ int ehea_rem_smrs(struct ehea_port_res *pr)
 
 static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
 {
-	int arr_size = sizeof(void*) * max_q_entries;
+	int arr_size = sizeof(void *) * max_q_entries;
 
 	q_skba->arr = vmalloc(arr_size);
 	if (!q_skba->arr)
@@ -1509,7 +1683,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
 
 	nfrags = skb_shinfo(skb)->nr_frags;
 	sg1entry = &swqe->u.immdata_desc.sg_entry;
-	sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list;
+	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
 	swqe->descriptors = 0;
 	sg1entry_contains_frag_data = 0;
 
@@ -1562,7 +1736,7 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
 				     reg_type, port->mac_addr, 0, hcallid);
 	if (hret != H_SUCCESS) {
 		ehea_error("%sregistering bc address failed (tagged)",
-                           hcallid == H_REG_BCMC ? "" : "de");
+			   hcallid == H_REG_BCMC ? "" : "de");
 		ret = -EIO;
 		goto out_herr;
 	}
@@ -1615,19 +1789,29 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
 
+	spin_lock(&ehea_bcmc_regs.lock);
+
 	/* Deregister old MAC in pHYP */
-	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-	if (ret)
-		goto out_free;
+	if (port->state == EHEA_PORT_UP) {
+		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+		if (ret)
+			goto out_upregs;
+	}
 
 	port->mac_addr = cb0->port_mac_addr << 16;
 
 	/* Register new MAC in pHYP */
-	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
-	if (ret)
-		goto out_free;
+	if (port->state == EHEA_PORT_UP) {
+		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+		if (ret)
+			goto out_upregs;
+	}
 
 	ret = 0;
+
+out_upregs:
+	ehea_update_bcmc_registrations();
+	spin_unlock(&ehea_bcmc_regs.lock);
 out_free:
 	kfree(cb0);
 out:
@@ -1752,7 +1936,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
 		}
 }
 
-static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
+static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
 {
 	struct ehea_mc_list *ehea_mcl_entry;
 	u64 hret;
@@ -1789,9 +1973,11 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 	ehea_promiscuous(dev, 0);
 
+	spin_lock(&ehea_bcmc_regs.lock);
+
 	if (dev->flags & IFF_ALLMULTI) {
 		ehea_allmulti(dev, 1);
-		return;
+		goto out;
 	}
 	ehea_allmulti(dev, 0);
 
@@ -1811,13 +1997,14 @@ static void ehea_set_multicast_list(struct net_device *dev)
 			goto out;
 		}
 
-		for (i = 0, k_mcl_entry = dev->mc_list;
-		     i < dev->mc_count;
-		     i++, k_mcl_entry = k_mcl_entry->next) {
+		for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
+			     k_mcl_entry = k_mcl_entry->next)
 			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
-		}
+
 	}
 out:
+	ehea_update_bcmc_registrations();
+	spin_unlock(&ehea_bcmc_regs.lock);
 	return;
 }
 
@@ -1949,8 +2136,7 @@ static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
 		tmp = (tcp->source + (tcp->dest << 16)) % 31;
 		tmp += ip_hdr(skb)->daddr % 31;
 		return tmp % num_qps;
-	}
-	else
+	} else
 		return 0;
 }
 
@@ -2023,6 +2209,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
+	pr->tx_packets++;
 
 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
 		spin_lock_irqsave(&pr->netif_queue, flags);
@@ -2055,8 +2242,6 @@ static void ehea_vlan_rx_register(struct net_device *dev,
 		goto out;
 	}
 
-	memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
-
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS)
@@ -2141,7 +2326,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
 	u64 hret;
 	u16 dummy16 = 0;
 	u64 dummy64 = 0;
-	struct hcp_modify_qp_cb0* cb0;
+	struct hcp_modify_qp_cb0 *cb0;
 
 	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!cb0) {
@@ -2267,7 +2452,7 @@ static int ehea_clean_all_portres(struct ehea_port *port)
 	int ret = 0;
 	int i;
 
-	for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
 		ret |= ehea_clean_portres(port, &port->port_res[i]);
 
 	ret |= ehea_destroy_eq(port->qp_eq);
@@ -2299,6 +2484,8 @@ static int ehea_up(struct net_device *dev)
 	if (port->state == EHEA_PORT_UP)
 		return 0;
 
+	mutex_lock(&ehea_fw_handles.lock);
+
 	ret = ehea_port_res_setup(port, port->num_def_qps,
 				  port->num_add_tx_qps);
 	if (ret) {
@@ -2319,7 +2506,7 @@ static int ehea_up(struct net_device *dev)
 		goto out_clean_pr;
 	}
 
-	for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
 		if (ret) {
 			ehea_error("activate_qp failed");
@@ -2327,7 +2514,7 @@ static int ehea_up(struct net_device *dev)
 		}
 	}
 
-	for(i = 0; i < port->num_def_qps; i++) {
+	for (i = 0; i < port->num_def_qps; i++) {
 		ret = ehea_fill_port_res(&port->port_res[i]);
 		if (ret) {
 			ehea_error("out_free_irqs");
@@ -2335,8 +2522,17 @@ static int ehea_up(struct net_device *dev)
 		}
 	}
 
-	ret = 0;
+	spin_lock(&ehea_bcmc_regs.lock);
+
+	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+	if (ret) {
+		ret = -EIO;
+		goto out_free_irqs;
+	}
+
 	port->state = EHEA_PORT_UP;
+
+	ret = 0;
 	goto out;
 
 out_free_irqs:
@@ -2348,51 +2544,83 @@ out:
 	if (ret)
 		ehea_info("Failed starting %s. ret=%i", dev->name, ret);
 
+	ehea_update_bcmc_registrations();
+	spin_unlock(&ehea_bcmc_regs.lock);
+
+	ehea_update_firmware_handles();
+	mutex_unlock(&ehea_fw_handles.lock);
+
 	return ret;
 }
 
+static void ehea_poll_disable(struct net_device *dev)
+{
+	struct ehea_port *port = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < port->num_def_qps; i++)
+		netif_poll_disable(port->port_res[i].d_netdev);
+}
+
+static void ehea_poll_enable(struct net_device *dev)
+{
+	struct ehea_port *port = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < port->num_def_qps; i++)
+		netif_poll_enable(port->port_res[i].d_netdev);
+}
+
 static int ehea_open(struct net_device *dev)
 {
 	int ret;
 	struct ehea_port *port = netdev_priv(dev);
 
-	down(&port->port_lock);
+	mutex_lock(&port->port_lock);
 
 	if (netif_msg_ifup(port))
 		ehea_info("enabling port %s", dev->name);
 
 	ret = ehea_up(dev);
-	if (!ret)
+	if (!ret) {
+		ehea_poll_enable(dev);
 		netif_start_queue(dev);
+	}
 
-	up(&port->port_lock);
+	mutex_unlock(&port->port_lock);
 
 	return ret;
 }
 
 static int ehea_down(struct net_device *dev)
 {
-	int ret, i;
+	int ret;
 	struct ehea_port *port = netdev_priv(dev);
 
 	if (port->state == EHEA_PORT_DOWN)
 		return 0;
 
+	mutex_lock(&ehea_fw_handles.lock);
+
+	spin_lock(&ehea_bcmc_regs.lock);
 	ehea_drop_multicast_list(dev);
-	ehea_free_interrupts(dev);
+	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 
-	for (i = 0; i < port->num_def_qps; i++)
-		while (test_bit(__LINK_STATE_RX_SCHED,
-				&port->port_res[i].d_netdev->state))
-			msleep(1);
+	ehea_free_interrupts(dev);
 
 	port->state = EHEA_PORT_DOWN;
 
+	ehea_update_bcmc_registrations();
+	spin_unlock(&ehea_bcmc_regs.lock);
+
 	ret = ehea_clean_all_portres(port);
 	if (ret)
 		ehea_info("Failed freeing resources for %s. ret=%i",
 			  dev->name, ret);
 
+	ehea_update_firmware_handles();
+	mutex_unlock(&ehea_fw_handles.lock);
+
 	return ret;
 }
 
@@ -2404,15 +2632,19 @@ static int ehea_stop(struct net_device *dev)
 	if (netif_msg_ifdown(port))
 		ehea_info("disabling port %s", dev->name);
 
-	flush_workqueue(port->adapter->ehea_wq);
-	down(&port->port_lock);
+	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
+	while (cancel_delayed_work(&port->reset_task))
+		msleep(10);
+	mutex_lock(&port->port_lock);
 	netif_stop_queue(dev);
+	ehea_poll_disable(dev);
 	ret = ehea_down(dev);
-	up(&port->port_lock);
+	mutex_unlock(&port->port_lock);
+	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
 	return ret;
 }
 
-void ehea_purge_sq(struct ehea_qp *orig_qp)
+static void ehea_purge_sq(struct ehea_qp *orig_qp)
 {
 	struct ehea_qp qp = *orig_qp;
 	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2433,7 +2665,7 @@ void ehea_purge_sq(struct ehea_qp *orig_qp)
  * function in any case.
  */
 
-void ehea_flush_sq(struct ehea_port *port)
+static void ehea_flush_sq(struct ehea_port *port)
 {
 	int i;
 
@@ -2455,7 +2687,7 @@ int ehea_stop_qps(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
 	struct ehea_adapter *adapter = port->adapter;
-	struct hcp_modify_qp_cb0* cb0;
+	struct hcp_modify_qp_cb0 *cb0;
 	int ret = -EIO;
 	int dret;
 	int i;
@@ -2520,7 +2752,7 @@ out:
 	return ret;
 }
 
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr)
+void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
 {
 	struct ehea_qp qp = *orig_qp;
 	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2529,6 +2761,8 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr)
 	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
 	struct sk_buff *skb;
 	u32 lkey = pr->recv_mr.lkey;
+
+
 	int i;
 	int index;
 
@@ -2558,7 +2792,7 @@ int ehea_restart_qps(struct net_device *dev)
 	int ret = 0;
 	int i;
 
-	struct hcp_modify_qp_cb0* cb0;
+	struct hcp_modify_qp_cb0 *cb0;
 	u64 hret;
 	u64 dummy64 = 0;
 	u16 dummy16 = 0;
@@ -2621,25 +2855,6 @@ out:
 	return ret;
 }
 
-void ehea_poll_disable(struct net_device *dev)
-{
-	struct ehea_port *port = netdev_priv(dev);
-	int i;
-
-	for (i=0; i < port->num_def_qps; i++)
-		netif_poll_disable(port->port_res[i].d_netdev);
-}
-
-void ehea_poll_enable(struct net_device *dev)
-{
-	struct ehea_port *port = netdev_priv(dev);
-	int i;
-
-	for (i = 0; i < port->num_def_qps; i++)
-		netif_poll_enable(port->port_res[i].d_netdev);
-}
-
-
 static void ehea_reset_port(void *data)
 {
 	int ret;
@@ -2647,9 +2862,9 @@ static void ehea_reset_port(void *data)
 	struct ehea_port *port = netdev_priv(dev);
 
 	port->resets++;
-	down(&port->port_lock);
+	mutex_lock(&port->port_lock);
 	netif_stop_queue(dev);
-	netif_poll_disable(dev);
+	ehea_poll_disable(dev);
 
 	ehea_down(dev);
 
@@ -2662,10 +2877,10 @@ static void ehea_reset_port(void *data)
 	if (netif_msg_timer(port))
 		ehea_info("Device %s resetted successfully", dev->name);
 
-	netif_poll_enable(dev);
+	ehea_poll_enable(dev);
 	netif_wake_queue(dev);
 out:
-	up(&port->port_lock);
+	mutex_unlock(&port->port_lock);
 	return;
 }
 
@@ -2674,7 +2889,7 @@ static void ehea_rereg_mrs(void *data)
 	int ret, i;
 	struct ehea_adapter *adapter;
 
-	down(&dlpar_mem_lock);
+	mutex_lock(&dlpar_mem_lock);
 	ehea_info("LPAR memory enlarged - re-initializing driver");
 
 	list_for_each_entry(adapter, &adapter_list, list)
@@ -2682,22 +2897,24 @@ static void ehea_rereg_mrs(void *data)
 			/* Shutdown all ports */
 			for (i = 0; i < EHEA_MAX_PORTS; i++) {
 				struct ehea_port *port = adapter->port[i];
+				struct net_device *dev;
 
-				if (port) {
-					struct net_device *dev = port->netdev;
+				if (!port)
+					continue;
 
-					if (dev->flags & IFF_UP) {
-						down(&port->port_lock);
-						netif_stop_queue(dev);
-						ehea_flush_sq(port);
-						ret = ehea_stop_qps(dev);
-						if (ret) {
-							up(&port->port_lock);
-							goto out;
-						}
-						ehea_poll_disable(dev);
-						up(&port->port_lock);
+				dev = port->netdev;
+
+				if (dev->flags & IFF_UP) {
+					mutex_lock(&port->port_lock);
+					netif_stop_queue(dev);
+					ehea_flush_sq(port);
+					ret = ehea_stop_qps(dev);
+					if (ret) {
+						mutex_unlock(&port->port_lock);
+						goto out;
 					}
+					ehea_poll_disable(dev);
+					mutex_unlock(&port->port_lock);
 				}
 			}
 
@@ -2737,18 +2954,18 @@ static void ehea_rereg_mrs(void *data)
 					struct net_device *dev = port->netdev;
 
 					if (dev->flags & IFF_UP) {
-						down(&port->port_lock);
+						mutex_lock(&port->port_lock);
 						ehea_poll_enable(dev);
 						ret = ehea_restart_qps(dev);
 						if (!ret)
 							netif_wake_queue(dev);
-						up(&port->port_lock);
+						mutex_unlock(&port->port_lock);
 					}
 				}
 			}
 		}
-	up(&dlpar_mem_lock);
-	ehea_info("re-initializing driver complete");
+	mutex_unlock(&dlpar_mem_lock);
+	ehea_info("re-initializing driver complete");
 out:
 	return;
 }
@@ -2757,8 +2974,9 @@ static void ehea_tx_watchdog(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
 
-	if (netif_carrier_ok(dev) && !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-		queue_work(port->adapter->ehea_wq, &port->reset_task);
+	if (netif_carrier_ok(dev) &&
+	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
+		ehea_schedule_port_reset(port);
 }
 
 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
@@ -2944,7 +3162,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 
 	port = netdev_priv(dev);
 
-	sema_init(&port->port_lock, 1);
+	mutex_init(&port->port_lock);
 	port->state = EHEA_PORT_DOWN;
 	port->sig_comp_iv = sq_entries / 10;
 
@@ -2978,11 +3196,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
 
 	dev->open = ehea_open;
-	dev->poll = ehea_poll_firstqueue;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = ehea_netpoll;
 #endif
-	dev->weight = 64;
+	dev->poll = ehea_poll_firstqueue;
 	dev->stop = ehea_stop;
 	dev->hard_start_xmit = ehea_start_xmit;
 	dev->get_stats = ehea_get_stats;
@@ -3000,19 +3217,12 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
 	INIT_WORK(&port->reset_task, ehea_reset_port, dev);
-
-	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
-	if (ret) {
-		ret = -EIO;
-		goto out_unreg_port;
-	}
-
 	ehea_set_ethtool_ops(dev);
 
 	ret = register_netdev(dev);
 	if (ret) {
 		ehea_error("register_netdev failed. ret=%d", ret);
-		goto out_dereg_bc;
+		goto out_unreg_port;
 	}
 
 	port->lro_max_aggr = lro_max_aggr;
@@ -3029,9 +3239,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 
 	return port;
 
-out_dereg_bc:
-	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-
 out_unreg_port:
 	ehea_unregister_port(port);
 
@@ -3049,12 +3256,12 @@ out_err:
 
 static void ehea_shutdown_single_port(struct ehea_port *port)
 {
+	struct ehea_adapter *adapter = port->adapter;
 	unregister_netdev(port->netdev);
 	ehea_unregister_port(port);
-	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 	kfree(port->mc_list);
 	free_netdev(port->netdev);
-	port->adapter->active_ports--;
+	adapter->active_ports--;
 }
 
 static int ehea_setup_ports(struct ehea_adapter *adapter)
@@ -3094,7 +3301,6 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
 
 		i++;
 	};
-
 	return 0;
 }
 
@@ -3157,7 +3363,7 @@ static ssize_t ehea_probe_port(struct device *dev,
 	of_node_put(eth_dn);
 
 	if (port) {
-		for (i=0; i < EHEA_MAX_PORTS; i++)
+		for (i = 0; i < EHEA_MAX_PORTS; i++)
 			if (!adapter->port[i]) {
 				adapter->port[i] = port;
 				break;
@@ -3192,7 +3398,7 @@ static ssize_t ehea_remove_port(struct device *dev,
 
 		ehea_shutdown_single_port(port);
 
-		for (i=0; i < EHEA_MAX_PORTS; i++)
+		for (i = 0; i < EHEA_MAX_PORTS; i++)
 			if (adapter->port[i] == port) {
 				adapter->port[i] = NULL;
 				break;
@@ -3239,6 +3445,7 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
 		ehea_error("Invalid ibmebus device probed");
 		return -EINVAL;
 	}
+	mutex_lock(&ehea_fw_handles.lock);
 
 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
 	if (!adapter) {
@@ -3257,7 +3464,7 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
 		adapter->handle = *adapter_handle;
 
 	if (!adapter->handle) {
-		dev_err(&dev->ofdev.dev, "failed getting handle for adapter"
+		dev_err(&dev->ofdev.dev, "failed getting handle for adapter\n"
 			" '%s'\n", dev->ofdev.node->full_name);
 		ret = -ENODEV;
 		goto out_free_ad;
@@ -3272,7 +3479,7 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
 	/* get adapter properties */
 	ret = ehea_sense_adapter_attr(adapter);
 	if (ret) {
-		dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret);
+		dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d\n", ret);
 		goto out_free_ad;
 	}
 
@@ -3295,19 +3502,13 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
 		goto out_kill_eq;
 	}
 
-	adapter->ehea_wq = create_workqueue("ehea_wq");
-	if (!adapter->ehea_wq) {
-		ret = -EIO;
-		goto out_free_irq;
-	}
-
 	ret = ehea_create_device_sysfs(dev);
 	if (ret)
-		goto out_kill_wq;
+		goto out_free_irq;
 
 	ret = ehea_setup_ports(adapter);
 	if (ret) {
-		dev_err(&dev->ofdev.dev, "setup_ports failed");
+		dev_err(&dev->ofdev.dev, "setup_ports failed\n");
 		goto out_rem_dev_sysfs;
 	}
 
@@ -3317,9 +3518,6 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
 out_rem_dev_sysfs:
 	ehea_remove_device_sysfs(dev);
 
-out_kill_wq:
-	destroy_workqueue(adapter->ehea_wq);
-
 out_free_irq:
 	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
 
@@ -3328,7 +3526,10 @@ out_kill_eq:
 
 out_free_ad:
 	kfree(adapter);
+
 out:
+	ehea_update_firmware_handles();
+	mutex_unlock(&ehea_fw_handles.lock);
 	return ret;
 }
 
@@ -3345,7 +3546,9 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev)
 
 	ehea_remove_device_sysfs(dev);
 
-	destroy_workqueue(adapter->ehea_wq);
+	flush_scheduled_work();
+
+	mutex_lock(&ehea_fw_handles.lock);
 
 	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
 	tasklet_kill(&adapter->neq_tasklet);
@@ -3353,12 +3556,47 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev)
 	ehea_destroy_eq(adapter->neq);
 	ehea_remove_adapter_mr(adapter);
 	list_del(&adapter->list);
-
 	kfree(adapter);
 
+	ehea_update_firmware_handles();
+	mutex_unlock(&ehea_fw_handles.lock);
+
 	return 0;
 }
 
+void ehea_crash_handler(void)
+{
+	int i;
+
+	if (ehea_fw_handles.arr)
+		for (i = 0; i < ehea_fw_handles.num_entries; i++)
+			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+					     ehea_fw_handles.arr[i].fwh,
+					     FORCE_FREE);
+
+	if (ehea_bcmc_regs.arr)
+		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+					      ehea_bcmc_regs.arr[i].port_id,
+					      ehea_bcmc_regs.arr[i].reg_type,
+					      ehea_bcmc_regs.arr[i].macaddr,
+					      0, H_DEREG_BCMC);
+}
+
+static int ehea_reboot_notifier(struct notifier_block *nb,
+				unsigned long action, void *unused)
+{
+	if (action == SYS_RESTART) {
+		ehea_info("Reboot: freeing all eHEA resources");
+		ibmebus_unregister_driver(&ehea_driver);
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+	.notifier_call = ehea_reboot_notifier,
+};
+
 static int check_module_parm(void)
 {
 	int ret = 0;
@@ -3403,10 +3641,13 @@ int __init ehea_module_init(void)
 	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
 	       DRV_VERSION);
 
-	ehea_driver_wq = create_workqueue("ehea_driver_wq");
 
 	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs, NULL);
-	sema_init(&dlpar_mem_lock, 1);
+	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
+	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
+
+	mutex_init(&ehea_fw_handles.lock);
+	spin_lock_init(&ehea_bcmc_regs.lock);
 
 	ret = check_module_parm();
 	if (ret)
@@ -3416,10 +3657,18 @@ int __init ehea_module_init(void)
 	if (ret)
 		goto out;
 
+	ret = register_reboot_notifier(&ehea_reboot_nb);
+	if (ret)
+		ehea_info("failed registering reboot notifier");
+
+	ret = crash_shutdown_register(&ehea_crash_handler);
+	if (ret)
+		ehea_info("failed registering crash handler");
+
 	ret = ibmebus_register_driver(&ehea_driver);
 	if (ret) {
 		ehea_error("failed registering eHEA device driver on ebus");
-		goto out;
+		goto out2;
 	}
 
 	ret = driver_create_file(&ehea_driver.driver,
@@ -3427,19 +3676,35 @@ int __init ehea_module_init(void)
 	if (ret) {
 		ehea_error("failed to register capabilities attribute, ret=%d",
 			   ret);
-		ibmebus_unregister_driver(&ehea_driver);
-		goto out;
+		goto out3;
 	}
 
+	return ret;
+
+out3:
+	ibmebus_unregister_driver(&ehea_driver);
+out2:
+	unregister_reboot_notifier(&ehea_reboot_nb);
+	crash_shutdown_unregister(&ehea_crash_handler);
 out:
 	return ret;
 }
 
 static void __exit ehea_module_exit(void)
 {
-	destroy_workqueue(ehea_driver_wq);
+	int ret;
+
+	flush_scheduled_work();
 	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
 	ibmebus_unregister_driver(&ehea_driver);
+	unregister_reboot_notifier(&ehea_reboot_nb);
+
+	ret = crash_shutdown_unregister(&ehea_crash_handler);
+	if (ret)
+		ehea_info("failed unregistering crash handler");
+
+	kfree(ehea_fw_handles.arr);
+	kfree(ehea_bcmc_regs.arr);
 	ehea_destroy_busmap();
 }
 
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 95c4a7f..156eb63 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -6,9 +6,9 @@
  *  (C) Copyright IBM Corp. 2006
  *
  *  Authors:
- *       Christoph Raisch <raisch@de.ibm.com>
- *       Jan-Bernd Themann <themann@de.ibm.com>
- *       Thomas Klein <tklein@de.ibm.com>
+ *	 Christoph Raisch <raisch@de.ibm.com>
+ *	 Jan-Bernd Themann <themann@de.ibm.com>
+ *	 Thomas Klein <tklein@de.ibm.com>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -38,11 +38,11 @@ static inline u16 get_order_of_qentries(u16 queue_entries)
 }
 
 /* Defines for H_CALL H_ALLOC_RESOURCE */
-#define H_ALL_RES_TYPE_QP        1
-#define H_ALL_RES_TYPE_CQ        2
-#define H_ALL_RES_TYPE_EQ        3
-#define H_ALL_RES_TYPE_MR        5
-#define H_ALL_RES_TYPE_MW        6
+#define H_ALL_RES_TYPE_QP	 1
+#define H_ALL_RES_TYPE_CQ	 2
+#define H_ALL_RES_TYPE_EQ	 3
+#define H_ALL_RES_TYPE_MR	 5
+#define H_ALL_RES_TYPE_MW	 6
 
 static long ehea_plpar_hcall_norets(unsigned long opcode,
 				    unsigned long arg1,
@@ -137,77 +137,77 @@ u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
 			 const u64 qp_handle, const u64 sel_mask, void *cb_addr)
 {
 	return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
-				       adapter_handle,	        /* R4 */
-				       qp_category,	        /* R5 */
-				       qp_handle,               /* R6 */
-				       sel_mask,                /* R7 */
+				       adapter_handle,		/* R4 */
+				       qp_category,		/* R5 */
+				       qp_handle,		/* R6 */
+				       sel_mask,		/* R7 */
 				       virt_to_abs(cb_addr),	/* R8 */
 				       0, 0);
 }
 
 /* input param R5 */
-#define H_ALL_RES_QP_EQPO         EHEA_BMASK_IBM(9, 11)
-#define H_ALL_RES_QP_QPP          EHEA_BMASK_IBM(12, 12)
-#define H_ALL_RES_QP_RQR          EHEA_BMASK_IBM(13, 15)
-#define H_ALL_RES_QP_EQEG         EHEA_BMASK_IBM(16, 16)
-#define H_ALL_RES_QP_LL_QP        EHEA_BMASK_IBM(17, 17)
-#define H_ALL_RES_QP_DMA128       EHEA_BMASK_IBM(19, 19)
-#define H_ALL_RES_QP_HSM          EHEA_BMASK_IBM(20, 21)
-#define H_ALL_RES_QP_SIGT         EHEA_BMASK_IBM(22, 23)
-#define H_ALL_RES_QP_TENURE       EHEA_BMASK_IBM(48, 55)
-#define H_ALL_RES_QP_RES_TYP      EHEA_BMASK_IBM(56, 63)
+#define H_ALL_RES_QP_EQPO	  EHEA_BMASK_IBM(9, 11)
+#define H_ALL_RES_QP_QPP	  EHEA_BMASK_IBM(12, 12)
+#define H_ALL_RES_QP_RQR	  EHEA_BMASK_IBM(13, 15)
+#define H_ALL_RES_QP_EQEG	  EHEA_BMASK_IBM(16, 16)
+#define H_ALL_RES_QP_LL_QP	  EHEA_BMASK_IBM(17, 17)
+#define H_ALL_RES_QP_DMA128	  EHEA_BMASK_IBM(19, 19)
+#define H_ALL_RES_QP_HSM	  EHEA_BMASK_IBM(20, 21)
+#define H_ALL_RES_QP_SIGT	  EHEA_BMASK_IBM(22, 23)
+#define H_ALL_RES_QP_TENURE	  EHEA_BMASK_IBM(48, 55)
+#define H_ALL_RES_QP_RES_TYP	  EHEA_BMASK_IBM(56, 63)
 
 /* input param R9  */
-#define H_ALL_RES_QP_TOKEN        EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_PD           EHEA_BMASK_IBM(32,63)
+#define H_ALL_RES_QP_TOKEN	  EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_PD		  EHEA_BMASK_IBM(32, 63)
 
 /* input param R10 */
-#define H_ALL_RES_QP_MAX_SWQE     EHEA_BMASK_IBM(4, 7)
-#define H_ALL_RES_QP_MAX_R1WQE    EHEA_BMASK_IBM(12, 15)
-#define H_ALL_RES_QP_MAX_R2WQE    EHEA_BMASK_IBM(20, 23)
-#define H_ALL_RES_QP_MAX_R3WQE    EHEA_BMASK_IBM(28, 31)
+#define H_ALL_RES_QP_MAX_SWQE	  EHEA_BMASK_IBM(4, 7)
+#define H_ALL_RES_QP_MAX_R1WQE	  EHEA_BMASK_IBM(12, 15)
+#define H_ALL_RES_QP_MAX_R2WQE	  EHEA_BMASK_IBM(20, 23)
+#define H_ALL_RES_QP_MAX_R3WQE	  EHEA_BMASK_IBM(28, 31)
 /* Max Send Scatter Gather Elements */
-#define H_ALL_RES_QP_MAX_SSGE     EHEA_BMASK_IBM(37, 39)
-#define H_ALL_RES_QP_MAX_R1SGE    EHEA_BMASK_IBM(45, 47)
+#define H_ALL_RES_QP_MAX_SSGE	  EHEA_BMASK_IBM(37, 39)
+#define H_ALL_RES_QP_MAX_R1SGE	  EHEA_BMASK_IBM(45, 47)
 /* Max Receive SG Elements RQ1 */
-#define H_ALL_RES_QP_MAX_R2SGE    EHEA_BMASK_IBM(53, 55)
-#define H_ALL_RES_QP_MAX_R3SGE    EHEA_BMASK_IBM(61, 63)
+#define H_ALL_RES_QP_MAX_R2SGE	  EHEA_BMASK_IBM(53, 55)
+#define H_ALL_RES_QP_MAX_R3SGE	  EHEA_BMASK_IBM(61, 63)
 
 /* input param R11 */
-#define H_ALL_RES_QP_SWQE_IDL     EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_SWQE_IDL	  EHEA_BMASK_IBM(0, 7)
 /* max swqe immediate data length */
-#define H_ALL_RES_QP_PORT_NUM     EHEA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_PORT_NUM	  EHEA_BMASK_IBM(48, 63)
 
 /* input param R12 */
-#define H_ALL_RES_QP_TH_RQ2       EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_TH_RQ2	  EHEA_BMASK_IBM(0, 15)
 /* Threshold RQ2 */
-#define H_ALL_RES_QP_TH_RQ3       EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_TH_RQ3	  EHEA_BMASK_IBM(16, 31)
 /* Threshold RQ3 */
 
 /* output param R6 */
-#define H_ALL_RES_QP_ACT_SWQE     EHEA_BMASK_IBM(0, 15)
-#define H_ALL_RES_QP_ACT_R1WQE    EHEA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_ACT_R2WQE    EHEA_BMASK_IBM(32, 47)
-#define H_ALL_RES_QP_ACT_R3WQE    EHEA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_ACT_SWQE	  EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_ACT_R1WQE	  EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_ACT_R2WQE	  EHEA_BMASK_IBM(32, 47)
+#define H_ALL_RES_QP_ACT_R3WQE	  EHEA_BMASK_IBM(48, 63)
 
 /* output param, R7 */
-#define H_ALL_RES_QP_ACT_SSGE     EHEA_BMASK_IBM(0, 7)
-#define H_ALL_RES_QP_ACT_R1SGE    EHEA_BMASK_IBM(8, 15)
-#define H_ALL_RES_QP_ACT_R2SGE    EHEA_BMASK_IBM(16, 23)
-#define H_ALL_RES_QP_ACT_R3SGE    EHEA_BMASK_IBM(24, 31)
+#define H_ALL_RES_QP_ACT_SSGE	  EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_ACT_R1SGE	  EHEA_BMASK_IBM(8, 15)
+#define H_ALL_RES_QP_ACT_R2SGE	  EHEA_BMASK_IBM(16, 23)
+#define H_ALL_RES_QP_ACT_R3SGE	  EHEA_BMASK_IBM(24, 31)
 #define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
 
 /* output param R8,R9 */
-#define H_ALL_RES_QP_SIZE_SQ      EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SIZE_RQ1     EHEA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_SIZE_RQ2     EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SIZE_RQ3     EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_SQ	  EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ1	  EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_RQ2	  EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ3	  EHEA_BMASK_IBM(32, 63)
 
 /* output param R11,R12 */
-#define H_ALL_RES_QP_LIOBN_SQ     EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_LIOBN_RQ1    EHEA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_LIOBN_RQ2    EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_LIOBN_RQ3    EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_SQ	  EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ1	  EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_RQ2	  EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ3	  EHEA_BMASK_IBM(32, 63)
 
 u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
 			     struct ehea_qp_init_attr *init_attr, const u32 pd,
@@ -334,28 +334,28 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
 }
 
 /* Defines for H_CALL H_ALLOC_RESOURCE */
-#define H_ALL_RES_TYPE_QP        1
-#define H_ALL_RES_TYPE_CQ        2
-#define H_ALL_RES_TYPE_EQ        3
-#define H_ALL_RES_TYPE_MR        5
-#define H_ALL_RES_TYPE_MW        6
+#define H_ALL_RES_TYPE_QP	 1
+#define H_ALL_RES_TYPE_CQ	 2
+#define H_ALL_RES_TYPE_EQ	 3
+#define H_ALL_RES_TYPE_MR	 5
+#define H_ALL_RES_TYPE_MW	 6
 
 /*  input param R5 */
-#define H_ALL_RES_EQ_NEQ             EHEA_BMASK_IBM(0, 0)
+#define H_ALL_RES_EQ_NEQ	     EHEA_BMASK_IBM(0, 0)
 #define H_ALL_RES_EQ_NON_NEQ_ISN     EHEA_BMASK_IBM(6, 7)
 #define H_ALL_RES_EQ_INH_EQE_GEN     EHEA_BMASK_IBM(16, 16)
-#define H_ALL_RES_EQ_RES_TYPE        EHEA_BMASK_IBM(56, 63)
+#define H_ALL_RES_EQ_RES_TYPE	     EHEA_BMASK_IBM(56, 63)
 /*  input param R6 */
-#define H_ALL_RES_EQ_MAX_EQE         EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_MAX_EQE	     EHEA_BMASK_IBM(32, 63)
 
 /*  output param R6 */
-#define H_ALL_RES_EQ_LIOBN           EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_LIOBN	     EHEA_BMASK_IBM(32, 63)
 
 /*  output param R7 */
-#define H_ALL_RES_EQ_ACT_EQE         EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_ACT_EQE	     EHEA_BMASK_IBM(32, 63)
 
 /*  output param R8 */
-#define H_ALL_RES_EQ_ACT_PS          EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_ACT_PS	     EHEA_BMASK_IBM(32, 63)
 
 /*  output param R9 */
 #define H_ALL_RES_EQ_ACT_EQ_IST_C    EHEA_BMASK_IBM(30, 31)
@@ -453,12 +453,12 @@ u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
 
 	hret = ehea_plpar_hcall9(H_REGISTER_SMR,
 				 outs,
-				 adapter_handle       ,        	 /* R4 */
-				 orig_mr_handle,                 /* R5 */
-				 vaddr_in,                       /* R6 */
-				 (((u64)access_ctrl) << 32ULL),  /* R7 */
-				 pd,                             /* R8 */
-				 0, 0, 0, 0);	   		 /* R9-R12 */
+				 adapter_handle,		 /* R4 */
+				 orig_mr_handle,		 /* R5 */
+				 vaddr_in,			 /* R6 */
+				 (((u64)access_ctrl) << 32ULL),	 /* R7 */
+				 pd,				 /* R8 */
+				 0, 0, 0, 0);			 /* R9-R12 */
 
 	mr->handle = outs[0];
 	mr->lkey = (u32)outs[2];
@@ -471,11 +471,11 @@ u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
 	u64 outs[PLPAR_HCALL9_BUFSIZE];
 
 	return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
-       				 outs,
+				 outs,
 				 adapter_handle,		/* R4 */
 				 H_DISABLE_GET_EHEA_WQE_P,	/* R5 */
 				 qp_handle,			/* R6 */
-				 0, 0, 0, 0, 0, 0);             /* R7-R12 */
+				 0, 0, 0, 0, 0, 0);		/* R7-R12 */
 }
 
 u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
@@ -483,9 +483,9 @@ u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
 {
 	return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
 				       adapter_handle,	   /* R4 */
-				       res_handle,         /* R5 */
+				       res_handle,	   /* R5 */
 				       force_bit,
-				       0, 0, 0, 0);        /* R7-R10 */
+				       0, 0, 0, 0);	   /* R7-R10 */
 }
 
 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
@@ -493,13 +493,13 @@ u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
 			     const u32 pd, u64 *mr_handle, u32 *lkey)
 {
 	u64 hret;
- 	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	u64 outs[PLPAR_HCALL9_BUFSIZE];
 
 	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
 				 outs,
 				 adapter_handle,		   /* R4 */
 				 5,				   /* R5 */
-				 vaddr,			           /* R6 */
+				 vaddr,				   /* R6 */
 				 length,			   /* R7 */
 				 (((u64) access_ctrl) << 32ULL),   /* R8 */
 				 pd,				   /* R9 */
@@ -619,8 +619,8 @@ u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
 		      void *rblock)
 {
 	return ehea_plpar_hcall_norets(H_ERROR_DATA,
-				       adapter_handle,          /* R4 */
-				       ressource_handle,        /* R5 */
-				       virt_to_abs(rblock),     /* R6 */
-				       0, 0, 0, 0);             /* R7-R12 */
+				       adapter_handle,		/* R4 */
+				       ressource_handle,	/* R5 */
+				       virt_to_abs(rblock),	/* R6 */
+				       0, 0, 0, 0);		/* R7-R12 */
 }
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index d5ae71c..11492ba 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -94,7 +94,7 @@ static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
 static inline void hcp_epas_dtor(struct h_epas *epas)
 {
 	if (epas->kernel.addr)
-		iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK));
+		iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));
 
 	epas->user.addr = 0;
 	epas->kernel.addr = 0;
@@ -389,23 +389,23 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle,
 			  const u64 qp_handle,
 			  const u64 sel_mask,
 			  void *cb_addr,
-			  u64 * inv_attr_id,
-			  u64 * proc_mask, u16 * out_swr, u16 * out_rwr);
+			  u64 *inv_attr_id,
+			  u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
 
 u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
-			     struct ehea_eq_attr *eq_attr, u64 * eq_handle);
+			     struct ehea_eq_attr *eq_attr, u64 *eq_handle);
 
 u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
 			     struct ehea_cq_attr *cq_attr,
-			     u64 * cq_handle, struct h_epas *epas);
+			     u64 *cq_handle, struct h_epas *epas);
 
 u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
 			     struct ehea_qp_init_attr *init_attr,
 			     const u32 pd,
-			     u64 * qp_handle, struct h_epas *h_epas);
+			     u64 *qp_handle, struct h_epas *h_epas);
 
-#define H_REG_RPAGE_PAGE_SIZE          EHEA_BMASK_IBM(48,55)
-#define H_REG_RPAGE_QT                 EHEA_BMASK_IBM(62,63)
+#define H_REG_RPAGE_PAGE_SIZE          EHEA_BMASK_IBM(48, 55)
+#define H_REG_RPAGE_QT                 EHEA_BMASK_IBM(62, 63)
 
 u64 ehea_h_register_rpage(const u64 adapter_handle,
 			  const u8 pagesize,
@@ -427,7 +427,7 @@ u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
 
 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
 			     const u64 length, const u32 access_ctrl,
-			     const u32 pd, u64 * mr_handle, u32 * lkey);
+			     const u32 pd, u64 *mr_handle, u32 *lkey);
 
 u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
 			     const u8 pagesize, const u8 queue_type,
@@ -440,8 +440,8 @@ u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
 u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
 
 /* output param R5 */
-#define H_MEHEAPORT_CAT		EHEA_BMASK_IBM(40,47)
-#define H_MEHEAPORT_PN		EHEA_BMASK_IBM(48,63)
+#define H_MEHEAPORT_CAT		EHEA_BMASK_IBM(40, 47)
+#define H_MEHEAPORT_PN		EHEA_BMASK_IBM(48, 63)
 
 u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
 			   const u8 cb_cat, const u64 select_mask,
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 329a252..d522e90 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -33,9 +33,6 @@
 
 
 struct ehea_busmap ehea_bmap = { 0, 0, NULL };
-extern u64 ehea_driver_flags;
-extern struct workqueue_struct *ehea_driver_wq;
-extern struct work_struct ehea_rereg_mr_task;
 
 
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
@@ -66,7 +63,7 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
 	}
 
 	queue->queue_length = nr_of_pages * pagesize;
-	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
 	if (!queue->queue_pages) {
 		ehea_error("no mem for queue_pages");
 		return -ENOMEM;
@@ -79,11 +76,11 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
 	 */
 	i = 0;
 	while (i < nr_of_pages) {
-		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
 		if (!kpage)
 			goto out_nomem;
 		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
-			(queue->queue_pages)[i] = (struct ehea_page*)kpage;
+			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
 			kpage += pagesize;
 			i++;
 		}
@@ -236,8 +233,8 @@ int ehea_destroy_cq(struct ehea_cq *cq)
 		return 0;
 
 	hcp_epas_dtor(&cq->epas);
-
-	if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
+	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
+	if (hret == H_R_STATE) {
 		ehea_error_data(cq->adapter, cq->fw_handle);
 		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
 	}
@@ -302,13 +299,13 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
 		if (i == (eq->attr.nr_pages - 1)) {
 			/* last page */
 			vpage = hw_qpageit_get_inc(&eq->hw_queue);
-			if ((hret != H_SUCCESS) || (vpage)) {
+			if ((hret != H_SUCCESS) || (vpage))
 				goto out_kill_hwq;
-			}
+
 		} else {
-			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage))
 				goto out_kill_hwq;
-			}
+
 		}
 	}
 
@@ -332,7 +329,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
 	unsigned long flags;
 
 	spin_lock_irqsave(&eq->spinlock, flags);
-	eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+	eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
 	spin_unlock_irqrestore(&eq->spinlock, flags);
 
 	return eqe;
@@ -365,7 +362,8 @@ int ehea_destroy_eq(struct ehea_eq *eq)
 
 	hcp_epas_dtor(&eq->epas);
 
-	if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
+	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
+	if (hret == H_R_STATE) {
 		ehea_error_data(eq->adapter, eq->fw_handle);
 		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
 	}
@@ -547,7 +545,8 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 
 	hcp_epas_dtor(&qp->epas);
 
-	if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
+	if (hret == H_R_STATE) {
 		ehea_error_data(qp->adapter, qp->fw_handle);
 		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
 	}
@@ -560,7 +559,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 	return 0;
 }
 
-int ehea_create_busmap( void )
+int ehea_create_busmap(void)
 {
 	u64 vaddr = EHEA_BUSMAP_START;
 	unsigned long high_section_index = 0;
@@ -596,7 +595,7 @@ int ehea_create_busmap( void )
 	return 0;
 }
 
-void ehea_destroy_busmap( void )
+void ehea_destroy_busmap(void)
 {
 	vfree(ehea_bmap.vaddr);
 }
@@ -618,7 +617,7 @@ u64 ehea_map_vaddr(void *caddr)
 
 	if (unlikely(mapped_addr == -1))
 		if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-			queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
+			schedule_work(&ehea_rereg_mr_task);
 
 	return mapped_addr;
 }
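
The ehea_map_vaddr() hunk above also drops the driver-private workqueue:
schedule_work() queues the rereg task on the kernel's shared events
workqueue, so the driver no longer has to create and destroy its own. A
minimal sketch of the pattern on a 2.6.18-era kernel (the work item and
handler names are ours, for illustration only):

#include <linux/workqueue.h>

static void demo_rereg_handler(void *data)
{
	/* re-register memory regions, then clear __EHEA_STOP_XFER */
}

/* 2.6.18 still uses the three-argument DECLARE_WORK() and the
 * void (*)(void *) handler signature. */
static DECLARE_WORK(demo_rereg_task, demo_rereg_handler, NULL);

static void demo_kick_rereg(void)
{
	/* before: queue_work(ehea_driver_wq, &demo_rereg_task); */
	schedule_work(&demo_rereg_task);
}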
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 562de0e..0bb6f92 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -41,8 +41,8 @@
 #define EHEA_SECTSIZE          (1UL << 24)
 #define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
 
-#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
-#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
+#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
 #endif
 
 /* Some abbreviations used here:
@@ -145,8 +145,8 @@ struct ehea_rwqe {
 #define EHEA_CQE_VLAN_TAG_XTRACT   0x0400
 
 #define EHEA_CQE_TYPE_RQ           0x60
-#define EHEA_CQE_STAT_ERR_MASK     0x720F
-#define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
+#define EHEA_CQE_STAT_ERR_MASK     0x700F
+#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
 #define EHEA_CQE_STAT_ERR_TCP      0x4000
 #define EHEA_CQE_STAT_ERR_IP       0x2000
 #define EHEA_CQE_STAT_ERR_CRC      0x1000
@@ -188,8 +188,8 @@ struct ehea_eqe {
 	u64 entry;
 };
 
-#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0,7)
+#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)
 
 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
 {
@@ -279,7 +279,7 @@ static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
 static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
 {
 	void *retvalue = hw_qeit_get(queue);
-	u32 qe = *(u8*)retvalue;
+	u32 qe = *(u8 *)retvalue;
 	if ((qe >> 7) == (queue->toggle_state & 1))
 		hw_qeit_eq_get_inc(queue);
 	else
@@ -364,7 +364,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
 
 int ehea_destroy_cq(struct ehea_cq *cq);
 
-struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
 			       struct ehea_qp_init_attr *init_attr);
 
 int ehea_destroy_qp(struct ehea_qp *qp);
@@ -378,8 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
 
 void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
 
-int ehea_create_busmap( void );
-void ehea_destroy_busmap( void );
+int ehea_create_busmap(void);
+void ehea_destroy_busmap(void);
 u64 ehea_map_vaddr(void *caddr);
 
 #endif	/* __EHEA_QMR_H__ */
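
On the numbers behind the compile-time check above: EHEA_SECTSIZE is
1UL << 24 = 16 MiB, so the #error fires whenever the kernel's sparsemem
section (1UL << SECTION_SIZE_BITS) is smaller than one eHEA section. A
standalone restatement of the arithmetic (the DEMO_ names are ours, and
the 4 KiB page size is an assumption about EHEA_PAGESHIFT):

#include <stdio.h>

#define DEMO_EHEA_SECTSIZE	(1UL << 24)	/* 16 MiB, as in ehea_qmr.h */
#define DEMO_EHEA_PAGESHIFT	12		/* assumed 4 KiB hw pages */

int main(void)
{
	unsigned long pages = DEMO_EHEA_SECTSIZE >> DEMO_EHEA_PAGESHIFT;

	/* 16 MiB / 4 KiB = 4096 pages per eHEA section */
	printf("pages per eHEA section: %lu\n", pages);
	return 0;
}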
diff --git a/drivers/net/ehea/inet_lro.c b/drivers/net/ehea/inet_lro.c
index d445972..e69de29 100644
--- a/drivers/net/ehea/inet_lro.c
+++ b/drivers/net/ehea/inet_lro.c
@@ -1,588 +0,0 @@
-/*
- *  Large Receive Offload (ipv4 / tcp)
- *
- *  (C) Copyright IBM Corp. 2007
- *
- *  Authors:
- *       Jan-Bernd Themann <themann@de.ibm.com>
- *       Christoph Raisch <raisch@de.ibm.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-#include <linux/if_vlan.h>
-#include "inet_lro.h"
-
-#define TCP_HDR_LEN(tcph) (tcph->doff << 2)
-#define IP_HDR_LEN(iph) (iph->ihl << 2)
-#define TCP_PAYLOAD_LENGTH(iph, tcph) \
-	(ntohs(iph->tot_len) - IP_HDR_LEN(iph) - TCP_HDR_LEN(tcph))
-
-#define IPH_LEN_WO_OPTIONS 5
-#define TCPH_LEN_WO_OPTIONS 5
-#define TCPH_LEN_W_TIMESTAMP 8
-
-#define LRO_MAX_PG_HLEN 64
-
-#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; }
-
-/*
- * Basic tcp checks whether packet is suitable for LRO
- */
-
-static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
-			    int len, struct net_lro_desc *lro_desc)
-{
-        /* check ip header: don't aggregate padded frames */
-	if (ntohs(iph->tot_len) != len)
-		return -1;
-
-	if (TCP_PAYLOAD_LENGTH(iph, tcph) == 0)
-		return -1;
-
-	if (iph->ihl != IPH_LEN_WO_OPTIONS)
-		return -1;
-
-	if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack
-	    || tcph->rst || tcph->syn || tcph->fin)
-		return -1;
-
-	if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
-		return -1;
-
-	if (tcph->doff != TCPH_LEN_WO_OPTIONS
-	    && tcph->doff != TCPH_LEN_W_TIMESTAMP)
-		return -1;
-
-	/* check tcp options (only timestamp allowed) */
-	if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
-		u32 *topt = (u32 *)(tcph + 1);
-
-		if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
-				   | (TCPOPT_TIMESTAMP << 8)
-				   | TCPOLEN_TIMESTAMP))
-			return -1;
-
-		/* timestamp should be in right order */
-		topt++;
-		if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval),
-				      ntohl(*topt)))
-			return -1;
-
-		/* timestamp reply should not be zero */
-		topt++;
-		if (*topt == 0)
-			return -1;
-	}
-
-	return 0;
-}
-
-static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
-{
-	struct iphdr *iph = lro_desc->iph;
-	struct tcphdr *tcph = lro_desc->tcph;
-	u32 *p;
-	__wsum tcp_hdr_csum;
-
-	tcph->ack_seq = lro_desc->tcp_ack;
-	tcph->window = lro_desc->tcp_window;
-
-	if (lro_desc->tcp_saw_tstamp) {
-		p = (u32 *)(tcph + 1);
-		*(p+2) = lro_desc->tcp_rcv_tsecr;
-	}
-
-	iph->tot_len = htons(lro_desc->ip_tot_len);
-
-	iph->check = 0;
-	iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);
-
-	tcph->check = 0;
-	tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0);
-	lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
-	tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-					lro_desc->ip_tot_len -
-					IP_HDR_LEN(iph), IPPROTO_TCP,
-					lro_desc->data_csum);
-}
-
-static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
-{
-	__wsum tcp_csum;
-	__wsum tcp_hdr_csum;
-	__wsum tcp_ps_hdr_csum;
-
-	tcp_csum = ~csum_unfold(tcph->check);
-	tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum);
-
-	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-					     len + TCP_HDR_LEN(tcph),
-					     IPPROTO_TCP, 0);
-
-	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-			tcp_ps_hdr_csum);
-}
-
-static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
-			  struct iphdr *iph, struct tcphdr *tcph,
-			  u16 vlan_tag, struct vlan_group *vgrp)
-{
-	int nr_frags;
-	u32 *ptr;
-	u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
-
-	nr_frags = skb_shinfo(skb)->nr_frags;
-	lro_desc->parent = skb;
-	lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
-	lro_desc->iph = iph;
-	lro_desc->tcph = tcph;
-	lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
-	lro_desc->tcp_ack = ntohl(tcph->ack_seq);
-	lro_desc->tcp_window = tcph->window;
-
-	lro_desc->pkt_aggr_cnt = 1;
-	lro_desc->ip_tot_len = ntohs(iph->tot_len);
-
-	if (tcph->doff == 8) {
-		ptr = (u32 *)(tcph+1);
-		lro_desc->tcp_saw_tstamp = 1;
-		lro_desc->tcp_rcv_tsval = *(ptr+1);
-		lro_desc->tcp_rcv_tsecr = *(ptr+2);
-	}
-
-	lro_desc->mss = tcp_data_len;
-	lro_desc->vgrp = vgrp;
-	lro_desc->vlan_tag = vlan_tag;
-	lro_desc->active = 1;
-
-	lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
-						tcp_data_len);
-}
-
-static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
-{
-	memset(lro_desc, 0, sizeof(struct net_lro_desc));
-}
-
-static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
-			   struct tcphdr *tcph, int tcp_data_len)
-{
-	struct sk_buff *parent = lro_desc->parent;
-	u32 *topt;
-
-	lro_desc->pkt_aggr_cnt++;
-	lro_desc->ip_tot_len += tcp_data_len;
-	lro_desc->tcp_next_seq += tcp_data_len;
-	lro_desc->tcp_window = tcph->window;
-	lro_desc->tcp_ack = tcph->ack_seq;
-
-	/* don't update tcp_rcv_tsval, would not work with PAWS */
-	if (lro_desc->tcp_saw_tstamp) {
-		topt = (u32 *) (tcph + 1);
-		lro_desc->tcp_rcv_tsecr = *(topt + 2);
-	}
-
-	lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
-					     lro_tcp_data_csum(iph, tcph,
-							       tcp_data_len),
-					     parent->len);
-
-	parent->len += tcp_data_len;
-	parent->data_len += tcp_data_len;
-	if (tcp_data_len > lro_desc->mss)
-		lro_desc->mss = tcp_data_len;
-}
-
-static void lro_add_packet(struct net_lro_desc *lro_desc, struct sk_buff *skb,
-			   struct iphdr *iph, struct tcphdr *tcph)
-{
-	struct sk_buff *parent = lro_desc->parent;
-	int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
-
-	lro_add_common(lro_desc, iph, tcph, tcp_data_len);
-
-	skb_pull(skb, (skb->len - tcp_data_len));
-	parent->truesize += skb->truesize;
-
-	if (lro_desc->last_skb)
-		lro_desc->last_skb->next = skb;
-	else
-		skb_shinfo(parent)->frag_list = skb;
-
-	lro_desc->last_skb = skb;
-}
-
-static void lro_add_frags(struct net_lro_desc *lro_desc,
-			  int len, int hlen, int truesize,
-			  struct skb_frag_struct *skb_frags,
-			  struct iphdr *iph, struct tcphdr *tcph)
-{
-	struct sk_buff *skb = lro_desc->parent;
-	int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
-
-	lro_add_common(lro_desc, iph, tcph, tcp_data_len);
-
-	skb->truesize += truesize;
-
-	skb_frags[0].page_offset += hlen;
-	skb_frags[0].size -= hlen;
-
-	while (tcp_data_len > 0) {
-		*(lro_desc->next_frag) = *skb_frags;
-		tcp_data_len -= skb_frags->size;
-		lro_desc->next_frag++;
-		skb_frags++;
-		skb_shinfo(skb)->nr_frags++;
-	}
-}
-
-static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
-			      struct iphdr *iph,
-			      struct tcphdr *tcph)
-{
-	if ((lro_desc->iph->saddr != iph->saddr)
-	    || (lro_desc->iph->daddr != iph->daddr)
-	    || (lro_desc->tcph->source != tcph->source)
-	    || (lro_desc->tcph->dest != tcph->dest))
-		return -1;
-	return 0;
-}
-
-static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr,
-					 struct net_lro_desc *lro_arr,
-					 struct iphdr *iph,
-					 struct tcphdr *tcph)
-{
-	struct net_lro_desc *lro_desc = NULL;
-	struct net_lro_desc *tmp;
-	int max_desc = lro_mgr->max_desc;
-	int i;
-
-	for (i = 0; i < max_desc; i++) {
-		tmp = &lro_arr[i];
-		if (tmp->active)
-			if (!lro_check_tcp_conn(tmp, iph, tcph)) {
-				lro_desc = tmp;
-				goto out;
-			}
-	}
-
-	for (i = 0; i < max_desc; i++) {
-		if (!lro_arr[i].active) {
-			lro_desc = &lro_arr[i];
-			goto out;
-		}
-	}
-
-	LRO_INC_STATS(lro_mgr, no_desc);
-out:
-	return lro_desc;
-}
-
-static void lro_flush(struct net_lro_mgr *lro_mgr,
-		      struct net_lro_desc *lro_desc)
-{
-	if (lro_desc->pkt_aggr_cnt > 1)
-		lro_update_tcp_ip_header(lro_desc);
-
-	skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;
-
-	if (lro_desc->vgrp) {
-		if (test_bit(LRO_F_NAPI, &lro_mgr->features))
- 			vlan_hwaccel_receive_skb(lro_desc->parent,
-						 lro_desc->vgrp,
-						 lro_desc->vlan_tag);
-		else
- 			vlan_hwaccel_rx(lro_desc->parent,
-					lro_desc->vgrp,
-					lro_desc->vlan_tag);
-
-	} else {
-		if (test_bit(LRO_F_NAPI, &lro_mgr->features))
-			netif_receive_skb(lro_desc->parent);
-		else
-			netif_rx(lro_desc->parent);
-	}
-
-	LRO_INC_STATS(lro_mgr, flushed);
-	lro_clear_desc(lro_desc);
-}
-
-static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
-			  struct vlan_group *vgrp, u16 vlan_tag, void *priv)
-{
-	struct net_lro_desc *lro_desc;
-	struct iphdr *iph;
-	struct tcphdr *tcph;
-	u64 flags;
-	int vlan_hdr_len = 0;
-
-	if (!lro_mgr->get_skb_header
-	    || lro_mgr->get_skb_header(skb, (void *)&iph, (void *)&tcph,
-				       &flags, priv))
-		goto out;
-
-	if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
-		goto out;
-
-	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
-	if (!lro_desc)
-		goto out;
-
-	if ((skb->protocol == htons(ETH_P_8021Q))
-	    && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features))
-		vlan_hdr_len = VLAN_HLEN;
-
-	if (!lro_desc->active) { /* start new lro session */
-		if (lro_tcp_ip_check(iph, tcph, skb->len - vlan_hdr_len, NULL))
-			goto out;
-
-		skb->ip_summed = lro_mgr->ip_summed_aggr;
-		lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp);
-		LRO_INC_STATS(lro_mgr, aggregated);
-		return 0;
-	}
-
-	if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
-		goto out2;
-
-	if (lro_tcp_ip_check(iph, tcph, skb->len, lro_desc))
-		goto out2;
-
-	lro_add_packet(lro_desc, skb, iph, tcph);
-	LRO_INC_STATS(lro_mgr, aggregated);
-
-	if ((lro_desc->pkt_aggr_cnt >= lro_mgr->max_aggr) ||
-	    lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
-		lro_flush(lro_mgr, lro_desc);
-
-	return 0;
-
-out2: /* send aggregated SKBs to stack */
-	lro_flush(lro_mgr, lro_desc);
-
-out:  /* Original SKB has to be posted to stack */
-	skb->ip_summed = lro_mgr->ip_summed;
-	return 1;
-}
-
-
-static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
-				   struct skb_frag_struct *frags,
-				   int len, int true_size,
-				   void *mac_hdr,
-				   int hlen, __wsum sum,
-				   u32 ip_summed)
-{
-	struct sk_buff *skb;
-	struct skb_frag_struct *skb_frags;
-	int data_len = len;
-	int hdr_len = min(len, hlen);
-
-	skb = netdev_alloc_skb(lro_mgr->dev, hlen);
-	if (!skb)
-		return NULL;
-
-	skb->len = len;
-	skb->data_len = len - hdr_len;
-	skb->truesize += true_size;
-	skb->tail += hdr_len;
-
-	memcpy(skb->data, mac_hdr, hdr_len);
-
-	skb_frags = skb_shinfo(skb)->frags;
-	while (data_len > 0) {
-		*skb_frags = *frags;
-		data_len -= frags->size;
-		skb_frags++;
-		frags++;
-		skb_shinfo(skb)->nr_frags++;
-	}
-
-	skb_shinfo(skb)->frags[0].page_offset += hdr_len;
-	skb_shinfo(skb)->frags[0].size -= hdr_len;
-
-	skb->ip_summed = ip_summed;
-	skb->csum = sum;
-	skb->protocol = eth_type_trans(skb, lro_mgr->dev);
-	return skb;
-}
-
-static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
-					  struct skb_frag_struct *frags,
-					  int len, int true_size,
-					  struct vlan_group *vgrp,
-					  u16 vlan_tag, void *priv, __wsum sum)
-{
-	struct net_lro_desc *lro_desc;
-	struct iphdr *iph;
-	struct tcphdr *tcph;
-	struct sk_buff *skb;
-	u64 flags;
-	void *mac_hdr;
-	int mac_hdr_len;
-	int hdr_len = LRO_MAX_PG_HLEN;
-	int vlan_hdr_len = 0;
-
-	if (!lro_mgr->get_frag_header
-	    || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
-					(void *)&tcph, &flags, priv)) {
-		mac_hdr = page_address(frags->page) + frags->page_offset;
-		goto out1;
-	}
-
-	if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
-		goto out1;
-
-	hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
-	mac_hdr_len = (int)((void *)(iph) - mac_hdr);
-
-	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
-	if (!lro_desc)
-		goto out1;
-
-	if (!lro_desc->active) { /* start new lro session */
-		if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
-			goto out1;
-
-		skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
-				  hdr_len, 0, lro_mgr->ip_summed_aggr);
-		if (!skb)
-			goto out;
-
-		if ((skb->protocol == htons(ETH_P_8021Q))
-		    && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features))
-			vlan_hdr_len = VLAN_HLEN;
-
-		iph = (void *)(skb->data + vlan_hdr_len);
-		tcph = (void *)((u8 *)skb->data + vlan_hdr_len
-				+ IP_HDR_LEN(iph));
-
-		lro_init_desc(lro_desc, skb, iph, tcph, 0, NULL);
-		LRO_INC_STATS(lro_mgr, aggregated);
-		return 0;
-	}
-
-	if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
-		goto out2;
-
-	if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
-		goto out2;
-
-	lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
-	LRO_INC_STATS(lro_mgr, aggregated);
-
-	if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
-	    lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
-		lro_flush(lro_mgr, lro_desc);
-
-	return NULL;
-
-out2: /* send aggregated packets to the stack */
-	lro_flush(lro_mgr, lro_desc);
-
-out1:  /* Original packet has to be posted to the stack */
-	skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
-			  hdr_len, sum, lro_mgr->ip_summed);
-out:
-	return skb;
-}
-
-void lro_receive_skb(struct net_lro_mgr *lro_mgr,
-		     struct sk_buff *skb,
-		     void *priv)
-{
-	if (__lro_proc_skb(lro_mgr, skb, NULL, 0, priv)) {
-		if (test_bit(LRO_F_NAPI, &lro_mgr->features))
-			netif_receive_skb(skb);
-		else
-			netif_rx(skb);
-	}
-}
-
-void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
-				  struct sk_buff *skb,
-				  struct vlan_group *vgrp,
-				  u16 vlan_tag,
-				  void *priv)
-{
-	if (__lro_proc_skb(lro_mgr, skb, vgrp, vlan_tag, priv)) {
-		if (test_bit(LRO_F_NAPI, &lro_mgr->features))
-			vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
-		else
-			vlan_hwaccel_rx(skb, vgrp, vlan_tag);
-	}
-}
-
-void lro_receive_frags(struct net_lro_mgr *lro_mgr,
-		       struct skb_frag_struct *frags,
-		       int len, int true_size, void *priv, __wsum sum)
-{
-	struct sk_buff *skb;
-
-	skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0,
-				 priv, sum);
-	if (!skb)
-		return;
-
-	if (test_bit(LRO_F_NAPI, &lro_mgr->features))
-		netif_receive_skb(skb);
-	else
-		netif_rx(skb);
-}
-
-void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr,
-				    struct skb_frag_struct *frags,
-				    int len, int true_size,
-				    struct vlan_group *vgrp,
-				    u16 vlan_tag, void *priv, __wsum sum)
-{
-	struct sk_buff *skb;
-
-	skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
-				 vlan_tag, priv, sum);
-	if (!skb)
-		return;
-
-	if (test_bit(LRO_F_NAPI, &lro_mgr->features))
-		vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
-	else
-		vlan_hwaccel_rx(skb, vgrp, vlan_tag);
-}
-
-void lro_flush_all(struct net_lro_mgr *lro_mgr)
-{
-	int i;
-	struct net_lro_desc *lro_desc = lro_mgr->lro_arr;
-
-	for (i = 0; i < lro_mgr->max_desc; i++) {
-		if (lro_desc[i].active)
-			lro_flush(lro_mgr, &lro_desc[i]);
-	}
-}
-
-void lro_flush_pkt(struct net_lro_mgr *lro_mgr,
-		  struct iphdr *iph, struct tcphdr *tcph)
-{
-	struct net_lro_desc *lro_desc;
-
-	lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
-	if (lro_desc->active)
-		lro_flush(lro_mgr, lro_desc);
-}
-
diff --git a/drivers/net/ehea/inet_lro.h b/drivers/net/ehea/inet_lro.h
index b046b67..e69de29 100644
--- a/drivers/net/ehea/inet_lro.h
+++ b/drivers/net/ehea/inet_lro.h
@@ -1,176 +0,0 @@
-/*
- *  Large Receive Offload (ipv4 / tcp)
- *
- *  (C) Copyright IBM Corp. 2007
- *
- *  Authors:
- *       Jan-Bernd Themann <themann@de.ibm.com>
- *       Christoph Raisch <raisch@de.ibm.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __INET_LRO_H_
-#define __INET_LRO_H_
-
-#include <net/ip.h>
-#include <net/tcp.h>
-#include "backlevel_kernel.h"
-
-/*
- * LRO statistics
- */
-
-struct net_lro_stats {
-	unsigned long aggregated;
-	unsigned long flushed;
-	unsigned long no_desc;
-};
-
-/*
- * LRO descriptor for a tcp session
- */
-struct net_lro_desc {
-	struct sk_buff *parent;
-	struct sk_buff *last_skb;
-	struct skb_frag_struct *next_frag;
-	struct iphdr *iph;
-	struct tcphdr *tcph;
-	struct vlan_group *vgrp;
-	__wsum  data_csum;
-	u32 tcp_rcv_tsecr;
-	u32 tcp_rcv_tsval;
-	u32 tcp_ack;
-	u32 tcp_next_seq;
-	u32 skb_tot_frags_len;
-	u16 ip_tot_len;
-	u16 tcp_saw_tstamp; 		/* timestamps enabled */
-	u16 tcp_window;
-	u16 vlan_tag;
-	int pkt_aggr_cnt;		/* counts aggregated packets */
-	int vlan_packet;
-	int mss;
-	int active;
-};
-
-/*
- * Large Receive Offload (LRO) Manager
- *
- * Fields must be set by driver
- */
-
-struct net_lro_mgr {
-	struct net_device *dev;
-	struct net_lro_stats stats;
-
-	/* LRO features */
-	unsigned long features;
-#define LRO_F_NAPI            1  /* Pass packets to stack via NAPI */
-#define LRO_F_EXTRACT_VLAN_ID 2  /* Set flag if VLAN IDs are extracted
-				    from received packets and eth protocol
-				    is still ETH_P_8021Q */
-
-	u32 ip_summed;      /* Set in non generated SKBs in page mode */
-	u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
-			     * or CHECKSUM_NONE */
-
-	int max_desc; /* Max number of LRO descriptors  */
-	int max_aggr; /* Max number of LRO packets to be aggregated */
-
-	struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
-
-	/*
-	 * Optimized driver functions
-	 *
-	 * get_skb_header: returns tcp and ip header for packet in SKB
-	 */
-	int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
-			      void **tcpudp_hdr, u64 *hdr_flags, void *priv);
-
-	/* hdr_flags: */
-#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
-#define LRO_TCP  2 /* tcpudp_hdr is TCP header */
-
-	/*
-	 * get_frag_header: returns mac, tcp and ip header for packet in SKB
-	 *
-	 * @hdr_flags: Indicate what kind of LRO has to be done
-	 *             (IPv4/IPv6/TCP/UDP)
-	 */
-	int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
-			       void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
-			       void *priv);
-};
-
-/*
- * Processes a SKB
- *
- * @lro_mgr: LRO manager to use
- * @skb: SKB to aggregate
- * @priv: Private data that may be used by driver functions
- *        (for example get_tcp_ip_hdr)
- */
-
-void lro_receive_skb(struct net_lro_mgr *lro_mgr,
-		     struct sk_buff *skb,
-		     void *priv);
-
-/*
- * Processes a SKB with VLAN HW acceleration support
- */
-
-void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
-				  struct sk_buff *skb,
-				  struct vlan_group *vgrp,
-				  u16 vlan_tag,
-				  void *priv);
-
-/*
- * Processes a fragment list
- *
- * This functions aggregate fragments and generate SKBs do pass
- * the packets to the stack.
- *
- * @lro_mgr: LRO manager to use
- * @frags: Fragment to be processed. Must contain entire header in first
- *         element.
- * @len: Length of received data
- * @true_size: Actual size of memory the fragment is consuming
- * @priv: Private data that may be used by driver functions
- *        (for example get_tcp_ip_hdr)
- */
-
-void lro_receive_frags(struct net_lro_mgr *lro_mgr,
-		       struct skb_frag_struct *frags,
-		       int len, int true_size, void *priv, __wsum sum);
-
-void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr,
-				    struct skb_frag_struct *frags,
-				    int len, int true_size,
-				    struct vlan_group *vgrp,
-				    u16 vlan_tag,
-				    void *priv, __wsum sum);
-
-/*
- * Forward all aggregated SKBs held by lro_mgr to network stack
- */
-
-void lro_flush_all(struct net_lro_mgr *lro_mgr);
-
-void lro_flush_pkt(struct net_lro_mgr *lro_mgr,
-		   struct iphdr *iph, struct tcphdr *tcph);
-
-#endif
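
With the driver-private copy deleted, the same interface is consumed
from the core kernel's inet_lro (include/linux/inet_lro.h upstream). A
minimal sketch of how a NIC driver wires it up, assuming NAPI delivery;
the demo_ names, descriptor count, and aggregation limit below are
illustrative, not ehea code:

#include <linux/inet_lro.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#define DEMO_LRO_MAX_DESC 8	/* assumed per-port session slots */

struct demo_port {
	struct net_device *netdev;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[DEMO_LRO_MAX_DESC];
};

/* hypothetical fetch of the next received skb for this port */
struct sk_buff *demo_next_rx_skb(struct demo_port *port);

/* get_skb_header callback: hand back the IPv4/TCP headers for one skb.
 * Assumes skb->data already points past the ethernet header, as it
 * does after eth_type_trans() in a NAPI rx path. */
static int demo_get_skb_hdr(struct sk_buff *skb, void **iphdr,
			    void **tcphdr, u64 *hdr_flags, void *priv)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	if (iph->protocol != IPPROTO_TCP)
		return -1;	/* not eligible for aggregation */
	*iphdr = iph;
	*tcphdr = (u8 *)iph + (iph->ihl << 2);
	*hdr_flags = LRO_IPV4 | LRO_TCP;
	return 0;
}

static void demo_lro_setup(struct demo_port *port)
{
	struct net_lro_mgr *mgr = &port->lro_mgr;

	mgr->dev            = port->netdev;
	mgr->features       = LRO_F_NAPI;	/* netif_receive_skb() path */
	mgr->ip_summed      = CHECKSUM_UNNECESSARY;
	mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	mgr->max_desc       = DEMO_LRO_MAX_DESC;
	mgr->max_aggr       = 64;		/* assumed aggregation limit */
	mgr->lro_arr        = port->lro_desc;
	mgr->get_skb_header = demo_get_skb_hdr;
}

static int demo_rx_poll(struct demo_port *port, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = demo_next_rx_skb(port)) != NULL) {
		lro_receive_skb(&port->lro_mgr, skb, NULL);
		done++;
	}
	/* flush open sessions before re-enabling interrupts so no
	 * aggregated packet is held back indefinitely */
	lro_flush_all(&port->lro_mgr);
	return done;
}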