kernel-2.6.18-128.1.10.el5.src.rpm

Date: Mon, 23 Oct 2006 13:55:22 -0400
From: Doug Ledford <dledford@redhat.com>
Subject: [Patch RHEL5] Update OpenIB code to OFED 1.1 final release

We've been testing the OpenIB support in the RHEL5 kernel for some time
now.  A few issues were found regarding the overall reliability and
scalability of the stack.  Currently our RHEL5 kernel has a mixture of
primarily OFED 1.1 code and older OFED 1.0 code.  Basically, all the
kernel modules that have made it into the upstream 2.6.18.1 kernel are
used as the basis for the OFED 1.1 release code.  However, for those
things that haven't made it into upstream as of 2.6.18.1, OFED used the
code that was being prepared for upstream submission.  Right now, that
means the rdma_ucm.ko module, the ib_ehca.ko module, the ib_sdp.ko
module, and some of the changes to the ib_ipath.ko module are not in our
kernel via upstream and instead were integrated via the older OFED 1.0
release tarball.  So, this patch updates those specific modules to the
OFED 1.1 release code.  In addition, the OFED 1.1 release included these
targeted bug fix patches on top of the upstream kernel:

diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/core/cma.c linux-2.6.18.noarch/drivers/infiniband/core/cma.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/core/cma.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/core/cma.c	2006-10-19 12:41:11.000000000 -0400
@@ -48,8 +48,15 @@ MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("Generic RDMA CM Agent");
 MODULE_LICENSE("Dual BSD/GPL");
 
+static int tavor_quirk = 0;
+module_param_named(tavor_quirk, tavor_quirk, int, 0644);
+MODULE_PARM_DESC(tavor_quirk, "Tavor performance quirk: limit MTU to 1K if > 0");
+
 #define CMA_CM_RESPONSE_TIMEOUT 20
-#define CMA_MAX_CM_RETRIES 15
+
+static int CMA_MAX_CM_RETRIES = 15;
+module_param_named(max_cm_retries, CMA_MAX_CM_RETRIES, int, 0644);
+MODULE_PARM_DESC(max_cm_retries, "How many times to retry a connection attempt");
 
 static void cma_add_one(struct ib_device *device);
 static void cma_remove_one(struct ib_device *device);
@@ -126,6 +133,10 @@ struct rdma_id_private {
 		struct ib_cm_id	*ib;
 	} cm_id;
 
+	union {
+		struct ib_cm_req_opt *req;
+	} options;
+
 	u32			seq_num;
 	u32			qp_num;
 	enum ib_qp_type		qp_type;
@@ -265,9 +276,9 @@ static int cma_acquire_ib_dev(struct rdm
 	union ib_gid gid;
 	int ret = -ENODEV;
 
-	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid),
+	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+
 
-	mutex_lock(&lock);
 	list_for_each_entry(cma_dev, &dev_list, list) {
 		ret = ib_find_cached_gid(cma_dev->device, &gid,
 					 &id_priv->id.port_num, NULL);
@@ -276,7 +287,6 @@ static int cma_acquire_ib_dev(struct rdm
 			break;
 		}
 	}
-	mutex_unlock(&lock);
 	return ret;
 }
 
@@ -321,6 +331,7 @@ struct rdma_cm_id *rdma_create_id(rdma_c
 	init_waitqueue_head(&id_priv->wait_remove);
 	atomic_set(&id_priv->dev_remove, 0);
 	INIT_LIST_HEAD(&id_priv->listen_list);
+	INIT_LIST_HEAD(&id_priv->list);
 	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
 
 	return &id_priv->id;
@@ -689,7 +700,9 @@ void rdma_destroy_id(struct rdma_cm_id *
 	state = cma_exch(id_priv, CMA_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
+	mutex_lock(&lock);
 	if (id_priv->cma_dev) {
+		mutex_unlock(&lock);
 		switch (id->device->node_type) {
 		case IB_NODE_CA:
 	 		if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
@@ -700,14 +713,15 @@ void rdma_destroy_id(struct rdma_cm_id *
 		}
 	  	mutex_lock(&lock);
 		cma_detach_from_dev(id_priv);
-		mutex_unlock(&lock);
 	}
+	mutex_unlock(&lock);
 
 	cma_release_port(id_priv);
 	cma_deref_id(id_priv);
 	wait_for_completion(&id_priv->comp);
 
 	kfree(id_priv->id.route.path_rec);
+	kfree(id_priv->options.req);
 	kfree(id_priv);
 }
 EXPORT_SYMBOL(rdma_destroy_id);
@@ -746,22 +760,6 @@ static int cma_verify_rep(struct rdma_id
 	return 0;
 }
 
-static int cma_rtu_recv(struct rdma_id_private *id_priv)
-{
-	int ret;
-
-	ret = cma_modify_qp_rts(&id_priv->id);
-	if (ret)
-		goto reject;
-
-	return 0;
-reject:
-	cma_modify_qp_err(&id_priv->id);
-	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
-		       NULL, 0, NULL, 0);
-	return ret;
-}
-
 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *id_priv = cm_id->context;
@@ -792,9 +790,8 @@ static int cma_ib_handler(struct ib_cm_i
 		private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
 		break;
 	case IB_CM_RTU_RECEIVED:
-		status = cma_rtu_recv(id_priv);
-		event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
-				 RDMA_CM_EVENT_ESTABLISHED;
+	case IB_CM_USER_ESTABLISHED:
+		event = RDMA_CM_EVENT_ESTABLISHED;
 		break;
 	case IB_CM_DREQ_ERROR:
 		status = -ETIMEDOUT; /* fall through */
@@ -898,9 +895,12 @@ static int cma_req_handler(struct ib_cm_
 	}
 
 	atomic_inc(&conn_id->dev_remove);
+	mutex_lock(&lock);
 	ret = cma_acquire_ib_dev(conn_id);
+	mutex_unlock(&lock);
 	if (ret) {
 		ret = -ENODEV;
+		cma_exch(conn_id, CMA_DESTROYING);
 		cma_release_remove(conn_id);
 		rdma_destroy_id(&conn_id->id);
 		goto out;
@@ -1140,6 +1140,11 @@ static int cma_query_ib_route(struct rdm
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
 	path_rec.numb_path = 1;
 
+	if (tavor_quirk) {
+		path_rec.mtu_selector = IB_SA_LT;
+		path_rec.mtu = IB_MTU_2048;
+	}
+
 	id_priv->query_id = ib_sa_path_rec_get(id_priv->id.device,
 				id_priv->id.port_num, &path_rec,
 				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
@@ -1231,6 +1236,65 @@ err:
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
 
+static inline u8 cma_get_ib_remote_timeout(struct rdma_id_private *id_priv)
+{
+	return	id_priv->options.req ?
+		id_priv->options.req->remote_cm_response_timeout :
+		CMA_CM_RESPONSE_TIMEOUT;
+}
+
+static inline u8 cma_get_ib_local_timeout(struct rdma_id_private *id_priv)
+{
+	return	id_priv->options.req ?
+		id_priv->options.req->local_cm_response_timeout :
+		CMA_CM_RESPONSE_TIMEOUT;
+}
+
+static inline u8 cma_get_ib_cm_retries(struct rdma_id_private *id_priv)
+{
+	return	id_priv->options.req ?
+		id_priv->options.req->max_cm_retries : CMA_MAX_CM_RETRIES;
+}
+
+int rdma_get_ib_req_info(struct rdma_cm_id *id, struct ib_cm_req_opt *info)
+{
+	struct rdma_id_private *id_priv;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (!cma_comp(id_priv, CMA_ROUTE_RESOLVED))
+		return -EINVAL;
+
+	info->remote_cm_response_timeout = cma_get_ib_remote_timeout(id_priv);
+	info->local_cm_response_timeout = cma_get_ib_local_timeout(id_priv);
+	info->max_cm_retries = cma_get_ib_cm_retries(id_priv);
+	return 0;
+}
+EXPORT_SYMBOL(rdma_get_ib_req_info);
+
+int rdma_set_ib_req_info(struct rdma_cm_id *id, struct ib_cm_req_opt *info)
+{
+	struct rdma_id_private *id_priv;
+
+	if (info->remote_cm_response_timeout > 0x1F ||
+	    info->local_cm_response_timeout > 0x1F ||
+	    info->max_cm_retries > 0xF)
+		return -EINVAL;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (!cma_comp(id_priv, CMA_ROUTE_RESOLVED))
+		return -EINVAL;
+
+	if (!id_priv->options.req) {
+		id_priv->options.req = kmalloc(sizeof *info, GFP_KERNEL);
+		if (!id_priv->options.req)
+			return -ENOMEM;
+	}
+
+	*id_priv->options.req = *info;
+	return 0;
+}
+EXPORT_SYMBOL(rdma_set_ib_req_info);
+
 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 {
 	struct rdma_id_private *id_priv;
@@ -1309,16 +1373,26 @@ static void addr_handler(int status, str
 	enum rdma_cm_event_type event;
 
 	atomic_inc(&id_priv->dev_remove);
-	if (!id_priv->cma_dev && !status)
+
+	/*
+	 * Grab mutex to block rdma_destroy_id() from removing the device while
+	 * we're trying to acquire it.
+	 */
+	mutex_lock(&lock);
+	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
+		mutex_unlock(&lock);
+		goto out;
+	}
+
+	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
+	mutex_unlock(&lock);
 
 	if (status) {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND))
+		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
 			goto out;
 		event = RDMA_CM_EVENT_ADDR_ERROR;
 	} else {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
-			goto out;
 		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
 		       ip_addr_size(src_addr));
 		event = RDMA_CM_EVENT_ADDR_RESOLVED;
@@ -1542,8 +1616,11 @@ int rdma_bind_addr(struct rdma_cm_id *id
 
 	if (!cma_any_addr(addr)) {
 		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
-		if (!ret)
+		if (!ret) {
+			mutex_lock(&lock);
 			ret = cma_acquire_dev(id_priv);
+			mutex_unlock(&lock);
+		}
 		if (ret)
 			goto err;
 	}
@@ -1638,13 +1715,18 @@ static int cma_connect_ib(struct rdma_id
 	req.flow_control = conn_param->flow_control;
 	req.retry_count = conn_param->retry_count;
 	req.rnr_retry_count = conn_param->rnr_retry_count;
-	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
-	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
-	req.max_cm_retries = CMA_MAX_CM_RETRIES;
+	req.remote_cm_response_timeout = cma_get_ib_remote_timeout(id_priv);
+	req.local_cm_response_timeout = cma_get_ib_local_timeout(id_priv);
+	req.max_cm_retries = cma_get_ib_cm_retries(id_priv);
 	req.srq = id_priv->srq ? 1 : 0;
 
 	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
 out:
+	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
+		ib_destroy_cm_id(id_priv->cm_id.ib);
+		id_priv->cm_id.ib = NULL;
+	}
+
 	kfree(private_data);
 	return ret;
 }
@@ -1686,11 +1768,25 @@ static int cma_accept_ib(struct rdma_id_
 			 struct rdma_conn_param *conn_param)
 {
 	struct ib_cm_rep_param rep;
-	int ret;
+	struct ib_qp_attr qp_attr;
+	int qp_attr_mask, ret;
 
-	ret = cma_modify_qp_rtr(&id_priv->id);
-	if (ret)
-		return ret;
+	if (id_priv->id.qp) {
+		ret = cma_modify_qp_rtr(&id_priv->id);
+		if (ret)
+			goto out;
+
+		qp_attr.qp_state = IB_QPS_RTS;
+		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
+					 &qp_attr_mask);
+		if (ret)
+			goto out;
+
+		qp_attr.max_rd_atomic = conn_param->initiator_depth;
+		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+		if (ret)
+			goto out;
+	}
 
 	memset(&rep, 0, sizeof rep);
 	rep.qp_num = id_priv->qp_num;
@@ -1699,13 +1795,15 @@ static int cma_accept_ib(struct rdma_id_
 	rep.private_data_len = conn_param->private_data_len;
 	rep.responder_resources = conn_param->responder_resources;
 	rep.initiator_depth = conn_param->initiator_depth;
-	rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
+	rep.target_ack_delay = cma_get_ib_local_timeout(id_priv);
 	rep.failover_accepted = 0;
 	rep.flow_control = conn_param->flow_control;
 	rep.rnr_retry_count = conn_param->rnr_retry_count;
 	rep.srq = id_priv->srq ? 1 : 0;
 
-	return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+out:
+	return ret;
 }
 
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
@@ -1746,6 +1844,27 @@ reject:
 }
 EXPORT_SYMBOL(rdma_accept);
 
+int rdma_establish(struct rdma_cm_id *id)
+{
+	struct rdma_id_private *id_priv;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (!cma_comp(id_priv, CMA_CONNECT))
+		return -EINVAL;
+
+	switch (id->device->node_type) {
+	case IB_NODE_CA:
+		ret = ib_cm_establish(id_priv->cm_id.ib);
+		break;
+	default:
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rdma_establish);
+
 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 		u8 private_data_len)
 {
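
To illustrate the new knobs above: tavor_quirk and max_cm_retries are
runtime-tunable module parameters (registered with mode 0644, so they
should show up under /sys/module/rdma_cm/parameters/), while
rdma_get_ib_req_info()/rdma_set_ib_req_info() give kernel consumers
per-connection control over the REQ timeouts and retry count.  A minimal
sketch of a kernel-side caller follows; the wrapper function and the
header choice are illustrative, not part of the patch:

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

static int tune_cm_retries(struct rdma_cm_id *id)
{
	struct ib_cm_req_opt info;
	int ret;

	/* Only valid once the id has reached CMA_ROUTE_RESOLVED. */
	ret = rdma_get_ib_req_info(id, &info);
	if (ret)
		return ret;

	/* max_cm_retries is a 4-bit field; values above 0xF are rejected. */
	info.max_cm_retries = 5;
	return rdma_set_ib_req_info(id, &info);
}
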
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/core/cm.c linux-2.6.18.noarch/drivers/infiniband/core/cm.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/core/cm.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/core/cm.c	2006-10-19 12:41:11.000000000 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
  * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
@@ -41,6 +41,7 @@
 #include <linux/idr.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/random.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
@@ -53,6 +54,13 @@ MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("InfiniBand CM");
 MODULE_LICENSE("Dual BSD/GPL");
 
+static int mra_timeout_limit = 30000;
+
+module_param(mra_timeout_limit, int, 0444);
+MODULE_PARM_DESC(mra_timeout_limit,
+                 "Limit the MRA timeout according to this value if != 0");
+
+
 static void cm_add_one(struct ib_device *device);
 static void cm_remove_one(struct ib_device *device);
 
@@ -73,6 +81,8 @@ static struct ib_cm {
 	struct rb_root remote_id_table;
 	struct rb_root remote_sidr_table;
 	struct idr local_id_table;
+	__be32 random_id_operand;
+	struct list_head timewait_list;
 	struct workqueue_struct *wq;
 } cm;
 
@@ -110,6 +120,7 @@ struct cm_work {
 
 struct cm_timewait_info {
 	struct cm_work work;			/* Must be first. */
+	struct list_head list;
 	struct rb_node remote_qp_node;
 	struct rb_node remote_id_node;
 	__be64 remote_ca_guid;
@@ -299,15 +310,17 @@ static int cm_init_av_by_path(struct ib_
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 {
 	unsigned long flags;
-	int ret;
+	int ret, id;
 	static int next_id;
 
 	do {
 		spin_lock_irqsave(&cm.lock, flags);
-		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
-					(__force int *) &cm_id_priv->id.local_id);
+		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
+					next_id++, &id);
 		spin_unlock_irqrestore(&cm.lock, flags);
 	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
+
+	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
 	return ret;
 }
 
@@ -316,7 +329,8 @@ static void cm_free_id(__be32 local_id)
 	unsigned long flags;
 
 	spin_lock_irqsave(&cm.lock, flags);
-	idr_remove(&cm.local_id_table, (__force int) local_id);
+	idr_remove(&cm.local_id_table,
+		   (__force int) (local_id ^ cm.random_id_operand));
 	spin_unlock_irqrestore(&cm.lock, flags);
 }
 
@@ -324,7 +338,8 @@ static struct cm_id_private * cm_get_id(
 {
 	struct cm_id_private *cm_id_priv;
 
-	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
+	cm_id_priv = idr_find(&cm.local_id_table,
+			      (__force int) (local_id ^ cm.random_id_operand));
 	if (cm_id_priv) {
 		if (cm_id_priv->id.remote_id == remote_id)
 			atomic_inc(&cm_id_priv->refcount);
@@ -641,13 +656,6 @@ static inline int cm_convert_to_ms(int i
 
 static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
 {
-	unsigned long flags;
-
-	if (!timewait_info->inserted_remote_id &&
-	    !timewait_info->inserted_remote_qp)
-	    return;
-
-	spin_lock_irqsave(&cm.lock, flags);
 	if (timewait_info->inserted_remote_id) {
 		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
 		timewait_info->inserted_remote_id = 0;
@@ -657,7 +665,6 @@ static void cm_cleanup_timewait(struct c
 		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
 		timewait_info->inserted_remote_qp = 0;
 	}
-	spin_unlock_irqrestore(&cm.lock, flags);
 }
 
 static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
@@ -678,6 +685,12 @@ static struct cm_timewait_info * cm_crea
 static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 {
 	int wait_time;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cm.lock, flags);
+	cm_cleanup_timewait(cm_id_priv->timewait_info);
+	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
+	spin_unlock_irqrestore(&cm.lock, flags);
 
 	/*
 	 * The cm_id could be destroyed by the user before we exit timewait.
@@ -693,9 +706,13 @@ static void cm_enter_timewait(struct cm_
 
 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
 {
+	unsigned long flags;
+
 	cm_id_priv->id.state = IB_CM_IDLE;
 	if (cm_id_priv->timewait_info) {
+		spin_lock_irqsave(&cm.lock, flags);
 		cm_cleanup_timewait(cm_id_priv->timewait_info);
+		spin_unlock_irqrestore(&cm.lock, flags);
 		kfree(cm_id_priv->timewait_info);
 		cm_id_priv->timewait_info = NULL;
 	}
@@ -1299,6 +1316,7 @@ static struct cm_id_private * cm_match_r
 	if (timewait_info) {
 		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
 					   timewait_info->work.remote_id);
+		cm_cleanup_timewait(cm_id_priv->timewait_info);
 		spin_unlock_irqrestore(&cm.lock, flags);
 		if (cur_cm_id_priv) {
 			cm_dup_req_handler(work, cur_cm_id_priv);
@@ -1307,7 +1325,8 @@ static struct cm_id_private * cm_match_r
 			cm_issue_rej(work->port, work->mad_recv_wc,
 				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
 				     NULL, 0);
-		goto error;
+		listen_cm_id_priv = NULL;
+		goto out;
 	}
 
 	/* Find matching listen request. */
@@ -1315,21 +1334,20 @@ static struct cm_id_private * cm_match_r
 					   req_msg->service_id,
 					   req_msg->private_data);
 	if (!listen_cm_id_priv) {
+		cm_cleanup_timewait(cm_id_priv->timewait_info);
 		spin_unlock_irqrestore(&cm.lock, flags);
 		cm_issue_rej(work->port, work->mad_recv_wc,
 			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
 			     NULL, 0);
-		goto error;
+		goto out;
 	}
 	atomic_inc(&listen_cm_id_priv->refcount);
 	atomic_inc(&cm_id_priv->refcount);
 	cm_id_priv->id.state = IB_CM_REQ_RCVD;
 	atomic_inc(&cm_id_priv->work_count);
 	spin_unlock_irqrestore(&cm.lock, flags);
+out:
 	return listen_cm_id_priv;
-
-error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
-	return NULL;
 }
 
 static int cm_req_handler(struct cm_work *work)
@@ -1881,6 +1899,32 @@ out:	spin_unlock_irqrestore(&cm_id_priv-
 }
 EXPORT_SYMBOL(ib_send_cm_drep);
 
+static int cm_issue_drep(struct cm_port *port,
+			 struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_mad_send_buf *msg = NULL;
+	struct cm_dreq_msg *dreq_msg;
+	struct cm_drep_msg *drep_msg;
+	int ret;
+
+	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
+	if (ret)
+		return ret;
+
+	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
+	drep_msg = (struct cm_drep_msg *) msg->mad;
+
+	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
+	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
+	drep_msg->local_comm_id = dreq_msg->remote_comm_id;
+
+	ret = ib_post_send_mad(msg, NULL);
+	if (ret)
+		cm_free_msg(msg);
+
+	return ret;
+}
+
 static int cm_dreq_handler(struct cm_work *work)
 {
 	struct cm_id_private *cm_id_priv;
@@ -1892,8 +1936,10 @@ static int cm_dreq_handler(struct cm_wor
 	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
 	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
 				   dreq_msg->local_comm_id);
-	if (!cm_id_priv)
+	if (!cm_id_priv) {
+		cm_issue_drep(work->port, work->mad_recv_wc);
 		return -EINVAL;
+	}
 
 	work->cm_event.private_data = &dreq_msg->private_data;
 
@@ -2072,8 +2118,9 @@ static struct cm_id_private * cm_acquire
 			spin_unlock_irqrestore(&cm.lock, flags);
 			return NULL;
 		}
-		cm_id_priv = idr_find(&cm.local_id_table,
-				      (__force int) timewait_info->work.local_id);
+		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
+				      (timewait_info->work.local_id ^
+				       cm.random_id_operand));
 		if (cm_id_priv) {
 			if (cm_id_priv->id.remote_id == remote_id)
 				atomic_inc(&cm_id_priv->refcount);
@@ -2260,6 +2307,9 @@ static int cm_mra_handler(struct cm_work
 	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
 		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);
 
+	if (mra_timeout_limit && timeout > mra_timeout_limit)
+		timeout = mra_timeout_limit;
+
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	switch (cm_id_priv->id.state) {
 	case IB_CM_REQ_SENT:
@@ -2582,28 +2632,29 @@ static int cm_timewait_handler(struct cm
 {
 	struct cm_timewait_info *timewait_info;
 	struct cm_id_private *cm_id_priv;
-	unsigned long flags;
 	int ret;
 
 	timewait_info = (struct cm_timewait_info *)work;
-	cm_cleanup_timewait(timewait_info);
+	spin_lock_irq(&cm.lock);
+	list_del(&timewait_info->list);
+	spin_unlock_irq(&cm.lock);
 
 	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
 				   timewait_info->work.remote_id);
 	if (!cm_id_priv)
 		return -EINVAL;
 
-	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	spin_lock_irq(&cm_id_priv->lock);
 	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
 	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		spin_unlock_irq(&cm_id_priv->lock);
 		goto out;
 	}
 	cm_id_priv->id.state = IB_CM_IDLE;
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
-	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	spin_unlock_irq(&cm_id_priv->lock);
 
 	if (ret)
 		cm_process_work(cm_id_priv, work);
@@ -3191,6 +3242,10 @@ static int cm_init_qp_rts_attr(struct cm
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	switch (cm_id_priv->id.state) {
+	/* Allow transition to RTS before sending REP */
+	case IB_CM_REQ_RCVD:
+	case IB_CM_MRA_REQ_SENT:
+
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
 	case IB_CM_REP_SENT:
@@ -3349,7 +3404,9 @@ static int __init ib_cm_init(void)
 	cm.remote_qp_table = RB_ROOT;
 	cm.remote_sidr_table = RB_ROOT;
 	idr_init(&cm.local_id_table);
+	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
 	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
+	INIT_LIST_HEAD(&cm.timewait_list);
 
 	cm.wq = create_workqueue("ib_cm");
 	if (!cm.wq)
@@ -3367,7 +3424,20 @@ error:
 
 static void __exit ib_cm_cleanup(void)
 {
+	struct cm_timewait_info *timewait_info, *tmp;
+
+	spin_lock_irq(&cm.lock);
+	list_for_each_entry(timewait_info, &cm.timewait_list, list)
+		cancel_delayed_work(&timewait_info->work.work);
+	spin_unlock_irq(&cm.lock);
+
 	destroy_workqueue(cm.wq);
+
+	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
+		list_del(&timewait_info->list);
+		kfree(timewait_info);
+	}
+
 	ib_unregister_client(&cm_client);
 	idr_destroy(&cm.local_id_table);
 }
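
One note on the random_id_operand change above: local CM IDs used to be
handed out as sequential idr indices, which made stale or forged CM
messages easier to aim at a live connection.  The patch XORs the idr
index with a per-boot random operand in cm_alloc_id() and inverts the
same XOR on every lookup and free.  A standalone sketch of the mapping
(the operand value here is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t random_id_operand = 0x5ac3f1d9;	/* per-boot random */
	uint32_t idr_index, local_id;

	for (idr_index = 0; idr_index < 4; idr_index++) {
		/* cm_alloc_id(): wire-visible id hides the sequential index */
		local_id = idr_index ^ random_id_operand;
		/* cm_get_id()/cm_free_id(): the same XOR recovers the index */
		printf("idr %u -> local_id 0x%08x -> idr %u\n",
		       idr_index, local_id, local_id ^ random_id_operand);
	}
	return 0;
}
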
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/core/Makefile linux-2.6.18.noarch/drivers/infiniband/core/Makefile
--- linux-2.6.18.noarch.OFED/drivers/infiniband/core/Makefile	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/core/Makefile	2006-10-17 10:15:06.000000000 -0400
@@ -1,9 +1,11 @@
 infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS)	:= ib_addr.o rdma_cm.o
+user_access-$(CONFIG_INFINIBAND_ADDR_TRANS)	:= rdma_ucm.o
 
 obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_mad.o ib_sa.o \
 					ib_cm.o $(infiniband-y)
 obj-$(CONFIG_INFINIBAND_USER_MAD) +=	ib_umad.o
 obj-$(CONFIG_INFINIBAND_USER_ACCESS) +=	ib_uverbs.o ib_ucm.o
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += $(user_access-y)
 
 ib_core-y :=			packer.o ud_header.o verbs.o sysfs.o \
 				device.o fmr_pool.o cache.o
@@ -16,6 +18,8 @@ ib_cm-y :=			cm.o
 
 rdma_cm-y :=			cma.o
 
+rdma_ucm-y :=			ucma.o ucma_ib.o
+
 ib_addr-y :=			addr.o
 
 ib_umad-y :=			user_mad.o
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/core/ucma.c linux-2.6.18.noarch/drivers/infiniband/core/ucma.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/core/ucma.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/core/ucma.c	2006-10-17 10:15:06.000000000 -0400
@@ -0,0 +1,899 @@
+/*
+ * Copyright (c) 2005 Intel Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *	copyright notice, this list of conditions and the following
+ *	disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *	copyright notice, this list of conditions and the following
+ *	disclaimer in the documentation and/or other materials
+ *	provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/miscdevice.h>
+
+#include <rdma/rdma_user_cm.h>
+#include <rdma/ib_marshall.h>
+#include <rdma/rdma_cm.h>
+
+#include "ucma_ib.h"
+
+enum {
+	RDMA_TRANSPORT_IB
+};
+
+#define rdma_node_get_transport(x) RDMA_TRANSPORT_IB
+
+MODULE_AUTHOR("Sean Hefty");
+MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
+MODULE_LICENSE("Dual BSD/GPL");
+
+enum {
+	UCMA_MAX_BACKLOG	= 128
+};
+
+struct ucma_file {
+	struct mutex		file_mutex;
+	struct file		*filp;
+	struct list_head	ctxs;
+	struct list_head	events;
+	wait_queue_head_t	poll_wait;
+};
+
+struct ucma_context {
+	int			id;
+	struct completion	comp;
+	atomic_t		ref;
+	int			events_reported;
+	int			backlog;
+
+	struct ucma_file	*file;
+	struct rdma_cm_id	*cm_id;
+	__u64			uid;
+
+	struct list_head	events;    /* list of pending events. */
+	struct list_head	file_list; /* member in file ctx list */
+};
+
+struct ucma_event {
+	struct ucma_context	*ctx;
+	struct list_head	file_list; /* member in file event list */
+	struct list_head	ctx_list;  /* member in ctx event list */
+	struct rdma_cm_id	*cm_id;
+	struct rdma_ucm_event_resp resp;
+};
+
+static DEFINE_MUTEX(ctx_mutex);
+static DEFINE_IDR(ctx_idr);
+
+static struct ucma_context* ucma_get_ctx(struct ucma_file *file, int id)
+{
+	struct ucma_context *ctx;
+
+	mutex_lock(&ctx_mutex);
+	ctx = idr_find(&ctx_idr, id);
+	if (!ctx)
+		ctx = ERR_PTR(-ENOENT);
+	else if (ctx->file != file)
+		ctx = ERR_PTR(-EINVAL);
+	else
+		atomic_inc(&ctx->ref);
+	mutex_unlock(&ctx_mutex);
+
+	return ctx;
+}
+
+static void ucma_put_ctx(struct ucma_context *ctx)
+{
+	if (atomic_dec_and_test(&ctx->ref))
+		complete(&ctx->comp);
+}
+
+static void ucma_cleanup_events(struct ucma_context *ctx)
+{
+	struct ucma_event *uevent;
+
+	mutex_lock(&ctx->file->file_mutex);
+	list_del(&ctx->file_list);
+	while (!list_empty(&ctx->events)) {
+
+		uevent = list_entry(ctx->events.next, struct ucma_event,
+				    ctx_list);
+		list_del(&uevent->file_list);
+		list_del(&uevent->ctx_list);
+
+		/* clear incoming connections. */
+		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
+			rdma_destroy_id(uevent->cm_id);
+
+		kfree(uevent);
+	}
+	mutex_unlock(&ctx->file->file_mutex);
+}
+
+static struct ucma_context* ucma_alloc_ctx(struct ucma_file *file)
+{
+	struct ucma_context *ctx;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+
+	atomic_set(&ctx->ref, 1);
+	init_completion(&ctx->comp);
+	ctx->file = file;
+	INIT_LIST_HEAD(&ctx->events);
+
+	do {
+		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
+		if (!ret)
+			goto error;
+
+		mutex_lock(&ctx_mutex);
+		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
+		mutex_unlock(&ctx_mutex);
+	} while (ret == -EAGAIN);
+
+	if (ret)
+		goto error;
+
+	list_add_tail(&ctx->file_list, &file->ctxs);
+	return ctx;
+
+error:
+	kfree(ctx);
+	return NULL;
+}
+
+static int ucma_event_handler(struct rdma_cm_id *cm_id,
+			      struct rdma_cm_event *event)
+{
+	struct ucma_event *uevent;
+	struct ucma_context *ctx = cm_id->context;
+	int ret = 0;
+
+	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+	if (!uevent)
+		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
+
+	uevent->ctx = ctx;
+	uevent->cm_id = cm_id;
+	uevent->resp.uid = ctx->uid;
+	uevent->resp.id = ctx->id;
+	uevent->resp.event = event->event;
+	uevent->resp.status = event->status;
+	if ((uevent->resp.private_data_len = event->private_data_len))
+		memcpy(uevent->resp.private_data, event->private_data,
+		       event->private_data_len);
+
+	mutex_lock(&ctx->file->file_mutex);
+	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
+		if (!ctx->backlog) {
+			ret = -EDQUOT;
+			goto out;
+		}
+		ctx->backlog--;
+	}
+	list_add_tail(&uevent->file_list, &ctx->file->events);
+	list_add_tail(&uevent->ctx_list, &ctx->events);
+	wake_up_interruptible(&ctx->file->poll_wait);
+out:
+	mutex_unlock(&ctx->file->file_mutex);
+	return ret;
+}
+
+static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
+			      int in_len, int out_len)
+{
+	struct ucma_context *ctx;
+	struct rdma_ucm_get_event cmd;
+	struct ucma_event *uevent;
+	int ret = 0;
+	DEFINE_WAIT(wait);
+
+	if (out_len < sizeof(struct rdma_ucm_event_resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	mutex_lock(&file->file_mutex);
+	while (list_empty(&file->events)) {
+		if (file->filp->f_flags & O_NONBLOCK) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
+		mutex_unlock(&file->file_mutex);
+		schedule();
+		mutex_lock(&file->file_mutex);
+		finish_wait(&file->poll_wait, &wait);
+	}
+
+	if (ret)
+		goto done;
+
+	uevent = list_entry(file->events.next, struct ucma_event, file_list);
+
+	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
+		ctx = ucma_alloc_ctx(file);
+		if (!ctx) {
+			ret = -ENOMEM;
+			goto done;
+		}
+		uevent->ctx->backlog++;
+		ctx->cm_id = uevent->cm_id;
+		ctx->cm_id->context = ctx;
+		uevent->resp.id = ctx->id;
+	}
+
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &uevent->resp, sizeof(uevent->resp))) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+	list_del(&uevent->file_list);
+	list_del(&uevent->ctx_list);
+	uevent->ctx->events_reported++;
+	kfree(uevent);
+done:
+	mutex_unlock(&file->file_mutex);
+	return ret;
+}
+
+static ssize_t ucma_create_id(struct ucma_file *file,
+				const char __user *inbuf,
+				int in_len, int out_len)
+{
+	struct rdma_ucm_create_id cmd;
+	struct rdma_ucm_create_id_resp resp;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (out_len < sizeof(resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	mutex_lock(&file->file_mutex);
+	ctx = ucma_alloc_ctx(file);
+	mutex_unlock(&file->file_mutex);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->uid = cmd.uid;
+	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, RDMA_PS_TCP);
+	if (IS_ERR(ctx->cm_id)) {
+		ret = PTR_ERR(ctx->cm_id);
+		goto err1;
+	}
+
+	resp.id = ctx->id;
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &resp, sizeof(resp))) {
+		ret = -EFAULT;
+		goto err2;
+	}
+	return 0;
+
+err2:
+	rdma_destroy_id(ctx->cm_id);
+err1:
+	mutex_lock(&ctx_mutex);
+	idr_remove(&ctx_idr, ctx->id);
+	mutex_unlock(&ctx_mutex);
+	kfree(ctx);
+	return ret;
+}
+
+static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
+			       int in_len, int out_len)
+{
+	struct rdma_ucm_destroy_id cmd;
+	struct rdma_ucm_destroy_id_resp resp;
+	struct ucma_context *ctx;
+	int ret = 0;
+
+	if (out_len < sizeof(resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	mutex_lock(&ctx_mutex);
+	ctx = idr_find(&ctx_idr, cmd.id);
+	if (!ctx)
+		ctx = ERR_PTR(-ENOENT);
+	else if (ctx->file != file)
+		ctx = ERR_PTR(-EINVAL);
+	else
+		idr_remove(&ctx_idr, ctx->id);
+	mutex_unlock(&ctx_mutex);
+
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ucma_put_ctx(ctx);
+	wait_for_completion(&ctx->comp);
+
+	/* No new events will be generated after destroying the id. */
+	rdma_destroy_id(ctx->cm_id);
+	/* Cleanup events not yet reported to the user. */
+	ucma_cleanup_events(ctx);
+
+	resp.events_reported = ctx->events_reported;
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &resp, sizeof(resp)))
+		ret = -EFAULT;
+
+	kfree(ctx);
+	return ret;
+}
+
+static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
+			      int in_len, int out_len)
+{
+	struct rdma_ucm_bind_addr cmd;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_resolve_addr(struct ucma_file *file,
+				 const char __user *inbuf,
+				 int in_len, int out_len)
+{
+	struct rdma_ucm_resolve_addr cmd;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+				(struct sockaddr *) &cmd.dst_addr,
+				cmd.timeout_ms);
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_resolve_route(struct ucma_file *file,
+				  const char __user *inbuf,
+				  int in_len, int out_len)
+{
+	struct rdma_ucm_resolve_route cmd;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
+			       struct rdma_route *route)
+{
+	struct rdma_dev_addr *dev_addr;
+
+	resp->num_paths = route->num_paths;
+	switch (route->num_paths) {
+	case 0:
+		dev_addr = &route->addr.dev_addr;
+		ib_addr_get_dgid(dev_addr,
+				 (union ib_gid *) &resp->ib_route[0].dgid);
+		ib_addr_get_sgid(dev_addr,
+				 (union ib_gid *) &resp->ib_route[0].sgid);
+		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
+		break;
+	case 2:
+		ib_copy_path_rec_to_user(&resp->ib_route[1],
+					 &route->path_rec[1]);
+		/* fall through */
+	case 1:
+		ib_copy_path_rec_to_user(&resp->ib_route[0],
+					 &route->path_rec[0]);
+		break;
+	default:
+		break;
+	}
+}
+
+static ssize_t ucma_query_route(struct ucma_file *file,
+				const char __user *inbuf,
+				int in_len, int out_len)
+{
+	struct rdma_ucm_query_route cmd;
+	struct rdma_ucm_query_route_resp resp;
+	struct ucma_context *ctx;
+	struct sockaddr *addr;
+	int ret = 0;
+
+	if (out_len < sizeof(resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	memset(&resp, 0, sizeof resp);
+	addr = &ctx->cm_id->route.addr.src_addr;
+	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
+				     sizeof(struct sockaddr_in) : 
+				     sizeof(struct sockaddr_in6));
+	addr = &ctx->cm_id->route.addr.dst_addr;
+	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
+				     sizeof(struct sockaddr_in) : 
+				     sizeof(struct sockaddr_in6));
+	if (!ctx->cm_id->device)
+		goto out;
+
+	resp.node_guid = ctx->cm_id->device->node_guid;
+	resp.port_num = ctx->cm_id->port_num;
+	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
+	case RDMA_TRANSPORT_IB:
+		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+	default:
+		break;
+	}
+
+out:
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &resp, sizeof(resp)))
+		ret = -EFAULT;
+
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static void ucma_copy_conn_param(struct rdma_conn_param *dst_conn,
+				 struct rdma_ucm_conn_param *src_conn)
+{
+	dst_conn->private_data = src_conn->private_data;
+	dst_conn->private_data_len = src_conn->private_data_len;
+	dst_conn->responder_resources =src_conn->responder_resources;
+	dst_conn->initiator_depth = src_conn->initiator_depth;
+	dst_conn->flow_control = src_conn->flow_control;
+	dst_conn->retry_count = src_conn->retry_count;
+	dst_conn->rnr_retry_count = src_conn->rnr_retry_count;
+	dst_conn->srq = src_conn->srq;
+	dst_conn->qp_num = src_conn->qp_num;
+	dst_conn->qp_type = src_conn->qp_type;
+}
+
+static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
+			    int in_len, int out_len)
+{
+	struct rdma_ucm_connect cmd;
+	struct rdma_conn_param conn_param;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	if (!cmd.conn_param.valid)
+		return -EINVAL;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+	ret = rdma_connect(ctx->cm_id, &conn_param);
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
+			   int in_len, int out_len)
+{
+	struct rdma_ucm_listen cmd;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
+		       cmd.backlog : UCMA_MAX_BACKLOG;
+	ret = rdma_listen(ctx->cm_id, ctx->backlog);
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
+			   int in_len, int out_len)
+{
+	struct rdma_ucm_accept cmd;
+	struct rdma_conn_param conn_param;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	if (cmd.conn_param.valid) {
+		ctx->uid = cmd.uid;
+		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+		ret = rdma_accept(ctx->cm_id, &conn_param);
+	} else
+		ret = rdma_accept(ctx->cm_id, NULL);
+
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
+			   int in_len, int out_len)
+{
+	struct rdma_ucm_reject cmd;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
+			       int in_len, int out_len)
+{
+	struct rdma_ucm_disconnect cmd;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	ret = rdma_disconnect(ctx->cm_id);
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_init_qp_attr(struct ucma_file *file,
+				 const char __user *inbuf,
+				 int in_len, int out_len)
+{
+	struct rdma_ucm_init_qp_attr cmd;
+	struct ib_uverbs_qp_attr resp;
+	struct ucma_context *ctx;
+	struct ib_qp_attr qp_attr;
+	int ret;
+
+	if (out_len < sizeof(resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	resp.qp_attr_mask = 0;
+	memset(&qp_attr, 0, sizeof qp_attr);
+	qp_attr.qp_state = cmd.qp_state;
+	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+	if (ret)
+		goto out;
+
+	ib_copy_qp_attr_to_user(&resp, &qp_attr);
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &resp, sizeof(resp)))
+		ret = -EFAULT;
+
+out:
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_get_option(struct ucma_file *file, const char __user *inbuf,
+			       int in_len, int out_len)
+{
+	struct rdma_ucm_get_option cmd;
+	struct rdma_ucm_get_option_resp resp;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (out_len < sizeof(resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	resp.optlen = cmd.optlen;
+
+	switch (cmd.level) {
+	case RDMA_PROTO_IP:
+		ret = -ENOSYS;
+		break;
+	case RDMA_PROTO_IB:
+		ret = ucma_get_ib_option(ctx->cm_id, cmd.optname,
+					 (void *) (unsigned long) cmd.optval,
+					 &resp.optlen);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		goto out;
+
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &resp, sizeof(resp)))
+		ret = -EFAULT;
+out:
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
+			       int in_len, int out_len)
+{
+	struct rdma_ucm_set_option cmd;
+	struct ucma_context *ctx;
+	int ret;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	ctx = ucma_get_ctx(file, cmd.id);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	switch (cmd.level) {
+	case RDMA_PROTO_IP:
+		ret = -ENOSYS;
+		break;
+	case RDMA_PROTO_IB:
+		ret = ucma_set_ib_option(ctx->cm_id, cmd.optname,
+					 (void *) (unsigned long) cmd.optval,
+					 cmd.optlen);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	ucma_put_ctx(ctx);
+	return ret;
+}
+
+static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
+				   const char __user *inbuf,
+				   int in_len, int out_len) = {
+	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
+	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
+	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
+	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
+	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
+	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
+	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
+	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
+	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
+	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
+	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
+	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
+	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
+	[RDMA_USER_CM_CMD_GET_OPTION]	= ucma_get_option,
+	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option
+};
+
+static ssize_t ucma_write(struct file *filp, const char __user *buf,
+			  size_t len, loff_t *pos)
+{
+	struct ucma_file *file = filp->private_data;
+	struct rdma_ucm_cmd_hdr hdr;
+	ssize_t ret;
+
+	if (len < sizeof(hdr))
+		return -EINVAL;
+
+	if (copy_from_user(&hdr, buf, sizeof(hdr)))
+		return -EFAULT;
+
+	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
+		return -EINVAL;
+
+	if (hdr.in + sizeof(hdr) > len)
+		return -EINVAL;
+
+	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
+	if (!ret)
+		ret = len;
+
+	return ret;
+}
+
+static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	struct ucma_file *file = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &file->poll_wait, wait);
+
+	mutex_lock(&file->file_mutex);
+	if (!list_empty(&file->events))
+		mask = POLLIN | POLLRDNORM;
+	mutex_unlock(&file->file_mutex);
+
+	return mask;
+}
+
+static int ucma_open(struct inode *inode, struct file *filp)
+{
+	struct ucma_file *file;
+
+	file = kmalloc(sizeof *file, GFP_KERNEL);
+	if (!file)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&file->events);
+	INIT_LIST_HEAD(&file->ctxs);
+	init_waitqueue_head(&file->poll_wait);
+	mutex_init(&file->file_mutex);
+
+	filp->private_data = file;
+	file->filp = filp;
+	return 0;
+}
+
+static int ucma_close(struct inode *inode, struct file *filp)
+{
+	struct ucma_file *file = filp->private_data;
+	struct ucma_context *ctx;
+
+	mutex_lock(&file->file_mutex);
+	while (!list_empty(&file->ctxs)) {
+		ctx = list_entry(file->ctxs.next, struct ucma_context,
+				 file_list);
+		mutex_unlock(&file->file_mutex);
+
+		mutex_lock(&ctx_mutex);
+		idr_remove(&ctx_idr, ctx->id);
+		mutex_unlock(&ctx_mutex);
+
+		rdma_destroy_id(ctx->cm_id);
+		ucma_cleanup_events(ctx);
+		kfree(ctx);
+
+		mutex_lock(&file->file_mutex);
+	}
+	mutex_unlock(&file->file_mutex);
+	kfree(file);
+	return 0;
+}
+
+static struct file_operations ucma_fops = {
+	.owner 	 = THIS_MODULE,
+	.open 	 = ucma_open,
+	.release = ucma_close,
+	.write	 = ucma_write,
+	.poll    = ucma_poll,
+};
+
+static struct miscdevice ucma_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "rdma_cm",
+	.fops	= &ucma_fops,
+};
+
+static ssize_t show_abi_version(struct class_device *class_dev, char *buf)
+{
+	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
+}
+static CLASS_DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+
+static int __init ucma_init(void)
+{
+	int ret;
+
+	ret = misc_register(&ucma_misc);
+	if (ret)
+		return ret;
+
+	ret = class_device_create_file(ucma_misc.class,
+				       &class_device_attr_abi_version);
+	if (ret) {
+		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+		goto err;
+	}
+	return 0;
+err:
+	misc_deregister(&ucma_misc);
+	return ret;
+}
+
+static void __exit ucma_cleanup(void)
+{
+	class_device_remove_file(ucma_misc.class, 
+				 &class_device_attr_abi_version);
+	misc_deregister(&ucma_misc);
+	idr_destroy(&ctx_idr);
+}
+
+module_init(ucma_init);
+module_exit(ucma_cleanup);
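
The new rdma_ucm module exposes a write()-based command interface
rather than ioctls: userspace writes an rdma_ucm_cmd_hdr followed by the
command struct in a single write(), and any response comes back through
a user pointer carried in the command itself.  A rough userspace sketch
of creating an id; the struct layouts come from <rdma/rdma_user_cm.h>,
and the device node path depends on your udev rules:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <rdma/rdma_user_cm.h>

int main(void)
{
	struct rdma_ucm_cmd_hdr hdr;
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	char buf[sizeof(hdr) + sizeof(cmd)];
	int fd = open("/dev/infiniband/rdma_cm", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&hdr, 0, sizeof(hdr));
	memset(&cmd, 0, sizeof(cmd));
	hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;	/* index into ucma_cmd_table */
	hdr.in = sizeof(cmd);
	hdr.out = sizeof(resp);
	cmd.uid = 0;				/* caller-chosen cookie */
	cmd.response = (uintptr_t) &resp;

	/* header and payload must arrive in one write() */
	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &cmd, sizeof(cmd));
	if (write(fd, buf, sizeof(buf)) != (ssize_t) sizeof(buf)) {
		perror("write");
		return 1;
	}

	printf("created rdma_cm context id %d\n", resp.id);
	close(fd);
	return 0;
}
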
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/core/ucma_ib.c linux-2.6.18.noarch/drivers/infiniband/core/ucma_ib.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/core/ucma_ib.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/core/ucma_ib.c	2006-10-17 10:15:06.000000000 -0400
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2006 Intel Corporation.  All rights reserved.
+ *
+ * This Software is licensed under one of the following licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
+ *    copy of which is available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/gpl-license.php.
+ *
+ * Licensee has the right to choose one of the above licenses.
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice and one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ */
+
+#include <rdma/ib_addr.h>
+#include <rdma/ib_marshall.h>
+#include <rdma/rdma_cm_ib.h>
+#include <rdma/rdma_user_cm.h>
+
+#include "ucma_ib.h"
+
+static int ucma_get_req_opt(struct rdma_cm_id *id, void __user *opt,
+			    int *optlen)
+{
+	struct ib_cm_req_opt req_opt;
+	int ret = 0;
+
+	if (!opt)
+		goto out;
+
+ 	if (*optlen < sizeof req_opt) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = rdma_get_ib_req_info(id, &req_opt);
+	if (!ret)
+		if (copy_to_user(opt, &req_opt, sizeof req_opt))
+			ret = -EFAULT;
+out:
+	*optlen = sizeof req_opt;
+	return ret;	
+}
+
+int ucma_get_ib_option(struct rdma_cm_id *id, int optname,
+		       void *optval, int *optlen)
+{
+	switch (optname) {
+	case IB_PATH_OPTIONS:
+		return -EINVAL;
+	case IB_CM_REQ_OPTIONS:
+		return ucma_get_req_opt(id, optval, optlen);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int ucma_set_req_opt(struct rdma_cm_id *id, void __user *opt, int optlen)
+{
+	struct ib_cm_req_opt req_opt;
+
+	if (optlen != sizeof req_opt)
+		return -EINVAL;
+
+	if (copy_from_user(&req_opt, opt, sizeof req_opt))
+		return -EFAULT;
+
+	return rdma_set_ib_req_info(id, &req_opt);
+}
+
+int ucma_set_ib_option(struct rdma_cm_id *id, int optname,
+		       void *optval, int optlen)
+{
+	switch (optname) {
+	case IB_PATH_OPTIONS:
+		return -EINVAL;
+	case IB_CM_REQ_OPTIONS:
+		return ucma_set_req_opt(id, optval, optlen);
+	default:
+		return -EINVAL;
+	}
+}
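
ucma_get_req_opt() above follows the usual getsockopt-style size
negotiation: pass opt == NULL and the call only reports the required
size through *optlen; pass too small a buffer and it returns -ENOMEM,
again writing back the required size; call once more with enough room
to fetch the data.  A reduced standalone sketch of that contract (the
opt_blob/get_opt names are placeholders, not from the patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct opt_blob { int a, b, c; };

/* same contract as ucma_get_req_opt(): report the size, fill if room */
static int get_opt(void *opt, int *optlen)
{
	struct opt_blob blob = { 1, 2, 3 };
	int ret = 0;

	if (opt && *optlen < (int) sizeof(blob))
		ret = -ENOMEM;
	else if (opt)
		memcpy(opt, &blob, sizeof(blob));

	*optlen = sizeof(blob);		/* always report the required size */
	return ret;
}

int main(void)
{
	struct opt_blob blob;
	int len = 0;

	get_opt(NULL, &len);		/* first call: learn the size */
	printf("need %d bytes\n", len);
	if (get_opt(&blob, &len))	/* second call: fetch the data */
		return 1;
	printf("a=%d b=%d c=%d\n", blob.a, blob.b, blob.c);
	return 0;
}
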
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/core/ucma_ib.h linux-2.6.18.noarch/drivers/infiniband/core/ucma_ib.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/core/ucma_ib.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/core/ucma_ib.h	2006-10-17 10:15:06.000000000 -0400
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2006 Intel Corporation.  All rights reserved.
+ *
+ * This Software is licensed under one of the following licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
+ *    copy of which is available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/gpl-license.php.
+ *
+ * Licensee has the right to choose one of the above licenses.
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice and one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ */
+
+#if !defined(UCMA_IB_H)
+#define UCMA_IB_H
+
+#include <rdma/rdma_cm.h>
+
+int ucma_get_ib_option(struct rdma_cm_id *id, int optname,
+		       void *optval, int *optlen);
+
+int ucma_set_ib_option(struct rdma_cm_id *id, int optname,
+		       void *optval, int optlen);
+
+#endif /* UCMA_IB_H */
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/core/uverbs_cmd.c linux-2.6.18.noarch/drivers/infiniband/core/uverbs_cmd.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/core/uverbs_cmd.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/core/uverbs_cmd.c	2006-10-19 12:41:11.000000000 -0400
@@ -37,6 +37,7 @@
 
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/lockdep.h>
 
 #include <asm/uaccess.h>
 
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_av.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_av.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_av.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_av.c	2006-10-17 10:15:06.000000000 -0400
@@ -42,38 +42,30 @@
  */
 
 
-#define DEB_PREFIX "ehav"
-
 #include <asm/current.h>
 
 #include "ehca_tools.h"
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
+static struct kmem_cache *av_cache;
+
 struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
-	extern struct ehca_module ehca_module;
-	extern int ehca_static_rate;
-	int ret = 0;
-	struct ehca_av *av = NULL;
-	struct ehca_shca *shca = NULL;
-
-	EHCA_CHECK_PD_P(pd);
-	EHCA_CHECK_ADR_P(ah_attr);
+	int ret;
+	struct ehca_av *av;
+	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
+					      ib_device);
 
-	shca = container_of(pd->device, struct ehca_shca, ib_device);
-
-	EDEB_EN(7, "pd=%p ah_attr=%p", pd, ah_attr);
-
-	av = kmem_cache_alloc(ehca_module.cache_av, SLAB_KERNEL);
+	av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
 	if (!av) {
-		EDEB_ERR(4, "Out of memory pd=%p ah_attr=%p", pd, ah_attr);
-		ret = -ENOMEM;
-		goto create_ah_exit0;
+		ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
+			 pd, ah_attr);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	av->av.sl = ah_attr->sl;
-	av->av.dlid = ntohs(ah_attr->dlid);
+	av->av.dlid = ah_attr->dlid;
 	av->av.slid_path_bits = ah_attr->src_path_bits;
 
 	if (ehca_static_rate < 0) {
@@ -89,12 +81,8 @@ struct ib_ah *ehca_create_ah(struct ib_p
 	} else
 	        av->av.ipd = ehca_static_rate;
 
-	EDEB(7, "IPD av->av.ipd set =%x  ah_attr->static_rate=%x "
-	     "shca_ib_rate=%x ",av->av.ipd, ah_attr->static_rate,
-	     shca->sport[ah_attr->port_num].rate);
-
 	av->av.lnh = ah_attr->ah_flags;
-	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
+	av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
 	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
 					    ah_attr->grh.traffic_class);
 	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
@@ -102,11 +90,9 @@ struct ib_ah *ehca_create_ah(struct ib_p
 	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
 					    ah_attr->grh.hop_limit);
 	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
-	/* IB transport */
-	av->av.grh.word_0 = be64_to_cpu(av->av.grh.word_0);
 	/* set sgid in grh.word_1 */
 	if (ah_attr->ah_flags & IB_AH_GRH) {
-		int rc = 0;
+		int rc;
 		struct ib_port_attr port_attr;
 		union ib_gid gid;
 		memset(&port_attr, 0, sizeof(port_attr));
@@ -114,7 +100,7 @@ struct ib_ah *ehca_create_ah(struct ib_p
 				     &port_attr);
 		if (rc) { /* invalid port number */
 			ret = -EINVAL;
-			EDEB_ERR(4, "Invalid port number "
+			ehca_err(pd->device, "Invalid port number "
 				 "ehca_query_port() returned %x "
 				 "pd=%p ah_attr=%p", rc, pd, ah_attr);
 			goto create_ah_exit1;
@@ -125,7 +111,7 @@ struct ib_ah *ehca_create_ah(struct ib_p
 				    ah_attr->grh.sgid_index, &gid);
 		if (rc) {
 			ret = -EINVAL;
-			EDEB_ERR(4, "Failed to retrieve sgid "
+			ehca_err(pd->device, "Failed to retrieve sgid "
 				 "ehca_query_gid() returned %x "
 				 "pd=%p ah_attr=%p", rc, pd, ah_attr);
 			goto create_ah_exit1;
@@ -139,48 +125,35 @@ struct ib_ah *ehca_create_ah(struct ib_p
 	memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
 	       sizeof(ah_attr->grh.dgid));
 
-	EHCA_REGISTER_AV(device, pd);
-
-	EDEB_EX(7, "pd=%p ah_attr=%p av=%p", pd, ah_attr, av);
 	return &av->ib_ah;
 
 create_ah_exit1:
-	kmem_cache_free(ehca_module.cache_av, av);
-
-create_ah_exit0:
-	EDEB_EX(7, "ret=%x pd=%p ah_attr=%p", ret, pd, ah_attr);
+	kmem_cache_free(av_cache, av);
 
 	return ERR_PTR(ret);
 }
 
 int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 {
-	struct ehca_av *av = NULL;
+	struct ehca_av *av;
 	struct ehca_ud_av new_ehca_av;
-	struct ehca_pd *my_pd = NULL;
+	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
 	u32 cur_pid = current->tgid;
-	int ret = 0;
-
-	EHCA_CHECK_AV(ah);
-	EHCA_CHECK_ADR(ah_attr);
 
-	EDEB_EN(7, "ah=%p ah_attr=%p", ah, ah_attr);
-
-	my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    my_pd->ownpid != cur_pid) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
 		return -EINVAL;
 	}
 
 	memset(&new_ehca_av, 0, sizeof(new_ehca_av));
 	new_ehca_av.sl = ah_attr->sl;
-	new_ehca_av.dlid = ntohs(ah_attr->dlid);
+	new_ehca_av.dlid = ah_attr->dlid;
 	new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
 	new_ehca_av.ipd = ah_attr->static_rate;
 	new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
-					 ((ah_attr->ah_flags & IB_AH_GRH) > 0));
+					 (ah_attr->ah_flags & IB_AH_GRH) > 0);
 	new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
 						ah_attr->grh.traffic_class);
 	new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
@@ -188,37 +161,34 @@ int ehca_modify_ah(struct ib_ah *ah, str
 	new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
 						 ah_attr->grh.hop_limit);
 	new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
-	new_ehca_av.grh.word_0 = be64_to_cpu(new_ehca_av.grh.word_0);
 
 	/* set sgid in grh.word_1 */
 	if (ah_attr->ah_flags & IB_AH_GRH) {
-		int rc = 0;
+		int rc;
 		struct ib_port_attr port_attr;
 		union ib_gid gid;
 		memset(&port_attr, 0, sizeof(port_attr));
 		rc = ehca_query_port(ah->device, ah_attr->port_num,
 				     &port_attr);
 		if (rc) { /* invalid port number */
-			ret = -EINVAL;
-			EDEB_ERR(4, "Invalid port number "
+			ehca_err(ah->device, "Invalid port number "
 				 "ehca_query_port() returned %x "
 				 "ah=%p ah_attr=%p port_num=%x",
 				 rc, ah, ah_attr, ah_attr->port_num);
-			goto modify_ah_exit1;
+			return -EINVAL;
 		}
 		memset(&gid, 0, sizeof(gid));
 		rc = ehca_query_gid(ah->device,
 				    ah_attr->port_num,
 				    ah_attr->grh.sgid_index, &gid);
 		if (rc) {
-			ret = -EINVAL;
-			EDEB_ERR(4, "Failed to retrieve sgid "
+			ehca_err(ah->device, "Failed to retrieve sgid "
 				 "ehca_query_gid() returned %x "
 				 "ah=%p ah_attr=%p port_num=%x "
 				 "sgid_index=%x",
 				 rc, ah, ah_attr, ah_attr->port_num,
 				 ah_attr->grh.sgid_index);
-			goto modify_ah_exit1;
+			return -EINVAL;
 		}
 		memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
 	}
@@ -231,33 +201,22 @@ int ehca_modify_ah(struct ib_ah *ah, str
 	av = container_of(ah, struct ehca_av, ib_ah);
 	av->av = new_ehca_av;
 
-modify_ah_exit1:
-	EDEB_EX(7, "ret=%x ah=%p ah_attr=%p", ret, ah, ah_attr);
-
-	return ret;
+	return 0;
 }
 
 int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 {
-	int ret = 0;
-	struct ehca_av *av = NULL;
-	struct ehca_pd *my_pd = NULL;
+	struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
+	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
 	u32 cur_pid = current->tgid;
 
-	EHCA_CHECK_AV(ah);
-	EHCA_CHECK_ADR(ah_attr);
-
-	EDEB_EN(7, "ah=%p ah_attr=%p", ah, ah_attr);
-
-	my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    my_pd->ownpid != cur_pid) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
 		return -EINVAL;
 	}
 
-	av = container_of(ah, struct ehca_av, ib_ah);
 	memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
 	       sizeof(ah_attr->grh.dgid));
 	ah_attr->sl = av->av.sl;
@@ -274,33 +233,39 @@ int ehca_query_ah(struct ib_ah *ah, stru
 	ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
 						 av->av.grh.word_0);
 
-	EDEB_EX(7, "ah=%p ah_attr=%p ret=%x", ah, ah_attr, ret);
-	return ret;
+	return 0;
 }
 
 int ehca_destroy_ah(struct ib_ah *ah)
 {
-	extern struct ehca_module ehca_module;
-	struct ehca_pd *my_pd = NULL;
+	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
 	u32 cur_pid = current->tgid;
-	int ret = 0;
-
-	EHCA_CHECK_AV(ah);
-	EHCA_DEREGISTER_AV(ah);
-
-	EDEB_EN(7, "ah=%p", ah);
 
-	my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    my_pd->ownpid != cur_pid) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
 		return -EINVAL;
 	}
 
-	kmem_cache_free(ehca_module.cache_av,
-			container_of(ah, struct ehca_av, ib_ah));
+	kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
 
-	EDEB_EX(7, "ret=%x ah=%p", ret, ah);
-	return ret;
+	return 0;
+}
+
+int ehca_init_av_cache(void)
+{
+	av_cache = kmem_cache_create("ehca_cache_av",
+				   sizeof(struct ehca_av), 0,
+				   SLAB_HWCACHE_ALIGN,
+				   NULL, NULL);
+	if (!av_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void ehca_cleanup_av_cache(void)
+{
+	if (av_cache)
+		kmem_cache_destroy(av_cache);
 }
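
With the global ehca_module structure gone, each object type now owns a
file-local kmem_cache plus an init/cleanup pair (the prototypes are added
to ehca_classes.h below).  A minimal sketch, illustrative only and not
part of the patch, of how module init would chain these caches with the
usual unwind-on-failure; the exact ordering is an assumption:

    /* Sketch only: chains the ehca_init_*_cache() helpers declared in
     * ehca_classes.h; the unwind order on failure is an assumption. */
    static int ehca_init_caches(void)
    {
            int ret;

            ret = ehca_init_pd_cache();
            if (ret)
                    return ret;

            ret = ehca_init_cq_cache();
            if (ret)
                    goto cleanup_pd;

            ret = ehca_init_av_cache();
            if (ret)
                    goto cleanup_cq;

            return 0;

    cleanup_cq:
            ehca_cleanup_cq_cache();
    cleanup_pd:
            ehca_cleanup_pd_cache();
            return ret;
    }
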
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_classes.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_classes.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_classes.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_classes.h	2006-10-17 10:15:06.000000000 -0400
@@ -63,19 +63,6 @@ struct ehca_av;
 
 #include "ehca_irq.h"
 
-struct ehca_module {
-	struct list_head shca_list;
-	spinlock_t shca_lock;
-	struct timer_list timer;
-	kmem_cache_t *cache_pd;
-	kmem_cache_t *cache_cq;
-	kmem_cache_t *cache_qp;
-	kmem_cache_t *cache_av;
-	kmem_cache_t *cache_mr;
-	kmem_cache_t *cache_mw;
-	struct ehca_pfmodule pf;
-};
-
 struct ehca_eq {
 	u32 length;
 	struct ipz_queue ipz_queue;
@@ -108,14 +95,12 @@ struct ehca_shca {
 	struct ehca_eq neq;
 	struct ehca_mr *maxmr;
 	struct ehca_pd *pd;
-	struct ehca_pfshca pf;
 	struct h_galpas galpas;
 };
 
 struct ehca_pd {
 	struct ib_pd ib_pd;
 	struct ipz_pd fw_pd;
-	struct ehca_pfpd pf;
 	u32 ownpid;
 };
 
@@ -195,8 +180,6 @@ struct ehca_mr {
 	/* data for userspace bridge */
 	u32 nr_of_pages;
 	void *pagearray;
-
-	struct ehca_pfmr pf;	/* platform specific part of MR */
 };
 
 struct ehca_mw {
@@ -206,8 +189,6 @@ struct ehca_mw {
 	u8 never_bound;		/* indication MW was never bound */
 	struct ipz_mrmw_handle ipz_mw_handle;	/* MW handle for h-calls */
 	struct h_galpas galpas;
-
-	struct ehca_pfmw pf;	/* platform specific part of MW */
 };
 
 enum ehca_mr_pgi_type {
@@ -281,11 +262,26 @@ int ehca_shca_delete(struct ehca_shca *m
 
 struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
 
+int ehca_init_pd_cache(void);
+void ehca_cleanup_pd_cache(void);
+int ehca_init_cq_cache(void);
+void ehca_cleanup_cq_cache(void);
+int ehca_init_qp_cache(void);
+void ehca_cleanup_qp_cache(void);
+int ehca_init_av_cache(void);
+void ehca_cleanup_av_cache(void);
+int ehca_init_mrmw_cache(void);
+void ehca_cleanup_mrmw_cache(void);
+
 extern spinlock_t ehca_qp_idr_lock;
 extern spinlock_t ehca_cq_idr_lock;
 extern struct idr ehca_qp_idr;
 extern struct idr ehca_cq_idr;
 
+extern int ehca_static_rate;
+extern int ehca_port_act_time;
+extern int ehca_use_hp_mr;
+
 struct ipzu_queue_resp {
 	u64 queue;        /* points to first queue entry */
 	u32 qe_size;      /* queue entry size */
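
The three new externs are tunables defined in ehca_main.c (see its hunk
below).  For reference, the usual module-parameter idiom that backs such
an extern; the description text here is illustrative:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    int ehca_use_hp_mr = 0;     /* matches the extern declared above */
    module_param_named(use_hp_mr, ehca_use_hp_mr, int, 0);
    MODULE_PARM_DESC(use_hp_mr,
                     "high performance MRs (0: no (default), 1: yes)");
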
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h	2006-10-17 10:15:06.000000000 -0400
@@ -46,12 +46,6 @@
 #include "ipz_pt_fn.h"
 
 
-struct ehca_pfmodule {
-};
-
-struct ehca_pfshca {
-};
-
 struct ehca_pfqp {
 	struct ipz_qpt sqpt;
 	struct ipz_qpt rqpt;
@@ -68,15 +62,6 @@ struct ehca_pfeq {
 	u32 eqnr;
 };
 
-struct ehca_pfpd {
-};
-
-struct ehca_pfmr {
-};
-
-struct ehca_pfmw {
-};
-
 struct ipz_adapter_handle {
 	u64 handle;
 };
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_cq.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_cq.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_cq.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_cq.c	2006-10-17 10:15:06.000000000 -0400
@@ -43,8 +43,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define DEB_PREFIX "e_cq"
-
 #include <asm/current.h>
 
 #include "ehca_iverbs.h"
@@ -52,17 +50,20 @@
 #include "ehca_irq.h"
 #include "hcp_if.h"
 
+static struct kmem_cache *cq_cache;
+
 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
 {
 	unsigned int qp_num = qp->real_qp_num;
 	unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
-	unsigned long spl_flags = 0;
+	unsigned long spl_flags;
 
 	spin_lock_irqsave(&cq->spinlock, spl_flags);
 	hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
 	spin_unlock_irqrestore(&cq->spinlock, spl_flags);
 
-	EDEB(7, "cq_num=%x real_qp_num=%x", cq->cq_number, qp_num);
+	ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
+		 cq->cq_number, qp_num);
 
 	return 0;
 }
@@ -71,26 +72,27 @@ int ehca_cq_unassign_qp(struct ehca_cq *
 {
 	int ret = -EINVAL;
 	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
-	struct hlist_node *iter = NULL;
-	struct ehca_qp *qp = NULL;
-	unsigned long spl_flags = 0;
+	struct hlist_node *iter;
+	struct ehca_qp *qp;
+	unsigned long spl_flags;
 
 	spin_lock_irqsave(&cq->spinlock, spl_flags);
 	hlist_for_each(iter, &cq->qp_hashtab[key]) {
 		qp = hlist_entry(iter, struct ehca_qp, list_entries);
 		if (qp->real_qp_num == real_qp_num) {
 			hlist_del(iter);
-			EDEB(7, "removed qp from cq .cq_num=%x real_qp_num=%x",
-			     cq->cq_number, real_qp_num);
+			ehca_dbg(cq->ib_cq.device,
+				 "removed qp from cq .cq_num=%x real_qp_num=%x",
+				 cq->cq_number, real_qp_num);
 			ret = 0;
 			break;
 		}
 	}
 	spin_unlock_irqrestore(&cq->spinlock, spl_flags);
-	if (ret) {
-		EDEB_ERR(4, "qp not found cq_num=%x real_qp_num=%x",
+	if (ret)
+		ehca_err(cq->ib_cq.device,
+			 "qp not found cq_num=%x real_qp_num=%x",
 			 cq->cq_number, real_qp_num);
-	}
 
 	return ret;
 }
@@ -99,8 +101,8 @@ struct ehca_qp* ehca_cq_get_qp(struct eh
 {
 	struct ehca_qp *ret = NULL;
 	unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
-	struct hlist_node *iter = NULL;
-	struct ehca_qp *qp = NULL;
+	struct hlist_node *iter;
+	struct ehca_qp *qp;
 	hlist_for_each(iter, &cq->qp_hashtab[key]) {
 		qp = hlist_entry(iter, struct ehca_qp, list_entries);
 		if (qp->real_qp_num == real_qp_num) {
@@ -115,37 +117,28 @@ struct ib_cq *ehca_create_cq(struct ib_d
 			     struct ib_ucontext *context,
 			     struct ib_udata *udata)
 {
-	extern struct ehca_module ehca_module;
-	struct ib_cq *cq = NULL;
-	struct ehca_cq *my_cq = NULL;
-	struct ehca_shca *shca = NULL;
+	static const u32 additional_cqe = 20;
+	struct ib_cq *cq;
+	struct ehca_cq *my_cq;
+	struct ehca_shca *shca =
+		container_of(device, struct ehca_shca, ib_device);
 	struct ipz_adapter_handle adapter_handle;
-	/* h_call's out parameters */
-	struct ehca_alloc_cq_parms param;
-	u32 counter = 0;
-	void *vpage = NULL;
-	u64 rpage = 0;
+	struct ehca_alloc_cq_parms param; /* h_call's out parameters */
 	struct h_galpa gal;
-	u64 cqx_fec = 0;
-	u64 h_ret = 0;
-	int ipz_rc = 0;
-	int ret = 0;
-	const u32 additional_cqe=20;
-	int i= 0;
+	void *vpage;
+	u32 counter;
+	u64 rpage, cqx_fec, h_ret;
+	int ipz_rc, ret, i;
 	unsigned long flags;
 
-	EHCA_CHECK_DEVICE_P(device);
-	EDEB_EN(7,  "device=%p cqe=%x context=%p", device, cqe, context);
-
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
 
-	my_cq = kmem_cache_alloc(ehca_module.cache_cq, SLAB_KERNEL);
+	my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
 	if (!my_cq) {
-		cq = ERR_PTR(-ENOMEM);
-		EDEB_ERR(4, "Out of memory for ehca_cq struct device=%p",
+		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
 			 device);
-		goto create_cq_exit0;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	memset(my_cq, 0, sizeof(struct ehca_cq));
@@ -158,17 +151,14 @@ struct ib_cq *ehca_create_cq(struct ib_d
 
 	cq = &my_cq->ib_cq;
 
-	shca = container_of(device, struct ehca_shca, ib_device);
 	adapter_handle = shca->ipz_hca_handle;
 	param.eq_handle = shca->eq.ipz_eq_handle;
 
-
 	do {
 		if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
 			cq = ERR_PTR(-ENOMEM);
-			EDEB_ERR(4,
-				 "Can't reserve idr resources. "
-				 "device=%p", device);
+			ehca_err(device, "Can't reserve idr nr. device=%p",
+				 device);
 			goto create_cq_exit1;
 		}
 
@@ -180,20 +170,20 @@ struct ib_cq *ehca_create_cq(struct ib_d
 
 	if (ret) {
 		cq = ERR_PTR(-ENOMEM);
-		EDEB_ERR(4,
-			 "Can't allocate new idr entry. "
-			 "device=%p", device);
+		ehca_err(device, "Can't allocate new idr entry. device=%p",
+			 device);
 		goto create_cq_exit1;
 	}
 
-	/* CQs maximum depth is 4GB-64, but we need additional 20 as buffer
+	/*
+	 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
 	 * for receiving errors CQEs.
 	 */
 	param.nr_cqe = cqe + additional_cqe;
 	h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
 
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4,"hipz_h_alloc_resource_cq() failed "
+		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
 			 "h_ret=%lx device=%p", h_ret, device);
 		cq = ERR_PTR(ehca2ib_return_code(h_ret));
 		goto create_cq_exit2;
@@ -202,9 +192,8 @@ struct ib_cq *ehca_create_cq(struct ib_d
 	ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages,
 				EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0);
 	if (!ipz_rc) {
-		EDEB_ERR(4,
-			 "ipz_queue_ctor() failed "
-			 "ipz_rc=%x device=%p", ipz_rc, device);
+		ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x device=%p",
+			 ipz_rc, device);
 		cq = ERR_PTR(-EINVAL);
 		goto create_cq_exit3;
 	}
@@ -212,7 +201,7 @@ struct ib_cq *ehca_create_cq(struct ib_d
 	for (counter = 0; counter < param.act_pages; counter++) {
 		vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
 		if (!vpage) {
-			EDEB_ERR(4, "ipz_qpageit_get_inc() "
+			ehca_err(device, "ipz_qpageit_get_inc() "
 				 "returns NULL device=%p", device);
 			cq = ERR_PTR(-EAGAIN);
 			goto create_cq_exit4;
@@ -230,10 +219,9 @@ struct ib_cq *ehca_create_cq(struct ib_d
 						 kernel);
 
 		if (h_ret < H_SUCCESS) {
-			EDEB_ERR(4, "hipz_h_register_rpage_cq() failed "
-				 "ehca_cq=%p cq_num=%x h_ret=%lx "
-				 "counter=%i act_pages=%i",
-				 my_cq, my_cq->cq_number,
+			ehca_err(device, "hipz_h_register_rpage_cq() failed "
+				 "ehca_cq=%p cq_num=%x h_ret=%lx counter=%i "
+				 "act_pages=%i", my_cq, my_cq->cq_number,
 				 h_ret, counter, param.act_pages);
 			cq = ERR_PTR(-EINVAL);
 			goto create_cq_exit4;
@@ -242,16 +230,16 @@ struct ib_cq *ehca_create_cq(struct ib_d
 		if (counter == (param.act_pages - 1)) {
 			vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
 			if ((h_ret != H_SUCCESS) || vpage) {
-				EDEB_ERR(4, "Registration of pages not "
+				ehca_err(device, "Registration of pages not "
 					 "complete ehca_cq=%p cq_num=%x "
-					 "h_ret=%lx",
-					 my_cq, my_cq->cq_number, h_ret);
+					 "h_ret=%lx", my_cq, my_cq->cq_number,
+					 h_ret);
 				cq = ERR_PTR(-EAGAIN);
 				goto create_cq_exit4;
 			}
 		} else {
 			if (h_ret != H_PAGE_REGISTERED) {
-				EDEB_ERR(4, "Registration of page failed "
+				ehca_err(device, "Registration of page failed "
 					 "ehca_cq=%p cq_num=%x h_ret=%lx"
 					 "counter=%i act_pages=%i",
 					 my_cq, my_cq->cq_number,
@@ -266,8 +254,8 @@ struct ib_cq *ehca_create_cq(struct ib_d
 
 	gal = my_cq->galpas.kernel;
 	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
-	EDEB(8, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
-	     my_cq, my_cq->cq_number, cqx_fec);
+	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
+		 my_cq, my_cq->cq_number, cqx_fec);
 
 	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
 		param.act_nr_of_entries - additional_cqe;
@@ -279,7 +267,7 @@ struct ib_cq *ehca_create_cq(struct ib_d
 	if (context) {
 		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
 		struct ehca_create_cq_resp resp;
-		struct vm_area_struct *vma = NULL;
+		struct vm_area_struct *vma;
 		memset(&resp, 0, sizeof(resp));
 		resp.cq_number = my_cq->cq_number;
 		resp.token = my_cq->token;
@@ -288,33 +276,48 @@ struct ib_cq *ehca_create_cq(struct ib_d
 		resp.ipz_queue.queue_length = ipz_queue->queue_length;
 		resp.ipz_queue.pagesize = ipz_queue->pagesize;
 		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
-		ehca_mmap_nopage(((u64) (my_cq->token) << 32) | 0x12000000,
-				 ipz_queue->queue_length,
-				 ((void**)&resp.ipz_queue.queue),
-				 &vma);
+		ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
+				       ipz_queue->queue_length,
+				       (void**)&resp.ipz_queue.queue,
+				       &vma);
+		if (ret) {
+			ehca_err(device, "Could not mmap queue pages");
+			cq = ERR_PTR(ret);
+			goto create_cq_exit4;
+		}
 		my_cq->uspace_queue = resp.ipz_queue.queue;
 		resp.galpas = my_cq->galpas;
-		ehca_mmap_register(my_cq->galpas.user.fw_handle,
-				   ((void**)&resp.galpas.kernel.fw_handle),
-				   &vma);
+		ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
+					 (void**)&resp.galpas.kernel.fw_handle,
+					 &vma);
+		if (ret) {
+			ehca_err(device, "Could not mmap fw_handle");
+			cq = ERR_PTR(ret);
+			goto create_cq_exit5;
+		}
 		my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-			EDEB_ERR(4,  "Copy to udata failed.");
-			goto create_cq_exit4;
+			ehca_err(device, "Copy to udata failed.");
+			goto create_cq_exit6;
 		}
 	}
 
-	EDEB_EX(7,"retcode=%p ehca_cq=%p cq_num=%x cq_size=%x",
-		cq, my_cq, my_cq->cq_number, param.act_nr_of_entries);
 	return cq;
 
+create_cq_exit6:
+	ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
+
+create_cq_exit5:
+	ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
+
 create_cq_exit4:
 	ipz_queue_dtor(&my_cq->ipz_queue);
 
 create_cq_exit3:
 	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
-	EDEB(3, "hipz_h_destroy_cq() failed ehca_cq=%p cq_num=%x h_ret=%lx",
-	     my_cq, my_cq->cq_number, h_ret);
+	if (h_ret != H_SUCCESS)
+		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
+			 "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
 
 create_cq_exit2:
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
@@ -322,36 +325,24 @@ create_cq_exit2:
 	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
 create_cq_exit1:
-	kmem_cache_free(ehca_module.cache_cq, my_cq);
+	kmem_cache_free(cq_cache, my_cq);
 
-create_cq_exit0:
-	EDEB_EX(7,  "An error has occured retcode=%p ", cq);
 	return cq;
 }
 
 int ehca_destroy_cq(struct ib_cq *cq)
 {
-	extern struct ehca_module ehca_module;
-	u64 h_ret = 0;
-	int ret = 0;
-	struct ehca_cq *my_cq = NULL;
-	int cq_num = 0;
-	struct ib_device *device = NULL;
-	struct ehca_shca *shca = NULL;
-	struct ipz_adapter_handle adapter_handle;
+	u64 h_ret;
+	int ret;
+	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+	int cq_num = my_cq->cq_number;
+	struct ib_device *device = cq->device;
+	struct ehca_shca *shca = container_of(device, struct ehca_shca,
+					      ib_device);
+	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
 	u32 cur_pid = current->tgid;
 	unsigned long flags;
 
-	EHCA_CHECK_CQ(cq);
-	my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	cq_num = my_cq->cq_number;
-	device = cq->device;
-	EHCA_CHECK_DEVICE(device);
-	shca = container_of(device, struct ehca_shca, ib_device);
-	adapter_handle = shca->ipz_hca_handle;
-	EDEB_EN(7, "ehca_cq=%p cq_num=%x",
-		my_cq, my_cq->cq_number);
-
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 	while (my_cq->nr_callbacks)
 		yield();
@@ -360,7 +351,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
 	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_cq->ownpid);
 		return -EINVAL;
 	}
@@ -368,64 +359,69 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	/* un-mmap if vma alloc */
 	if (my_cq->uspace_queue ) {
 		ret = ehca_munmap(my_cq->uspace_queue,
-				      my_cq->ipz_queue.queue_length);
-		ret = ehca_munmap(my_cq->uspace_fwh, 4096);
+				  my_cq->ipz_queue.queue_length);
+		if (ret)
+			ehca_err(device, "Could not munmap queue ehca_cq=%p "
+				 "cq_num=%x", my_cq, cq_num);
+		ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
+		if (ret)
+			ehca_err(device, "Could not munmap fwh ehca_cq=%p "
+				 "cq_num=%x", my_cq, cq_num);
 	}
 
 	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
 	if (h_ret == H_R_STATE) {
 		/* cq in err: read err data and destroy it forcibly */
-		EDEB(4, "ehca_cq=%p cq_num=%x ressource=%lx in err state. "
-		     "Try to delete it forcibly.",
-		     my_cq, my_cq->cq_number, my_cq->ipz_cq_handle.handle);
+		ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err "
+			 "state. Try to delete it forcibly.",
+			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
 		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
 		h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
 		if (h_ret == H_SUCCESS)
-			EDEB(4, "ehca_cq=%p cq_num=%x deleted successfully.",
-			     my_cq, my_cq->cq_number);
+			ehca_dbg(device, "cq_num=%x deleted successfully.",
+				 cq_num);
 	}
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4,"hipz_h_destroy_cq() failed "
-			 "h_ret=%lx ehca_cq=%p cq_num=%x",
-			 h_ret, my_cq, my_cq->cq_number);
-		ret = ehca2ib_return_code(h_ret);
-		goto destroy_cq_exit0;
+		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lx "
+			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
+		return ehca2ib_return_code(h_ret);
 	}
 	ipz_queue_dtor(&my_cq->ipz_queue);
-	kmem_cache_free(ehca_module.cache_cq, my_cq);
+	kmem_cache_free(cq_cache, my_cq);
 
-destroy_cq_exit0:
-	EDEB_EX(7, "ehca_cq=%p cq_num=%x ret=%x ",
-		my_cq, cq_num, ret);
-	return ret;
+	return 0;
 }
 
 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 {
-	int ret = 0;
-	struct ehca_cq *my_cq = NULL;
+	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
 	u32 cur_pid = current->tgid;
 
-	if (unlikely(!cq)) {
-		EDEB_ERR(4, "cq is NULL");
-		return -EFAULT;
-	}
-
-	my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	EDEB_EN(7, "ehca_cq=%p cq_num=%x",
-		my_cq, my_cq->cq_number);
-
 	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_cq->ownpid);
 		return -EINVAL;
 	}
 
 	/* TODO: proper resize needs to be done */
-	ret = -EFAULT;
-	EDEB_ERR(4, "not implemented yet");
+	ehca_err(cq->device, "not implemented yet");
 
-	EDEB_EX(7, "ehca_cq=%p cq_num=%x",
-		my_cq, my_cq->cq_number);
-	return ret;
+	return -EFAULT;
+}
+
+int ehca_init_cq_cache(void)
+{
+	cq_cache = kmem_cache_create("ehca_cache_cq",
+				     sizeof(struct ehca_cq), 0,
+				     SLAB_HWCACHE_ALIGN,
+				     NULL, NULL);
+	if (!cq_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void ehca_cleanup_cq_cache(void)
+{
+	if (cq_cache)
+		kmem_cache_destroy(cq_cache);
 }
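
ehca_create_cq() above uses the standard kernel goto-unwind idiom: the
labels create_cq_exit6..1 release resources in reverse acquisition order,
so a failure at any step frees exactly what was set up before it.  A
condensed sketch of the shape; alloc_a/acquire_b/acquire_c and friends
are placeholders, not the driver's real calls:

    #include <linux/err.h>

    struct thing { int dummy; };
    /* placeholder stubs standing in for the real setup/teardown calls */
    static struct thing *alloc_a(void);
    static int acquire_b(struct thing *t);
    static int acquire_c(struct thing *t);
    static void release_b(struct thing *t);
    static void free_a(struct thing *t);

    static struct thing *create_thing(void)
    {
            struct thing *t;
            int ret;

            t = alloc_a();                  /* step 1 */
            if (!t)
                    return ERR_PTR(-ENOMEM);

            ret = acquire_b(t);             /* step 2 */
            if (ret)
                    goto exit1;

            ret = acquire_c(t);             /* step 3 */
            if (ret)
                    goto exit2;

            return t;

    exit2:
            release_b(t);                   /* undo step 2 */
    exit1:
            free_a(t);                      /* undo step 1 */
            return ERR_PTR(ret);
    }
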
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_eq.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_eq.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_eq.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_eq.c	2006-10-17 10:15:06.000000000 -0400
@@ -43,8 +43,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define DEB_PREFIX "e_eq"
-
 #include "ehca_classes.h"
 #include "ehca_irq.h"
 #include "ehca_iverbs.h"
@@ -56,24 +54,21 @@ int ehca_create_eq(struct ehca_shca *shc
 		   struct ehca_eq *eq,
 		   const enum ehca_eq_type type, const u32 length)
 {
-	u64 ret = H_SUCCESS;
-	u32 nr_pages = 0;
+	u64 ret;
+	u32 nr_pages;
 	u32 i;
-	void *vpage = NULL;
-
-	EDEB_EN(7, "shca=%p eq=%p length=%x", shca, eq, length);
-	EHCA_CHECK_ADR(shca);
-	EHCA_CHECK_ADR(eq);
+	void *vpage;
+	struct ib_device *ib_dev = &shca->ib_device;
 
 	spin_lock_init(&eq->spinlock);
 	eq->is_initialized = 0;
 
 	if (type != EHCA_EQ && type != EHCA_NEQ) {
-		EDEB_ERR(4, "Invalid EQ type %x. eq=%p", type, eq);
+		ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
 		return -EINVAL;
 	}
-	if (length == 0) {
-		EDEB_ERR(4, "EQ length must not be zero. eq=%p", eq);
+	if (!length) {
+		ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
 		return -EINVAL;
 	}
 
@@ -86,14 +81,14 @@ int ehca_create_eq(struct ehca_shca *shc
 				       &nr_pages, &eq->ist);
 
 	if (ret != H_SUCCESS) {
-		EDEB_ERR(4, "Can't allocate EQ / NEQ. eq=%p", eq);
+		ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
 		return -EINVAL;
 	}
 
 	ret = ipz_queue_ctor(&eq->ipz_queue, nr_pages,
 			     EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0);
 	if (!ret) {
-		EDEB_ERR(4, "Can't allocate EQ pages. eq=%p", eq);
+		ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
 		goto create_eq_exit1;
 	}
 
@@ -130,7 +125,7 @@ int ehca_create_eq(struct ehca_shca *shc
 					  SA_INTERRUPT, "ehca_eq",
 					  (void *)shca);
 		if (ret < 0)
-			EDEB_ERR(4, "Can't map interrupt handler.");
+			ehca_err(ib_dev, "Can't map interrupt handler.");
 
 		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
 	} else if (type == EHCA_NEQ) {
@@ -138,15 +133,13 @@ int ehca_create_eq(struct ehca_shca *shc
 					  SA_INTERRUPT, "ehca_neq",
 					  (void *)shca);
 		if (ret < 0)
-			EDEB_ERR(4, "Can't map interrupt handler.");
+			ehca_err(ib_dev, "Can't map interrupt handler.");
 
 		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
 	}
 
 	eq->is_initialized = 1;
 
-	EDEB_EX(7, "ret=%lx", ret);
-
 	return 0;
 
 create_eq_exit2:
@@ -155,53 +148,25 @@ create_eq_exit2:
 create_eq_exit1:
 	hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
 
-	EDEB_EX(7, "ret=%lx", ret);
-
 	return -EINVAL;
 }
 
 void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
 {
-	unsigned long flags = 0;
-	void *eqe = NULL;
-
-	EDEB_EN(7, "shca=%p  eq=%p", shca, eq);
-	EHCA_CHECK_ADR_P(shca);
-	EHCA_CHECK_EQ_P(eq);
+	unsigned long flags;
+	void *eqe;
 
 	spin_lock_irqsave(&eq->spinlock, flags);
 	eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
 	spin_unlock_irqrestore(&eq->spinlock, flags);
 
-	EDEB_EX(7, "eq=%p eqe=%p", eq, eqe);
-
 	return eqe;
 }
 
-void ehca_poll_eqs(unsigned long data)
-{
-	struct ehca_shca *shca;
-	struct ehca_module *module = (struct ehca_module*)data;
-
-	spin_lock(&module->shca_lock);
-	list_for_each_entry(shca, &module->shca_list, shca_list) {
-		if (shca->eq.is_initialized)
-			ehca_tasklet_eq((unsigned long)(void*)shca);
-	}
-	mod_timer(&module->timer, jiffies + HZ);
-	spin_unlock(&module->shca_lock);
-
-	return;
-}
-
 int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
 {
-	unsigned long flags = 0;
-	u64 h_ret = H_SUCCESS;
-
-	EDEB_EN(7, "shca=%p  eq=%p", shca, eq);
-	EHCA_CHECK_ADR(shca);
-	EHCA_CHECK_EQ(eq);
+	unsigned long flags;
+	u64 h_ret;
 
 	spin_lock_irqsave(&eq->spinlock, flags);
 	ibmebus_free_irq(NULL, eq->ist, (void *)shca);
@@ -211,12 +176,10 @@ int ehca_destroy_eq(struct ehca_shca *sh
 	spin_unlock_irqrestore(&eq->spinlock, flags);
 
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "Can't free EQ resources.");
+		ehca_err(&shca->ib_device, "Can't free EQ resources.");
 		return -EINVAL;
 	}
 	ipz_queue_dtor(&eq->ipz_queue);
 
-	EDEB_EX(7, "h_ret=%lx", h_ret);
-
-	return h_ret;
+	return 0;
 }
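
The interrupt path keeps the usual top-half/bottom-half split: the hard
IRQ handler only schedules a tasklet, and the tasklet drains the event
queue under the EQ spinlock.  A generic sketch with illustrative names,
using the 2.6.18-era handler signature:

    #include <linux/interrupt.h>

    static void my_eq_tasklet(unsigned long data)
    {
            /* bottom half: poll the event queue, dispatch callbacks */
    }
    static DECLARE_TASKLET(my_tasklet, my_eq_tasklet, 0);

    static irqreturn_t my_irq_handler(int irq, void *dev_id,
                                      struct pt_regs *regs)
    {
            /* top half: defer all real work out of hard-irq context */
            tasklet_hi_schedule(&my_tasklet);
            return IRQ_HANDLED;
    }
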
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_hca.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_hca.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_hca.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_hca.c	2006-10-17 10:15:06.000000000 -0400
@@ -39,36 +39,29 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#undef DEB_PREFIX
-#define DEB_PREFIX "shca"
-
 #include "ehca_tools.h"
-
 #include "hcp_if.h"
 
 int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 {
 	int ret = 0;
-	struct ehca_shca *shca;
+	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+					      ib_device);
 	struct hipz_query_hca *rblock;
 
-	EDEB_EN(7, "");
-
-	memset(props, 0, sizeof(struct ib_device_attr));
-	shca = container_of(ibdev, struct ehca_shca, ib_device);
-
 	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (!rblock) {
-		EDEB_ERR(4, "Can't allocate rblock memory.");
-		ret = -ENOMEM;
-		goto query_device0;
+		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+		return -ENOMEM;
 	}
 
 	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
-		EDEB_ERR(4, "Can't query device properties");
+		ehca_err(&shca->ib_device, "Can't query device properties");
 		ret = -EINVAL;
 		goto query_device1;
 	}
+
+	memset(props, 0, sizeof(struct ib_device_attr));
 	props->fw_ver          = rblock->hw_ver;
 	props->max_mr_size     = rblock->max_mr_size;
 	props->vendor_id       = rblock->vendor_id >> 8;
@@ -105,9 +98,6 @@ int ehca_query_device(struct ib_device *
 query_device1:
 	kfree(rblock);
 
-query_device0:
-	EDEB_EX(7, "ret=%x", ret);
-
 	return ret;
 }
 
@@ -115,27 +105,23 @@ int ehca_query_port(struct ib_device *ib
 		    u8 port, struct ib_port_attr *props)
 {
 	int ret = 0;
-	struct ehca_shca *shca;
+	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+					      ib_device);
 	struct hipz_query_port *rblock;
 
-	EDEB_EN(7, "port=%x", port);
-
-	memset(props, 0, sizeof(struct ib_port_attr));
-	shca = container_of(ibdev, struct ehca_shca, ib_device);
-
 	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (!rblock) {
-		EDEB_ERR(4, "Can't allocate rblock memory.");
-		ret = -ENOMEM;
-		goto query_port0;
+		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+		return -ENOMEM;
 	}
 
 	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
-		EDEB_ERR(4, "Can't query port properties");
+		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_port1;
 	}
 
+	memset(props, 0, sizeof(struct ib_port_attr));
 	props->state = rblock->state;
 
 	switch (rblock->max_mtu) {
@@ -155,7 +141,9 @@ int ehca_query_port(struct ib_device *ib
 		props->active_mtu = props->max_mtu = IB_MTU_4096;
 		break;
 	default:
-		EDEB_ERR(4, "Unknown MTU size: %x.", rblock->max_mtu);
+		ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
+			 rblock->max_mtu);
+		break;
 	}
 
 	props->gid_tbl_len     = rblock->gid_tbl_len;
@@ -176,37 +164,28 @@ int ehca_query_port(struct ib_device *ib
 query_port1:
 	kfree(rblock);
 
-query_port0:
-	EDEB_EX(7, "ret=%x", ret);
-
 	return ret;
 }
 
 int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 {
 	int ret = 0;
-	struct ehca_shca *shca;
+	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
 	struct hipz_query_port *rblock;
 
-	EDEB_EN(7, "port=%x index=%x", port, index);
-
 	if (index > 16) {
-		EDEB_ERR(4, "Invalid index: %x.", index);
-		ret = -EINVAL;
-		goto query_pkey0;
+		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
+		return -EINVAL;
 	}
 
-	shca = container_of(ibdev, struct ehca_shca, ib_device);
-
 	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (!rblock) {
-		EDEB_ERR(4,  "Can't allocate rblock memory.");
-		ret = -ENOMEM;
-		goto query_pkey0;
+		ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
+		return -ENOMEM;
 	}
 
 	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
-		EDEB_ERR(4, "Can't query port properties");
+		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_pkey1;
 	}
@@ -216,9 +195,6 @@ int ehca_query_pkey(struct ib_device *ib
 query_pkey1:
 	kfree(rblock);
 
-query_pkey0:
-	EDEB_EX(7, "ret=%x", ret);
-
 	return ret;
 }
 
@@ -226,28 +202,23 @@ int ehca_query_gid(struct ib_device *ibd
 		   int index, union ib_gid *gid)
 {
 	int ret = 0;
-	struct ehca_shca *shca;
+	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+					      ib_device);
 	struct hipz_query_port *rblock;
 
-	EDEB_EN(7, "port=%x index=%x", port, index);
-
 	if (index > 255) {
-		EDEB_ERR(4, "Invalid index: %x.", index);
-		ret = -EINVAL;
-		goto query_gid0;
+		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
+		return -EINVAL;
 	}
 
-	shca = container_of(ibdev, struct ehca_shca, ib_device);
-
 	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (!rblock) {
-		EDEB_ERR(4, "Can't allocate rblock memory.");
-		ret = -ENOMEM;
-		goto query_gid0;
+		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+		return -ENOMEM;
 	}
 
 	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
-		EDEB_ERR(4, "Can't query port properties");
+		ehca_err(&shca->ib_device, "Can't query port properties");
 		ret = -EINVAL;
 		goto query_gid1;
 	}
@@ -258,11 +229,6 @@ int ehca_query_gid(struct ib_device *ibd
 query_gid1:
 	kfree(rblock);
 
-query_gid0:
-	EDEB_EX(7, "ret=%x GID=%lx%lx", ret,
-		*(u64 *) & gid->raw[0],
-		*(u64 *) & gid->raw[8]);
-
 	return ret;
 }
 
@@ -270,13 +236,6 @@ int ehca_modify_port(struct ib_device *i
 		     u8 port, int port_modify_mask,
 		     struct ib_port_modify *props)
 {
-	int ret = 0;
-
-	EDEB_EN(7, "port=%x", port);
-
-	/* Not implemented yet. */
-
-	EDEB_EX(7, "ret=%x", ret);
-
-	return ret;
+	/* Not implemented yet */
+	return -EFAULT;
 }
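
All the query helpers above now share one shape: allocate an
H_CB_ALIGNMENT-sized response block, issue the h-call, copy the fields
out, free the block.  Error paths either return early (nothing to unwind
yet) or jump to a single label that frees the block.  A condensed sketch;
ehca_query_xxx and hipz_h_query_xxx stand in for the real functions:

    static int ehca_query_xxx(struct ehca_shca *shca)
    {
            struct hipz_query_port *rblock;
            int ret = 0;

            rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
            if (!rblock)
                    return -ENOMEM;         /* early return, nothing held */

            if (hipz_h_query_xxx(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
                    ret = -EINVAL;
                    goto out;               /* one label frees the block */
            }

            /* ... copy fields out of rblock ... */
    out:
            kfree(rblock);
            return ret;
    }
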
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_irq.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_irq.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_irq.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_irq.c	2006-10-17 10:15:06.000000000 -0400
@@ -39,8 +39,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define DEB_PREFIX "eirq"
-
 #include "ehca_classes.h"
 #include "ehca_irq.h"
 #include "ehca_iverbs.h"
@@ -64,10 +62,17 @@
 #define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52,63)
 #define ERROR_DATA_TYPE        EHCA_BMASK_IBM(0,7)
 
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+
+static void queue_comp_task(struct ehca_cq *__cq);
+
+static struct ehca_comp_pool* pool;
+static struct notifier_block comp_pool_callback_nb;
+
+#endif
+
 static inline void comp_event_callback(struct ehca_cq *cq)
 {
-	EDEB_EN(7, "cq=%p", cq);
-
 	if (!cq->ib_cq.comp_handler)
 		return;
 
@@ -75,8 +80,6 @@ static inline void comp_event_callback(s
 	cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
 	spin_unlock(&cq->cb_lock);
 
-	EDEB_EX(7, "cq=%p", cq);
-
 	return;
 }
 
@@ -86,9 +89,6 @@ static void print_error_data(struct ehca
 	u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
 	u64 resource = rblock[1];
 
-	EDEB_EN(7, "shca=%p data=%p rblock=%p length=%x",
-		shca, data, rblock, length);
-
 	switch (type) {
 	case 0x1: /* Queue Pair */
 	{
@@ -98,7 +98,8 @@ static void print_error_data(struct ehca
 		if (rblock[6] == 0)
 			return;
 
-		EDEB_ERR(4, "QP 0x%x (resource=%lx) has errors.",
+		ehca_err(&shca->ib_device,
+			 "QP 0x%x (resource=%lx) has errors.",
 			 qp->ib_qp.qp_num, resource);
 		break;
 	}
@@ -106,25 +107,25 @@ static void print_error_data(struct ehca
 	{
 		struct ehca_cq *cq = (struct ehca_cq*)data;
 
-		EDEB_ERR(4, "CQ 0x%x (resource=%lx) has errors.",
+		ehca_err(&shca->ib_device,
+			 "CQ 0x%x (resource=%lx) has errors.",
 			 cq->cq_number, resource);
 		break;
 	}
 	default:
-		EDEB_ERR(4, "Unknown errror type: %lx on %s.",
+		ehca_err(&shca->ib_device,
+			 "Unknown errror type: %lx on %s.",
 			 type, shca->ib_device.name);
 		break;
 	}
 
-	EDEB_ERR(4, "Error data is available: %lx.", resource);
-	EDEB_ERR(4, "EHCA ----- error data begin "
+	ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
+	ehca_err(&shca->ib_device, "EHCA ----- error data begin "
 		 "---------------------------------------------------");
-	EDEB_DMP(4, rblock, length, "resource=%lx", resource);
-	EDEB_ERR(4, "EHCA ----- error data end "
+	ehca_dmp(rblock, length, "resource=%lx", resource);
+	ehca_err(&shca->ib_device, "EHCA ----- error data end "
 		 "----------------------------------------------------");
 
-	EDEB_EX(7, "");
-
 	return;
 }
 
@@ -132,15 +133,13 @@ int ehca_error_data(struct ehca_shca *sh
 		    u64 resource)
 {
 
-	unsigned long ret = 0;
+	unsigned long ret;
 	u64 *rblock;
 	unsigned long block_count;
 
-	EDEB_EN(7, "shca=%p data=%p resource=%lx", shca, data, resource);
-
 	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (!rblock) {
-		EDEB_ERR(4, "Cannot allocate rblock memory.");
+		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
 		ret = -ENOMEM;
 		goto error_data1;
 	}
@@ -151,7 +150,8 @@ int ehca_error_data(struct ehca_shca *sh
 				&block_count);
 
 	if (ret == H_R_STATE) {
-		EDEB_ERR(4, "No error data is available: %lx.", resource);
+		ehca_err(&shca->ib_device,
+			 "No error data is available: %lx.", resource);
 	}
 	else if (ret == H_SUCCESS) {
 		int length;
@@ -164,7 +164,8 @@ int ehca_error_data(struct ehca_shca *sh
 		print_error_data(shca, data, rblock, length);
 	}
 	else {
-		EDEB_ERR(4, "Error data could not be fetched: %lx", resource);
+		ehca_err(&shca->ib_device,
+			 "Error data could not be fetched: %lx", resource);
 	}
 
 	kfree(rblock);
@@ -183,8 +184,6 @@ static void qp_event_callback(struct ehc
 	unsigned long flags;
 	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
 
-	EDEB_EN(7, "eqe=%lx", eqe);
-
 	spin_lock_irqsave(&ehca_qp_idr_lock, flags);
 	qp = idr_find(&ehca_qp_idr, token);
 	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
@@ -204,8 +203,6 @@ static void qp_event_callback(struct ehc
 
 	qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
 
-	EDEB_EX(7, "qp=%p", qp);
-
 	return;
 }
 
@@ -216,8 +213,6 @@ static void cq_event_callback(struct ehc
 	unsigned long flags;
 	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
 
-	EDEB_EN(7, "eqe=%lx", eqe);
-
 	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 	cq = idr_find(&ehca_cq_idr, token);
 	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
@@ -227,8 +222,6 @@ static void cq_event_callback(struct ehc
 
 	ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
 
-	EDEB_EX(7, "cq=%p", cq);
-
 	return;
 }
 
@@ -236,8 +229,6 @@ static void parse_identifier(struct ehca
 {
 	u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
 
-	EDEB_EN(7, "shca=%p eqe=%lx", shca, eqe);
-
 	switch (identifier) {
 	case 0x02: /* path migrated */
 		qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG);
@@ -257,41 +248,39 @@ static void parse_identifier(struct ehca
 		cq_event_callback(shca, eqe);
 		break;
 	case 0x09: /* MRMWPTE error */
-		EDEB_ERR(4, "MRMWPTE error.");
+		ehca_err(&shca->ib_device, "MRMWPTE error.");
 		break;
 	case 0x0A: /* port event */
-		EDEB_ERR(4, "Port event.");
+		ehca_err(&shca->ib_device, "Port event.");
 		break;
 	case 0x0B: /* MR access error */
-		EDEB_ERR(4, "MR access error.");
+		ehca_err(&shca->ib_device, "MR access error.");
 		break;
 	case 0x0C: /* EQ error */
-		EDEB_ERR(4, "EQ error.");
+		ehca_err(&shca->ib_device, "EQ error.");
 		break;
 	case 0x0D: /* P/Q_Key mismatch */
-		EDEB_ERR(4, "P/Q_Key mismatch.");
+		ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
 		break;
 	case 0x10: /* sampling complete */
-		EDEB_ERR(4, "Sampling complete.");
+		ehca_err(&shca->ib_device, "Sampling complete.");
 		break;
 	case 0x11: /* unaffiliated access error */
-		EDEB_ERR(4, "Unaffiliated access error.");
+		ehca_err(&shca->ib_device, "Unaffiliated access error.");
 		break;
 	case 0x12: /* path migrating error */
-		EDEB_ERR(4, "Path migration error.");
+		ehca_err(&shca->ib_device, "Path migration error.");
 		break;
 	case 0x13: /* interface trace stopped */
-		EDEB_ERR(4, "Interface trace stopped.");
+		ehca_err(&shca->ib_device, "Interface trace stopped.");
 		break;
 	case 0x14: /* first error capture info available */
 	default:
-		EDEB_ERR(4, "Unknown identifier: %x on %s.",
+		ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
 			 identifier, shca->ib_device.name);
 		break;
 	}
 
-	EDEB_EX(7, "eqe=%lx identifier=%x", eqe, identifier);
-
 	return;
 }
 
@@ -301,21 +290,19 @@ static void parse_ec(struct ehca_shca *s
 	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
 	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
 
-	EDEB_EN(7, "shca=%p eqe=%lx", shca, eqe);
-
 	switch (ec) {
-	case 0x30:		/* port availability change */
+	case 0x30: /* port availability change */
 		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-			EDEB(4, "%s: port %x is active.",
-			     shca->ib_device.name, port);
+			ehca_info(&shca->ib_device,
+				  "port %x is active.", port);
 			event.device = &shca->ib_device;
 			event.event = IB_EVENT_PORT_ACTIVE;
 			event.element.port_num = port;
 			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
 			ib_dispatch_event(&event);
 		} else {
-			EDEB(4, "%s: port %x is inactive.",
-			     shca->ib_device.name, port);
+			ehca_info(&shca->ib_device,
+				  "port %x is inactive.", port);
 			event.device = &shca->ib_device;
 			event.event = IB_EVENT_PORT_ERR;
 			event.element.port_num = port;
@@ -324,22 +311,23 @@ static void parse_ec(struct ehca_shca *s
 		}
 		break;
 	case 0x31:
-		/* port configuration change      */
-		/* disruptive change is caused by */
-		/* LID, PKEY or SM change         */
-		EDEB(4, "EHCA disruptive port %x "
-		     "configuration change.", port);
+		/* port configuration change
+		 * disruptive change is caused by
+		 * LID, PKEY or SM change
+		 */
+		ehca_warn(&shca->ib_device,
+			  "disruptive port %x configuration change", port);
 
-		EDEB(4, "%s: port %x is inactive.",
-		     shca->ib_device.name, port);
+		ehca_info(&shca->ib_device,
+			 "port %x is inactive.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ERR;
 		event.element.port_num = port;
 		shca->sport[port - 1].port_state = IB_PORT_DOWN;
 		ib_dispatch_event(&event);
 
-		EDEB(4, "%s: port %x is active.",
-			     shca->ib_device.name, port);
+		ehca_info(&shca->ib_device,
+			 "port %x is active.", port);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ACTIVE;
 		event.element.port_num = port;
@@ -347,34 +335,27 @@ static void parse_ec(struct ehca_shca *s
 		ib_dispatch_event(&event);
 		break;
 	case 0x32: /* adapter malfunction */
-		EDEB_ERR(4, "Adapter malfunction.");
+		ehca_err(&shca->ib_device, "Adapter malfunction.");
 		break;
 	case 0x33:  /* trace stopped */
-		EDEB_ERR(4, "Traced stopped.");
+		ehca_err(&shca->ib_device, "Traced stopped.");
 		break;
 	default:
-		EDEB_ERR(4, "Unknown event code: %x on %s.",
+		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
 			 ec, shca->ib_device.name);
 		break;
 	}
 
-	EDEB_EN(7, "eqe=%lx ec=%x", eqe, ec);
-
 	return;
 }
 
 static inline void reset_eq_pending(struct ehca_cq *cq)
 {
-	u64 CQx_EP = 0;
+	u64 CQx_EP;
 	struct h_galpa gal = cq->galpas.kernel;
 
-	EDEB_EN(7, "cq=%p", cq);
-
 	hipz_galpa_store_cq(gal, cqx_ep, 0x0);
 	CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
-	EDEB(7, "CQx_EP=%lx", CQx_EP);
-
-	EDEB_EX(7, "cq=%p", cq);
 
 	return;
 }
@@ -383,12 +364,8 @@ irqreturn_t ehca_interrupt_neq(int irq, 
 {
 	struct ehca_shca *shca = (struct ehca_shca*)dev_id;
 
-	EDEB_EN(7, "dev_id=%p", dev_id);
-
 	tasklet_hi_schedule(&shca->neq.interrupt_task);
 
-	EDEB_EX(7, "");
-
 	return IRQ_HANDLED;
 }
 
@@ -396,9 +373,7 @@ void ehca_tasklet_neq(unsigned long data
 {
 	struct ehca_shca *shca = (struct ehca_shca*)data;
 	struct ehca_eqe *eqe;
-	u64 ret = H_SUCCESS;
-
-	EDEB_EN(7, "shca=%p", shca);
+	u64 ret;
 
 	eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
 
@@ -413,9 +388,7 @@ void ehca_tasklet_neq(unsigned long data
 				 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
 
 	if (ret != H_SUCCESS)
-		EDEB_ERR(4, "Can't clear notification events.");
-
-	EDEB_EX(7, "shca=%p", shca);
+		ehca_err(&shca->ib_device, "Can't clear notification events.");
 
 	return;
 }
@@ -424,12 +397,8 @@ irqreturn_t ehca_interrupt_eq(int irq, v
 {
 	struct ehca_shca *shca = (struct ehca_shca*)dev_id;
 
-	EDEB_EN(7, "dev_id=%p", dev_id);
-
 	tasklet_hi_schedule(&shca->eq.interrupt_task);
 
-	EDEB_EX(7, "");
-
 	return IRQ_HANDLED;
 }
 
@@ -438,8 +407,7 @@ void ehca_tasklet_eq(unsigned long data)
 	struct ehca_shca *shca = (struct ehca_shca*)data;
 	struct ehca_eqe *eqe;
 	int int_state;
-
-	EDEB_EN(7, "shca=%p", shca);
+	int query_cnt = 0;
 
 	do {
 		eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
@@ -453,18 +421,18 @@ void ehca_tasklet_eq(unsigned long data)
 			while (eqe) {
 				u64 eqe_value = eqe->entry;
 
-				EDEB(7, "eqe_value=%lx", eqe_value);
+				ehca_dbg(&shca->ib_device,
+					 "eqe_value=%lx", eqe_value);
 
 				/* TODO: better structure */
 				if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
 						   eqe_value)) {
-					extern struct ehca_comp_pool* ehca_pool;
-					extern struct idr ehca_cq_idr;
 					unsigned long flags;
 					u32 token;
 					struct ehca_cq *cq;
 
-					EDEB(6, "... completion event");
+					ehca_dbg(&shca->ib_device,
+						 "... completion event");
 					token =
 						EHCA_BMASK_GET(EQE_CQ_TOKEN,
 							       eqe_value);
@@ -478,11 +446,18 @@ void ehca_tasklet_eq(unsigned long data)
 					}
 
 					reset_eq_pending(cq);
-					ehca_queue_comp_task(ehca_pool, cq);
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+					queue_comp_task(cq);
+					spin_unlock_irqrestore(&ehca_cq_idr_lock,
+							       flags);
+#else
 					spin_unlock_irqrestore(&ehca_cq_idr_lock,
 							       flags);
+					comp_event_callback(cq);
+#endif
 				} else {
-					EDEB(6, "... non completion event");
+					ehca_dbg(&shca->ib_device,
+						 "... non completion event");
 					parse_identifier(shca, eqe_value);
 				}
 				eqe =
@@ -490,60 +465,56 @@ void ehca_tasklet_eq(unsigned long data)
 								    &shca->eq);
 			}
 
-			if (shca->hw_level >= 2)
+			if (shca->hw_level >= 2) {
 				int_state =
 				    hipz_h_query_int_state(shca->ipz_hca_handle,
 							   shca->eq.ist);
+				query_cnt++;
+				iosync();
+				if (query_cnt >= 100) {
+					query_cnt = 0;
+					int_state = 0;
+				}
+			}
 			eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
 
 		}
 	} while (int_state != 0);
 
-	EDEB_EX(7, "shca=%p", shca);
-
 	return;
 }
 
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+
 static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
 {
 	unsigned long flags_last_cpu;
 
+	if (ehca_debug_level)
+		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+
 	spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu);
 	pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map);
-
 	if (pool->last_cpu == NR_CPUS)
-		pool->last_cpu = 0;
-
+		pool->last_cpu = first_cpu(cpu_online_map);
 	spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu);
 
 	return pool->last_cpu;
 }
 
-void ehca_queue_comp_task(struct ehca_comp_pool *pool, struct ehca_cq *__cq)
+static void __queue_comp_task(struct ehca_cq *__cq,
+			      struct ehca_cpu_comp_task *cct)
 {
-	int cpu;
-	int cpu_id;
-	struct ehca_cpu_comp_task *cct;
 	unsigned long flags_cct;
 	unsigned long flags_cq;
 
-	cpu = get_cpu();
-	cpu_id = find_next_online_cpu(pool);
-
-	EDEB_EN(7, "pool=%p cq=%p cq_nr=%x CPU=%x:%x:%x:%x",
-		pool, __cq, __cq->cq_number,
-		cpu, cpu_id, num_online_cpus(), num_possible_cpus());
-
-	BUG_ON(!cpu_online(cpu_id));
-
-	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
-
 	spin_lock_irqsave(&cct->task_lock, flags_cct);
 	spin_lock_irqsave(&__cq->task_lock, flags_cq);
 
 	if (__cq->nr_callbacks == 0) {
 		__cq->nr_callbacks++;
 		list_add_tail(&__cq->entry, &cct->cq_list);
+		cct->cq_jobs++;
 		wake_up(&cct->wait_queue);
 	}
 	else
@@ -551,23 +522,39 @@ void ehca_queue_comp_task(struct ehca_co
 
 	spin_unlock_irqrestore(&__cq->task_lock, flags_cq);
 	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+}
 
-	put_cpu();
+static void queue_comp_task(struct ehca_cq *__cq)
+{
+	int cpu;
+	int cpu_id;
+	struct ehca_cpu_comp_task *cct;
 
-	EDEB_EX(7, "cct=%p", cct);
+	cpu = get_cpu();
+	cpu_id = find_next_online_cpu(pool);
+
+	BUG_ON(!cpu_online(cpu_id));
+
+	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+
+	if (cct->cq_jobs > 0) {
+		cpu_id = find_next_online_cpu(pool);
+		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+	}
+
+	__queue_comp_task(__cq, cct);
+
+	put_cpu();
 
 	return;
 }
 
 static void run_comp_task(struct ehca_cpu_comp_task* cct)
 {
-	struct ehca_cq *cq = NULL;
+	struct ehca_cq *cq;
 	unsigned long flags_cct;
 	unsigned long flags_cq;
 
-
-	EDEB_EN(7, "cct=%p", cct);
-
 	spin_lock_irqsave(&cct->task_lock, flags_cct);
 
 	while (!list_empty(&cct->cq_list)) {
@@ -578,16 +565,16 @@ static void run_comp_task(struct ehca_cp
 
 		spin_lock_irqsave(&cq->task_lock, flags_cq);
 		cq->nr_callbacks--;
-		if (cq->nr_callbacks == 0)
+		if (cq->nr_callbacks == 0) {
 			list_del_init(cct->cq_list.next);
+			cct->cq_jobs--;
+		}
 		spin_unlock_irqrestore(&cq->task_lock, flags_cq);
 
 	}
 
 	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
 
-	EDEB_EX(7, "cct=%p cq=%p", cct, cq);
-
 	return;
 }
 
@@ -596,8 +583,6 @@ static int comp_task(void *__cct)
 	struct ehca_cpu_comp_task* cct = __cct;
 	DECLARE_WAITQUEUE(wait, current);
 
-	EDEB_EN(7, "cct=%p", cct);
-
 	set_current_state(TASK_INTERRUPTIBLE);
 	while(!kthread_should_stop()) {
 		add_wait_queue(&cct->wait_queue, &wait);
@@ -616,8 +601,6 @@ static int comp_task(void *__cct)
 	}
 	__set_current_state(TASK_RUNNING);
 
-	EDEB_EX(7, "");
-
 	return 0;
 }
 
@@ -626,16 +609,12 @@ static struct task_struct *create_comp_t
 {
 	struct ehca_cpu_comp_task *cct;
 
-	EDEB_EN(7, "cpu=%d:%d", cpu, NR_CPUS);
-
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
 	spin_lock_init(&cct->task_lock);
 	INIT_LIST_HEAD(&cct->cq_list);
 	init_waitqueue_head(&cct->wait_queue);
 	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
 
-	EDEB_EX(7, "cct/%d=%p", cpu, cct);
-
 	return cct->task;
 }
 
@@ -644,32 +623,102 @@ static void destroy_comp_task(struct ehc
 {
 	struct ehca_cpu_comp_task *cct;
 	struct task_struct *task;
-
-	EDEB_EN(7, "pool=%p cpu=%d:%d", pool, cpu, NR_CPUS);
+	unsigned long flags_cct;
 
 	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-	cct->task = NULL;
+
+	spin_lock_irqsave(&cct->task_lock, flags_cct);
+
 	task = cct->task;
+	cct->task = NULL;
+	cct->cq_jobs = 0;
+
+	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
 
 	if (task)
 		kthread_stop(task);
 
-	EDEB_EX(7, "");
-
 	return;
 }
 
-struct ehca_comp_pool *ehca_create_comp_pool(void)
+static void take_over_work(struct ehca_comp_pool *pool,
+			   int cpu)
 {
-	struct ehca_comp_pool *pool;
+	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+	LIST_HEAD(list);
+	struct ehca_cq *cq;
+	unsigned long flags_cct;
+
+	spin_lock_irqsave(&cct->task_lock, flags_cct);
+
+	list_splice_init(&cct->cq_list, &list);
+
+	while (!list_empty(&list)) {
+		cq = list_entry(list.next, struct ehca_cq, entry);
+
+		list_del(&cq->entry);
+		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
+						  smp_processor_id()));
+	}
+
+	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+
+}
+
+static int comp_pool_callback(struct notifier_block *nfb,
+			      unsigned long action,
+			      void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct ehca_cpu_comp_task *cct;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
+		if (!create_comp_task(pool, cpu)) {
+			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
+			return NOTIFY_BAD;
+		}
+		break;
+	case CPU_UP_CANCELED:
+		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
+		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+		destroy_comp_task(pool, cpu);
+		break;
+	case CPU_ONLINE:
+		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
+		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+		kthread_bind(cct->task, cpu);
+		wake_up_process(cct->task);
+		break;
+	case CPU_DOWN_PREPARE:
+		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
+		break;
+	case CPU_DOWN_FAILED:
+		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
+		break;
+	case CPU_DEAD:
+		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
+		destroy_comp_task(pool, cpu);
+		take_over_work(pool, cpu);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+#endif
+
+int ehca_create_comp_pool(void)
+{
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
 	int cpu;
 	struct task_struct *task;
 
-	EDEB_EN(7, "");
-
 	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
 	if (pool == NULL)
-		return NULL;
+		return -ENOMEM;
 
 	spin_lock_init(&pool->last_cpu_lock);
 	pool->last_cpu = any_online_cpu(cpu_online_map);
@@ -677,7 +726,7 @@ struct ehca_comp_pool *ehca_create_comp_
 	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
 	if (pool->cpu_comp_tasks == NULL) {
 		kfree(pool);
-		return NULL;
+		return -ENOMEM;
 	}
 
 	for_each_online_cpu(cpu) {
@@ -688,23 +737,26 @@ struct ehca_comp_pool *ehca_create_comp_
 		}
 	}
 
-	EDEB_EX(7, "pool=%p", pool);
+	comp_pool_callback_nb.notifier_call = comp_pool_callback;
+	comp_pool_callback_nb.priority = 0;
+	register_cpu_notifier(&comp_pool_callback_nb);
+#endif
 
-	return pool;
+	return 0;
 }
 
-void ehca_destroy_comp_pool(struct ehca_comp_pool *pool)
+void ehca_destroy_comp_pool(void)
 {
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
 	int i;
 
-	EDEB_EN(7, "pool=%p", pool);
+	unregister_cpu_notifier(&comp_pool_callback_nb);
 
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_online(i))
 			destroy_comp_task(pool, i);
 	}
-
-	EDEB_EN(7, "");
+#endif
 
 	return;
 }
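
Under CONFIG_INFINIBAND_EHCA_SCALING the pool now tracks CPU hotplug:
comp_pool_callback() creates, rebinds, or tears down the per-CPU
completion task as CPUs come and go.  For reference, the minimal
2.6.18-era notifier skeleton looks like this (names illustrative):

    #include <linux/cpu.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>

    static int my_cpu_callback(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            switch (action) {
            case CPU_UP_PREPARE:
                    /* allocate per-cpu state; NOTIFY_BAD vetoes the cpu */
                    break;
            case CPU_ONLINE:
                    /* cpu is running: bind and wake its worker */
                    break;
            case CPU_DEAD:
                    /* cpu is gone: destroy worker, migrate its work */
                    break;
            }
            printk(KERN_DEBUG "cpu %u: hotplug action %lu\n", cpu, action);
            return NOTIFY_OK;
    }

    static struct notifier_block my_cpu_nb = {
            .notifier_call = my_cpu_callback,
    };

    /* module init:  register_cpu_notifier(&my_cpu_nb);   */
    /* module exit:  unregister_cpu_notifier(&my_cpu_nb); */
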
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_irq.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_irq.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_irq.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_irq.h	2006-10-17 10:15:06.000000000 -0400
@@ -62,6 +62,7 @@ struct ehca_cpu_comp_task {
 	struct list_head cq_list;
 	struct task_struct *task;
 	spinlock_t task_lock;
+	int cq_jobs;
 };
 
 struct ehca_comp_pool {
@@ -70,8 +71,7 @@ struct ehca_comp_pool {
 	spinlock_t last_cpu_lock;
 };
 
-struct ehca_comp_pool *ehca_create_comp_pool(void);
-void ehca_destroy_comp_pool(struct ehca_comp_pool *pool);
-void ehca_queue_comp_task(struct ehca_comp_pool *pool, struct ehca_cq *__cq);
+int ehca_create_comp_pool(void);
+void ehca_destroy_comp_pool(void);
 
 #endif
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_kernel.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_kernel.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_kernel.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_kernel.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,162 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Generalized functions for code shared between kernel and userspace
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Khadija Souissi <souissik@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- *  $Id: ehca_kernel.h,v 1.13 2006/04/03 06:40:54 schickhj Exp $
- */
-
-#ifndef _EHCA_KERNEL_H_
-#define _EHCA_KERNEL_H_
-
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/vmalloc.h>
-#include <linux/version.h>
-
-#include <asm/abs_addr.h>
-#include <asm/ibmebus.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-
-/**
- * ehca_adr_bad - Handle to be used for adress translation mechanisms,
- * currently a placeholder.
- */
-inline static int ehca_adr_bad(void *adr)
-{
-	return (adr == 0);
-};
-
-/* We will remove this lines in SVN when it is included in the Linux kernel.
- * We don't want to introducte unnecessary dependencies to a patched kernel.
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-#include <asm/hvcall.h>
-#define H_SUCCESS              0
-#define H_BUSY		       1
-#define H_CONSTRAINED	       4
-#define H_LONG_BUSY_ORDER_1_MSEC   9900
-#define H_LONG_BUSY_ORDER_10_MSEC  9901
-#define H_LONG_BUSY_ORDER_100_MSEC 9902
-#define H_LONG_BUSY_ORDER_1_SEC    9903
-#define H_LONG_BUSY_ORDER_10_SEC   9904
-#define H_LONG_BUSY_ORDER_100_SEC  9905
-
-#define H_IS_LONG_BUSY(x)  ((x >= H_LongBusyStartRange) && (x <= H_LongBusyEndRange))
-
-#define H_PARTIAL_STORE        16
-#define H_PAGE_REGISTERED      15
-#define H_IN_PROGRESS          14
-#define H_PARTIAL              5
-#define H_NOT_AVAILABLE        3
-#define H_Closed               2
-
-#define H_HARDWARE	       -1
-#define H_PARAMETER	       -4
-#define H_NO_MEM               -9
-#define H_RESOURCE             -16
-
-#define H_ADAPTER_PARM         -17
-#define H_RH_PARM              -18
-#define H_RCQ_PARM             -19
-#define H_SCQ_PARM             -20
-#define H_EQ_PARM              -21
-#define H_RT_PARM              -22
-#define H_ST_PARM              -23
-#define H_SIGT_PARM            -24
-#define H_TOKEN_PARM           -25
-#define H_MLENGTH_PARM         -27
-#define H_MEM_PARM             -28
-#define H_MEM_ACCESS_PARM      -29
-#define H_ATTR_PARM            -30
-#define H_PORT_PARM            -31
-#define H_MCG_PARM             -32
-#define H_VL_PARM              -33
-#define H_TSIZE_PARM           -34
-#define H_TRACE_PARM           -35
-#define H_MASK_PARM            -37
-#define H_MCG_FULL             -38
-#define H_ALIAS_EXIST          -39
-#define H_P_COUNTER            -40
-#define H_TABLE_FULL           -41
-#define H_ALT_TABLE            -42
-#define H_MR_CONDITION         -43
-#define H_NOT_ENOUGH_RESOURCES -44
-#define H_R_STATE              -45
-#define H_RESCINDEND           -46
-
-/* H call defines to be moved to kernel */
-#define H_RESET_EVENTS         0x15C
-#define H_ALLOC_RESOURCE       0x160
-#define H_FREE_RESOURCE        0x164
-#define H_MODIFY_QP            0x168
-#define H_QUERY_QP             0x16C
-#define H_REREGISTER_PMR       0x170
-#define H_REGISTER_SMR         0x174
-#define H_QUERY_MR             0x178
-#define H_QUERY_MW             0x17C
-#define H_QUERY_HCA            0x180
-#define H_QUERY_PORT           0x184
-#define H_MODIFY_PORT          0x188
-#define H_DEFINE_AQP1          0x18C
-#define H_GET_TRACE_BUFFER     0x190
-#define H_DEFINE_AQP0          0x194
-#define H_RESIZE_MR            0x198
-#define H_ATTACH_MCQP          0x19C
-#define H_DETACH_MCQP          0x1A0
-#define H_CREATE_RPT           0x1A4
-#define H_REMOVE_RPT           0x1A8
-#define H_REGISTER_RPAGES      0x1AC
-#define H_DISABLE_AND_GETC     0x1B0
-#define H_ERROR_DATA           0x1B4
-#define H_GET_HCA_INFO         0x1B8
-#define H_GET_PERF_COUNT       0x1BC
-#define H_MANAGE_TRACE         0x1C0
-#define H_QUERY_INT_STATE      0x1E4
-#define H_CB_ALIGNMENT         4096
-#endif /* LINUX_VERSION_CODE */
-
-#endif /* _EHCA_KERNEL_H_ */
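The compatibility header deleted above existed only to supply pSeries hCall constants on kernels older than 2.6.17; current kernels provide them via <asm/hvcall.h>, which is why the whole file can go. For reference, a minimal sketch of the retry idiom those H_* return codes support; the function name and the fixed backoff are illustrative, not the driver's actual loop:

#include <asm/hvcall.h>		/* plpar_hcall_norets(), H_IS_LONG_BUSY() */
#include <linux/delay.h>	/* msleep() */

/* Illustrative sketch only: retry an hCall while the hypervisor
 * returns one of the H_LONG_BUSY_* hints listed above. */
static long hcall_with_retry(unsigned long opcode, unsigned long handle)
{
	long rc;

	do {
		rc = plpar_hcall_norets(opcode, handle);
		if (H_IS_LONG_BUSY(rc))
			msleep(100);	/* crude fixed backoff for the sketch */
	} while (H_IS_LONG_BUSY(rc));

	return rc;	/* H_SUCCESS on success, negative H_* code otherwise */
}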
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_main.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_main.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_main.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_main.c	2006-10-19 12:41:11.000000000 -0400
@@ -4,6 +4,8 @@
  *  module start stop, hca detection
  *
  *  Authors: Heiko J Schick <schickhj@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Joachim Fenkes <fenkes@de.ibm.com>
  *
  *  Copyright (c) 2005 IBM Corporation
  *
@@ -38,8 +40,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define DEB_PREFIX "shca"
-
 #include "ehca_classes.h"
 #include "ehca_iverbs.h"
 #include "ehca_mrmw.h"
@@ -49,12 +49,10 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0006");
-
-struct ehca_comp_pool* ehca_pool;
+MODULE_VERSION("SVNEHCA_0015");
 
 int ehca_open_aqp1     = 0;
-int ehca_debug_level   = -1;
+int ehca_debug_level   = 0;
 int ehca_hw_level      = 0;
 int ehca_nr_ports      = 2;
 int ehca_use_hp_mr     = 0;
@@ -75,7 +73,7 @@ MODULE_PARM_DESC(open_aqp1,
 		 "AQP1 on startup (0: no (default), 1: yes)");
 MODULE_PARM_DESC(debug_level,
 		 "debug level"
-		 " (0: node, 6: only errors (default), 9: all)");
+		 " (0: no debug traces (default), 1: with debug traces)");
 MODULE_PARM_DESC(hw_level,
 		 "hardware level"
 		 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
@@ -91,169 +89,74 @@ MODULE_PARM_DESC(poll_all_eqs,
 MODULE_PARM_DESC(static_rate,
 		 "set permanent static rate (default: disabled)");
 
-/* This external trace mask controls what will end up in the
- * kernel ring buffer. Number 6 means, that everything between
- * 0 and 5 will be stored.
- */
-u8 ehca_edeb_mask[EHCA_EDEB_TRACE_MASK_SIZE]={6, 6, 6, 6,
-					      6, 6, 6, 6,
-					      6, 6, 6, 6,
-					      6, 6, 6, 6,
-					      6, 6, 6, 6,
-					      6, 6, 6, 6,
-					      6, 6, 6, 6,
-					      6, 6, 0, 0};
-
 spinlock_t ehca_qp_idr_lock;
 spinlock_t ehca_cq_idr_lock;
 DEFINE_IDR(ehca_qp_idr);
 DEFINE_IDR(ehca_cq_idr);
 
-struct ehca_module ehca_module;
-
-void ehca_init_trace(void)
-{
-	EDEB_EN(7, "");
-
-	if (ehca_debug_level != -1) {
-		int i;
-		for (i = 0; i < EHCA_EDEB_TRACE_MASK_SIZE; i++)
-			ehca_edeb_mask[i] = ehca_debug_level;
-	}
+static struct list_head shca_list; /* list of all registered ehcas */
+static spinlock_t shca_list_lock;
 
-	EDEB_EX(7, "");
-}
+static struct timer_list poll_eqs_timer;
 
-int ehca_create_slab_caches(struct ehca_module *ehca_module)
+static int ehca_create_slab_caches(void)
 {
-	int ret = 0;
-
-	EDEB_EN(7, "");
+	int ret;
 
-	ehca_module->cache_pd =
-		kmem_cache_create("ehca_cache_pd",
-				  sizeof(struct ehca_pd),
-				  0, SLAB_HWCACHE_ALIGN,
-				  NULL, NULL);
-	if (!ehca_module->cache_pd) {
-		EDEB_ERR(4, "Cannot create PD SLAB cache.");
-		ret = -ENOMEM;
-		goto create_slab_caches1;
+	ret = ehca_init_pd_cache();
+	if (ret) {
+		ehca_gen_err("Cannot create PD SLAB cache.");
+		return ret;
 	}
 
-	ehca_module->cache_cq =
-		kmem_cache_create("ehca_cache_cq",
-				  sizeof(struct ehca_cq),
-				  0, SLAB_HWCACHE_ALIGN,
-				  NULL, NULL);
-	if (!ehca_module->cache_cq) {
-		EDEB_ERR(4, "Cannot create CQ SLAB cache.");
-		ret = -ENOMEM;
+	ret = ehca_init_cq_cache();
+	if (ret) {
+		ehca_gen_err("Cannot create CQ SLAB cache.");
 		goto create_slab_caches2;
 	}
 
-	ehca_module->cache_qp =
-		kmem_cache_create("ehca_cache_qp",
-				  sizeof(struct ehca_qp),
-				  0, SLAB_HWCACHE_ALIGN,
-				  NULL, NULL);
-	if (!ehca_module->cache_qp) {
-		EDEB_ERR(4, "Cannot create QP SLAB cache.");
-		ret = -ENOMEM;
+	ret = ehca_init_qp_cache();
+	if (ret) {
+		ehca_gen_err("Cannot create QP SLAB cache.");
 		goto create_slab_caches3;
 	}
 
-	ehca_module->cache_av =
-		kmem_cache_create("ehca_cache_av",
-				  sizeof(struct ehca_av),
-				  0, SLAB_HWCACHE_ALIGN,
-				  NULL, NULL);
-	if (!ehca_module->cache_av) {
-		EDEB_ERR(4, "Cannot create AV SLAB cache.");
-		ret = -ENOMEM;
+	ret = ehca_init_av_cache();
+	if (ret) {
+		ehca_gen_err("Cannot create AV SLAB cache.");
 		goto create_slab_caches4;
 	}
 
-	ehca_module->cache_mw =
-		kmem_cache_create("ehca_cache_mw",
-				  sizeof(struct ehca_mw),
-				  0, SLAB_HWCACHE_ALIGN,
-				  NULL, NULL);
-	if (!ehca_module->cache_mw) {
-		EDEB_ERR(4, "Cannot create MW SLAB cache.");
-		ret = -ENOMEM;
+	ret = ehca_init_mrmw_cache();
+	if (ret) {
+		ehca_gen_err("Cannot create MR&MW SLAB cache.");
 		goto create_slab_caches5;
 	}
 
-	ehca_module->cache_mr =
-		kmem_cache_create("ehca_cache_mr",
-				  sizeof(struct ehca_mr),
-				  0, SLAB_HWCACHE_ALIGN,
-				  NULL, NULL);
-	if (!ehca_module->cache_mr) {
-		EDEB_ERR(4, "Cannot create MR SLAB cache.");
-		ret = -ENOMEM;
-		goto create_slab_caches6;
-	}
-
-	EDEB_EX(7, "ret=%x", ret);
-
-	return ret;
-
-create_slab_caches6:
-	kmem_cache_destroy(ehca_module->cache_mw);
+	return 0;
 
 create_slab_caches5:
-	kmem_cache_destroy(ehca_module->cache_av);
+	ehca_cleanup_av_cache();
 
 create_slab_caches4:
-	kmem_cache_destroy(ehca_module->cache_qp);
+	ehca_cleanup_qp_cache();
 
 create_slab_caches3:
-	kmem_cache_destroy(ehca_module->cache_cq);
+	ehca_cleanup_cq_cache();
 
 create_slab_caches2:
-	kmem_cache_destroy(ehca_module->cache_pd);
-
-create_slab_caches1:
-	EDEB_EX(7, "ret=%x", ret);
+	ehca_cleanup_pd_cache();
 
 	return ret;
 }
 
-int ehca_destroy_slab_caches(struct ehca_module *ehca_module)
+static void ehca_destroy_slab_caches(void)
 {
-	int ret;
-
-	EDEB_EN(7, "");
-
-	ret = kmem_cache_destroy(ehca_module->cache_pd);
-	if (ret)
-		EDEB_ERR(4, "Cannot destroy PD SLAB cache. ret=%x", ret);
-
-	ret = kmem_cache_destroy(ehca_module->cache_cq);
-	if (ret)
-		EDEB_ERR(4, "Cannot destroy CQ SLAB cache. ret=%x", ret);
-
-	ret = kmem_cache_destroy(ehca_module->cache_qp);
-	if (ret)
-		EDEB_ERR(4, "Cannot destroy QP SLAB cache. ret=%x", ret);
-
-	ret = kmem_cache_destroy(ehca_module->cache_av);
-	if (ret)
-		EDEB_ERR(4, "Cannot destroy AV SLAB cache. ret=%x", ret);
-
-	ret = kmem_cache_destroy(ehca_module->cache_mw);
-	if (ret)
-		EDEB_ERR(4, "Cannot destroy MW SLAB cache. ret=%x", ret);
-
-	ret = kmem_cache_destroy(ehca_module->cache_mr);
-	if (ret)
-		EDEB_ERR(4, "Cannot destroy MR SLAB cache. ret=%x", ret);
-
-	EDEB_EX(7, "");
-
-	return 0;
+	ehca_cleanup_mrmw_cache();
+	ehca_cleanup_av_cache();
+	ehca_cleanup_qp_cache();
+	ehca_cleanup_cq_cache();
+	ehca_cleanup_pd_cache();
 }
 
 #define EHCA_HCAAVER  EHCA_BMASK_IBM(32,39)
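The hunk above folds the six open-coded kmem_cache_create() calls into per-object init/cleanup helpers (ehca_init_pd_cache() and friends) that live next to the structures they allocate. A rough sketch of one such helper pair, assuming the 2.6.18-era six-argument kmem_cache_create(); the cache name and error handling are assumptions, not the exact OFED code:

#include <linux/errno.h>
#include <linux/slab.h>
#include "ehca_classes.h"	/* struct ehca_pd */

static struct kmem_cache *pd_cache;

int ehca_init_pd_cache(void)
{
	/* ctor/dtor arguments are still part of the API in 2.6.18 */
	pd_cache = kmem_cache_create("ehca_pd", sizeof(struct ehca_pd),
				     0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	return pd_cache ? 0 : -ENOMEM;
}

void ehca_cleanup_pd_cache(void)
{
	if (pd_cache)
		kmem_cache_destroy(pd_cache);
}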
@@ -261,22 +164,20 @@ int ehca_destroy_slab_caches(struct ehca
 
 int ehca_sense_attributes(struct ehca_shca *shca)
 {
-	int ret = -EINVAL;
-	u64 h_ret = H_SUCCESS;
+	int ret = 0;
+	u64 h_ret;
 	struct hipz_query_hca *rblock;
 
-	EDEB_EN(7, "shca=%p", shca);
-
 	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (!rblock) {
-		EDEB_ERR(4, "Cannot allocate rblock memory.");
-		ret = -ENOMEM;
-		goto num_ports0;
+		ehca_gen_err("Cannot allocate rblock memory.");
+		return -ENOMEM;
 	}
 
 	h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "Cannot query device properties. h_ret=%lx", h_ret);
+		ehca_gen_err("Cannot query device properties. h_ret=%lx",
+			     h_ret);
 		ret = -EPERM;
 		goto num_ports1;
 	}
@@ -286,7 +187,7 @@ int ehca_sense_attributes(struct ehca_sh
 	else
 		shca->num_ports = (u8)rblock->num_ports;
 
-	EDEB(6, " ... found %x ports", rblock->num_ports);
+	ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
 
 	if (ehca_hw_level == 0) {
 		u32 hcaaver;
@@ -295,8 +196,7 @@ int ehca_sense_attributes(struct ehca_sh
 		hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
 		revid   = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
 
-		EDEB(6, " ... hardware version=%x:%x",
-		     hcaaver, revid);
+		ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
 
 		if ((hcaaver == 1) && (revid == 0))
 			shca->hw_level = 0;
@@ -305,58 +205,43 @@ int ehca_sense_attributes(struct ehca_sh
 		else if ((hcaaver == 1) && (revid == 2))
 			shca->hw_level = 2;
 	}
-	EDEB(6, " ... hardware level=%x", shca->hw_level);
+	ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
 
 	shca->sport[0].rate = IB_RATE_30_GBPS;
 	shca->sport[1].rate = IB_RATE_30_GBPS;
 
-	ret = 0;
-
 num_ports1:
 	kfree(rblock);
-
-num_ports0:
-	EDEB_EX(7, "ret=%x", ret);
-
 	return ret;
 }
 
-static int init_node_guid(struct ehca_shca* shca)
+static int init_node_guid(struct ehca_shca *shca)
 {
 	int ret = 0;
 	struct hipz_query_hca *rblock;
 
-	EDEB_EN(7, "");
-
 	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (!rblock) {
-		EDEB_ERR(4, "Can't allocate rblock memory.");
-		ret = -ENOMEM;
-		goto init_node_guid0;
+		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+		return -ENOMEM;
 	}
 
 	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
-		EDEB_ERR(4, "Can't query device properties");
+		ehca_err(&shca->ib_device, "Can't query device properties");
 		ret = -EINVAL;
 		goto init_node_guid1;
 	}
 
-	memcpy(&shca->ib_device.node_guid, &rblock->node_guid, (sizeof(u64)));
+	memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
 
 init_node_guid1:
 	kfree(rblock);
-
-init_node_guid0:
-	EDEB_EX(7, "node_guid=%lx ret=%x", shca->ib_device.node_guid, ret);
-
 	return ret;
 }
 
-int ehca_register_device(struct ehca_shca *shca)
+int ehca_init_device(struct ehca_shca *shca)
 {
-	int ret = 0;
-
-	EDEB_EN(7, "shca=%p", shca);
+	int ret;
 
 	ret = init_node_guid(shca);
 	if (ret)
@@ -432,39 +317,31 @@ int ehca_register_device(struct ehca_shc
 	/* shca->ib_device.process_mad	    = ehca_process_mad;	    */
 	shca->ib_device.mmap		    = ehca_mmap;
 
-	ret = ib_register_device(&shca->ib_device);
-
-	EDEB_EX(7, "ret=%x", ret);
-
 	return ret;
 }
 
 static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
 {
-	struct ehca_sport *sport;
+	struct ehca_sport *sport = &shca->sport[port - 1];
 	struct ib_cq *ibcq;
 	struct ib_qp *ibqp;
 	struct ib_qp_init_attr qp_init_attr;
-	int ret = 0;
-
-	EDEB_EN(7, "shca=%p port=%x", shca, port);
-
-	sport = &shca->sport[port - 1];
+	int ret;
 
 	if (sport->ibcq_aqp1) {
-		EDEB_ERR(4, "AQP1 CQ is already created.");
+		ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
 		return -EPERM;
 	}
 
 	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10);
 	if (IS_ERR(ibcq)) {
-		EDEB_ERR(4, "Cannot create AQP1 CQ.");
+		ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
 		return PTR_ERR(ibcq);
 	}
 	sport->ibcq_aqp1 = ibcq;
 
 	if (sport->ibqp_aqp1) {
-		EDEB_ERR(4, "AQP1 QP is already created.");
+		ehca_err(&shca->ib_device, "AQP1 QP is already created.");
 		ret = -EPERM;
 		goto create_aqp1;
 	}
@@ -485,85 +362,62 @@ static int ehca_create_aqp1(struct ehca_
 
 	ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
 	if (IS_ERR(ibqp)) {
-		EDEB_ERR(4, "Cannot create AQP1 QP.");
+		ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
 		ret = PTR_ERR(ibqp);
 		goto create_aqp1;
 	}
 	sport->ibqp_aqp1 = ibqp;
 
-	EDEB_EX(7, "ret=%x", ret);
-
-	return ret;
+	return 0;
 
 create_aqp1:
 	ib_destroy_cq(sport->ibcq_aqp1);
-
-	EDEB_EX(7, "ret=%x", ret);
-
 	return ret;
 }
 
 static int ehca_destroy_aqp1(struct ehca_sport *sport)
 {
-	int ret = 0;
-
-	EDEB_EN(7, "sport=%p", sport);
+	int ret;
 
 	ret = ib_destroy_qp(sport->ibqp_aqp1);
 	if (ret) {
-		EDEB_ERR(4, "Cannot destroy AQP1 QP. ret=%x", ret);
-		goto destroy_aqp1;
+		ehca_gen_err("Cannot destroy AQP1 QP. ret=%x", ret);
+		return ret;
 	}
 
 	ret = ib_destroy_cq(sport->ibcq_aqp1);
 	if (ret)
-		EDEB_ERR(4, "Cannot destroy AQP1 CQ. ret=%x", ret);
-
-destroy_aqp1:
-	EDEB_EX(7, "ret=%x", ret);
+		ehca_gen_err("Cannot destroy AQP1 CQ. ret=%x", ret);
 
 	return ret;
 }
 
-static ssize_t ehca_show_debug_mask(struct device_driver *ddp, char *buf)
+static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
 {
-	int i;
-	int total = 0;
-	total += snprintf(buf + total, PAGE_SIZE - total, "%d",
-			  ehca_edeb_mask[0]);
-	for (i = 1; i < EHCA_EDEB_TRACE_MASK_SIZE; i++) {
-		total += snprintf(buf + total, PAGE_SIZE - total, "%d",
-				  ehca_edeb_mask[i]);
-	}
-
-	total += snprintf(buf + total, PAGE_SIZE - total, "\n");
-
-	return total;
+	return  snprintf(buf, PAGE_SIZE, "%d\n",
+			 ehca_debug_level);
 }
 
-static ssize_t ehca_store_debug_mask(struct device_driver *ddp,
-				     const char *buf, size_t count)
+static ssize_t ehca_store_debug_level(struct device_driver *ddp,
+				      const char *buf, size_t count)
 {
-	int i;
-	for (i = 0; i < EHCA_EDEB_TRACE_MASK_SIZE; i++) {
-		char value = buf[i] - '0';
-		if ((value <= 9) && (count >= i)) {
-			ehca_edeb_mask[i] = value;
-		}
-	}
-	return count;
+	int value = (*buf) - '0';
+	if (value >= 0 && value <= 9)
+		ehca_debug_level = value;
+	return 1;
 }
-DRIVER_ATTR(debug_mask, S_IRUSR | S_IWUSR,
-	    ehca_show_debug_mask, ehca_store_debug_mask);
+
+DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
+	    ehca_show_debug_level, ehca_store_debug_level);
 
 void ehca_create_driver_sysfs(struct ibmebus_driver *drv)
 {
-	driver_create_file(&drv->driver, &driver_attr_debug_mask);
+	driver_create_file(&drv->driver, &driver_attr_debug_level);
 }
 
 void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
 {
-	driver_remove_file(&drv->driver, &driver_attr_debug_mask);
+	driver_remove_file(&drv->driver, &driver_attr_debug_level);
 }
 
 #define EHCA_RESOURCE_ATTR(name)                                           \
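The old 32-byte debug_mask attribute becomes a single debug_level integer. One detail worth knowing when poking it from userspace: the new store handler consumes one character and returns 1, so a write such as "echo 1" (which sends "1" plus a newline) gets resubmitted by the VFS for the trailing byte, which is then ignored. A conventional store handler returns count instead; a hedged sketch with illustrative names:

#include <linux/device.h>	/* DRIVER_ATTR(), struct device_driver */
#include <linux/stat.h>

static int my_level;

static ssize_t my_show_level(struct device_driver *ddp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", my_level);
}

static ssize_t my_store_level(struct device_driver *ddp,
			      const char *buf, size_t count)
{
	if (count && buf[0] >= '0' && buf[0] <= '9')
		my_level = buf[0] - '0';
	return count;	/* whole buffer consumed, no resubmission */
}

DRIVER_ATTR(my_level, S_IRUSR | S_IWUSR, my_show_level, my_store_level);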
@@ -579,16 +433,16 @@ static ssize_t  ehca_show_##name(struct 
 									   \
 	rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);			   \
 	if (!rblock) {						           \
-		EDEB_ERR(4, "Can't allocate rblock memory.");		   \
+		dev_err(dev, "Can't allocate rblock memory.");		   \
 		return 0;						   \
 	}								   \
 									   \
 	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
-			EDEB_ERR(4, "Can't query device properties");	   \
-			kfree(rblock);					   \
-			return 0;					   \
+		dev_err(dev, "Can't query device properties");	   	   \
+		kfree(rblock);					   	   \
+		return 0;					   	   \
 	}								   \
-                                                                           \
+									   \
 	data = rblock->name;                                               \
 	kfree(rblock);                                                     \
 									   \
@@ -627,7 +481,6 @@ static ssize_t ehca_show_adapter_handle(
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
 
 
-
 void ehca_create_device_sysfs(struct ibmebus_dev *dev)
 {
 	device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
@@ -672,26 +525,24 @@ static int __devinit ehca_probe(struct i
 	struct ehca_shca *shca;
 	u64 *handle;
 	struct ib_pd *ibpd;
-	int ret = 0;
-
-	EDEB_EN(7, "name=%s", dev->name);
+	int ret;
 
 	handle = (u64 *)get_property(dev->ofdev.node, "ibm,hca-handle", NULL);
 	if (!handle) {
-		EDEB_ERR(4, "Cannot get eHCA handle for adapter: %s.",
-			 dev->ofdev.node->full_name);
+		ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
+			     dev->ofdev.node->full_name);
 		return -ENODEV;
 	}
 
 	if (!(*handle)) {
-		EDEB_ERR(4, "Wrong eHCA handle for adapter: %s.",
-			 dev->ofdev.node->full_name);
+		ehca_gen_err("Wrong eHCA handle for adapter: %s.",
+			     dev->ofdev.node->full_name);
 		return -ENODEV;
 	}
 
 	shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
-	if (shca == NULL) {
-		EDEB_ERR(4, "Cannot allocate shca memory.");
+	if (!shca) {
+		ehca_gen_err("Cannot allocate shca memory.");
 		return -ENOMEM;
 	}
 
@@ -701,29 +552,35 @@ static int __devinit ehca_probe(struct i
 
 	ret = ehca_sense_attributes(shca);
 	if (ret < 0) {
-		EDEB_ERR(4, "Cannot sense eHCA attributes.");
+		ehca_gen_err("Cannot sense eHCA attributes.");
+		goto probe1;
+	}
+
+	ret = ehca_init_device(shca);
+	if (ret) {
+		ehca_gen_err("Cannot init ehca  device struct");
 		goto probe1;
 	}
 
 	/* create event queues */
 	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048);
 	if (ret) {
-		EDEB_ERR(4, "Cannot create EQ.");
+		ehca_err(&shca->ib_device, "Cannot create EQ.");
 		goto probe1;
 	}
 
 	ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
 	if (ret) {
-		EDEB_ERR(4, "Cannot create NEQ.");
-		goto probe2;
+		ehca_err(&shca->ib_device, "Cannot create NEQ.");
+		goto probe3;
 	}
 
 	/* create internal protection domain */
 	ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL);
 	if (IS_ERR(ibpd)) {
-		EDEB_ERR(4, "Cannot create internal PD.");
+		ehca_err(&shca->ib_device, "Cannot create internal PD.");
 		ret = PTR_ERR(ibpd);
-		goto probe3;
+		goto probe4;
 	}
 
 	shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
@@ -731,15 +588,18 @@ static int __devinit ehca_probe(struct i
 
 	/* create internal max MR */
 	ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
+
 	if (ret) {
-		EDEB_ERR(4, "Cannot create internal MR. ret=%x", ret);
-		goto probe4;
+		ehca_err(&shca->ib_device, "Cannot create internal MR ret=%x",
+			 ret);
+		goto probe5;
 	}
 
-	ret = ehca_register_device(shca);
+	ret = ib_register_device(&shca->ib_device);
 	if (ret) {
-		EDEB_ERR(4, "Cannot register Infiniband device.");
-		goto probe5;
+		ehca_err(&shca->ib_device,
+			 "ib_register_device() failed ret=%x", ret);
+		goto probe6;
 	}
 
 	/* create AQP1 for port 1 */
@@ -747,8 +607,9 @@ static int __devinit ehca_probe(struct i
 		shca->sport[0].port_state = IB_PORT_DOWN;
 		ret = ehca_create_aqp1(shca, 1);
 		if (ret) {
-			EDEB_ERR(4, "Cannot create AQP1 for port 1.");
-			goto probe6;
+			ehca_err(&shca->ib_device,
+				 "Cannot create AQP1 for port 1.");
+			goto probe7;
 		}
 	}
 
@@ -757,54 +618,56 @@ static int __devinit ehca_probe(struct i
 		shca->sport[1].port_state = IB_PORT_DOWN;
 		ret = ehca_create_aqp1(shca, 2);
 		if (ret) {
-			EDEB_ERR(4, "Cannot create AQP1 for port 2.");
-			goto probe7;
+			ehca_err(&shca->ib_device,
+				 "Cannot create AQP1 for port 2.");
+			goto probe8;
 		}
 	}
 
 	ehca_create_device_sysfs(dev);
 
-	spin_lock(&ehca_module.shca_lock);
-	list_add(&shca->shca_list, &ehca_module.shca_list);
-	spin_unlock(&ehca_module.shca_lock);
-
-	EDEB_EX(7, "ret=%x", ret);
+	spin_lock(&shca_list_lock);
+	list_add(&shca->shca_list, &shca_list);
+	spin_unlock(&shca_list_lock);
 
 	return 0;
 
-probe7:
+probe8:
 	ret = ehca_destroy_aqp1(&shca->sport[0]);
 	if (ret)
-		EDEB_ERR(4, "Cannot destroy AQP1 for port 1. ret=%x", ret);
+		ehca_err(&shca->ib_device,
+			 "Cannot destroy AQP1 for port 1. ret=%x", ret);
 
-probe6:
+probe7:
 	ib_unregister_device(&shca->ib_device);
 
-probe5:
+probe6:
 	ret = ehca_dereg_internal_maxmr(shca);
 	if (ret)
-		EDEB_ERR(4, "Cannot destroy internal MR. ret=%x", ret);
+		ehca_err(&shca->ib_device,
+			 "Cannot destroy internal MR. ret=%x", ret);
 
-probe4:
+probe5:
 	ret = ehca_dealloc_pd(&shca->pd->ib_pd);
-	if (ret != 0)
-		EDEB_ERR(4, "Cannot destroy internal PD. ret=%x", ret);
+	if (ret)
+		ehca_err(&shca->ib_device,
+			 "Cannot destroy internal PD. ret=%x", ret);
 
-probe3:
+probe4:
 	ret = ehca_destroy_eq(shca, &shca->neq);
-	if (ret != 0)
-		EDEB_ERR(4, "Cannot destroy NEQ. ret=%x", ret);
+	if (ret)
+		ehca_err(&shca->ib_device,
+			 "Cannot destroy NEQ. ret=%x", ret);
 
-probe2:
+probe3:
 	ret = ehca_destroy_eq(shca, &shca->eq);
-	if (ret != 0)
-		EDEB_ERR(4, "Cannot destroy EQ. ret=%x", ret);
+	if (ret)
+		ehca_err(&shca->ib_device,
+			 "Cannot destroy EQ. ret=%x", ret);
 
 probe1:
 	ib_dealloc_device(&shca->ib_device);
 
-	EDEB_EX(4, "ret=%x", ret);
-
 	return -EINVAL;
 }
 
@@ -813,18 +676,16 @@ static int __devexit ehca_remove(struct 
 	struct ehca_shca *shca = dev->ofdev.dev.driver_data;
 	int ret;
 
-	EDEB_EN(7, "shca=%p", shca);
-
 	ehca_remove_device_sysfs(dev);
 
 	if (ehca_open_aqp1 == 1) {
 		int i;
-
 		for (i = 0; i < shca->num_ports; i++) {
 			ret = ehca_destroy_aqp1(&shca->sport[i]);
-			if (ret != 0)
-				EDEB_ERR(4, "Cannot destroy AQP1 for port %x."
-					 " ret=%x", ret, i);
+			if (ret)
+				ehca_err(&shca->ib_device,
+					 "Cannot destroy AQP1 for port %x "
+					 "ret=%x", ret, i);
 		}
 	}
 
@@ -832,27 +693,27 @@ static int __devexit ehca_remove(struct 
 
 	ret = ehca_dereg_internal_maxmr(shca);
 	if (ret)
-		EDEB_ERR(4, "Cannot destroy internal MR. ret=%x", ret);
+		ehca_err(&shca->ib_device,
+			 "Cannot destroy internal MR. ret=%x", ret);
 
 	ret = ehca_dealloc_pd(&shca->pd->ib_pd);
 	if (ret)
-		EDEB_ERR(4, "Cannot destroy internal PD. ret=%x", ret);
+		ehca_err(&shca->ib_device,
+			 "Cannot destroy internal PD. ret=%x", ret);
 
 	ret = ehca_destroy_eq(shca, &shca->eq);
 	if (ret)
-		EDEB_ERR(4, "Cannot destroy EQ. ret=%x", ret);
+		ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%x", ret);
 
 	ret = ehca_destroy_eq(shca, &shca->neq);
 	if (ret)
-		EDEB_ERR(4, "Canot destroy NEQ. ret=%x", ret);
+		ehca_err(&shca->ib_device, "Canot destroy NEQ. ret=%x", ret);
 
 	ib_dealloc_device(&shca->ib_device);
 
-	spin_lock(&ehca_module.shca_lock);
+	spin_lock(&shca_list_lock);
 	list_del(&shca->shca_list);
-	spin_unlock(&ehca_module.shca_lock);
-
-	EDEB_EX(7, "ret=%x", ret);
+	spin_unlock(&shca_list_lock);
 
 	return ret;
 }
@@ -873,39 +734,46 @@ static struct ibmebus_driver ehca_driver
 	.remove   = ehca_remove,
 };
 
+void ehca_poll_eqs(unsigned long data)
+{
+	struct ehca_shca *shca;
+
+	spin_lock(&shca_list_lock);
+	list_for_each_entry(shca, &shca_list, shca_list) {
+		if (shca->eq.is_initialized)
+			ehca_tasklet_eq((unsigned long)(void*)shca);
+	}
+	mod_timer(&poll_eqs_timer, jiffies + HZ);
+	spin_unlock(&shca_list_lock);
+}
+
 int __init ehca_module_init(void)
 {
-	int ret = 0;
+	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	                 "(Rel.: SVNEHCA_0006)\n");
-	EDEB_EN(7, "");
-
+	                 "(Rel.: SVNEHCA_0015)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
 	spin_lock_init(&ehca_cq_idr_lock);
 
-	INIT_LIST_HEAD(&ehca_module.shca_list);
-	spin_lock_init(&ehca_module.shca_lock);
+	INIT_LIST_HEAD(&shca_list);
+	spin_lock_init(&shca_list_lock);
 
-	ehca_init_trace();
-
-	ehca_pool = ehca_create_comp_pool();
-	if (ehca_pool == NULL) {
-		EDEB_ERR(4, "Cannot create comp pool.");
-		ret = -EINVAL;
-		goto module_init0;
+	if ((ret = ehca_create_comp_pool())) {
+		ehca_gen_err("Cannot create comp pool.");
+		return ret;
 	}
 
-	if ((ret = ehca_create_slab_caches(&ehca_module))) {
-		EDEB_ERR(4, "Cannot create SLAB caches");
+	if ((ret = ehca_create_slab_caches())) {
+		ehca_gen_err("Cannot create SLAB caches");
 		ret = -ENOMEM;
 		goto module_init1;
 	}
 
 	if ((ret = ibmebus_register_driver(&ehca_driver))) {
-		EDEB_ERR(4, "Cannot register eHCA device driver");
+		ehca_gen_err("Cannot register eHCA device driver");
 		ret = -EINVAL;
 		goto module_init2;
 	}
@@ -913,53 +781,39 @@ int __init ehca_module_init(void)
 	ehca_create_driver_sysfs(&ehca_driver);
 
 	if (ehca_poll_all_eqs != 1) {
-		EDEB_ERR(4, "WARNING!!!");
-		EDEB_ERR(4, "It is possible to lose interrupts.");
-
-		return 0;
+		ehca_gen_err("WARNING!!!");
+		ehca_gen_err("It is possible to lose interrupts.");
+	} else {
+		init_timer(&poll_eqs_timer);
+		poll_eqs_timer.function = ehca_poll_eqs;
+		poll_eqs_timer.expires = jiffies + HZ;
+		add_timer(&poll_eqs_timer);
 	}
 
-	init_timer(&ehca_module.timer);
-	ehca_module.timer.function = ehca_poll_eqs;
-	ehca_module.timer.data = (unsigned long)(void*)&ehca_module;
-	ehca_module.timer.expires = jiffies + HZ;
-	add_timer(&ehca_module.timer);
-
-	EDEB_EX(7, "ret=%x", ret);
-
 	return 0;
 
 module_init2:
-	ehca_destroy_slab_caches(&ehca_module);
+	ehca_destroy_slab_caches();
 
 module_init1:
-	ehca_destroy_comp_pool(ehca_pool);
-
-module_init0:
-	EDEB_EX(7, "ret=%x", ret);
-
+	ehca_destroy_comp_pool();
 	return ret;
 };
 
 void __exit ehca_module_exit(void)
 {
-	EDEB_EN(7, "");
-
 	if (ehca_poll_all_eqs == 1)
-		del_timer_sync(&ehca_module.timer);
+		del_timer_sync(&poll_eqs_timer);
 
 	ehca_remove_driver_sysfs(&ehca_driver);
 	ibmebus_unregister_driver(&ehca_driver);
 
-	if (ehca_destroy_slab_caches(&ehca_module) != 0)
-		EDEB_ERR(4, "Cannot destroy SLAB caches");
+	ehca_destroy_slab_caches();
 
-	ehca_destroy_comp_pool(ehca_pool);
+	ehca_destroy_comp_pool();
 
 	idr_destroy(&ehca_cq_idr);
 	idr_destroy(&ehca_qp_idr);
-
-	EDEB_EX(7, "");
 };
 
 module_init(ehca_module_init);
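With struct ehca_module gone, the shca list, its lock, and the EQ polling timer become file-scope statics, and ehca_poll_eqs() re-arms itself once per second. The self-rearming timer idiom in isolation, using the 2.6.18 timer API (my_timer and my_poll are illustrative names):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;

static void my_poll(unsigned long data)
{
	/* ... do the periodic work, e.g. walk a list under a lock ... */
	mod_timer(&my_timer, jiffies + HZ);	/* fire again in ~1s */
}

static void my_poll_start(void)
{
	init_timer(&my_timer);
	my_timer.function = my_poll;
	my_timer.expires = jiffies + HZ;
	add_timer(&my_timer);
}

static void my_poll_stop(void)
{
	del_timer_sync(&my_timer);	/* also waits for a running callback */
}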
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_mcast.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_mcast.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_mcast.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_mcast.c	2006-10-17 10:15:06.000000000 -0400
@@ -42,153 +42,90 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define DEB_PREFIX "mcas"
-
 #include <linux/module.h>
 #include <linux/err.h>
 #include "ehca_classes.h"
 #include "ehca_tools.h"
 #include "ehca_qes.h"
 #include "ehca_iverbs.h"
-
 #include "hcp_if.h"
 
 #define MAX_MC_LID 0xFFFE
 #define MIN_MC_LID 0xC000	/* Multicast limits */
 #define EHCA_VALID_MULTICAST_GID(gid)  ((gid)[0] == 0xFF)
-#define EHCA_VALID_MULTICAST_LID(lid)  (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
+#define EHCA_VALID_MULTICAST_LID(lid) \
+	(((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
 
 int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	struct ehca_qp *my_qp = NULL;
-	struct ehca_shca *shca = NULL;
+	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+					      ib_device);
 	union ib_gid my_gid;
-	u64 h_ret = H_SUCCESS;
-	int ret = 0;
-
-	EHCA_CHECK_ADR(ibqp);
-	EHCA_CHECK_ADR(gid);
-
-	my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+	u64 subnet_prefix, interface_id, h_ret;
 
-	EHCA_CHECK_QP(my_qp);
 	if (ibqp->qp_type != IB_QPT_UD) {
-		EDEB_ERR(4, "invalid qp_type %x gid, ret=%x",
-			 ibqp->qp_type, EINVAL);
+		ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
 		return -EINVAL;
 	}
 
-	shca = container_of(ibqp->pd->device, struct ehca_shca, ib_device);
-	EHCA_CHECK_ADR(shca);
-
 	if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-		EDEB_ERR(4, "gid is not valid mulitcast gid ret=%x",
-			 EINVAL);
+		ehca_err(ibqp->device, "invalid mulitcast gid");
 		return -EINVAL;
 	} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-		EDEB_ERR(4, "lid=%x is not valid mulitcast lid ret=%x",
-			 lid, EINVAL);
+		ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
 		return -EINVAL;
 	}
 
 	memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
 
+	subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
+	interface_id = be64_to_cpu(my_gid.global.interface_id);
 	h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
-				     my_qp->ipz_qp_handle,
-				     my_qp->galpas.kernel,
-				     lid, my_gid.global.subnet_prefix,
-				     my_gid.global.interface_id);
-	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4,
+				   my_qp->ipz_qp_handle,
+				   my_qp->galpas.kernel,
+				   lid, subnet_prefix, interface_id);
+	if (h_ret != H_SUCCESS)
+		ehca_err(ibqp->device,
 			 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
 			 "h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
-	}
-	ret = ehca2ib_return_code(h_ret);
-
-	EDEB_EX(7, "mcast attach ret=%x\n"
-		   "ehca_qp=%p qp_num=%x  lid=%x\n"
-		   "my_gid=  %x %x %x %x\n"
-		   "         %x %x %x %x\n"
-		   "         %x %x %x %x\n"
-		   "         %x %x %x %x\n",
-		   ret, my_qp, ibqp->qp_num, lid,
-		   my_gid.raw[0], my_gid.raw[1],
-		   my_gid.raw[2], my_gid.raw[3],
-		   my_gid.raw[4], my_gid.raw[5],
-		   my_gid.raw[6], my_gid.raw[7],
-		   my_gid.raw[8], my_gid.raw[9],
-		   my_gid.raw[10], my_gid.raw[11],
-		   my_gid.raw[12], my_gid.raw[13],
-		   my_gid.raw[14], my_gid.raw[15]);
 
-	return ret;
+	return ehca2ib_return_code(h_ret);
 }
 
 int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	struct ehca_qp *my_qp = NULL;
-	struct ehca_shca *shca = NULL;
+	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+	struct ehca_shca *shca = container_of(ibqp->pd->device,
+					      struct ehca_shca, ib_device);
 	union ib_gid my_gid;
-	u64 h_ret = H_SUCCESS;
-	int ret = 0;
+	u64 subnet_prefix, interface_id, h_ret;
 
-	EHCA_CHECK_ADR(ibqp);
-	EHCA_CHECK_ADR(gid);
-
-	my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-
-	EHCA_CHECK_QP(my_qp);
 	if (ibqp->qp_type != IB_QPT_UD) {
-		EDEB_ERR(4, "invalid qp_type %x gid, ret=%x",
-			 ibqp->qp_type, EINVAL);
+		ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
 		return -EINVAL;
 	}
 
-	shca = container_of(ibqp->pd->device, struct ehca_shca, ib_device);
-	EHCA_CHECK_ADR(shca);
-
 	if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-		EDEB_ERR(4, "gid is not valid mulitcast gid ret=%x",
-			 EINVAL);
+		ehca_err(ibqp->device, "invalid mulitcast gid");
 		return -EINVAL;
 	} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-		EDEB_ERR(4, "lid=%x is not valid mulitcast lid ret=%x",
-			 lid, EINVAL);
+		ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
 		return -EINVAL;
 	}
 
-	EDEB_EN(7, "dgid=%p qp_numl=%x lid=%x",
-		gid, ibqp->qp_num, lid);
-
 	memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
 
+	subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
+	interface_id = be64_to_cpu(my_gid.global.interface_id);
 	h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
-				     my_qp->ipz_qp_handle,
-				     my_qp->galpas.kernel,
-				     lid, my_gid.global.subnet_prefix,
-				     my_gid.global.interface_id);
-	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4,
+				   my_qp->ipz_qp_handle,
+				   my_qp->galpas.kernel,
+				   lid, subnet_prefix, interface_id);
+	if (h_ret != H_SUCCESS)
+		ehca_err(ibqp->device,
 			 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
 			 "h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
-	}
-	ret = ehca2ib_return_code(h_ret);
-
-	EDEB_EX(7, "mcast detach ret=%x\n"
-		"ehca_qp=%p qp_num=%x  lid=%x\n"
-		"my_gid=  %x %x %x %x\n"
-		"         %x %x %x %x\n"
-		"         %x %x %x %x\n"
-		"         %x %x %x %x\n",
-		ret, my_qp, ibqp->qp_num, lid,
-		my_gid.raw[0], my_gid.raw[1],
-		my_gid.raw[2], my_gid.raw[3],
-		my_gid.raw[4], my_gid.raw[5],
-		my_gid.raw[6], my_gid.raw[7],
-		my_gid.raw[8], my_gid.raw[9],
-		my_gid.raw[10], my_gid.raw[11],
-		my_gid.raw[12], my_gid.raw[13],
-		my_gid.raw[14], my_gid.raw[15]);
 
-	return ret;
+	return ehca2ib_return_code(h_ret);
 }
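Besides dropping the EDEB tracing, the mcast rewrite fixes byte order: subnet_prefix and interface_id arrive big-endian inside the GID, so they are converted with be64_to_cpu() before being passed to the attach/detach hCalls. The validity checks themselves reduce to the following (a standalone restatement of the macros above, not new driver code):

#include <linux/types.h>

#define MIN_MC_LID 0xC000	/* IB multicast LID range */
#define MAX_MC_LID 0xFFFE

/* A multicast GID is identified by its 0xFF prefix byte. */
static inline int mcast_gid_valid(const u8 *raw)
{
	return raw[0] == 0xFF;
}

static inline int mcast_lid_valid(u16 lid)
{
	return lid >= MIN_MC_LID && lid <= MAX_MC_LID;
}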
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_mrmw.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_mrmw.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_mrmw.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_mrmw.c	2006-10-17 10:15:06.000000000 -0400
@@ -39,9 +39,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#undef DEB_PREFIX
-#define DEB_PREFIX "mrmw"
-
 #include <asm/current.h>
 
 #include "ehca_iverbs.h"
@@ -49,79 +46,62 @@
 #include "hcp_if.h"
 #include "hipz_hw.h"
 
-extern int ehca_use_hp_mr;
+static struct kmem_cache *mr_cache;
+static struct kmem_cache *mw_cache;
 
 static struct ehca_mr *ehca_mr_new(void)
 {
-	extern struct ehca_module ehca_module;
 	struct ehca_mr *me;
 
-	me = kmem_cache_alloc(ehca_module.cache_mr, SLAB_KERNEL);
+	me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
 	if (me) {
 		memset(me, 0, sizeof(struct ehca_mr));
 		spin_lock_init(&me->mrlock);
-		EDEB_EX(7, "ehca_mr=%p sizeof(ehca_mr_t)=%x", me,
-			(u32) sizeof(struct ehca_mr));
-	} else {
-		EDEB_ERR(3, "alloc failed");
-	}
+	} else
+		ehca_gen_err("alloc failed");
 
 	return me;
 }
 
 static void ehca_mr_delete(struct ehca_mr *me)
 {
-	extern struct ehca_module ehca_module;
-
-	kmem_cache_free(ehca_module.cache_mr, me);
+	kmem_cache_free(mr_cache, me);
 }
 
 static struct ehca_mw *ehca_mw_new(void)
 {
-	extern struct ehca_module ehca_module;
 	struct ehca_mw *me;
 
-	me = kmem_cache_alloc(ehca_module.cache_mw, SLAB_KERNEL);
+	me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
 	if (me) {
 		memset(me, 0, sizeof(struct ehca_mw));
 		spin_lock_init(&me->mwlock);
-		EDEB_EX(7, "ehca_mw=%p sizeof(ehca_mw_t)=%x", me,
-			(u32) sizeof(struct ehca_mw));
-	} else {
-		EDEB_ERR(3, "alloc failed");
-	}
+	} else
+		ehca_gen_err("alloc failed");
 
 	return me;
 }
 
 static void ehca_mw_delete(struct ehca_mw *me)
 {
-	extern struct ehca_module ehca_module;
-
-	kmem_cache_free(ehca_module.cache_mw, me);
+	kmem_cache_free(mw_cache, me);
 }
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
 {
-	struct ib_mr *ib_mr = NULL;
-	int ret = 0;
-	struct ehca_mr *e_maxmr = NULL;
-	struct ehca_pd *e_pd = NULL;
-	struct ehca_shca *shca = NULL;
-
-	EDEB_EN(7, "pd=%p mr_access_flags=%x", pd, mr_access_flags);
-
-	EHCA_CHECK_PD_P(pd);
-	e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	shca = container_of(pd->device, struct ehca_shca, ib_device);
+	struct ib_mr *ib_mr;
+	int ret;
+	struct ehca_mr *e_maxmr;
+	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+	struct ehca_shca *shca =
+		container_of(pd->device, struct ehca_shca, ib_device);
 
 	if (shca->maxmr) {
 		e_maxmr = ehca_mr_new();
 		if (!e_maxmr) {
-			EDEB_ERR(4, "out of memory");
+			ehca_err(&shca->ib_device, "out of memory");
 			ib_mr = ERR_PTR(-ENOMEM);
 			goto get_dma_mr_exit0;
 		}
@@ -136,23 +116,19 @@ struct ib_mr *ehca_get_dma_mr(struct ib_
 		}
 		ib_mr = &e_maxmr->ib.ib_mr;
 	} else {
-		EDEB_ERR(4, "no internal max-MR exist!");
+		ehca_err(&shca->ib_device, "no internal max-MR exist!");
 		ib_mr = ERR_PTR(-EINVAL);
 		goto get_dma_mr_exit0;
 	}
 
 get_dma_mr_exit0:
 	if (IS_ERR(ib_mr))
-		EDEB_EX(4, "rc=%lx pd=%p mr_access_flags=%x ",
-			PTR_ERR(ib_mr), pd, mr_access_flags);
-	else
-		EDEB_EX(7, "ib_mr=%p lkey=%x rkey=%x",
-			ib_mr, ib_mr->lkey, ib_mr->rkey);
+		ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
+			 PTR_ERR(ib_mr), pd, mr_access_flags);
 	return ib_mr;
 } /* end ehca_get_dma_mr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
 			       struct ib_phys_buf *phys_buf_array,
@@ -160,23 +136,20 @@ struct ib_mr *ehca_reg_phys_mr(struct ib
 			       int mr_access_flags,
 			       u64 *iova_start)
 {
-	struct ib_mr *ib_mr = NULL;
-	int ret = 0;
-	struct ehca_mr *e_mr = NULL;
-	struct ehca_shca *shca = NULL;
-	struct ehca_pd *e_pd = NULL;
-	u64 size = 0;
+	struct ib_mr *ib_mr;
+	int ret;
+	struct ehca_mr *e_mr;
+	struct ehca_shca *shca =
+		container_of(pd->device, struct ehca_shca, ib_device);
+	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+
+	u64 size;
 	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
-	u32 num_pages_mr = 0;
-	u32 num_pages_4k = 0; /* 4k portion "pages" */
+	u32 num_pages_mr;
+	u32 num_pages_4k; /* 4k portion "pages" */
 
-	EDEB_EN(7, "pd=%p phys_buf_array=%p num_phys_buf=%x "
-		"mr_access_flags=%x iova_start=%p", pd, phys_buf_array,
-		num_phys_buf, mr_access_flags, iova_start);
-
-	EHCA_CHECK_PD_P(pd);
-	if ((num_phys_buf <= 0) || ehca_adr_bad(phys_buf_array)) {
-		EDEB_ERR(4, "bad input values: num_phys_buf=%x "
+	if ((num_phys_buf <= 0) || !phys_buf_array) {
+		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
 			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_phys_mr_exit0;
@@ -185,9 +158,11 @@ struct ib_mr *ehca_reg_phys_mr(struct ib
 	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
 	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
 	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
-		/* Remote Write Access requires Local Write Access */
-		/* Remote Atomic Access requires Local Write Access */
-		EDEB_ERR(4, "bad input values: mr_access_flags=%x",
+		/*
+		 * Remote Write Access requires Local Write Access
+		 * Remote Atomic Access requires Local Write Access
+		 */
+		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
 			 mr_access_flags);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_phys_mr_exit0;
@@ -202,18 +177,15 @@ struct ib_mr *ehca_reg_phys_mr(struct ib
 	}
 	if ((size == 0) ||
 	    (((u64)iova_start + size) < (u64)iova_start)) {
-		EDEB_ERR(4, "bad input values: size=%lx iova_start=%p",
+		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
 			 size, iova_start);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_phys_mr_exit0;
 	}
 
-	e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	shca = container_of(pd->device, struct ehca_shca, ib_device);
-
 	e_mr = ehca_mr_new();
 	if (!e_mr) {
-		EDEB_ERR(4, "out of memory");
+		ehca_err(pd->device, "out of memory");
 		ib_mr = ERR_PTR(-ENOMEM);
 		goto reg_phys_mr_exit0;
 	}
@@ -253,46 +225,42 @@ struct ib_mr *ehca_reg_phys_mr(struct ib
 	}
 
 	/* successful registration of all pages */
-	ib_mr = &e_mr->ib.ib_mr;
-	goto reg_phys_mr_exit0;
+	return &e_mr->ib.ib_mr;
 
 reg_phys_mr_exit1:
 	ehca_mr_delete(e_mr);
 reg_phys_mr_exit0:
 	if (IS_ERR(ib_mr))
-		EDEB_EX(4, "rc=%lx pd=%p phys_buf_array=%p "
-			"num_phys_buf=%x mr_access_flags=%x iova_start=%p",
-			PTR_ERR(ib_mr), pd, phys_buf_array,
-			num_phys_buf, mr_access_flags, iova_start);
-	else
-		EDEB_EX(7, "ib_mr=%p lkey=%x rkey=%x",
-			ib_mr, ib_mr->lkey, ib_mr->rkey);
+		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
+			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
+			 PTR_ERR(ib_mr), pd, phys_buf_array,
+			 num_phys_buf, mr_access_flags, iova_start);
 	return ib_mr;
 } /* end ehca_reg_phys_mr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
 			       struct ib_umem *region,
 			       int mr_access_flags,
 			       struct ib_udata *udata)
 {
-	struct ib_mr *ib_mr = NULL;
-	struct ehca_mr *e_mr = NULL;
-	struct ehca_shca *shca = NULL;
-	struct ehca_pd *e_pd = NULL;
+	struct ib_mr *ib_mr;
+	struct ehca_mr *e_mr;
+	struct ehca_shca *shca =
+		container_of(pd->device, struct ehca_shca, ib_device);
+	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
 	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
-	int ret = 0;
-	u32 num_pages_mr = 0;
-	u32 num_pages_4k = 0; /* 4k portion "pages" */
-
-	EDEB_EN(7, "pd=%p region=%p mr_access_flags=%x udata=%p",
-		pd, region, mr_access_flags, udata);
-
-	EHCA_CHECK_PD_P(pd);
-	if (ehca_adr_bad(region)) {
-		EDEB_ERR(4, "bad input values: region=%p", region);
+	int ret;
+	u32 num_pages_mr;
+	u32 num_pages_4k; /* 4k portion "pages" */
+
+	if (!pd) {
+		ehca_gen_err("bad pd=%p", pd);
+		return ERR_PTR(-EFAULT);
+	}
+	if (!region) {
+		ehca_err(pd->device, "bad input values: region=%p", region);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_user_mr_exit0;
 	}
@@ -300,38 +268,33 @@ struct ib_mr *ehca_reg_user_mr(struct ib
 	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
 	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
 	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
-		/* Remote Write Access requires Local Write Access */
-		/* Remote Atomic Access requires Local Write Access */
-		EDEB_ERR(4, "bad input values: mr_access_flags=%x",
+		/*
+		 * Remote Write Access requires Local Write Access
+		 * Remote Atomic Access requires Local Write Access
+		 */
+		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
 			 mr_access_flags);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_user_mr_exit0;
 	}
-	EDEB(7, "user_base=%lx virt_base=%lx length=%lx offset=%x page_size=%x "
-	     "chunk_list.next=%p",
-	     region->user_base, region->virt_base, region->length,
-	     region->offset, region->page_size, region->chunk_list.next);
 	if (region->page_size != PAGE_SIZE) {
-		EDEB_ERR(4, "page size not supported, region->page_size=%x",
-			 region->page_size);
+		ehca_err(pd->device, "page size not supported, "
+			 "region->page_size=%x", region->page_size);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_user_mr_exit0;
 	}
 
 	if ((region->length == 0) ||
 	    ((region->virt_base + region->length) < region->virt_base)) {
-		EDEB_ERR(4, "bad input values: length=%lx virt_base=%lx",
-			 region->length, region->virt_base);
+		ehca_err(pd->device, "bad input values: length=%lx "
+			 "virt_base=%lx", region->length, region->virt_base);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_user_mr_exit0;
 	}
 
-	e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	shca = container_of(pd->device, struct ehca_shca, ib_device);
-
 	e_mr = ehca_mr_new();
 	if (!e_mr) {
-		EDEB_ERR(4, "out of memory");
+		ehca_err(pd->device, "out of memory");
 		ib_mr = ERR_PTR(-ENOMEM);
 		goto reg_user_mr_exit0;
 	}
@@ -361,24 +324,19 @@ struct ib_mr *ehca_reg_user_mr(struct ib
 	}
 
 	/* successful registration of all pages */
-	ib_mr = &e_mr->ib.ib_mr;
-	goto reg_user_mr_exit0;
+	return &e_mr->ib.ib_mr;
 
 reg_user_mr_exit1:
 	ehca_mr_delete(e_mr);
 reg_user_mr_exit0:
 	if (IS_ERR(ib_mr))
-		EDEB_EX(4, "rc=%lx pd=%p region=%p mr_access_flags=%x "
-			"udata=%p",
-			PTR_ERR(ib_mr), pd, region, mr_access_flags, udata);
-	else
-		EDEB_EX(7, "ib_mr=%p lkey=%x rkey=%x",
-			ib_mr, ib_mr->lkey, ib_mr->rkey);
+		ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x"
+			 " udata=%p",
+			 PTR_ERR(ib_mr), pd, region, mr_access_flags, udata);
 	return ib_mr;
 } /* end ehca_reg_user_mr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_rereg_phys_mr(struct ib_mr *mr,
 		       int mr_rereg_mask,
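A pattern repeated throughout the ehca_mrmw.c hunks: the EHCA_CHECK_* macros and ehca_adr_bad() are dropped in favor of resolving the driver structures with container_of() right in the declarations. The idiom in isolation (struct my_mr is a simplified stand-in; the real struct ehca_mr embeds ib_mr inside a union):

#include <linux/kernel.h>	/* container_of() */
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

struct my_mr {
	spinlock_t mrlock;
	struct ib_mr ib_mr;	/* embedded verbs object */
};

/* Recover the containing driver object from the ib_mr pointer the
 * verbs core hands us. */
static inline struct my_mr *to_my_mr(struct ib_mr *mr)
{
	return container_of(mr, struct my_mr, ib_mr);
}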
@@ -388,49 +346,46 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 		       int mr_access_flags,
 		       u64 *iova_start)
 {
-	int ret = 0;
-	struct ehca_shca *shca = NULL;
-	struct ehca_mr *e_mr = NULL;
-	u64 new_size = 0;
-	u64 *new_start = NULL;
-	u32 new_acl = 0;
-	struct ehca_pd *new_pd = NULL;
-	u32 tmp_lkey = 0;
-	u32 tmp_rkey = 0;
+	int ret;
+
+	struct ehca_shca *shca =
+		container_of(mr->device, struct ehca_shca, ib_device);
+	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
+	u64 new_size;
+	u64 *new_start;
+	u32 new_acl;
+	struct ehca_pd *new_pd;
+	u32 tmp_lkey, tmp_rkey;
 	unsigned long sl_flags;
 	u32 num_pages_mr = 0;
 	u32 num_pages_4k = 0; /* 4k portion "pages" */
 	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
-	struct ehca_pd *my_pd = NULL;
 	u32 cur_pid = current->tgid;
 
-	EDEB_EN(7, "mr=%p mr_rereg_mask=%x pd=%p phys_buf_array=%p "
-		"num_phys_buf=%x mr_access_flags=%x iova_start=%p",
-		mr, mr_rereg_mask, pd, phys_buf_array, num_phys_buf,
-		mr_access_flags, iova_start);
-
-	EHCA_CHECK_MR(mr);
-	my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    (my_pd->ownpid != cur_pid)) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
 		ret = -EINVAL;
 		goto rereg_phys_mr_exit0;
 	}
 
 	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
-		/* TODO not supported, because PHYP rereg hCall needs pages*/
-		/* TODO: We will follow this with Tom ....*/
-		EDEB_ERR(4, "rereg without IB_MR_REREG_TRANS not supported yet,"
-			 " mr_rereg_mask=%x", mr_rereg_mask);
+		/* TODO not supported, because PHYP rereg hCall needs pages */
+		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
+			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
 		ret = -EINVAL;
 		goto rereg_phys_mr_exit0;
 	}
 
-	e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
 	if (mr_rereg_mask & IB_MR_REREG_PD) {
-		EHCA_CHECK_PD(pd);
+		if (!pd) {
+			ehca_err(mr->device, "rereg with bad pd, pd=%p "
+				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
+			ret = -EINVAL;
+			goto rereg_phys_mr_exit0;
+		}
 	}
 
 	if ((mr_rereg_mask &
@@ -440,12 +395,10 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 		goto rereg_phys_mr_exit0;
 	}
 
-	shca = container_of(mr->device, struct ehca_shca, ib_device);
-
 	/* check other parameters */
 	if (e_mr == shca->maxmr) {
 		/* should be impossible, however reject to be sure */
-		EDEB_ERR(3, "rereg internal max-MR impossible, mr=%p "
+		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
 			 "shca->maxmr=%p mr->lkey=%x",
 			 mr, shca->maxmr, mr->lkey);
 		ret = -EINVAL;
@@ -453,14 +406,14 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 	}
 	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
 		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
-			EDEB_ERR(4, "not supported for FMR, mr=%p flags=%x",
-				 mr, e_mr->flags);
+			ehca_err(mr->device, "not supported for FMR, mr=%p "
+				 "flags=%x", mr, e_mr->flags);
 			ret = -EINVAL;
 			goto rereg_phys_mr_exit0;
 		}
-		if (ehca_adr_bad(phys_buf_array) || num_phys_buf <= 0) {
-			EDEB_ERR(4, "bad input values: mr_rereg_mask=%x "
-				 "phys_buf_array=%p num_phys_buf=%x",
+		if (!phys_buf_array || num_phys_buf <= 0) {
+			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
+				 " phys_buf_array=%p num_phys_buf=%x",
 				 mr_rereg_mask, phys_buf_array, num_phys_buf);
 			ret = -EINVAL;
 			goto rereg_phys_mr_exit0;
@@ -471,9 +424,11 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
 	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
 	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
-		/* Remote Write Access requires Local Write Access */
-		/* Remote Atomic Access requires Local Write Access */
-		EDEB_ERR(4, "bad input values: mr_rereg_mask=%x "
+		/*
+		 * Remote Write Access requires Local Write Access
+		 * Remote Atomic Access requires Local Write Access
+		 */
+		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
 			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
 		ret = -EINVAL;
 		goto rereg_phys_mr_exit0;
@@ -496,7 +451,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 			goto rereg_phys_mr_exit1;
 		if ((new_size == 0) ||
 		    (((u64)iova_start + new_size) < (u64)iova_start)) {
-			EDEB_ERR(4, "bad input values: new_size=%lx "
+			ehca_err(mr->device, "bad input values: new_size=%lx "
 				 "iova_start=%p", new_size, iova_start);
 			ret = -EINVAL;
 			goto rereg_phys_mr_exit1;
@@ -518,10 +473,6 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 	if (mr_rereg_mask & IB_MR_REREG_PD)
 		new_pd = container_of(pd, struct ehca_pd, ib_pd);
 
-	EDEB(7, "mr=%p new_start=%p new_size=%lx new_acl=%x new_pd=%p "
-	     "num_pages_mr=%x num_pages_4k=%x", e_mr, new_start, new_size,
-	     new_acl, new_pd, num_pages_mr, num_pages_4k);
-
 	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
 			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
 	if (ret)
@@ -537,67 +488,49 @@ rereg_phys_mr_exit1:
 	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
 rereg_phys_mr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
-			"phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
-			"iova_start=%p",
-			ret, mr, mr_rereg_mask, pd, phys_buf_array,
-			num_phys_buf, mr_access_flags, iova_start);
-	else
-		EDEB_EX(7, "mr=%p mr_rereg_mask=%x pd=%p phys_buf_array=%p "
-			"num_phys_buf=%x mr_access_flags=%x iova_start=%p",
-			mr, mr_rereg_mask, pd, phys_buf_array, num_phys_buf,
-			mr_access_flags, iova_start);
-
+		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
+			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
+			 "iova_start=%p",
+			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
+			 num_phys_buf, mr_access_flags, iova_start);
 	return ret;
 } /* end ehca_rereg_phys_mr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
 {
 	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	struct ehca_shca *shca = NULL;
-	struct ehca_mr *e_mr = NULL;
-	struct ehca_pd *my_pd = NULL;
+	u64 h_ret;
+	struct ehca_shca *shca =
+		container_of(mr->device, struct ehca_shca, ib_device);
+	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 	u32 cur_pid = current->tgid;
 	unsigned long sl_flags;
 	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
 
-	EDEB_EN(7, "mr=%p mr_attr=%p", mr, mr_attr);
-
-	EHCA_CHECK_MR(mr);
-
-	my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    (my_pd->ownpid != cur_pid)) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
 		ret = -EINVAL;
 		goto query_mr_exit0;
 	}
 
-	e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-	if (ehca_adr_bad(mr_attr)) {
-		EDEB_ERR(4, "bad input values: mr_attr=%p", mr_attr);
-		ret = -EINVAL;
-		goto query_mr_exit0;
-	}
 	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
-		EDEB_ERR(4, "not supported for FMR, mr=%p e_mr=%p "
+		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
 			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
 		ret = -EINVAL;
 		goto query_mr_exit0;
 	}
 
-	shca = container_of(mr->device, struct ehca_shca, ib_device);
 	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
 	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
 
 	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_mr_query failed, h_ret=%lx mr=%p "
+		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
 			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
 			 h_ret, mr, shca->ipz_hca_handle.handle,
 			 e_mr->ipz_mr_handle.handle, mr->lkey);
@@ -615,51 +548,39 @@ query_mr_exit1:
 	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
 query_mr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x mr=%p mr_attr=%p", ret, mr, mr_attr);
-	else
-		EDEB_EX(7, "pd=%p device_virt_addr=%lx size=%lx "
-			"mr_access_flags=%x lkey=%x rkey=%x",
-			mr_attr->pd, mr_attr->device_virt_addr,
-			mr_attr->size, mr_attr->mr_access_flags,
-			mr_attr->lkey, mr_attr->rkey);
+		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
+			 ret, mr, mr_attr);
 	return ret;
 } /* end ehca_query_mr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_dereg_mr(struct ib_mr *mr)
 {
 	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	struct ehca_shca *shca = NULL;
-	struct ehca_mr *e_mr = NULL;
-	struct ehca_pd *my_pd = NULL;
+	u64 h_ret;
+	struct ehca_shca *shca =
+		container_of(mr->device, struct ehca_shca, ib_device);
+	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 	u32 cur_pid = current->tgid;
 
-	EDEB_EN(7, "mr=%p", mr);
-
-	EHCA_CHECK_MR(mr);
-	my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    (my_pd->ownpid != cur_pid)) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
 		ret = -EINVAL;
 		goto dereg_mr_exit0;
 	}
 
-	e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-	shca = container_of(mr->device, struct ehca_shca, ib_device);
-
 	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
-		EDEB_ERR(4, "not supported for FMR, mr=%p e_mr=%p "
+		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
 			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
 		ret = -EINVAL;
 		goto dereg_mr_exit0;
 	} else if (e_mr == shca->maxmr) {
 		/* should be impossible, however reject to be sure */
-		EDEB_ERR(3, "dereg internal max-MR impossible, mr=%p "
+		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
 			 "shca->maxmr=%p mr->lkey=%x",
 			 mr, shca->maxmr, mr->lkey);
 		ret = -EINVAL;
@@ -669,8 +590,8 @@ int ehca_dereg_mr(struct ib_mr *mr)
 	/* TODO: BUSY: MR still has bound window(s) */
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_free_mr failed, h_ret=%lx shca=%p e_mr=%p"
-			 " hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
+			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
 			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
 			 e_mr->ipz_mr_handle.handle, mr->lkey);
 		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
@@ -682,30 +603,22 @@ int ehca_dereg_mr(struct ib_mr *mr)
 
 dereg_mr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x mr=%p", ret, mr);
-	else
-		EDEB_EX(7, "");
+		ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
 	return ret;
 } /* end ehca_dereg_mr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
 {
-	struct ib_mw *ib_mw = NULL;
-	u64 h_ret = H_SUCCESS;
-	struct ehca_shca *shca = NULL;
-	struct ehca_mw *e_mw = NULL;
-	struct ehca_pd *e_pd = NULL;
+	struct ib_mw *ib_mw;
+	u64 h_ret;
+	struct ehca_mw *e_mw;
+	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+	struct ehca_shca *shca =
+		container_of(pd->device, struct ehca_shca, ib_device);
 	struct ehca_mw_hipzout_parms hipzout = {{0},0};
 
-	EDEB_EN(7, "pd=%p", pd);
-
-	EHCA_CHECK_PD_P(pd);
-	e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	shca = container_of(pd->device, struct ehca_shca, ib_device);
-
 	e_mw = ehca_mw_new();
 	if (!e_mw) {
 		ib_mw = ERR_PTR(-ENOMEM);
@@ -715,136 +628,96 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd
 	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
 					 e_pd->fw_pd, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_mw_allocate failed, h_ret=%lx shca=%p "
-			 "hca_hndl=%lx mw=%p", h_ret, shca,
-			 shca->ipz_hca_handle.handle, e_mw);
+		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
+			 "shca=%p hca_hndl=%lx mw=%p",
+			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
 		ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
 		goto alloc_mw_exit1;
 	}
 	/* successful MW allocation */
 	e_mw->ipz_mw_handle = hipzout.handle;
 	e_mw->ib_mw.rkey    = hipzout.rkey;
-	ib_mw = &e_mw->ib_mw;
-	goto alloc_mw_exit0;
+	return &e_mw->ib_mw;
 
 alloc_mw_exit1:
 	ehca_mw_delete(e_mw);
 alloc_mw_exit0:
 	if (IS_ERR(ib_mw))
-		EDEB_EX(4, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
-	else
-		EDEB_EX(7, "ib_mw=%p rkey=%x", ib_mw, ib_mw->rkey);
+		ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
 	return ib_mw;
 } /* end ehca_alloc_mw() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_bind_mw(struct ib_qp *qp,
 		 struct ib_mw *mw,
 		 struct ib_mw_bind *mw_bind)
 {
-	int ret = 0;
-
 	/* TODO: not supported up to now */
-	EDEB_ERR(4, "bind MW currently not supported by HCAD");
-	ret = -EPERM;
-	goto bind_mw_exit0;
+	ehca_gen_err("bind MW currently not supported by HCAD");
 
-bind_mw_exit0:
-	if (ret)
-		EDEB_EX(4, "ret=%x qp=%p mw=%p mw_bind=%p",
-			ret, qp, mw, mw_bind);
-	else
-		EDEB_EX(7, "qp=%p mw=%p mw_bind=%p", qp, mw, mw_bind);
-	return ret;
+	return -EPERM;
 } /* end ehca_bind_mw() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_dealloc_mw(struct ib_mw *mw)
 {
-	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	struct ehca_shca *shca = NULL;
-	struct ehca_mw *e_mw = NULL;
-
-	EDEB_EN(7, "mw=%p", mw);
-
-	EHCA_CHECK_MW(mw);
-	e_mw = container_of(mw, struct ehca_mw, ib_mw);
-	shca = container_of(mw->device, struct ehca_shca, ib_device);
+	u64 h_ret;
+	struct ehca_shca *shca =
+		container_of(mw->device, struct ehca_shca, ib_device);
+	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
 
 	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_free_mw failed, h_ret=%lx shca=%p mw=%p "
-			 "rkey=%x hca_hndl=%lx mw_hndl=%lx",
+		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
+			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
 			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
 			 e_mw->ipz_mw_handle.handle);
-		ret = ehca_mrmw_map_hrc_free_mw(h_ret);
-		goto dealloc_mw_exit0;
+		return ehca_mrmw_map_hrc_free_mw(h_ret);
 	}
 	/* successful deallocation */
 	ehca_mw_delete(e_mw);
-
-dealloc_mw_exit0:
-	if (ret)
-		EDEB_EX(4, "ret=%x mw=%p", ret, mw);
-	else
-		EDEB_EX(7, "");
-	return ret;
+	return 0;
 } /* end ehca_dealloc_mw() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 			      int mr_access_flags,
 			      struct ib_fmr_attr *fmr_attr)
 {
-	struct ib_fmr *ib_fmr = NULL;
-	struct ehca_shca *shca = NULL;
-	struct ehca_mr *e_fmr = NULL;
-	int ret = 0;
-	struct ehca_pd *e_pd = NULL;
-	u32 tmp_lkey = 0;
-	u32 tmp_rkey = 0;
+	struct ib_fmr *ib_fmr;
+	struct ehca_shca *shca =
+		container_of(pd->device, struct ehca_shca, ib_device);
+	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+	struct ehca_mr *e_fmr;
+	int ret;
+	u32 tmp_lkey, tmp_rkey;
 	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
 
-	EDEB_EN(7, "pd=%p mr_access_flags=%x fmr_attr=%p",
-		pd, mr_access_flags, fmr_attr);
-
-	EHCA_CHECK_PD_P(pd);
-	if (ehca_adr_bad(fmr_attr)) {
-		EDEB_ERR(4, "bad input values: fmr_attr=%p", fmr_attr);
-		ib_fmr = ERR_PTR(-EINVAL);
-		goto alloc_fmr_exit0;
-	}
-
-	EDEB(7, "max_pages=%x max_maps=%x page_shift=%x",
-	     fmr_attr->max_pages, fmr_attr->max_maps, fmr_attr->page_shift);
-
 	/* check other parameters */
 	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
 	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
 	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
 	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
-		/* Remote Write Access requires Local Write Access */
-		/* Remote Atomic Access requires Local Write Access */
-		EDEB_ERR(4, "bad input values: mr_access_flags=%x",
+		/*
+		 * Remote Write Access requires Local Write Access
+		 * Remote Atomic Access requires Local Write Access
+		 */
+		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
 			 mr_access_flags);
 		ib_fmr = ERR_PTR(-EINVAL);
 		goto alloc_fmr_exit0;
 	}
 	if (mr_access_flags & IB_ACCESS_MW_BIND) {
-		EDEB_ERR(4, "bad input values: mr_access_flags=%x",
+		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
 			 mr_access_flags);
 		ib_fmr = ERR_PTR(-EINVAL);
 		goto alloc_fmr_exit0;
 	}
 	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
-		EDEB_ERR(4, "bad input values: fmr_attr->max_pages=%x "
+		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
 			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
 			 fmr_attr->max_pages, fmr_attr->max_maps,
 			 fmr_attr->page_shift);
@@ -853,15 +726,12 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_
 	}
 	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
 	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
-		EDEB_ERR(4, "unsupported fmr_attr->page_shift=%x",
+		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
 			 fmr_attr->page_shift);
 		ib_fmr = ERR_PTR(-EINVAL);
 		goto alloc_fmr_exit0;
 	}
 
-	e_pd = container_of(pd, struct ehca_pd, ib_pd);
-	shca = container_of(pd->device, struct ehca_shca, ib_device);
-
 	e_fmr = ehca_mr_new();
 	if (!e_fmr) {
 		ib_fmr = ERR_PTR(-ENOMEM);
@@ -884,48 +754,35 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_
 	e_fmr->fmr_max_pages = fmr_attr->max_pages;
 	e_fmr->fmr_max_maps = fmr_attr->max_maps;
 	e_fmr->fmr_map_cnt = 0;
-	ib_fmr = &e_fmr->ib.ib_fmr;
-	goto alloc_fmr_exit0;
+	return &e_fmr->ib.ib_fmr;
 
 alloc_fmr_exit1:
 	ehca_mr_delete(e_fmr);
 alloc_fmr_exit0:
 	if (IS_ERR(ib_fmr))
-		EDEB_EX(4, "rc=%lx pd=%p mr_access_flags=%x "
-			"fmr_attr=%p", PTR_ERR(ib_fmr), pd,
-			mr_access_flags, fmr_attr);
-	else
-		EDEB_EX(7, "ib_fmr=%p tmp_lkey=%x tmp_rkey=%x",
-			ib_fmr, tmp_lkey, tmp_rkey);
+		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
+			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
+			 mr_access_flags, fmr_attr);
 	return ib_fmr;
 } /* end ehca_alloc_fmr() */
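
The surviving checks above encode two IB-spec rules, remote write and remote atomic access each require local write access, plus the ehca restriction that an FMR page size must be either EHCA_PAGESIZE or the OS page size (IB_ACCESS_MW_BIND is rejected outright, since ehca_bind_mw() is unimplemented). A condensed standalone version of the same tests; the flag values below follow my reading of ib_verbs.h but should be treated as illustrative:

	#define IB_ACCESS_LOCAL_WRITE	(1 << 0)
	#define IB_ACCESS_REMOTE_WRITE	(1 << 1)
	#define IB_ACCESS_REMOTE_ATOMIC	(1 << 3)
	#define EHCA_PAGESIZE	4096UL	/* HW page, assumed 4K */
	#define PAGE_SIZE_	4096UL	/* stand-in for PAGE_SIZE */

	static int fmr_params_ok(int acl, int page_shift)
	{
		/* remote write/atomic imply local write */
		if ((acl & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
		    !(acl & IB_ACCESS_LOCAL_WRITE))
			return 0;
		/* only the HCA page size or the OS page size is mappable */
		if ((1UL << page_shift) != EHCA_PAGESIZE &&
		    (1UL << page_shift) != PAGE_SIZE_)
			return 0;
		return 1;
	}
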
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_map_phys_fmr(struct ib_fmr *fmr,
 		      u64 *page_list,
 		      int list_len,
 		      u64 iova)
 {
-	int ret = 0;
-	struct ehca_shca *shca = NULL;
-	struct ehca_mr *e_fmr = NULL;
-	struct ehca_pd *e_pd = NULL;
+	int ret;
+	struct ehca_shca *shca =
+		container_of(fmr->device, struct ehca_shca, ib_device);
+	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
+	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
 	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
-	u32 tmp_lkey = 0;
-	u32 tmp_rkey = 0;
-
-	EDEB_EN(7, "fmr=%p page_list=%p list_len=%x iova=%lx",
-		fmr, page_list, list_len, iova);
-
-	EHCA_CHECK_FMR(fmr);
-	e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-	shca = container_of(fmr->device, struct ehca_shca, ib_device);
-	e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
+	u32 tmp_lkey, tmp_rkey;
 
 	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-		EDEB_ERR(4, "not a FMR, e_fmr=%p e_fmr->flags=%x",
+		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
 			 e_fmr, e_fmr->flags);
 		ret = -EINVAL;
 		goto map_phys_fmr_exit0;
@@ -935,20 +792,21 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr
 		goto map_phys_fmr_exit0;
 	if (iova % e_fmr->fmr_page_size) {
 		/* only whole-numbered pages */
-		EDEB_ERR(4, "bad iova, iova=%lx fmr_page_size=%x",
+		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
 			 iova, e_fmr->fmr_page_size);
 		ret = -EINVAL;
 		goto map_phys_fmr_exit0;
 	}
 	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
 		/* HCAD does not limit the maps, however trace this anyway */
-		EDEB(6, "map limit exceeded, fmr=%p e_fmr->fmr_map_cnt=%x "
-		     "e_fmr->fmr_max_maps=%x",
-		     fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
+		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
+			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
+			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
 	}
 
 	pginfo.type      = EHCA_MR_PGI_FMR;
 	pginfo.num_pages = list_len;
+	pginfo.num_4k    = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
 	pginfo.page_list = page_list;
 	pginfo.next_4k   = ((iova & (e_fmr->fmr_page_size-1)) /
 			    EHCA_PAGESIZE);
@@ -963,48 +821,49 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr
 	e_fmr->fmr_map_cnt++;
 	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
 	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
+	return 0;
 
 map_phys_fmr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x fmr=%p page_list=%p list_len=%x iova=%lx",
-			ret, fmr, page_list, list_len, iova);
-	else
-		EDEB_EX(7, "lkey=%x rkey=%x",
-			e_fmr->ib.ib_fmr.lkey, e_fmr->ib.ib_fmr.rkey);
+		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
+			 "iova=%lx",
+			 ret, fmr, page_list, list_len, iova);
 	return ret;
 } /* end ehca_map_phys_fmr() */
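
Besides the message conversion, the key behavioral change in this hunk is that pginfo.num_4k is now filled in: each FMR page of fmr_page_size bytes consists of fmr_page_size / EHCA_PAGESIZE hardware 4K pages, and the registration loop counts in the latter unit. Worked numbers, assuming a 4K EHCA_PAGESIZE: a 64K FMR page size with list_len = 8 spans 8 * (65536 / 4096) = 128 rpage entries, not 8.

	/* the num_4k arithmetic from the hunk above, standalone */
	#define EHCA_PAGESIZE 4096UL

	static unsigned long fmr_num_4k(unsigned long list_len,
					unsigned long fmr_page_size)
	{
		return list_len * (fmr_page_size / EHCA_PAGESIZE);
	}
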
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_unmap_fmr(struct list_head *fmr_list)
 {
 	int ret = 0;
-	struct ib_fmr *ib_fmr = NULL;
+	struct ib_fmr *ib_fmr;
 	struct ehca_shca *shca = NULL;
-	struct ehca_shca *prev_shca = NULL;
-	struct ehca_mr *e_fmr = NULL;
+	struct ehca_shca *prev_shca;
+	struct ehca_mr *e_fmr;
 	u32 num_fmr = 0;
 	u32 unmap_fmr_cnt = 0;
 
-	EDEB_EN(7, "fmr_list=%p", fmr_list);
-
 	/* check all FMR belong to same SHCA, and check internal flag */
 	list_for_each_entry(ib_fmr, fmr_list, list) {
 		prev_shca = shca;
+		if (!ib_fmr) {
+			ehca_gen_err("bad fmr=%p in list", ib_fmr);
+			ret = -EINVAL;
+			goto unmap_fmr_exit0;
+		}
 		shca = container_of(ib_fmr->device, struct ehca_shca,
 				    ib_device);
-		EHCA_CHECK_FMR(ib_fmr);
 		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
 		if ((shca != prev_shca) && prev_shca) {
-			EDEB_ERR(4, "SHCA mismatch, shca=%p prev_shca=%p "
-				 "e_fmr=%p", shca, prev_shca, e_fmr);
+			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
+				 "prev_shca=%p e_fmr=%p",
+				 shca, prev_shca, e_fmr);
 			ret = -EINVAL;
 			goto unmap_fmr_exit0;
 		}
 		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-			EDEB_ERR(4, "not a FMR, e_fmr=%p e_fmr->flags=%x",
-				 e_fmr, e_fmr->flags);
+			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
+				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
 			ret = -EINVAL;
 			goto unmap_fmr_exit0;
 		}
@@ -1020,41 +879,33 @@ int ehca_unmap_fmr(struct list_head *fmr
 		ret = ehca_unmap_one_fmr(shca, e_fmr);
 		if (ret) {
 			/* unmap failed, stop unmapping of rest of FMRs */
-			EDEB_ERR(4, "unmap of one FMR failed, stop rest, "
-				 "e_fmr=%p num_fmr=%x unmap_fmr_cnt=%x lkey=%x",
-				 e_fmr, num_fmr, unmap_fmr_cnt,
-				 e_fmr->ib.ib_fmr.lkey);
+			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
+				 "stop rest, e_fmr=%p num_fmr=%x "
+				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
+				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
 			goto unmap_fmr_exit0;
 		}
 	}
 
 unmap_fmr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
-			ret, fmr_list, num_fmr, unmap_fmr_cnt);
-	else
-		EDEB_EX(7, "num_fmr=%x", num_fmr);
+		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
+			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
 	return ret;
 } /* end ehca_unmap_fmr() */
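
The rewritten walk keeps the original invariant: every FMR on the caller's list must belong to the same HCA, verified by comparing each entry's device against the previous one. The shape of that check, reduced to a standalone sketch (a plain singly linked list stands in for the kernel's list_for_each_entry()):

	struct node { const void *device; struct node *next; };

	static int all_same_device(const struct node *head)
	{
		const void *prev = NULL;
		const struct node *n;

		for (n = head; n; n = n->next) {
			if (prev && n->device != prev)
				return 0;	/* SHCA mismatch */
			prev = n->device;
		}
		return 1;
	}
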
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_dealloc_fmr(struct ib_fmr *fmr)
 {
-	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	struct ehca_shca *shca = NULL;
-	struct ehca_mr *e_fmr = NULL;
-
-	EDEB_EN(7, "fmr=%p", fmr);
-
-	EHCA_CHECK_FMR(fmr);
-	e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-	shca = container_of(fmr->device, struct ehca_shca, ib_device);
+	int ret;
+	u64 h_ret;
+	struct ehca_shca *shca =
+		container_of(fmr->device, struct ehca_shca, ib_device);
+	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
 
 	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-		EDEB_ERR(4, "not a FMR, e_fmr=%p e_fmr->flags=%x",
+		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
 			 e_fmr, e_fmr->flags);
 		ret = -EINVAL;
 		goto free_fmr_exit0;
@@ -1062,26 +913,24 @@ int ehca_dealloc_fmr(struct ib_fmr *fmr)
 
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
+		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
 			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
 			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
-		ehca_mrmw_map_hrc_free_mr(h_ret);
+		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
 		goto free_fmr_exit0;
 	}
 	/* successful deregistration */
 	ehca_mr_delete(e_fmr);
+	return 0;
 
 free_fmr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x fmr=%p", ret, fmr);
-	else
-		EDEB_EX(7, "");
+		ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
 	return ret;
 } /* end ehca_dealloc_fmr() */
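
Note that this hunk fixes a real bug as well: the old code called ehca_mrmw_map_hrc_free_mr(h_ret) without storing the result, so on a hypervisor failure ehca_dealloc_fmr() fell through to the exit label with ret still 0 and reported success. Reduced to its essence (map_hrc() is a stand-in for ehca_mrmw_map_hrc_free_mr()):

	#define H_SUCCESS 0UL

	static int map_hrc(unsigned long h_ret)
	{
		return h_ret == H_SUCCESS ? 0 : -22;	/* -EINVAL, simplified */
	}

	static int dealloc(unsigned long h_ret)
	{
		int ret = 0;

		if (h_ret != H_SUCCESS)
			ret = map_hrc(h_ret);	/* old code dropped this value */
		return ret;
	}
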
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_mr *e_mr,
@@ -1093,15 +942,11 @@ int ehca_reg_mr(struct ehca_shca *shca,
 		u32 *lkey, /*OUT*/
 		u32 *rkey) /*OUT*/
 {
-	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	u32 hipz_acl = 0;
+	int ret;
+	u64 h_ret;
+	u32 hipz_acl;
 	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
 
-	EDEB_EN(7, "shca=%p e_mr=%p iova_start=%p size=%lx acl=%x e_pd=%p "
-		"pginfo=%p num_pages=%lx num_4k=%lx", shca, e_mr, iova_start,
-		size, acl, e_pd, pginfo, pginfo->num_pages, pginfo->num_4k);
-
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 	if (ehca_use_hp_mr == 1)
@@ -1111,8 +956,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
 					 (u64)iova_start, size, hipz_acl,
 					 e_pd->fw_pd, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_alloc_mr failed, h_ret=%lx hca_hndl=%lx",
-			 h_ret, shca->ipz_hca_handle.handle);
+		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
+			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
 		ret = ehca_mrmw_map_hrc_alloc(h_ret);
 		goto ehca_reg_mr_exit0;
 	}
@@ -1131,49 +976,46 @@ int ehca_reg_mr(struct ehca_shca *shca,
 	e_mr->acl       = acl;
 	*lkey = hipzout.lkey;
 	*rkey = hipzout.rkey;
-	goto ehca_reg_mr_exit0;
+	return 0;
 
 ehca_reg_mr_exit1:
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(1, "h_ret=%lx shca=%p e_mr=%p iova_start=%p "
-			 "size=%lx acl=%x e_pd=%p lkey=%x pginfo=%p "
-			 "num_pages=%lx num_4k=%lx ret=%x", h_ret, shca, e_mr,
-			 iova_start, size, acl, e_pd, hipzout.lkey, pginfo,
-			 pginfo->num_pages, pginfo->num_4k, ret);
-		EDEB_ERR(1, "internal error in ehca_reg_mr, not recoverable");
+		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
+			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
+			 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
+			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
+			 hipzout.lkey, pginfo, pginfo->num_pages,
+			 pginfo->num_4k, ret);
+		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
+			 "not recoverable");
 	}
 ehca_reg_mr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x shca=%p e_mr=%p iova_start=%p size=%lx "
-			"acl=%x e_pd=%p pginfo=%p num_pages=%lx num_4k=%lx",
-			ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
-			pginfo->num_pages, pginfo->num_4k);
-	else
-		EDEB_EX(7, "ret=%x lkey=%x rkey=%x", ret, *lkey, *rkey);
+		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
+			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
+			 "num_pages=%lx num_4k=%lx",
+			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
+			 pginfo->num_pages, pginfo->num_4k);
 	return ret;
 } /* end ehca_reg_mr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_reg_mr_rpages(struct ehca_shca *shca,
 		       struct ehca_mr *e_mr,
 		       struct ehca_mr_pginfo *pginfo)
 {
 	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	u32 rnum = 0;
-	u64 rpage = 0;
+	u64 h_ret;
+	u32 rnum;
+	u64 rpage;
 	u32 i;
-	u64 *kpage = NULL;
-
-	EDEB_EN(7, "shca=%p e_mr=%p pginfo=%p num_pages=%lx num_4k=%lx",
-		shca, e_mr, pginfo, pginfo->num_pages, pginfo->num_4k);
+	u64 *kpage;
 
 	kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (!kpage) {
-		EDEB_ERR(4, "kpage alloc failed");
+		ehca_err(&shca->ib_device, "kpage alloc failed");
 		ret = -ENOMEM;
 		goto ehca_reg_mr_rpages_exit0;
 	}
@@ -1191,40 +1033,43 @@ int ehca_reg_mr_rpages(struct ehca_shca 
 		if (rnum > 1) {
 			ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
 			if (ret) {
-				EDEB_ERR(4, "ehca_set_pagebuf bad rc, ret=%x "
-					 "rnum=%x kpage=%p", ret, rnum, kpage);
+				ehca_err(&shca->ib_device, "ehca_set_pagebuf "
+					 "bad rc, ret=%x rnum=%x kpage=%p",
+					 ret, rnum, kpage);
 				ret = -EFAULT;
 				goto ehca_reg_mr_rpages_exit1;
 			}
 			rpage = virt_to_abs(kpage);
 			if (!rpage) {
-				EDEB_ERR(4, "kpage=%p i=%x", kpage, i);
+				ehca_err(&shca->ib_device, "kpage=%p i=%x",
+					 kpage, i);
 				ret = -EFAULT;
 				goto ehca_reg_mr_rpages_exit1;
 			}
 		} else {  /* rnum==1 */
 			ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
 			if (ret) {
-				EDEB_ERR(4, "ehca_set_pagebuf_1 bad rc, "
-					 "ret=%x i=%x", ret, i);
+				ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
+					 "bad rc, ret=%x i=%x", ret, i);
 				ret = -EFAULT;
 				goto ehca_reg_mr_rpages_exit1;
 			}
 		}
 
-		EDEB(9, "i=%x rnum=%x rpage=%lx", i, rnum, rpage);
-
 		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
 						 0, /* pagesize 4k */
 						 0, rpage, rnum);
 
 		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
-			/* check for 'registration complete'==H_SUCCESS */
-			/* and for 'page registered'==H_PAGE_REGISTERED */
+			/*
+			 * check for 'registration complete'==H_SUCCESS
+			 * and for 'page registered'==H_PAGE_REGISTERED
+			 */
 			if (h_ret != H_SUCCESS) {
-				EDEB_ERR(4, "last hipz_reg_rpage_mr failed, "
-					 "h_ret=%lx e_mr=%p i=%x hca_hndl=%lx "
-					 "mr_hndl=%lx lkey=%x", h_ret, e_mr, i,
+				ehca_err(&shca->ib_device, "last "
+					 "hipz_reg_rpage_mr failed, h_ret=%lx "
+					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
+					 " lkey=%x", h_ret, e_mr, i,
 					 shca->ipz_hca_handle.handle,
 					 e_mr->ipz_mr_handle.handle,
 					 e_mr->ib.ib_mr.lkey);
@@ -1233,8 +1078,8 @@ int ehca_reg_mr_rpages(struct ehca_shca 
 			} else
 				ret = 0;
 		} else if (h_ret != H_PAGE_REGISTERED) {
-			EDEB_ERR(4, "hipz_reg_rpage_mr failed, h_ret=%lx "
-				 "e_mr=%p i=%x lkey=%x hca_hndl=%lx "
+			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
+				 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
 				 "mr_hndl=%lx", h_ret, e_mr, i,
 				 e_mr->ib.ib_mr.lkey,
 				 shca->ipz_hca_handle.handle,
@@ -1250,16 +1095,13 @@ ehca_reg_mr_rpages_exit1:
 	kfree(kpage);
 ehca_reg_mr_rpages_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x shca=%p e_mr=%p pginfo=%p num_pages=%lx "
-			"num_4k=%lx", ret, shca, e_mr, pginfo,
-			pginfo->num_pages, pginfo->num_4k);
-	else
-		EDEB_EX(7, "ret=%x", ret);
+		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
+			 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
+			 pginfo->num_pages, pginfo->num_4k);
 	return ret;
 } /* end ehca_reg_mr_rpages() */
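
The loop above is easier to follow once the firmware protocol is spelled out: hipz_h_register_rpage_mr() takes at most 512 rpage addresses per call, so the driver makes ceil(num_4k / 512) calls and expects H_PAGE_REGISTERED from every call except the last, which must return H_SUCCESS ("registration complete"). A schematic of that contract, with the hCall stubbed out; the H_* values here are placeholders, not the real hvcall.h constants:

	#define H_SUCCESS		0UL
	#define H_PAGE_REGISTERED	1UL

	/* stub: pretend the firmware always succeeds */
	static unsigned long register_rpages(unsigned int rnum, int last)
	{
		(void)rnum;
		return last ? H_SUCCESS : H_PAGE_REGISTERED;
	}

	static int reg_all(unsigned long num_4k)
	{
		unsigned long calls = (num_4k + 512 - 1) / 512;	/* ceil */
		unsigned long i;

		for (i = 0; i < calls; i++) {
			int last = (i == calls - 1);
			unsigned int rnum = (last && num_4k % 512) ?
				(unsigned int)(num_4k % 512) : 512;
			unsigned long h_ret = register_rpages(rnum, last);

			if (last ? h_ret != H_SUCCESS
				 : h_ret != H_PAGE_REGISTERED)
				return -22;	/* -EINVAL, simplified */
		}
		return 0;
	}
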
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
 				struct ehca_mr *e_mr,
@@ -1271,25 +1113,20 @@ inline int ehca_rereg_mr_rereg1(struct e
 				u32 *lkey, /*OUT*/
 				u32 *rkey) /*OUT*/
 {
-	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	u32 hipz_acl = 0;
-	u64 *kpage = NULL;
-	u64 rpage = 0;
+	int ret;
+	u64 h_ret;
+	u32 hipz_acl;
+	u64 *kpage;
+	u64 rpage;
 	struct ehca_mr_pginfo pginfo_save;
 	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
 
-	EDEB_EN(7, "shca=%p e_mr=%p iova_start=%p size=%lx acl=%x "
-		"e_pd=%p pginfo=%p num_pages=%lx num_4k=%lx", shca, e_mr,
-		iova_start, size, acl, e_pd, pginfo, pginfo->num_pages,
-		pginfo->num_4k);
-
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 
 	kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (!kpage) {
-		EDEB_ERR(4, "kpage alloc failed");
+		ehca_err(&shca->ib_device, "kpage alloc failed");
 		ret = -ENOMEM;
 		goto ehca_rereg_mr_rereg1_exit0;
 	}
@@ -1297,14 +1134,15 @@ inline int ehca_rereg_mr_rereg1(struct e
 	pginfo_save = *pginfo;
 	ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
 	if (ret) {
-		EDEB_ERR(4, "set pagebuf failed, e_mr=%p pginfo=%p type=%x "
-			 "num_pages=%lx num_4k=%lx kpage=%p", e_mr, pginfo,
-			 pginfo->type, pginfo->num_pages, pginfo->num_4k,kpage);
+		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
+			 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
+			 e_mr, pginfo, pginfo->type, pginfo->num_pages,
+			 pginfo->num_4k, kpage);
 		goto ehca_rereg_mr_rereg1_exit1;
 	}
 	rpage = virt_to_abs(kpage);
 	if (!rpage) {
-		EDEB_ERR(4, "kpage=%p", kpage);
+		ehca_err(&shca->ib_device, "kpage=%p", kpage);
 		ret = -EFAULT;
 		goto ehca_rereg_mr_rereg1_exit1;
 	}
@@ -1312,24 +1150,27 @@ inline int ehca_rereg_mr_rereg1(struct e
 				      (u64)iova_start, size, hipz_acl,
 				      e_pd->fw_pd, rpage, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		/* reregistration unsuccessful,                 */
-		/* try it again with the 3 hCalls,              */
-		/* e.g. this is required in case H_MR_CONDITION */
-		/* (MW bound or MR is shared)                   */
-		EDEB(6, "hipz_h_reregister_pmr failed (Rereg1), h_ret=%lx "
-		     "e_mr=%p", h_ret, e_mr);
+		/*
+		 * reregistration unsuccessful, try it again with the 3 hCalls,
+		 * e.g. this is required in case H_MR_CONDITION
+		 * (MW bound or MR is shared)
+		 */
+		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
+			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
 		*pginfo = pginfo_save;
 		ret = -EAGAIN;
 	} else if ((u64*)hipzout.vaddr != iova_start) {
-		EDEB_ERR(4, "PHYP changed iova_start in rereg_pmr, "
-			 "iova_start=%p iova_start_out=%lx e_mr=%p "
+		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
+			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
 			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
 			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
 			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
 		ret = -EFAULT;
 	} else {
-		/* successful reregistration */
-		/* note: start and start_out are identical for eServer HCAs */
+		/*
+		 * successful reregistration
+		 * note: start and start_out are identical for eServer HCAs
+		 */
 		e_mr->num_pages = pginfo->num_pages;
 		e_mr->num_4k    = pginfo->num_4k;
 		e_mr->start     = iova_start;
@@ -1343,18 +1184,14 @@ ehca_rereg_mr_rereg1_exit1:
 	kfree(kpage);
 ehca_rereg_mr_rereg1_exit0:
 	if ( ret && (ret != -EAGAIN) )
-		EDEB_EX(4, "ret=%x h_ret=%lx lkey=%x rkey=%x pginfo=%p "
-			"num_pages=%lx num_4k=%lx", ret, h_ret, *lkey, *rkey,
-			pginfo, pginfo->num_pages, pginfo->num_4k);
-	else
-		EDEB_EX(7, "ret=%x h_ret=%lx lkey=%x rkey=%x pginfo=%p "
-			"num_pages=%lx num_4k=%lx", ret, h_ret, *lkey, *rkey,
-			pginfo, pginfo->num_pages, pginfo->num_4k);
+		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
+			 "pginfo=%p num_pages=%lx num_4k=%lx",
+			 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
+			 pginfo->num_4k);
 	return ret;
 } /* end ehca_rereg_mr_rereg1() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_rereg_mr(struct ehca_shca *shca,
 		  struct ehca_mr *e_mr,
@@ -1367,20 +1204,15 @@ int ehca_rereg_mr(struct ehca_shca *shca
 		  u32 *rkey)
 {
 	int ret = 0;
-	u64 h_ret = H_SUCCESS;
+	u64 h_ret;
 	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
 	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
 
-	EDEB_EN(7, "shca=%p e_mr=%p iova_start=%p size=%lx acl=%x "
-		"e_pd=%p pginfo=%p num_pages=%lx num_4k=%lx", shca, e_mr,
-		iova_start, size, acl, e_pd, pginfo, pginfo->num_pages,
-		pginfo->num_4k);
-
 	/* first determine reregistration hCall(s) */
 	if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
 	    (pginfo->num_4k > e_mr->num_4k)) {
-		EDEB(7, "Rereg3 case, pginfo->num_4k=%lx "
-		     "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
+		ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
+			 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
 		rereg_1_hcall = 0;
 		rereg_3_hcall = 1;
 	}
@@ -1389,7 +1221,8 @@ int ehca_rereg_mr(struct ehca_shca *shca
 		rereg_1_hcall = 0;
 		rereg_3_hcall = 1;
 		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
-		EDEB(4, "Rereg MR for max-MR! e_mr=%p", e_mr);
+		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
+			 e_mr);
 	}
 
 	if (rereg_1_hcall) {
@@ -1409,8 +1242,9 @@ int ehca_rereg_mr(struct ehca_shca *shca
 		/* first deregister old MR */
 		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 		if (h_ret != H_SUCCESS) {
-			EDEB_ERR(4, "hipz_free_mr failed, h_ret=%lx e_mr=%p "
-				 "hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
+				 "mr->lkey=%x",
 				 h_ret, e_mr, shca->ipz_hca_handle.handle,
 				 e_mr->ipz_mr_handle.handle,
 				 e_mr->ib.ib_mr.lkey);
@@ -1440,60 +1274,53 @@ int ehca_rereg_mr(struct ehca_shca *shca
 
 ehca_rereg_mr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x shca=%p e_mr=%p iova_start=%p size=%lx "
-			"acl=%x e_pd=%p pginfo=%p num_pages=%lx lkey=%x rkey=%x"
-			" rereg_1_hcall=%x rereg_3_hcall=%x", ret, shca, e_mr,
-			iova_start, size, acl, e_pd, pginfo, pginfo->num_pages,
-			*lkey, *rkey, rereg_1_hcall, rereg_3_hcall);
-	else
-		EDEB_EX(7, "ret=%x shca=%p e_mr=%p iova_start=%p size=%lx "
-			"acl=%x e_pd=%p pginfo=%p num_pages=%lx lkey=%x rkey=%x"
-			" rereg_1_hcall=%x rereg_3_hcall=%x", ret, shca, e_mr,
-			iova_start, size, acl, e_pd, pginfo, pginfo->num_pages,
-			*lkey, *rkey, rereg_1_hcall, rereg_3_hcall);
-
+		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
+			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
+			 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
+			 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
+			 rereg_1_hcall, rereg_3_hcall);
 	return ret;
 } /* end ehca_rereg_mr() */
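
The decision logic at the top of ehca_rereg_mr() is worth summarizing: the single-hCall path ("Rereg1", hipz_h_reregister_pmr) is only usable when both the old and the new mapping fit into one 512-entry rpage block and the region is not growing; otherwise, and also for a max-MR, the driver goes straight to the three-hCall sequence of free, alloc and rpage registration ("Rereg3"). Rereg1 can additionally fail at runtime with H_MR_CONDITION (MW bound or MR shared), which ehca_rereg_mr_rereg1() reports as -EAGAIN so the caller can retry via Rereg3. The selection predicate, condensed with simplified fields:

	/* mirrors the rereg_3_hcall conditions in the hunks above */
	struct mr_state { unsigned long num_4k; int is_maxmr; };

	static int needs_rereg3(const struct mr_state *old_mr,
				unsigned long new_num_4k)
	{
		return new_num_4k > 512 || old_mr->num_4k > 512 ||
		       new_num_4k > old_mr->num_4k ||	/* region grows */
		       old_mr->is_maxmr;		/* never Rereg1 */
	}
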
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_unmap_one_fmr(struct ehca_shca *shca,
 		       struct ehca_mr *e_fmr)
 {
 	int ret = 0;
-	u64 h_ret = H_SUCCESS;
+	u64 h_ret;
 	int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
 	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
-	struct ehca_pd *e_pd = NULL;
+	struct ehca_pd *e_pd =
+		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
 	struct ehca_mr save_fmr;
-	u32 tmp_lkey = 0;
-	u32 tmp_rkey = 0;
+	u32 tmp_lkey, tmp_rkey;
 	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
 	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
 
-	EDEB_EN(7, "shca=%p e_fmr=%p", shca, e_fmr);
-
 	/* first check if reregistration hCall can be used for unmap */
 	if (e_fmr->fmr_max_pages > 512) {
 		rereg_1_hcall = 0;
 		rereg_3_hcall = 1;
 	}
 
-	e_pd = container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
-
 	if (rereg_1_hcall) {
-		/* note: after using rereg hcall with len=0,            */
-		/* rereg hcall must be used again for registering pages */
+		/*
+		 * note: after using rereg hcall with len=0,
+		 * rereg hcall must be used again for registering pages
+		 */
 		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
 					      0, 0, e_pd->fw_pd, 0, &hipzout);
 		if (h_ret != H_SUCCESS) {
-			/* should not happen, because length checked above, */
-			/* FMRs are not shared and no MW bound to FMRs      */
-			EDEB_ERR(4, "hipz_reregister_pmr failed (Rereg1), "
-				 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
-				 "lkey=%x lkey_out=%x", h_ret, e_fmr,
-				 shca->ipz_hca_handle.handle,
+			/*
+			 * should not happen, because length checked above,
+			 * FMRs are not shared and no MW bound to FMRs
+			 */
+			ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
+				 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
+				 "mr_hndl=%lx lkey=%x lkey_out=%x",
+				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 				 e_fmr->ipz_mr_handle.handle,
 				 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
 			rereg_3_hcall = 1;
@@ -1512,9 +1339,10 @@ int ehca_unmap_one_fmr(struct ehca_shca 
 		/* first free old FMR */
 		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
 		if (h_ret != H_SUCCESS) {
-			EDEB_ERR(4, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
-				 "hca_hndl=%lx mr_hndl=%lx lkey=%x", h_ret,
-				 e_fmr, shca->ipz_hca_handle.handle,
+			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+				 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
+				 "lkey=%x",
+				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 				 e_fmr->ipz_mr_handle.handle,
 				 e_fmr->ib.ib_fmr.lkey);
 			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
@@ -1548,14 +1376,15 @@ int ehca_unmap_one_fmr(struct ehca_shca 
 	}
 
 ehca_unmap_one_fmr_exit0:
-	EDEB_EX(7, "ret=%x tmp_lkey=%x tmp_rkey=%x fmr_max_pages=%x "
-		"rereg_1_hcall=%x rereg_3_hcall=%x", ret, tmp_lkey, tmp_rkey,
-		e_fmr->fmr_max_pages, rereg_1_hcall, rereg_3_hcall);
+	if (ret)
+		ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
+			 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
+			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
+			 rereg_1_hcall, rereg_3_hcall);
 	return ret;
 } /* end ehca_unmap_one_fmr() */
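
The unmap path relies on a trick the comments only hint at: re-registering the FMR with length 0 detaches all of its pages in a single hCall while keeping the MR handle alive for the next map. Only when that is not possible (more than 512 pages) or when the rereg unexpectedly fails does the code fall back to freeing the resource and rebuilding the FMR from scratch. Schematically, with rereg_pmr() and rebuild() as stand-ins for the hipz call and the free/alloc/rereg fallback:

	struct fmr { unsigned int max_pages; };

	static int rereg_pmr(struct fmr *f, unsigned long len)
	{ (void)f; (void)len; return 0; }	/* hCall stub */

	static int rebuild(struct fmr *f)
	{ (void)f; return 0; }			/* 3-hCall path stub */

	static int unmap_one(struct fmr *f)
	{
		/* >512 pages cannot be re-registered in one hCall */
		if (f->max_pages <= 512 && !rereg_pmr(f, 0 /* len */))
			return 0;	/* pages detached, handle kept */
		return rebuild(f);	/* three-hCall fallback */
	}
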
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_reg_smr(struct ehca_shca *shca,
 		 struct ehca_mr *e_origmr,
@@ -1567,13 +1396,10 @@ int ehca_reg_smr(struct ehca_shca *shca,
 		 u32 *rkey) /*OUT*/
 {
 	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	u32 hipz_acl = 0;
+	u64 h_ret;
+	u32 hipz_acl;
 	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
 
-	EDEB_EN(7,"shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x e_pd=%p",
-		shca, e_origmr, e_newmr, iova_start, acl, e_pd);
-
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 
@@ -1581,10 +1407,11 @@ int ehca_reg_smr(struct ehca_shca *shca,
 				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
 				    &hipzout);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_reg_smr failed, h_ret=%lx shca=%p e_origmr=%p"
-			 " e_newmr=%p iova_start=%p acl=%x e_pd=%p hca_hndl=%lx"
-			 " mr_hndl=%lx lkey=%x", h_ret, shca, e_origmr, e_newmr,
-			 iova_start, acl, e_pd, shca->ipz_hca_handle.handle,
+		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
+			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
+			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
+			 shca->ipz_hca_handle.handle,
 			 e_origmr->ipz_mr_handle.handle,
 			 e_origmr->ib.ib_mr.lkey);
 		ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
@@ -1599,20 +1426,17 @@ int ehca_reg_smr(struct ehca_shca *shca,
 	e_newmr->ipz_mr_handle = hipzout.handle;
 	*lkey = hipzout.lkey;
 	*rkey = hipzout.rkey;
-	goto ehca_reg_smr_exit0;
+	return 0;
 
 ehca_reg_smr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x shca=%p e_origmr=%p e_newmr=%p "
-			"iova_start=%p acl=%x e_pd=%p",
-			ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
-	else
-		EDEB_EX(7, "ret=%x lkey=%x rkey=%x", ret, *lkey, *rkey);
+		ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
+			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
+			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
 	return ret;
 } /* end ehca_reg_smr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 /* register internal max-MR to internal SHCA */
 int ehca_reg_internal_maxmr(
@@ -1620,27 +1444,18 @@ int ehca_reg_internal_maxmr(
 	struct ehca_pd *e_pd,
 	struct ehca_mr **e_maxmr)  /*OUT*/
 {
-	int ret = 0;
-	struct ehca_mr *e_mr = NULL;
-	u64 *iova_start = NULL;
-	u64 size_maxmr = 0;
+	int ret;
+	struct ehca_mr *e_mr;
+	u64 *iova_start;
+	u64 size_maxmr;
 	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
 	struct ib_phys_buf ib_pbuf;
-	u32 num_pages_mr = 0;
-	u32 num_pages_4k = 0; /* 4k portion "pages" */
-
-	EDEB_EN(7, "shca=%p e_pd=%p e_maxmr=%p", shca, e_pd, e_maxmr);
-
-	if (ehca_adr_bad(shca) || ehca_adr_bad(e_pd) || ehca_adr_bad(e_maxmr)) {
-		EDEB_ERR(4, "bad input values: shca=%p e_pd=%p e_maxmr=%p",
-			 shca, e_pd, e_maxmr);
-		ret = -EINVAL;
-		goto ehca_reg_internal_maxmr_exit0;
-	}
+	u32 num_pages_mr;
+	u32 num_pages_4k; /* 4k portion "pages" */
 
 	e_mr = ehca_mr_new();
 	if (!e_mr) {
-		EDEB_ERR(4, "out of memory");
+		ehca_err(&shca->ib_device, "out of memory");
 		ret = -ENOMEM;
 		goto ehca_reg_internal_maxmr_exit0;
 	}
@@ -1648,7 +1463,6 @@ int ehca_reg_internal_maxmr(
 
 	/* register internal max-MR on HCA */
 	size_maxmr = (u64)high_memory - PAGE_OFFSET;
-	EDEB(7, "high_memory=%p PAGE_OFFSET=%lx", high_memory, PAGE_OFFSET);
 	iova_start = (u64*)KERNELBASE;
 	ib_pbuf.addr = 0;
 	ib_pbuf.size = size_maxmr;
@@ -1664,11 +1478,11 @@ int ehca_reg_internal_maxmr(
 	pginfo.phys_buf_array = &ib_pbuf;
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
-			      &pginfo, &e_mr->ib.ib_mr.lkey,
-			      &e_mr->ib.ib_mr.rkey);
+			  &pginfo, &e_mr->ib.ib_mr.lkey,
+			  &e_mr->ib.ib_mr.rkey);
 	if (ret) {
-		EDEB_ERR(4, "reg of internal max MR failed, e_mr=%p "
-			 "iova_start=%p size_maxmr=%lx num_pages_mr=%x "
+		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
+			 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
 			 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
 			 num_pages_mr, num_pages_4k);
 		goto ehca_reg_internal_maxmr_exit1;
@@ -1681,23 +1495,18 @@ int ehca_reg_internal_maxmr(
 	atomic_inc(&(e_pd->ib_pd.usecnt));
 	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
 	*e_maxmr = e_mr;
-	goto ehca_reg_internal_maxmr_exit0;
+	return 0;
 
 ehca_reg_internal_maxmr_exit1:
 	ehca_mr_delete(e_mr);
 ehca_reg_internal_maxmr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
-			ret, shca, e_pd, e_maxmr);
-	else
-		EDEB_EX(7, "*e_maxmr=%p lkey=%x rkey=%x",
-			*e_maxmr, (*e_maxmr)->ib.ib_mr.lkey,
-			(*e_maxmr)->ib.ib_mr.rkey);
+		ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
+			 ret, shca, e_pd, e_maxmr);
 	return ret;
 } /* end ehca_reg_internal_maxmr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_reg_maxmr(struct ehca_shca *shca,
 		   struct ehca_mr *e_newmr,
@@ -1707,15 +1516,11 @@ int ehca_reg_maxmr(struct ehca_shca *shc
 		   u32 *lkey,
 		   u32 *rkey)
 {
-	int ret = 0;
-	u64 h_ret = H_SUCCESS;
+	u64 h_ret;
 	struct ehca_mr *e_origmr = shca->maxmr;
-	u32 hipz_acl = 0;
+	u32 hipz_acl;
 	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
 
-	EDEB_EN(7,"shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x e_pd=%p",
-		shca, e_origmr, e_newmr, iova_start, acl, e_pd);
-
 	ehca_mrmw_map_acl(acl, &hipz_acl);
 	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 
@@ -1723,13 +1528,12 @@ int ehca_reg_maxmr(struct ehca_shca *shc
 				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
 				    &hipzout);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_reg_smr failed, h_ret=%lx e_origmr=%p "
-			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
+		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
+			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
 			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
 			 e_origmr->ipz_mr_handle.handle,
 			 e_origmr->ib.ib_mr.lkey);
-		ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
-		goto ehca_reg_maxmr_exit0;
+		return ehca_mrmw_map_hrc_reg_smr(h_ret);
 	}
 	/* successful registration */
 	e_newmr->num_pages     = e_origmr->num_pages;
@@ -1740,25 +1544,19 @@ int ehca_reg_maxmr(struct ehca_shca *shc
 	e_newmr->ipz_mr_handle = hipzout.handle;
 	*lkey = hipzout.lkey;
 	*rkey = hipzout.rkey;
-
-ehca_reg_maxmr_exit0:
-	EDEB_EX(7, "ret=%x lkey=%x rkey=%x", ret, *lkey, *rkey);
-	return ret;
+	return 0;
 } /* end ehca_reg_maxmr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
 {
-	int ret = 0;
-	struct ehca_mr *e_maxmr = NULL;
-	struct ib_pd *ib_pd = NULL;
-
-	EDEB_EN(7, "shca=%p shca->maxmr=%p", shca, shca->maxmr);
+	int ret;
+	struct ehca_mr *e_maxmr;
+	struct ib_pd *ib_pd;
 
 	if (!shca->maxmr) {
-		EDEB_ERR(4, "bad call, shca=%p", shca);
+		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
 		ret = -EINVAL;
 		goto ehca_dereg_internal_maxmr_exit0;
 	}
@@ -1769,7 +1567,7 @@ int ehca_dereg_internal_maxmr(struct ehc
 
 	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
 	if (ret) {
-		EDEB_ERR(3, "dereg internal max-MR failed, "
+		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
 			 "ret=%x e_maxmr=%p shca=%p lkey=%x",
 			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
 		shca->maxmr = e_maxmr;
@@ -1780,17 +1578,15 @@ int ehca_dereg_internal_maxmr(struct ehc
 
 ehca_dereg_internal_maxmr_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x shca=%p shca->maxmr=%p",
-			ret, shca, shca->maxmr);
-	else
-		EDEB_EX(7, "");
+		ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
+			 ret, shca, shca->maxmr);
 	return ret;
 } /* end ehca_dereg_internal_maxmr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
-/* check physical buffer array of MR verbs for validness and
+/*
+ * check physical buffer array of MR verbs for validity and
  * calculates MR size
  */
 int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
@@ -1803,34 +1599,35 @@ int ehca_mr_chk_buf_and_calc_size(struct
 	u32 i;
 
 	if (num_phys_buf == 0) {
-		EDEB_ERR(4, "bad phys buf array len, num_phys_buf=0");
+		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
 		return -EINVAL;
 	}
 	/* check first buffer */
 	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
-		EDEB_ERR(4, "iova_start/addr mismatch, iova_start=%p "
-			 "pbuf->addr=%lx pbuf->size=%lx",
-			 iova_start, pbuf->addr, pbuf->size);
+		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
+			     "pbuf->addr=%lx pbuf->size=%lx",
+			     iova_start, pbuf->addr, pbuf->size);
 		return -EINVAL;
 	}
 	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
 	    (num_phys_buf > 1)) {
-		EDEB_ERR(4, "addr/size mismatch in 1st buf, pbuf->addr=%lx "
-			 "pbuf->size=%lx", pbuf->addr, pbuf->size);
+		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
+			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < num_phys_buf; i++) {
 		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
-			EDEB_ERR(4, "bad address, i=%x pbuf->addr=%lx "
-				 "pbuf->size=%lx", i, pbuf->addr, pbuf->size);
+			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
+				     "pbuf->size=%lx",
+				     i, pbuf->addr, pbuf->size);
 			return -EINVAL;
 		}
 		if (((i > 0) &&	/* not 1st */
 		     (i < (num_phys_buf - 1)) &&	/* not last */
 		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
-			EDEB_ERR(4, "bad size, i=%x pbuf->size=%lx",
-				 i, pbuf->size);
+			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
+				     i, pbuf->size);
 			return -EINVAL;
 		}
 		size_count += pbuf->size;
@@ -1842,7 +1639,6 @@ int ehca_mr_chk_buf_and_calc_size(struct
 } /* end ehca_mr_chk_buf_and_calc_size() */
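
The rules enforced above are subtle enough to restate: the first phys_buf may start mid-page but its in-page offset must match iova_start, and it must end on a page boundary when more buffers follow; every later buffer must start page-aligned; every buffer except the last must be a whole number of pages; and no buffer may be empty. A condensed standalone version, with PAGE_SIZE_ assumed 4K:

	#define PAGE_SIZE_ 4096UL

	struct phys_buf { unsigned long addr, size; };

	/* returns the total size, or -1 on any rule violation */
	static long chk_and_calc_size(const struct phys_buf *b, int n,
				      unsigned long iova_start)
	{
		unsigned long total = 0;
		int i;

		if (n == 0 ||
		    (iova_start % PAGE_SIZE_) != (b[0].addr % PAGE_SIZE_))
			return -1;		/* offset mismatch */
		if (n > 1 && (b[0].addr + b[0].size) % PAGE_SIZE_)
			return -1;		/* 1st buf must end on a page */
		for (i = 0; i < n; i++) {
			if (i > 0 && b[i].addr % PAGE_SIZE_)
				return -1;	/* unaligned start */
			if (i > 0 && i < n - 1 && b[i].size % PAGE_SIZE_)
				return -1;	/* partial middle buffer */
			if (b[i].size == 0)
				return -1;
			total += b[i].size;
		}
		return (long)total;
	}
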
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 /* check page list of map FMR verb for validness */
 int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
@@ -1850,17 +1646,12 @@ int ehca_fmr_check_page_list(struct ehca
 			     int list_len)
 {
 	u32 i;
-	u64 *page = NULL;
-
-	if (ehca_adr_bad(page_list)) {
-		EDEB_ERR(4, "bad page_list, page_list=%p fmr=%p",
-			 page_list, e_fmr);
-		return -EINVAL;
-	}
+	u64 *page;
 
 	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
-		EDEB_ERR(4, "bad list_len, list_len=%x e_fmr->fmr_max_pages=%x "
-			 "fmr=%p", list_len, e_fmr->fmr_max_pages, e_fmr);
+		ehca_gen_err("bad list_len, list_len=%x "
+			     "e_fmr->fmr_max_pages=%x fmr=%p",
+			     list_len, e_fmr->fmr_max_pages, e_fmr);
 		return -EINVAL;
 	}
 
@@ -1868,9 +1659,9 @@ int ehca_fmr_check_page_list(struct ehca
 	page = page_list;
 	for (i = 0; i < list_len; i++) {
 		if (*page % e_fmr->fmr_page_size) {
-			EDEB_ERR(4, "bad page, i=%x *page=%lx page=%p "
-				 "fmr=%p fmr_page_size=%x",
-				 i, *page, page, e_fmr, e_fmr->fmr_page_size);
+			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
+				     "fmr_page_size=%x", i, *page, page, e_fmr,
+				     e_fmr->fmr_page_size);
 			return -EINVAL;
 		}
 		page++;
@@ -1880,7 +1671,6 @@ int ehca_fmr_check_page_list(struct ehca
 } /* end ehca_fmr_check_page_list() */
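
The remaining check is simple but essential: every address in the FMR page list must be a multiple of the FMR's page size. Standalone:

	static int page_list_ok(const unsigned long *pages, int len,
				unsigned long fmr_page_size)
	{
		int i;

		for (i = 0; i < len; i++)
			if (pages[i] % fmr_page_size)
				return 0;	/* misaligned entry */
		return 1;
	}
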
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 /* setup page buffer from page info */
 int ehca_set_pagebuf(struct ehca_mr *e_mr,
@@ -1889,24 +1679,14 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
 		     u64 *kpage)
 {
 	int ret = 0;
-	struct ib_umem_chunk *prev_chunk = NULL;
-	struct ib_umem_chunk *chunk      = NULL;
-	struct ib_phys_buf *pbuf         = NULL;
-	u64 *fmrlist = NULL;
-	u64 num4k  = 0;
-	u64 pgaddr = 0;
-	u64 offs4k = 0;
+	struct ib_umem_chunk *prev_chunk;
+	struct ib_umem_chunk *chunk;
+	struct ib_phys_buf *pbuf;
+	u64 *fmrlist;
+	u64 num4k, pgaddr, offs4k;
 	u32 i = 0;
 	u32 j = 0;
 
-	EDEB_EN(7, "pginfo=%p type=%x num_pages=%lx num_4k=%lx next_buf=%lx "
-		"next_4k=%lx number=%x kpage=%p page_cnt=%lx page_4k_cnt=%lx "
-		"next_listelem=%lx region=%p next_chunk=%p next_nmap=%lx",
-		pginfo, pginfo->type, pginfo->num_pages, pginfo->num_4k,
-		pginfo->next_buf, pginfo->next_4k, number, kpage,
-		pginfo->page_cnt, pginfo->page_4k_cnt, pginfo->next_listelem,
-		pginfo->region, pginfo->next_chunk, pginfo->next_nmap);
-
 	if (pginfo->type == EHCA_MR_PGI_PHYS) {
 		/* loop over desired phys_buf_array entries */
 		while (i < number) {
@@ -1918,29 +1698,34 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
 				/* sanity check */
 				if ((pginfo->page_cnt >= pginfo->num_pages) ||
 				    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
-					EDEB_ERR(4, "page_cnt >= num_pages, "
-						 "page_cnt=%lx num_pages=%lx "
-						 "page_4k_cnt=%lx num_4k=%lx "
-						 "i=%x", pginfo->page_cnt,
-						 pginfo->num_pages,
-						 pginfo->page_4k_cnt,
-						 pginfo->num_4k, i);
+					ehca_gen_err("page_cnt >= num_pages, "
+						     "page_cnt=%lx "
+						     "num_pages=%lx "
+						     "page_4k_cnt=%lx "
+						     "num_4k=%lx i=%x",
+						     pginfo->page_cnt,
+						     pginfo->num_pages,
+						     pginfo->page_4k_cnt,
+						     pginfo->num_4k, i);
 					ret = -EFAULT;
+					goto ehca_set_pagebuf_exit0;
 				}
 				*kpage = phys_to_abs(
-					(pbuf->addr & (EHCA_PAGESIZE-1))
+					(pbuf->addr & EHCA_PAGEMASK)
 					+ (pginfo->next_4k * EHCA_PAGESIZE));
 				if ( !(*kpage) && pbuf->addr ) {
-					EDEB_ERR(4, "pbuf->addr=%lx "
-						 "pbuf->size=%lx next_4k=%lx",
-						 pbuf->addr, pbuf->size,
-						 pginfo->next_4k);
+					ehca_gen_err("pbuf->addr=%lx "
+						     "pbuf->size=%lx "
+						     "next_4k=%lx", pbuf->addr,
+						     pbuf->size,
+						     pginfo->next_4k);
 					ret = -EFAULT;
 					goto ehca_set_pagebuf_exit0;
 				}
 				(pginfo->page_4k_cnt)++;
 				(pginfo->next_4k)++;
-				if(pginfo->next_4k >= PAGE_SIZE/EHCA_PAGESIZE)
+				if (pginfo->next_4k %
+				    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
 					(pginfo->page_cnt)++;
 				kpage++;
 				i++;
@@ -1958,30 +1743,29 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
 		list_for_each_entry_continue(chunk,
 					     (&(pginfo->region->chunk_list)),
 					     list) {
-			EDEB(9, "chunk->page_list[0]=%lx",
-			     (u64)sg_dma_address(&chunk->page_list[0]));
 			for (i = pginfo->next_nmap; i < chunk->nmap; ) {
 				pgaddr = ( page_to_pfn(chunk->page_list[i].page)
 					   << PAGE_SHIFT );
 				*kpage = phys_to_abs(pgaddr +
 						     (pginfo->next_4k *
 						      EHCA_PAGESIZE));
-				EDEB(9,"pgaddr=%lx *kpage=%lx next_4k=%lx",
-				     pgaddr, *kpage, pginfo->next_4k);
 				if ( !(*kpage) ) {
-					EDEB_ERR(4, "pgaddr=%lx "
-						 "chunk->page_list[i]=%lx i=%x "
-						 "next_4k=%lx mr=%p", pgaddr,
-						 (u64)sg_dma_address(
-							 &chunk->page_list[i]),
-						 i, pginfo->next_4k, e_mr);
+					ehca_gen_err("pgaddr=%lx "
+						     "chunk->page_list[i]=%lx "
+						     "i=%x next_4k=%lx mr=%p",
+						     pgaddr,
+						     (u64)sg_dma_address(
+							     &chunk->
+							     page_list[i]),
+						     i, pginfo->next_4k, e_mr);
 					ret = -EFAULT;
 					goto ehca_set_pagebuf_exit0;
 				}
 				(pginfo->page_4k_cnt)++;
 				(pginfo->next_4k)++;
 				kpage++;
-				if (pginfo->next_4k >= PAGE_SIZE/EHCA_PAGESIZE) {
+				if (pginfo->next_4k %
+				    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
 					(pginfo->page_cnt)++;
 					(pginfo->next_nmap)++;
 					pginfo->next_4k = 0;
@@ -2011,21 +1795,22 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
 		/* loop over desired page_list entries */
 		fmrlist = pginfo->page_list + pginfo->next_listelem;
 		for (i = 0; i < number; i++) {
-			*kpage = phys_to_abs((*fmrlist  & (EHCA_PAGESIZE-1)) +
+			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
 					     pginfo->next_4k * EHCA_PAGESIZE);
 			if ( !(*kpage) ) {
-				EDEB_ERR(4, "*fmrlist=%lx fmrlist=%p "
-					 "next_listelem=%lx next_4k=%lx",
-					 *fmrlist, fmrlist,
-					 pginfo->next_listelem,pginfo->next_4k);
+				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
+					     "next_listelem=%lx next_4k=%lx",
+					     *fmrlist, fmrlist,
+					     pginfo->next_listelem,
+					     pginfo->next_4k);
 				ret = -EFAULT;
 				goto ehca_set_pagebuf_exit0;
 			}
 			(pginfo->page_4k_cnt)++;
 			(pginfo->next_4k)++;
 			kpage++;
-			if ( pginfo->next_4k >=
-			     ((e_mr->fmr_page_size) / EHCA_PAGESIZE) ) {
+			if (pginfo->next_4k %
+			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
 				(pginfo->page_cnt)++;
 				(pginfo->next_listelem)++;
 				fmrlist++;
@@ -2033,37 +1818,27 @@ int ehca_set_pagebuf(struct ehca_mr *e_m
 			}
 		}
 	} else {
-		EDEB_ERR(4, "bad pginfo->type=%x", pginfo->type);
+		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
 		ret = -EFAULT;
 		goto ehca_set_pagebuf_exit0;
 	}
 
 ehca_set_pagebuf_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
-			"num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
-			"kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
-			"next_listelem=%lx region=%p next_chunk=%p "
-			"next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
-			pginfo->num_pages, pginfo->num_4k, pginfo->next_buf,
-			pginfo->next_4k, number, kpage, pginfo->page_cnt,
-			pginfo->page_4k_cnt, i, pginfo->next_listelem,
-			pginfo->region, pginfo->next_chunk, pginfo->next_nmap);
-	else
-		EDEB_EX(7, "ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
-			"num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
-			"kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
-			"next_listelem=%lx region=%p next_chunk=%p "
-			"next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
-			pginfo->num_pages, pginfo->num_4k, pginfo->next_buf,
-			pginfo->next_4k, number, kpage, pginfo->page_cnt,
-			pginfo->page_4k_cnt, i, pginfo->next_listelem,
-			pginfo->region, pginfo->next_chunk, pginfo->next_nmap);
+		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
+			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
+			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
+			     "next_listelem=%lx region=%p next_chunk=%p "
+			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
+			     pginfo->num_pages, pginfo->num_4k,
+			     pginfo->next_buf, pginfo->next_4k, number, kpage,
+			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
+			     pginfo->next_listelem, pginfo->region,
+			     pginfo->next_chunk, pginfo->next_nmap);
 	return ret;
 } /* end ehca_set_pagebuf() */
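
Two systematic fixes run through ehca_set_pagebuf() and ehca_set_pagebuf_1() below, and they are easy to miss among the message conversions. First, the address math: "addr & (EHCA_PAGESIZE-1)" keeps only the offset inside a 4K page, the opposite of what building a page list needs; "addr & EHCA_PAGEMASK" keeps the page-aligned part instead (assuming EHCA_PAGEMASK is the usual ~(EHCA_PAGESIZE-1); check ehca_tools.h for the authoritative definition). Second, the page_cnt bookkeeping: in the paths where next_4k is never reset, the old ">= PAGE_SIZE/EHCA_PAGESIZE" test fired on every hardware page once past the first boundary, while the new modulo test fires exactly once per OS page, which matters when one OS page spans several hardware pages (a 64K PPC page is 16 EHCA pages). A worked illustration:

	#include <assert.h>

	#define EHCA_PAGESIZE 4096UL
	#define EHCA_PAGEMASK (~(EHCA_PAGESIZE - 1))	/* assumed definition */

	static void pagebuf_fixes_demo(void)
	{
		unsigned long addr = 0x12345678UL;

		/* old: only the in-page offset survived -- a bogus base */
		assert((addr & (EHCA_PAGESIZE - 1)) == 0x678UL);
		/* new: the 4K-aligned base address, as intended */
		assert((addr & EHCA_PAGEMASK) == 0x12345000UL);

		/* with 64K OS pages: 65536/4096 = 16 HW pages per OS page;
		 * "next_4k % 16 == 0" counts one OS page per 16 HW pages,
		 * where "next_4k >= 16" overcounted once next_4k ran on. */
	}
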
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 /* setup 1 page from page info page buffer */
 int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
@@ -2071,30 +1846,20 @@ int ehca_set_pagebuf_1(struct ehca_mr *e
 		       u64 *rpage)
 {
 	int ret = 0;
-	struct ib_phys_buf *tmp_pbuf = NULL;
-	u64 *fmrlist = NULL;
-	struct ib_umem_chunk *chunk = NULL;
-	struct ib_umem_chunk *prev_chunk = NULL;
-	u64 pgaddr = 0;
-	u64 num4k = 0;
-	u64 offs4k = 0;
-
-	EDEB_EN(7, "pginfo=%p type=%x num_pages=%lx num_4k=%lx next_buf=%lx "
-		"next_4k=%lx rpage=%p page_cnt=%lx page_4k_cnt=%lx "
-		"next_listelem=%lx region=%p next_chunk=%p next_nmap=%lx",
-		pginfo, pginfo->type, pginfo->num_pages, pginfo->num_4k,
-		pginfo->next_buf, pginfo->next_4k, rpage, pginfo->page_cnt,
-		pginfo->page_4k_cnt, pginfo->next_listelem, pginfo->region,
-		pginfo->next_chunk, pginfo->next_nmap);
+	struct ib_phys_buf *tmp_pbuf;
+	u64 *fmrlist;
+	struct ib_umem_chunk *chunk;
+	struct ib_umem_chunk *prev_chunk;
+	u64 pgaddr, num4k, offs4k;
 
 	if (pginfo->type == EHCA_MR_PGI_PHYS) {
 		/* sanity check */
 		if ((pginfo->page_cnt >= pginfo->num_pages) ||
 		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
-			EDEB_ERR(4, "page_cnt >= num_pages, page_cnt=%lx "
-				 "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
-				 pginfo->page_cnt, pginfo->num_pages,
-				 pginfo->page_4k_cnt, pginfo->num_4k);
+			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
+				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
+				     pginfo->page_cnt, pginfo->num_pages,
+				     pginfo->page_4k_cnt, pginfo->num_4k);
 			ret = -EFAULT;
 			goto ehca_set_pagebuf_1_exit0;
 		}
@@ -2102,19 +1867,19 @@ int ehca_set_pagebuf_1(struct ehca_mr *e
 		num4k  = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
 			  EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
 		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
-		*rpage = phys_to_abs((tmp_pbuf->addr & (EHCA_PAGESIZE-1)) +
+		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
 				     (pginfo->next_4k * EHCA_PAGESIZE));
 		if ( !(*rpage) && tmp_pbuf->addr ) {
-			EDEB_ERR(4, "tmp_pbuf->addr=%lx"
-				 " tmp_pbuf->size=%lx next_4k=%lx",
-				 tmp_pbuf->addr, tmp_pbuf->size,
-				 pginfo->next_4k);
+			ehca_gen_err("tmp_pbuf->addr=%lx"
+				     " tmp_pbuf->size=%lx next_4k=%lx",
+				     tmp_pbuf->addr, tmp_pbuf->size,
+				     pginfo->next_4k);
 			ret = -EFAULT;
 			goto ehca_set_pagebuf_1_exit0;
 		}
 		(pginfo->page_4k_cnt)++;
 		(pginfo->next_4k)++;
-		if(pginfo->next_4k >= PAGE_SIZE/EHCA_PAGESIZE)
+		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
 			(pginfo->page_cnt)++;
 		if (pginfo->next_4k >= offs4k + num4k) {
 			(pginfo->next_buf)++;
@@ -2131,22 +1896,22 @@ int ehca_set_pagebuf_1(struct ehca_mr *e
 				   << PAGE_SHIFT);
 			*rpage = phys_to_abs(pgaddr +
 					     (pginfo->next_4k * EHCA_PAGESIZE));
-			EDEB(9,"pgaddr=%lx *rpage=%lx next_4k=%lx", pgaddr,
-			     *rpage, pginfo->next_4k);
 			if ( !(*rpage) ) {
-				EDEB_ERR(4, "pgaddr=%lx chunk->page_list[]=%lx "
-					 "next_nmap=%lx next_4k=%lx mr=%p",
-					 pgaddr, (u64)sg_dma_address(
-						 &chunk->page_list[
-							 pginfo->next_nmap]),
-					 pginfo->next_nmap, pginfo->next_4k,
-					 e_mr);
+				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
+					     " next_nmap=%lx next_4k=%lx mr=%p",
+					     pgaddr, (u64)sg_dma_address(
+						     &chunk->page_list[
+							     pginfo->
+							     next_nmap]),
+					     pginfo->next_nmap, pginfo->next_4k,
+					     e_mr);
 				ret = -EFAULT;
 				goto ehca_set_pagebuf_1_exit0;
 			}
 			(pginfo->page_4k_cnt)++;
 			(pginfo->next_4k)++;
-			if (pginfo->next_4k >= PAGE_SIZE/EHCA_PAGESIZE) {
+			if (pginfo->next_4k %
+			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
 				(pginfo->page_cnt)++;
 				(pginfo->next_nmap)++;
 				pginfo->next_4k = 0;
@@ -2163,56 +1928,48 @@ int ehca_set_pagebuf_1(struct ehca_mr *e
 					   list);
 	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
 		fmrlist = pginfo->page_list + pginfo->next_listelem;
-		*rpage = phys_to_abs((*fmrlist  & (EHCA_PAGESIZE-1)) +
+		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
 				     pginfo->next_4k * EHCA_PAGESIZE);
 		if ( !(*rpage) ) {
-			EDEB_ERR(4, "*fmrlist=%lx fmrlist=%p next_listelem=%lx "
-				 "next_4k=%lx", *fmrlist, fmrlist,
-				 pginfo->next_listelem, pginfo->next_4k);
+			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
+				     "next_listelem=%lx next_4k=%lx",
+				     *fmrlist, fmrlist, pginfo->next_listelem,
+				     pginfo->next_4k);
 			ret = -EFAULT;
 			goto ehca_set_pagebuf_1_exit0;
 		}
 		(pginfo->page_4k_cnt)++;
 		(pginfo->next_4k)++;
-		if (pginfo->next_4k >= (e_mr->fmr_page_size)/EHCA_PAGESIZE) {
+		if (pginfo->next_4k %
+		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
 			(pginfo->page_cnt)++;
 			(pginfo->next_listelem)++;
 			pginfo->next_4k = 0;
 		}
 	} else {
-		EDEB_ERR(4, "bad pginfo->type=%x", pginfo->type);
+		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
 		ret = -EFAULT;
 		goto ehca_set_pagebuf_1_exit0;
 	}
 
 ehca_set_pagebuf_1_exit0:
 	if (ret)
-		EDEB_EX(4, "ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
-			"num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
-			"page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
-			"region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
-			pginfo, pginfo->type, pginfo->num_pages, pginfo->num_4k,
-			pginfo->next_buf, pginfo->next_4k, rpage,
-			pginfo->page_cnt, pginfo->page_4k_cnt,
-			pginfo->next_listelem, pginfo->region,
-			pginfo->next_chunk, pginfo->next_nmap);
-	else
-		EDEB_EX(7, "ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
-			"num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
-			"page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
-			"region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
-			pginfo, pginfo->type, pginfo->num_pages, pginfo->num_4k,
-			pginfo->next_buf, pginfo->next_4k, rpage,
-			pginfo->page_cnt, pginfo->page_4k_cnt,
-			pginfo->next_listelem, pginfo->region,
-			pginfo->next_chunk, pginfo->next_nmap);
+		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
+			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
+			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
+			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
+			     pginfo, pginfo->type, pginfo->num_pages,
+			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
+			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
+			     pginfo->next_listelem, pginfo->region,
+			     pginfo->next_chunk, pginfo->next_nmap);
 	return ret;
 } /* end ehca_set_pagebuf_1() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
-/* check MR if it is a max-MR, i.e. uses whole memory
+/*
+ * check if MR is a max-MR, i.e. uses whole memory
  * in case it's a max-MR 1 is returned, else 0
  */
 int ehca_mr_is_maxmr(u64 size,
@@ -2221,14 +1978,14 @@ int ehca_mr_is_maxmr(u64 size,
 	/* a MR is treated as max-MR only if it fits following: */
 	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
 	    (iova_start == (void*)KERNELBASE)) {
-		EDEB(6, "this is a max-MR");
+		ehca_gen_dbg("this is a max-MR");
 		return 1;
 	} else
 		return 0;
 } /* end ehca_mr_is_maxmr() */
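
So a max-MR is recognized purely by its geometry: the size equals high_memory - PAGE_OFFSET and iova_start sits at KERNELBASE, exactly the values ehca_reg_internal_maxmr() used when it created the internal region. In isolation (the kernel symbols are faked here; the real values come from the PPC64 headers):

	static const unsigned long PAGE_OFFSET_ = 0xc000000000000000UL;
	static const unsigned long KERNELBASE_  = 0xc000000000000000UL;

	static int is_maxmr(unsigned long size, const void *iova_start,
			    const void *high_memory)
	{
		return size == (unsigned long)high_memory - PAGE_OFFSET_ &&
		       iova_start == (const void *)KERNELBASE_;
	}
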
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
+
 /* map access control for MR/MW. This routine is used for MR and MW. */
 void ehca_mrmw_map_acl(int ib_acl,
 		       u32 *hipz_acl)
@@ -2247,7 +2004,6 @@ void ehca_mrmw_map_acl(int ib_acl,
 } /* end ehca_mrmw_map_acl() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 /* sets page size in hipz access control for MR/MW. */
 void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
@@ -2256,9 +2012,9 @@ void ehca_mrmw_set_pgsize_hipz_acl(u32 *
 } /* end ehca_mrmw_set_pgsize_hipz_acl() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
-/* reverse map access control for MR/MW.
+/*
+ * reverse map access control for MR/MW.
  * This routine is used for MR and MW.
  */
 void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
@@ -2279,9 +2035,9 @@ void ehca_mrmw_reverse_map_acl(const u32
 
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
-/* map HIPZ rc to IB retcodes for MR/MW allocations
+/*
+ * map HIPZ rc to IB retcodes for MR/MW allocations
  * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
  */
 int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
@@ -2304,9 +2060,9 @@ int ehca_mrmw_map_hrc_alloc(const u64 hi
 } /* end ehca_mrmw_map_hrc_alloc() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
-/* map HIPZ rc to IB retcodes for MR register rpage
+/*
+ * map HIPZ rc to IB retcodes for MR register rpage
  * Used for hipz_h_register_rpage_mr at registering last page
  */
 int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
@@ -2318,8 +2074,10 @@ int ehca_mrmw_map_hrc_rrpg_last(const u6
 	case H_ADAPTER_PARM:    /* invalid adapter handle */
 	case H_RH_PARM:         /* invalid resource handle */
 /*	case H_QT_PARM:            invalid queue type */
-	case H_PARAMETER:       /* invalid logical address, */
-		                /* or count zero or greater 512 */
+	case H_PARAMETER:       /*
+				 * invalid logical address,
+				 * or count zero or greater 512
+				 */
 	case H_TABLE_FULL:      /* page table full */
 	case H_HARDWARE:        /* HCA not operational */
 		return -EINVAL;
@@ -2331,9 +2089,9 @@ int ehca_mrmw_map_hrc_rrpg_last(const u6
 } /* end ehca_mrmw_map_hrc_rrpg_last() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
-/* map HIPZ rc to IB retcodes for MR register rpage
+/*
+ * map HIPZ rc to IB retcodes for MR register rpage
  * Used for hipz_h_register_rpage_mr at registering one page, but not last page
  */
 int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
@@ -2345,8 +2103,10 @@ int ehca_mrmw_map_hrc_rrpg_notlast(const
 	case H_ADAPTER_PARM:    /* invalid adapter handle */
 	case H_RH_PARM:         /* invalid resource handle */
 /*	case H_QT_PARM:            invalid queue type */
-	case H_PARAMETER:       /* invalid logical address, */
-		                /* or count zero or greater 512 */
+	case H_PARAMETER:       /*
+				 * invalid logical address,
+				 * or count zero or greater 512
+				 */
 	case H_TABLE_FULL:      /* page table full */
 	case H_HARDWARE:        /* HCA not operational */
 		return -EINVAL;
@@ -2358,7 +2118,6 @@ int ehca_mrmw_map_hrc_rrpg_notlast(const
 } /* end ehca_mrmw_map_hrc_rrpg_notlast() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
 /* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
 int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
@@ -2379,13 +2138,14 @@ int ehca_mrmw_map_hrc_query_mr(const u64
 /*----------------------------------------------------------------------*/
 /*----------------------------------------------------------------------*/
 
-/* map HIPZ rc to IB retcodes for freeing MR resource
+/*
+ * map HIPZ rc to IB retcodes for freeing MR resource
  * Used for hipz_h_free_resource_mr
  */
 int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
 {
 	switch (hipz_rc) {
-	case H_SUCCESS:     /* resource freed */
+	case H_SUCCESS:      /* resource freed */
 		return 0;
 	case H_ADAPTER_PARM: /* invalid adapter handle */
 	case H_RH_PARM:      /* invalid resource handle */
@@ -2401,9 +2161,9 @@ int ehca_mrmw_map_hrc_free_mr(const u64 
 } /* end ehca_mrmw_map_hrc_free_mr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
-/* map HIPZ rc to IB retcodes for freeing MW resource
+/*
+ * map HIPZ rc to IB retcodes for freeing MW resource
  * Used for hipz_h_free_resource_mw
  */
 int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
@@ -2425,9 +2185,9 @@ int ehca_mrmw_map_hrc_free_mw(const u64 
 } /* end ehca_mrmw_map_hrc_free_mw() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
-/* map HIPZ rc to IB retcodes for SMR registrations
+/*
+ * map HIPZ rc to IB retcodes for SMR registrations
  * Used for hipz_h_register_smr.
  */
 int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
@@ -2449,9 +2209,9 @@ int ehca_mrmw_map_hrc_reg_smr(const u64 
 } /* end ehca_mrmw_map_hrc_reg_smr() */
 
 /*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
 
-/* MR destructor and constructor
+/*
+ * MR destructor and constructor
  * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
  * except struct ib_mr and spinlock
  */
@@ -2470,5 +2230,32 @@ void ehca_mr_deletenew(struct ehca_mr *m
 	memset(&mr->galpas, 0, sizeof(mr->galpas));
 	mr->nr_of_pages   = 0;
 	mr->pagearray     = NULL;
-	memset(&mr->pf, 0, sizeof(mr->pf));
 } /* end ehca_mr_deletenew() */
+
+int ehca_init_mrmw_cache(void)
+{
+	mr_cache = kmem_cache_create("ehca_cache_mr",
+				     sizeof(struct ehca_mr), 0,
+				     SLAB_HWCACHE_ALIGN,
+				     NULL, NULL);
+	if (!mr_cache)
+		return -ENOMEM;
+	mw_cache = kmem_cache_create("ehca_cache_mw",
+				     sizeof(struct ehca_mw), 0,
+				     SLAB_HWCACHE_ALIGN,
+				     NULL, NULL);
+	if (!mw_cache) {
+		kmem_cache_destroy(mr_cache);
+		mr_cache = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void ehca_cleanup_mrmw_cache(void)
+{
+	if (mr_cache)
+		kmem_cache_destroy(mr_cache);
+	if (mw_cache)
+		kmem_cache_destroy(mw_cache);
+}
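
The ehca_init_mrmw_cache()/ehca_cleanup_mrmw_cache() pair above replaces the
old ehca_module.cache_* globals with file-local slab caches, one per object
type, and rolls the MR cache back if the MW cache cannot be created. A
minimal sketch of how a module init path would wire up such paired
constructors, unwinding in reverse order on failure; the wrapper name is
illustrative, and the real call sites live in ehca_main.c, which this patch
does not show:

static int __init example_ehca_caches_init(void)
{
	int ret;

	ret = ehca_init_mrmw_cache();
	if (ret)
		return ret;

	ret = ehca_init_pd_cache();
	if (ret) {
		/* unwind in reverse order of construction */
		ehca_cleanup_mrmw_cache();
		return ret;
	}
	return 0;
}
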
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_mrmw.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_mrmw.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_mrmw.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_mrmw.h	2006-10-17 10:15:06.000000000 -0400
@@ -42,9 +42,6 @@
 #ifndef _EHCA_MRMW_H_
 #define _EHCA_MRMW_H_
 
-#undef DEB_PREFIX
-#define DEB_PREFIX "mrmw"
-
 int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_mr *e_mr,
 		u64 *iova_start,
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_pd.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_pd.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_pd.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_pd.c	2006-10-17 10:15:06.000000000 -0400
@@ -38,40 +38,35 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-
-#define DEB_PREFIX "vpd "
-
 #include <asm/current.h>
 
 #include "ehca_tools.h"
 #include "ehca_iverbs.h"
 
+static struct kmem_cache *pd_cache;
+
 struct ib_pd *ehca_alloc_pd(struct ib_device *device,
 			    struct ib_ucontext *context, struct ib_udata *udata)
 {
-	extern struct ehca_module ehca_module;
-	struct ib_pd *mypd = NULL;
-	struct ehca_pd *pd = NULL;
-
-	EDEB_EN(7, "device=%p context=%p udata=%p", device, context, udata);
+	struct ehca_pd *pd;
 
-	EHCA_CHECK_DEVICE_P(device);
-
-	pd = kmem_cache_alloc(ehca_module.cache_pd, SLAB_KERNEL);
+	pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
 	if (!pd) {
-		EDEB_ERR(4, "ERROR device=%p context=%p pd=%p"
-			 " out of memory", device, context, mypd);
+		ehca_err(device, "device=%p context=%p out of memory",
+			 device, context);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	memset(pd, 0, sizeof(struct ehca_pd));
 	pd->ownpid = current->tgid;
 
-	/* Kernel PD: when device = -1, 0
+	/*
+	 * Kernel PD: when device = -1, 0
 	 * User   PD: when context != -1
 	 */
 	if (!context) {
-		/* Kernel PDs after init reuses always
+		/*
+		 * Kernel PDs after init reuses always
 		 * the one created in ehca_shca_reopen()
 		 */
 		struct ehca_shca *shca = container_of(device, struct ehca_shca,
@@ -80,39 +75,40 @@ struct ib_pd *ehca_alloc_pd(struct ib_de
 	} else
 		pd->fw_pd.value = (u64)pd;
 
-	mypd = &pd->ib_pd;
-
-	EHCA_REGISTER_PD(device, pd);
-
-	EDEB_EX(7, "device=%p context=%p pd=%p", device, context, mypd);
-
-	return mypd;
+	return &pd->ib_pd;
 }
 
 int ehca_dealloc_pd(struct ib_pd *pd)
 {
-	extern struct ehca_module ehca_module;
-	int ret = 0;
 	u32 cur_pid = current->tgid;
-	struct ehca_pd *my_pd = NULL;
+	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
 
-	EDEB_EN(7, "pd=%p", pd);
-
-	EHCA_CHECK_PD(pd);
-	my_pd = container_of(pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    my_pd->ownpid != cur_pid) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
 		return -EINVAL;
 	}
 
-	EHCA_DEREGISTER_PD(pd);
-
-	kmem_cache_free(ehca_module.cache_pd,
+	kmem_cache_free(pd_cache,
 			container_of(pd, struct ehca_pd, ib_pd));
 
-	EDEB_EX(7, "pd=%p", pd);
+	return 0;
+}
 
-	return ret;
+int ehca_init_pd_cache(void)
+{
+	pd_cache = kmem_cache_create("ehca_cache_pd",
+				     sizeof(struct ehca_pd), 0,
+				     SLAB_HWCACHE_ALIGN,
+				     NULL, NULL);
+	if (!pd_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void ehca_cleanup_pd_cache(void)
+{
+	if (pd_cache)
+		kmem_cache_destroy(pd_cache);
 }
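
ehca_dealloc_pd() above also shows the ownership check this patch applies to
every user-space object: the tgid of the allocating process is stored in
ownpid at creation and compared against current->tgid before a destroy or
modify verb proceeds, while kernel-space objects (no uobject) are exempt. A
hedged sketch of that idiom factored into a helper; the driver open-codes the
test at each verb, so the helper name is hypothetical:

static int ehca_check_pd_owner(struct ehca_pd *my_pd, struct ib_device *dev)
{
	u32 cur_pid = current->tgid;

	/* only user-space PDs carry a uobject and an owning pid */
	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    my_pd->ownpid != cur_pid) {
		ehca_err(dev, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		return -EINVAL;
	}
	return 0;
}
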
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_qes.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_qes.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_qes.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_qes.h	2006-10-17 10:15:06.000000000 -0400
@@ -46,9 +46,7 @@
 
 #include "ehca_tools.h"
 
-/**
- * virtual scatter gather entry to specify remote adresses with length
- */
+/* virtual scatter gather entry to specify remote addresses with length */
 struct ehca_vsgentry {
 	u64 vaddr;
 	u32 lkey;
@@ -63,7 +61,7 @@ struct ehca_vsgentry {
 #define GRH_NEXTHEADER_MASK  EHCA_BMASK_IBM(48,55)
 #define GRH_HOPLIMIT_MASK    EHCA_BMASK_IBM(56,63)
 
-/**
+/*
  * Unreliable Datagram Address Vector Format
  * see IBTA Vol1 chapter 8.3 Global Routing Header
  */
@@ -96,30 +94,17 @@ struct ehca_ud_av {
 			/* DWord_1 --> SGID */
 
 			u32 sgid_wd3;
-			/* bits 127 - 96       */
-
 			u32 sgid_wd2;
-			/* bits  95 - 64 */
-			/* DWord_2 */
 
 			u32 sgid_wd1;
-			/* bits  63 - 32 */
-
 			u32 sgid_wd0;
-			/* bits  31 -  0 */
 			/* DWord_3 --> DGID */
 
 			u32 dgid_wd3;
-			/* bits 127 - 96
-			 **/
 			u32 dgid_wd2;
-			/* bits  95 - 64
-			 DWord_4 */
-			u32 dgid_wd1;
-			/* bits  63 - 32 */
 
+			u32 dgid_wd1;
 			u32 dgid_wd0;
-			/* bits  31 -  0    */
 		} grh_l;
 	};
 };
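
The GRH_*_MASK definitions in this header use IBM bit numbering, where bit 0
is the most significant bit of the 64-bit word, so EHCA_BMASK_IBM(56,63)
selects the least significant byte. Assuming EHCA_BMASK_GET is the extraction
counterpart of the EHCA_BMASK_SET macro used throughout this patch (both come
from ehca_tools.h), field access looks like this sketch:

/* extract the hop limit, IBM bits 56-63, from the first GRH dword */
static inline u8 grh_hoplimit(u64 grh_dword0)
{
	return EHCA_BMASK_GET(GRH_HOPLIMIT_MASK, grh_dword0);
}
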
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_qp.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_qp.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_qp.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_qp.c	2006-10-17 10:15:06.000000000 -0400
@@ -42,8 +42,6 @@
  */
 
 
-#define DEB_PREFIX "e_qp"
-
 #include <asm/current.h>
 
 #include "ehca_classes.h"
@@ -53,7 +51,9 @@
 #include "hcp_if.h"
 #include "hipz_fns.h"
 
-/**
+static struct kmem_cache *qp_cache;
+
+/*
  * attributes not supported by query qp
  */
 #define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
@@ -61,7 +61,7 @@
 				     IB_QP_ACCESS_FLAGS       | \
 				     IB_QP_EN_SQD_ASYNC_NOTIFY)
 
-/**
+/*
  * ehca (internal) qp state values
  */
 enum ehca_qp_state {
@@ -74,7 +74,7 @@ enum ehca_qp_state {
 	EHCA_QPS_ERR = 128
 };
 
-/**
+/*
  * qp state transitions as defined by IB Arch Rel 1.1 page 431
  */
 enum ib_qp_statetrans {
@@ -92,8 +92,8 @@ enum ib_qp_statetrans {
 	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
 };
 
-/**
- * ib2ehca_qp_state - maps IB to ehca qp_state
+/*
+ * ib2ehca_qp_state maps IB to ehca qp_state
  * returns ehca qp state corresponding to given ib qp state
  */
 static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
@@ -114,13 +114,13 @@ static inline enum ehca_qp_state ib2ehca
 	case IB_QPS_ERR:
 		return EHCA_QPS_ERR;
 	default:
-		EDEB_ERR(4, "invalid ib_qp_state=%x", ib_qp_state);
+		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
 		return -EINVAL;
 	}
 }
 
-/**
- * ehca2ib_qp_state - maps ehca to IB qp_state
+/*
+ * ehca2ib_qp_state maps ehca to IB qp_state
  * returns ib qp state corresponding to given ehca qp state
  */
 static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
@@ -142,13 +142,13 @@ static inline enum ib_qp_state ehca2ib_q
 	case EHCA_QPS_ERR:
 		return IB_QPS_ERR;
 	default:
-		EDEB_ERR(4,"invalid ehca_qp_state=%x",ehca_qp_state);
+		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
 		return -EINVAL;
 	}
 }
 
-/**
- * ehca_qp_type - used as index for req_attr and opt_attr of
+/*
+ * ehca_qp_type used as index for req_attr and opt_attr of
  * struct ehca_modqp_statetrans
  */
 enum ehca_qp_type {
@@ -159,8 +159,8 @@ enum ehca_qp_type {
 	QPT_MAX
 };
 
-/**
- * ib2ehcaqptype - maps Ib to ehca qp_type
+/*
+ * ib2ehcaqptype maps Ib to ehca qp_type
  * returns ehca qp type corresponding to ib qp type
  */
 static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
@@ -176,7 +176,7 @@ static inline enum ehca_qp_type ib2ehcaq
 	case IB_QPT_UD:
 		return QPT_UD;
 	default:
-		EDEB_ERR(4,"Invalid ibqptype=%x", ibqptype);
+		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
 		return -EINVAL;
 	}
 }
@@ -190,24 +190,34 @@ static inline enum ib_qp_statetrans get_
 		index = IB_QPST_ANY2RESET;
 		break;
 	case IB_QPS_INIT:
-		if (ib_fromstate == IB_QPS_RESET)
+		switch (ib_fromstate) {
+		case IB_QPS_RESET:
 			index = IB_QPST_RESET2INIT;
-		else if (ib_fromstate == IB_QPS_INIT)
+			break;
+		case IB_QPS_INIT:
 			index = IB_QPST_INIT2INIT;
+			break;
+		}
 		break;
 	case IB_QPS_RTR:
 		if (ib_fromstate == IB_QPS_INIT)
 			index = IB_QPST_INIT2RTR;
 		break;
 	case IB_QPS_RTS:
-		if (ib_fromstate == IB_QPS_RTR)
+		switch (ib_fromstate) {
+		case IB_QPS_RTR:
 			index = IB_QPST_RTR2RTS;
-		else if (ib_fromstate == IB_QPS_RTS)
+			break;
+		case IB_QPS_RTS:
 			index = IB_QPST_RTS2RTS;
-		else if (ib_fromstate == IB_QPS_SQD)
+			break;
+		case IB_QPS_SQD:
 			index = IB_QPST_SQD2RTS;
-		else if (ib_fromstate == IB_QPS_SQE)
+			break;
+		case IB_QPS_SQE:
 			index = IB_QPST_SQE2RTS;
+			break;
+		}
 		break;
 	case IB_QPS_SQD:
 		if (ib_fromstate == IB_QPS_RTS)
@@ -231,8 +241,8 @@ enum ehca_service_type {
 	ST_UD = 3
 };
 
-/**
- * ibqptype2servicetype - returns hcp service type corresponding to given
+/*
+ * ibqptype2servicetype returns hcp service type corresponding to given
  * ib qp type used by create_qp()
  */
 static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
@@ -252,15 +262,15 @@ static inline int ibqptype2servicetype(e
 	case IB_QPT_RAW_ETY:
 		return -EINVAL;
 	default:
-		EDEB_ERR(4, "Invalid ibqptype=%x", ibqptype);
+		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
 		return -EINVAL;
 	}
 }
 
-/**
- * init_qp_queues - Initializes/constructs r/squeue and registers queue pages.
+/*
+ * init_qp_queues initializes/constructs r/squeue and registers queue pages.
  */
-static inline int init_qp_queues(struct ipz_adapter_handle ipz_hca_handle,
+static inline int init_qp_queues(struct ehca_shca *shca,
 				 struct ehca_qp *my_qp,
 				 int nr_sq_pages,
 				 int nr_rq_pages,
@@ -268,28 +278,26 @@ static inline int init_qp_queues(struct 
 				 int rwqe_size,
 				 int nr_send_sges, int nr_receive_sges)
 {
-	int ret = -EINVAL;
-	int cnt = 0;
-	void *vpage = NULL;
-	u64 rpage = 0;
-	int ipz_rc = -1;
-	u64 h_ret = H_PARAMETER;
+	int ret, cnt, ipz_rc;
+	void *vpage;
+	u64 rpage, h_ret;
+	struct ib_device *ib_dev = &shca->ib_device;
+	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
 
 	ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue,
 				nr_sq_pages,
 				EHCA_PAGESIZE, swqe_size, nr_send_sges);
 	if (!ipz_rc) {
-		EDEB_ERR(4, "Cannot allocate page for squeue. ipz_rc=%x",
+		ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x",
 			 ipz_rc);
-		ret = -EBUSY;
-		return ret;
+		return -EBUSY;
 	}
 
 	ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue,
 				nr_rq_pages,
 				EHCA_PAGESIZE, rwqe_size, nr_receive_sges);
 	if (!ipz_rc) {
-		EDEB_ERR(4, "Cannot allocate page for rqueue. ipz_rc=%x",
+		ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
 			 ipz_rc);
 		ret = -EBUSY;
 		goto init_qp_queues0;
@@ -298,7 +306,7 @@ static inline int init_qp_queues(struct 
 	for (cnt = 0; cnt < nr_sq_pages; cnt++) {
 		vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
 		if (!vpage) {
-			EDEB_ERR(4, "SQ ipz_qpageit_get_inc() "
+			ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
 				 "failed p_vpage= %p", vpage);
 			ret = -EINVAL;
 			goto init_qp_queues1;
@@ -311,8 +319,8 @@ static inline int init_qp_queues(struct 
 						 rpage, 1,
 						 my_qp->galpas.kernel);
 		if (h_ret < H_SUCCESS) {
-			EDEB_ERR(4,"SQ  hipz_qp_register_rpage() faield "
-				 "rc=%lx", h_ret);
+			ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
+				 " failed rc=%lx", h_ret);
 			ret = ehca2ib_return_code(h_ret);
 			goto init_qp_queues1;
 		}
@@ -324,9 +332,8 @@ static inline int init_qp_queues(struct 
 	for (cnt = 0; cnt < nr_rq_pages; cnt++) {
 		vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
 		if (!vpage) {
-			EDEB_ERR(4,"RQ ipz_qpageit_get_inc() "
+			ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
 				 "failed p_vpage = %p", vpage);
-			h_ret = H_RESOURCE;
 			ret = -EINVAL;
 			goto init_qp_queues1;
 		}
@@ -338,29 +345,28 @@ static inline int init_qp_queues(struct 
 						 &my_qp->pf, 0, 1,
 						 rpage, 1,my_qp->galpas.kernel);
 		if (h_ret < H_SUCCESS) {
-			EDEB_ERR(4, "RQ hipz_qp_register_rpage() failed "
+			ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed "
 				 "rc=%lx", h_ret);
 			ret = ehca2ib_return_code(h_ret);
 			goto init_qp_queues1;
 		}
 		if (cnt == (nr_rq_pages - 1)) {	/* last page! */
 			if (h_ret != H_SUCCESS) {
-				EDEB_ERR(4,"RQ hipz_qp_register_rpage() "
+				ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
 					 "h_ret= %lx ", h_ret);
 				ret = ehca2ib_return_code(h_ret);
 				goto init_qp_queues1;
 			}
 			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
 			if (vpage) {
-				EDEB_ERR(4,"ipz_qpageit_get_inc() "
-					 "should not succeed vpage=%p",
-					 vpage);
+				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
+					 "should not succeed vpage=%p", vpage);
 				ret = -EINVAL;
 				goto init_qp_queues1;
 			}
 		} else {
 			if (h_ret != H_PAGE_REGISTERED) {
-				EDEB_ERR(4,"RQ hipz_qp_register_rpage() "
+				ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
 					 "h_ret= %lx ", h_ret);
 				ret = ehca2ib_return_code(h_ret);
 				goto init_qp_queues1;
@@ -379,37 +385,30 @@ init_qp_queues0:
 	return ret;
 }
 
-
 struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 			     struct ib_qp_init_attr *init_attr,
 			     struct ib_udata *udata)
 {
-	extern struct ehca_module ehca_module;
-	static int da_msg_size[]={ 128, 256, 512, 1024, 2048, 4096 };
-	int ret = -EINVAL;
-
-	struct ehca_qp *my_qp = NULL;
-	struct ehca_pd *my_pd = NULL;
-	struct ehca_shca *shca = NULL;
+	static int da_rc_msg_size[] = { 128, 256, 512, 1024, 2048, 4096 };
+	static int da_ud_sq_msg_size[] = { 128, 384, 896, 1920, 3968 };
+	struct ehca_qp *my_qp;
+	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
+	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
+					      ib_device);
 	struct ib_ucontext *context = NULL;
-	u64 h_ret = H_PARAMETER;
-	int max_send_sge;
-	int max_recv_sge;
+	u64 h_ret;
+	int max_send_sge, max_recv_sge, ret;
 
 	/* h_call's out parameters */
 	struct ehca_alloc_qp_parms parms;
-	u32 qp_nr = 0, swqe_size = 0, rwqe_size = 0;
+	u32 swqe_size = 0, rwqe_size = 0;
 	u8 daqp_completion, isdaqp;
 	unsigned long flags;
 
-	EDEB_EN(7,"pd=%p init_attr=%p", pd, init_attr);
-	EHCA_CHECK_PD_P(pd);
-	EHCA_CHECK_ADR_P(init_attr);
-
 	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
 		init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
-		EDEB_ERR(4, "init_attr->sg_sig_type=%x not allowed",
-			init_attr->sq_sig_type);
+		ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
+			 init_attr->sq_sig_type);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -424,20 +423,36 @@ struct ib_qp *ehca_create_qp(struct ib_p
 	    init_attr->qp_type != IB_QPT_GSI &&
 	    init_attr->qp_type != IB_QPT_UC &&
 	    init_attr->qp_type != IB_QPT_RC) {
-		EDEB_ERR(4,"wrong QP Type=%x",init_attr->qp_type);
+		ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
 		return ERR_PTR(-EINVAL);
 	}
-	if (init_attr->qp_type != IB_QPT_RC && isdaqp != 0) {
-		EDEB_ERR(4,"unsupported LL QP Type=%x",init_attr->qp_type);
+	if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
+	    && isdaqp) {
+		ehca_err(pd->device, "unsupported LL QP Type=%x",
+			 init_attr->qp_type);
+		return ERR_PTR(-EINVAL);
+	} else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
+		   (init_attr->cap.max_send_wr > 255 ||
+		    init_attr->cap.max_recv_wr > 255 )) {
+		       ehca_err(pd->device, "Invalid Number of max_sq_wr =%x "
+				"or max_rq_wr=%x for QP Type=%x",
+				init_attr->cap.max_send_wr,
+				init_attr->cap.max_recv_wr,init_attr->qp_type);
+		       return ERR_PTR(-EINVAL);
+	} else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
+		  init_attr->cap.max_send_wr > 255) {
+		ehca_err(pd->device,
+			 "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
+			 init_attr->cap.max_send_wr, init_attr->qp_type);
 		return ERR_PTR(-EINVAL);
 	}
 
 	if (pd->uobject && udata)
 		context = pd->uobject->context;
 
-	my_qp = kmem_cache_alloc(ehca_module.cache_qp, SLAB_KERNEL);
+	my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
 	if (!my_qp) {
-		EDEB_ERR(4, "pd=%p not enough memory to alloc qp", pd);
+		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -446,9 +461,6 @@ struct ib_qp *ehca_create_qp(struct ib_p
 	spin_lock_init(&my_qp->spinlock_s);
 	spin_lock_init(&my_qp->spinlock_r);
 
-	my_pd = container_of(pd, struct ehca_pd, ib_pd);
-
-	shca = container_of(pd->device, struct ehca_shca, ib_device);
 	my_qp->recv_cq =
 		container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
 	my_qp->send_cq =
@@ -459,7 +471,7 @@ struct ib_qp *ehca_create_qp(struct ib_p
 	do {
 		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
 			ret = -ENOMEM;
-			EDEB_ERR(4, "Can't reserve idr resources.");
+			ehca_err(pd->device, "Can't reserve idr resources.");
 			goto create_qp_exit0;
 		}
 
@@ -471,14 +483,14 @@ struct ib_qp *ehca_create_qp(struct ib_p
 
 	if (ret) {
 		ret = -ENOMEM;
-		EDEB_ERR(4, "Can't allocate new idr entry.");
+		ehca_err(pd->device, "Can't allocate new idr entry.");
 		goto create_qp_exit0;
 	}
 
 	parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
 	if (parms.servicetype < 0) {
 		ret = -EINVAL;
-		EDEB_ERR(4, "Invalid qp_type=%x", init_attr->qp_type);
+		ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
 		goto create_qp_exit0;
 	}
 
@@ -497,8 +509,6 @@ struct ib_qp *ehca_create_qp(struct ib_p
 		max_recv_sge += 2;
 	}
 
-	EDEB(7, "isdaqp=%x daqp_completion=%x", isdaqp, daqp_completion);
-
 	parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
 	parms.daqp_ctrl = isdaqp | daqp_completion;
 	parms.pd = my_pd->fw_pd;
@@ -508,7 +518,8 @@ struct ib_qp *ehca_create_qp(struct ib_p
 	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);
 
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "h_alloc_resource_qp() failed h_ret=%lx", h_ret);
+		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
+			 h_ret);
 		ret = ehca2ib_return_code(h_ret);
 		goto create_qp_exit1;
 	}
@@ -521,8 +532,8 @@ struct ib_qp *ehca_create_qp(struct ib_p
 			rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
 					     (parms.act_nr_recv_sges)]);
 		} else { /* for daqp we need to use msg size, not wqe size */
-		        swqe_size = da_msg_size[max_send_sge];
-			rwqe_size = da_msg_size[max_recv_sge];
+		        swqe_size = da_rc_msg_size[max_send_sge];
+			rwqe_size = da_rc_msg_size[max_recv_sge];
 			parms.act_nr_send_sges = 1;
 			parms.act_nr_recv_sges = 1;
 		}
@@ -540,10 +551,17 @@ struct ib_qp *ehca_create_qp(struct ib_p
 		/* UD circumvention */
 		parms.act_nr_recv_sges -= 2;
 		parms.act_nr_send_sges -= 2;
-                swqe_size = offsetof(struct ehca_wqe,
-				     u.ud_av.sg_list[parms.act_nr_send_sges]);
-		rwqe_size = offsetof(struct ehca_wqe,
-				     u.ud_av.sg_list[parms.act_nr_recv_sges]);
+		if (isdaqp) {
+		        swqe_size = da_ud_sq_msg_size[max_send_sge];
+			rwqe_size = da_rc_msg_size[max_recv_sge];
+			parms.act_nr_send_sges = 1;
+			parms.act_nr_recv_sges = 1;
+		} else {
+			swqe_size = offsetof(struct ehca_wqe,
+					     u.ud_av.sg_list[parms.act_nr_send_sges]);
+			rwqe_size = offsetof(struct ehca_wqe,
+					     u.ud_av.sg_list[parms.act_nr_recv_sges]);
+		}
 
 		if (IB_QPT_GSI == init_attr->qp_type ||
 		    IB_QPT_SMI == init_attr->qp_type) {
@@ -562,13 +580,13 @@ struct ib_qp *ehca_create_qp(struct ib_p
 	}
 
 	/* initializes r/squeue and registers queue pages */
-	ret = init_qp_queues(shca->ipz_hca_handle, my_qp,
+	ret = init_qp_queues(shca, my_qp,
 			     parms.nr_sq_pages, parms.nr_rq_pages,
 			     swqe_size, rwqe_size,
 			     parms.act_nr_send_sges, parms.act_nr_recv_sges);
 	if (ret) {
-		EDEB_ERR(4,"Couldn't initialize r/squeue and pages ret=%x",
-			 ret);
+		ehca_err(pd->device,
+			 "Couldn't initialize r/squeue and pages ret=%x", ret);
 		goto create_qp_exit2;
 	}
 
@@ -597,7 +615,8 @@ struct ib_qp *ehca_create_qp(struct ib_p
 	if (init_attr->qp_type == IB_QPT_GSI) {
 		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
 		if (h_ret != H_SUCCESS) {
-			EDEB_ERR(4, "ehca_define_sqp() failed rc=%lx",h_ret);
+			ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx",
+				 h_ret);
 			ret = ehca2ib_return_code(h_ret);
 			goto create_qp_exit3;
 		}
@@ -607,7 +626,7 @@ struct ib_qp *ehca_create_qp(struct ib_p
 						  struct ehca_cq, ib_cq);
 		ret = ehca_cq_assign_qp(cq, my_qp);
 		if (ret) {
-			EDEB_ERR(4, "Couldn't assign qp to send_cq ret=%x",
+			ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
 				 ret);
 			goto create_qp_exit3;
 		}
@@ -632,10 +651,14 @@ struct ib_qp *ehca_create_qp(struct ib_p
 		resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
 		resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
 		resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
-		ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
-				 ipz_rqueue->queue_length,
-				 ((void**)&resp.ipz_rqueue.queue),
-				 &vma);
+		ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
+				       ipz_rqueue->queue_length,
+				       (void**)&resp.ipz_rqueue.queue,
+				       &vma);
+		if (ret) {
+			ehca_err(pd->device, "Could not mmap rqueue pages");
+			goto create_qp_exit3;
+		}
 		my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
 		/* squeue properties */
 		resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
@@ -643,29 +666,44 @@ struct ib_qp *ehca_create_qp(struct ib_p
 		resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
 		resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
 		resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
-		ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
-				 ipz_squeue->queue_length,
-				 ((void**)&resp.ipz_squeue.queue),
-				 &vma);
+		ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
+				       ipz_squeue->queue_length,
+				       (void**)&resp.ipz_squeue.queue,
+				       &vma);
+		if (ret) {
+			ehca_err(pd->device, "Could not mmap squeue pages");
+			goto create_qp_exit4;
+		}
 		my_qp->uspace_squeue = resp.ipz_squeue.queue;
 		/* fw_handle */
 		resp.galpas = my_qp->galpas;
-		ehca_mmap_register(my_qp->galpas.user.fw_handle,
-				   ((void**)&resp.galpas.kernel.fw_handle),
-				   &vma);
+		ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
+					 (void**)&resp.galpas.kernel.fw_handle,
+					 &vma);
+		if (ret) {
+			ehca_err(pd->device, "Could not mmap fw_handle");
+			goto create_qp_exit5;
+		}
 		my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
 
 		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
-			EDEB_ERR(4, "Copy to udata failed");
+			ehca_err(pd->device, "Copy to udata failed");
 			ret = -EINVAL;
-			goto create_qp_exit3;
+			goto create_qp_exit6;
 		}
 	}
 
-	EDEB_EX(7, "ehca_qp=%p qp_num=%x, token=%x",
-		my_qp, qp_nr, my_qp->token);
 	return &my_qp->ib_qp;
 
+create_qp_exit6:
+	ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
+
+create_qp_exit5:
+	ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
+
+create_qp_exit4:
+	ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
+
 create_qp_exit3:
 	ipz_queue_dtor(&my_qp->ipz_rqueue);
 	ipz_queue_dtor(&my_qp->ipz_squeue);
@@ -679,62 +717,57 @@ create_qp_exit1:
 	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
 create_qp_exit0:
-	kmem_cache_free(ehca_module.cache_qp, my_qp);
-	EDEB_EX(4, "failed ret=%x", ret);
+	kmem_cache_free(qp_cache, my_qp);
 	return ERR_PTR(ret);
-
 }
 
-/**
- * prepare_sqe_rts - called by internal_modify_qp() at trans sqe -> rts
+/*
+ * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
  * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
  * returns total number of bad wqes in bad_wqe_cnt
  */
 static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
 			   int *bad_wqe_cnt)
 {
-	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	struct ipz_queue *squeue = NULL;
-	void *bad_send_wqe_p = NULL;
-	void *bad_send_wqe_v = NULL;
-	void *squeue_start_p = NULL;
-	void *squeue_end_p = NULL;
-	void *squeue_start_v = NULL;
-	void *squeue_end_v = NULL;
-	struct ehca_wqe *wqe = NULL;
+	u64 h_ret;
+	struct ipz_queue *squeue;
+	void *bad_send_wqe_p, *bad_send_wqe_v;
+	void *squeue_start_p, *squeue_end_p;
+	void *squeue_start_v, *squeue_end_v;
+	struct ehca_wqe *wqe;
 	int qp_num = my_qp->ib_qp.qp_num;
 
-	EDEB_EN(7, "ehca_qp=%p qp_num=%x ", my_qp, qp_num);
-
 	/* get send wqe pointer */
 	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
 					   my_qp->ipz_qp_handle, &my_qp->pf,
 					   &bad_send_wqe_p, NULL, 2);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_h_disable_and_get_wqe() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%lx",my_qp, qp_num, h_ret);
-		ret = ehca2ib_return_code(h_ret);
-		goto prepare_sqe_rts_exit1;
+		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
+			 " ehca_qp=%p qp_num=%x h_ret=%lx",
+			 my_qp, qp_num, h_ret);
+		return ehca2ib_return_code(h_ret);
 	}
 	bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63)));
-	EDEB(7, "qp_num=%x bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
+	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
+		 qp_num, bad_send_wqe_p);
 	/* convert wqe pointer to vadr */
 	bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
-	EDEB_DMP(6, bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
+	if (ehca_debug_level)
+		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
 	squeue = &my_qp->ipz_squeue;
 	squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
 	squeue_end_p = squeue_start_p+squeue->queue_length;
 	squeue_start_v = abs_to_virt((u64)squeue_start_p);
 	squeue_end_v = abs_to_virt((u64)squeue_end_p);
-	EDEB(6, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
-	     qp_num, squeue_start_v, squeue_end_v);
+	ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
+		 qp_num, squeue_start_v, squeue_end_v);
 
 	/* loop sets wqe's purge bit */
 	wqe = (struct ehca_wqe*)bad_send_wqe_v;
 	*bad_wqe_cnt = 0;
 	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
-		EDEB_DMP(6, wqe, 32, "qp_num=%x wqe", qp_num);
+		if (ehca_debug_level)
+			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
 		wqe->nr_of_data_seg = 0; /* suppress data access */
 		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
 		wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
@@ -743,20 +776,19 @@ static int prepare_sqe_rts(struct ehca_q
 			wqe = squeue_start_v;
 		}
 	}
-	/* bad wqe will be reprocessed and ignored when pol_cq() is called,
+	/*
+	 * bad wqe will be reprocessed and ignored when poll_cq() is called,
 	 *  i.e. nr of wqes with flush error status is one less
 	 */
-	EDEB(6, "qp_num=%x flusherr_wqe_cnt=%x", qp_num, (*bad_wqe_cnt)-1);
+	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
+		 qp_num, (*bad_wqe_cnt)-1);
 	wqe->wqef = 0;
 
-prepare_sqe_rts_exit1:
-
-	EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x", my_qp, qp_num, ret);
-	return ret;
+	return 0;
 }
 
-/**
- * internal_modify_qp - with circumvention to handle aqp0 properly
+/*
+ * internal_modify_qp with circumvention to handle aqp0 properly
  * smi_reset2init indicates if this is an internal reset-to-init-call for
  * smi. This flag must always be zero if called from ehca_modify_qp()!
  * This internal func was intorduced to avoid recursion of ehca_modify_qp()!
@@ -765,34 +797,25 @@ static int internal_modify_qp(struct ib_
 			      struct ib_qp_attr *attr,
 			      int attr_mask, int smi_reset2init)
 {
-	enum ib_qp_state qp_cur_state = 0, qp_new_state = 0;
-	int cnt = 0, qp_attr_idx = 0, ret = 0;
-
+	enum ib_qp_state qp_cur_state, qp_new_state;
+	int cnt, qp_attr_idx, ret = 0;
 	enum ib_qp_statetrans statetrans;
-	struct hcp_modify_qp_control_block *mqpcb = NULL;
-	struct ehca_qp *my_qp = NULL;
-	struct ehca_shca *shca = NULL;
-	u64 update_mask = 0;
-	u64 h_ret = H_SUCCESS;
+	struct hcp_modify_qp_control_block *mqpcb;
+	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+	struct ehca_shca *shca =
+		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
+	u64 update_mask;
+	u64 h_ret;
 	int bad_wqe_cnt = 0;
 	int squeue_locked = 0;
 	unsigned long spl_flags = 0;
 
-	my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-	shca = container_of(ibqp->pd->device, struct ehca_shca, ib_device);
-
-	EDEB_EN(7, "ehca_qp=%p qp_num=%x ibqp_type=%x "
-		"new qp_state=%x attribute_mask=%x",
-		my_qp, ibqp->qp_num, ibqp->qp_type,
-		attr->qp_state, attr_mask);
-
 	/* do query_qp to obtain current attr values */
 	mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
 	if (mqpcb == NULL) {
-		ret = -ENOMEM;
-		EDEB_ERR(4, "Could not get zeroed page for mqpcb "
+		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
 			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
-		goto modify_qp_exit0;
+		return -ENOMEM;
 	}
 
 	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
@@ -800,26 +823,26 @@ static int internal_modify_qp(struct ib_
 				&my_qp->pf,
 				mqpcb, my_qp->galpas.kernel);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_h_query_qp() failed "
+		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
 			 "ehca_qp=%p qp_num=%x h_ret=%lx",
 			 my_qp, ibqp->qp_num, h_ret);
 		ret = ehca2ib_return_code(h_ret);
 		goto modify_qp_exit1;
 	}
-	EDEB(7, "ehca_qp=%p qp_num=%x ehca_qp_state=%x",
-	     my_qp, ibqp->qp_num, mqpcb->qp_state);
 
 	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
 
 	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
 		ret = -EINVAL;
-		EDEB_ERR(4, "Invalid current ehca_qp_state=%x "
+		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
 			 "ehca_qp=%p qp_num=%x",
 			 mqpcb->qp_state, my_qp, ibqp->qp_num);
 		goto modify_qp_exit1;
 	}
-	/* circumvention to set aqp0 initial state to init
-	   as expected by IB spec */
+	/*
+	 * circumvention to set aqp0 initial state to init
+	 * as expected by IB spec
+	 */
 	if (smi_reset2init == 0 &&
 	    ibqp->qp_type == IB_QPT_SMI &&
 	    qp_cur_state == IB_QPS_RESET &&
@@ -836,37 +859,38 @@ static int internal_modify_qp(struct ib_
 		int smirc = internal_modify_qp(
 			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
 		if (smirc) {
-			EDEB_ERR(4, "SMI RESET -> INIT failed. "
+			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
 				 "ehca_modify_qp() rc=%x", smirc);
 			ret = H_PARAMETER;
 			goto modify_qp_exit1;
 		}
 		qp_cur_state = IB_QPS_INIT;
-		EDEB(7, "SMI RESET -> INIT succeeded");
+		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
 	}
 	/* is transmitted current state  equal to "real" current state */
 	if ((attr_mask & IB_QP_CUR_STATE) &&
 	    qp_cur_state != attr->cur_qp_state) {
 		ret = -EINVAL;
-		EDEB_ERR(4, "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
+		ehca_err(ibqp->device,
+			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
 			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
 			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
 		goto modify_qp_exit1;
 	}
 
-	EDEB(7,	"ehca_qp=%p qp_num=%x current qp_state=%x "
-	     "new qp_state=%x attribute_mask=%x",
-	     my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
+	ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x "
+		 "new qp_state=%x attribute_mask=%x",
+		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
 
 	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
 	if (!smi_reset2init &&
 	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
 				attr_mask)) {
 		ret = -EINVAL;
-		EDEB_ERR(4, "Invalid qp transition new_state=%x cur_state=%x "
-			 "ehca_qp=%p qp_num=%x attr_mask=%x",
-			 qp_new_state, qp_cur_state, my_qp, ibqp->qp_num,
-			 attr_mask);
+		ehca_err(ibqp->device,
+			 "Invalid qp transition new_state=%x cur_state=%x "
+			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
+			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
 		goto modify_qp_exit1;
 	}
 
@@ -874,7 +898,7 @@ static int internal_modify_qp(struct ib_
 		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
 	else {
 		ret = -EINVAL;
-		EDEB_ERR(4, "Invalid new qp state=%x "
+		ehca_err(ibqp->device, "Invalid new qp state=%x "
 			 "ehca_qp=%p qp_num=%x",
 			 qp_new_state, my_qp, ibqp->qp_num);
 		goto modify_qp_exit1;
@@ -884,10 +908,9 @@ static int internal_modify_qp(struct ib_
 	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
 	if (statetrans < 0) {
 		ret = -EINVAL;
-		EDEB_ERR(4, "<INVALID STATE CHANGE> qp_cur_state=%x "
-			 "new_qp_state=%x State_xsition=%x "
-			 "ehca_qp=%p qp_num=%x",
-			 qp_cur_state, qp_new_state,
+		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
+			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
+			 "qp_num=%x", qp_cur_state, qp_new_state,
 			 statetrans, my_qp, ibqp->qp_num);
 		goto modify_qp_exit1;
 	}
@@ -896,13 +919,15 @@ static int internal_modify_qp(struct ib_
 
 	if (qp_attr_idx < 0) {
 		ret = qp_attr_idx;
-		EDEB_ERR(4, "Invalid QP type=%x ehca_qp=%p qp_num=%x",
+		ehca_err(ibqp->device,
+			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
 			 ibqp->qp_type, my_qp, ibqp->qp_num);
 		goto modify_qp_exit1;
 	}
 
-	EDEB(7, "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
-	     my_qp, ibqp->qp_num, statetrans);
+	ehca_dbg(ibqp->device,
+		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
+		 my_qp, ibqp->qp_num, statetrans);
 
 	/* sqe -> rts: set purge bit of bad wqe before actual trans */
 	if ((my_qp->qp_type == IB_QPT_UD ||
@@ -911,7 +936,7 @@ static int internal_modify_qp(struct ib_
 	    statetrans == IB_QPST_SQE2RTS) {
 		/* mark next free wqe if kernel */
 		if (my_qp->uspace_squeue == 0) {
-			struct ehca_wqe *wqe = NULL;
+			struct ehca_wqe *wqe;
 			/* lock send queue */
 			spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
 			squeue_locked = 1;
@@ -919,21 +944,23 @@ static int internal_modify_qp(struct ib_
 			wqe = (struct ehca_wqe*)
 				ipz_qeit_get(&my_qp->ipz_squeue);
 			wqe->optype = wqe->wqef = 0xff;
-			EDEB(7, "qp_num=%x next_free_wqe=%p",
-			     ibqp->qp_num, wqe);
+			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
+				 ibqp->qp_num, wqe);
 		}
 		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
 		if (ret) {
-			EDEB_ERR(4, "prepare_sqe_rts() failed "
+			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
 				 "ehca_qp=%p qp_num=%x ret=%x",
 				 my_qp, ibqp->qp_num, ret);
 			goto modify_qp_exit2;
 		}
 	}
 
-	/* enable RDMA_Atomic_Control if reset->init und reliable con
-	   this is necessary since gen2 does not provide that flag,
-	   but pHyp requires it */
+	/*
+	 * enable RDMA_Atomic_Control if reset->init and reliable connection;
+	 * this is necessary since gen2 does not provide that flag,
+	 * but pHyp requires it
+	 */
 	if (statetrans == IB_QPST_RESET2INIT &&
 	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
 		mqpcb->rdma_atomic_ctrl = 3;
@@ -951,14 +978,11 @@ static int internal_modify_qp(struct ib_
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		mqpcb->prim_p_key_idx = attr->pkey_index;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x "
-		     "IB_QP_PKEY_INDEX update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_PORT) {
 		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
 			ret = -EINVAL;
-			EDEB_ERR(4, "Invalid port=%x. "
+			ehca_err(ibqp->device, "Invalid port=%x. "
 				 "ehca_qp=%p qp_num=%x num_ports=%x",
 				 attr->port_num, my_qp, ibqp->qp_num,
 				 shca->num_ports);
@@ -966,14 +990,10 @@ static int internal_modify_qp(struct ib_
 		}
 		mqpcb->prim_phys_port = attr->port_num;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_PORT update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_QKEY) {
 		mqpcb->qkey = attr->qkey;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_QKEY update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_AV) {
 		int ah_mult = ib_rate_to_mult(attr->ah_attr.static_rate);
@@ -987,21 +1007,16 @@ static int internal_modify_qp(struct ib_
 		mqpcb->service_level = attr->ah_attr.sl;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
 
-                if (ah_mult < ehca_mult)
+		if (ah_mult < ehca_mult)
 			mqpcb->max_static_rate = (ah_mult > 0) ?
 			((ehca_mult - 1) / ah_mult) : 0;
 		else
 			mqpcb->max_static_rate = 0;
 
-		EDEB(7, " ipd=mqpcb->max_static_rate set %x "
-			" ah_mult=%x  ehca_mult=%x "
-			" attr->ah_attr.static_rate=%x",
-		     mqpcb->max_static_rate,ah_mult,ehca_mult,
-		     attr->ah_attr.static_rate);
-
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
 
-		/* only if GRH is TRUE we might consider SOURCE_GID_IDX
+		/*
+		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
 		 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
 		 */
 		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
@@ -1025,56 +1040,40 @@ static int internal_modify_qp(struct ib_
 			update_mask |=
 				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
 		}
-
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_AV update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 
 	if (attr_mask & IB_QP_PATH_MTU) {
 		mqpcb->path_mtu = attr->path_mtu;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_PATH_MTU update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_TIMEOUT) {
 		mqpcb->timeout = attr->timeout;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_TIMEOUT update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_RETRY_CNT) {
 		mqpcb->retry_count = attr->retry_cnt;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_RETRY_CNT update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_RNR_RETRY) {
 		mqpcb->rnr_retry_count = attr->rnr_retry;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_RNR_RETRY update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_RQ_PSN) {
 		mqpcb->receive_psn = attr->rq_psn;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_RQ_PSN update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
 		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
-			attr->max_dest_rd_atomic : 2; /* max is 2 */
+			attr->max_dest_rd_atomic : 2;
 		update_mask |=
 			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_MAX_DEST_RD_ATOMIC "
-		     "update_mask=%lx", my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic;
+		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
+			attr->max_rd_atomic : 2;
 		update_mask |=
 			EHCA_BMASK_SET
 			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_MAX_QP_RD_ATOMIC "
-		     "update_mask=%lx", my_qp, ibqp->qp_num, update_mask);
 	}
 	if (attr_mask & IB_QP_ALT_PATH) {
 		int ah_mult = ib_rate_to_mult(attr->alt_ah_attr.static_rate);
@@ -1095,13 +1094,10 @@ static int internal_modify_qp(struct ib_
 		else
 			mqpcb->max_static_rate_al = 0;
 
-		EDEB(7, " ipd=mqpcb->max_static_rate set %x,"
-			" ah_mult=%x ehca_mult=%x",
-		     mqpcb->max_static_rate,ah_mult,ehca_mult);
-
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1);
 
-		/* only if GRH is TRUE we might consider SOURCE_GID_IDX
+		/*
+		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
 		 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
 		 */
 		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
@@ -1130,43 +1126,28 @@ static int internal_modify_qp(struct ib_
 			update_mask |=
 				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
 		}
-
-		EDEB(7, "ehca_qp=%p qp_num=%x IB_QP_ALT_PATH update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 
 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
 		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
 		update_mask |=
 			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x "
-		     "IB_QP_MIN_RNR_TIMER update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 
 	if (attr_mask & IB_QP_SQ_PSN) {
 		mqpcb->send_psn = attr->sq_psn;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x "
-		     "IB_QP_SQ_PSN update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 
 	if (attr_mask & IB_QP_DEST_QPN) {
 		mqpcb->dest_qp_nr = attr->dest_qp_num;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x "
-		     "IB_QP_DEST_QPN update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 	}
 
 	if (attr_mask & IB_QP_PATH_MIG_STATE) {
 		mqpcb->path_migration_state = attr->path_mig_state;
 		update_mask |=
 			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x "
-		     "IB_QP_PATH_MIG_STATE update_mask=%lx", my_qp,
-		     ibqp->qp_num, update_mask);
 	}
 
 	if (attr_mask & IB_QP_CAP) {
@@ -1176,13 +1157,11 @@ static int internal_modify_qp(struct ib_
 		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
 		update_mask |=
 			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
-		EDEB(7, "ehca_qp=%p qp_num=%x "
-		     "IB_QP_CAP update_mask=%lx",
-		     my_qp, ibqp->qp_num, update_mask);
 		/* no support for max_send/recv_sge yet */
 	}
 
-	EDEB_DMP(7, mqpcb, 4*70, "ehca_qp=%p qp_num=%x", my_qp, ibqp->qp_num);
+	if (ehca_debug_level)
+		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
 
 	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
 				 my_qp->ipz_qp_handle,
@@ -1192,9 +1171,8 @@ static int internal_modify_qp(struct ib_
 
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
-		EDEB_ERR(4, "hipz_h_modify_qp() failed rc=%lx "
-			 "ehca_qp=%p qp_num=%x",
-			 h_ret, my_qp, ibqp->qp_num);
+		ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx "
+			 "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num);
 		goto modify_qp_exit2;
 	}
 
@@ -1205,7 +1183,7 @@ static int internal_modify_qp(struct ib_
 		/* doorbell to reprocessing wqes */
 		iosync(); /* serialize GAL register access */
 		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
-		EDEB(6, "doorbell for %x wqes", bad_wqe_cnt);
+		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
 	}
 
 	if (statetrans == IB_QPST_RESET2INIT ||
@@ -1215,10 +1193,6 @@ static int internal_modify_qp(struct ib_
 		update_mask = 0;
 		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
 
-		EDEB(7, "ehca_qp=%p qp_num=%x "
-		     "RESET_2_INIT needs an additional enable "
-		     "-> update_mask=%lx", my_qp, ibqp->qp_num, update_mask);
-
 		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
 					 my_qp->ipz_qp_handle,
 					 &my_qp->pf,
@@ -1228,10 +1202,9 @@ static int internal_modify_qp(struct ib_
 
 		if (h_ret != H_SUCCESS) {
 			ret = ehca2ib_return_code(h_ret);
-			EDEB_ERR(4, "ENABLE in context of "
-				 "RESET_2_INIT failed! "
-				 "Maybe you didn't get a LID"
-				 "h_ret=%lx ehca_qp=%p qp_num=%x",
+			ehca_err(ibqp->device, "ENABLE in context of "
+				 "RESET_2_INIT failed! Maybe you didn't get "
+				 "a LID h_ret=%lx ehca_qp=%p qp_num=%x",
 				 h_ret, my_qp, ibqp->qp_num);
 			goto modify_qp_exit2;
 		}
@@ -1254,91 +1227,60 @@ modify_qp_exit2:
 modify_qp_exit1:
 	kfree(mqpcb);
 
-modify_qp_exit0:
-	EDEB_EX(7, "ehca_qp=%p qp_num=%x ibqp_type=%x ret=%x",
-		my_qp, ibqp->qp_num, ibqp->qp_type, ret);
 	return ret;
 }
 
 int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 {
-	int ret = 0;
-	struct ehca_qp *my_qp = NULL;
-	struct ehca_pd *my_pd = NULL;
+	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+					     ib_pd);
 	u32 cur_pid = current->tgid;
 
-	EHCA_CHECK_ADR(ibqp);
-	EHCA_CHECK_ADR(attr);
-	EHCA_CHECK_ADR(ibqp->device);
-
-	my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-
-	EDEB_EN(7, "ehca_qp=%p qp_num=%x ibqp_type=%x attr_mask=%x",
-		my_qp, ibqp->qp_num, ibqp->qp_type, attr_mask);
-
-	my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    my_pd->ownpid != cur_pid) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
-		ret = -EINVAL;
-	} else
-		ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
+		return -EINVAL;
+	}
 
-	EDEB_EX(7, "ehca_qp=%p qp_num=%x ibqp_type=%x ret=%x",
-		my_qp, ibqp->qp_num, ibqp->qp_type, ret);
-	return ret;
+	return internal_modify_qp(ibqp, attr, attr_mask, 0);
 }
 
 int ehca_query_qp(struct ib_qp *qp,
 		  struct ib_qp_attr *qp_attr,
 		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
 {
-	struct ehca_qp *my_qp = NULL;
-	struct ehca_shca *shca = NULL;
-	struct hcp_modify_qp_control_block *qpcb = NULL;
-	struct ipz_adapter_handle adapter_handle;
-	struct ehca_pd *my_pd = NULL;
+	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+					     ib_pd);
+	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
+					      ib_device);
+	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
+	struct hcp_modify_qp_control_block *qpcb;
 	u32 cur_pid = current->tgid;
-	int cnt = 0, ret = 0;
-	u64 h_ret = H_SUCCESS;
+	int cnt, ret = 0;
+	u64 h_ret;
 
-	EHCA_CHECK_ADR(qp);
-	EHCA_CHECK_ADR(qp_attr);
-	EHCA_CHECK_DEVICE(qp->device);
-
-	my_qp = container_of(qp, struct ehca_qp, ib_qp);
-
-	EDEB_EN(7, "ehca_qp=%p qp_num=%x "
-		"qp_attr=%p qp_attr_mask=%x qp_init_attr=%p",
-		my_qp, qp->qp_num, qp_attr, qp_attr_mask, qp_init_attr);
-
-	my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject  && my_pd->ib_pd.uobject->context  &&
 	    my_pd->ownpid != cur_pid) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
-		ret = -EINVAL;
-		goto query_qp_exit0;
+		return -EINVAL;
 	}
 
-	shca = container_of(qp->device, struct ehca_shca, ib_device);
-	adapter_handle = shca->ipz_hca_handle;
-
 	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
-		ret = -EINVAL;
-		EDEB_ERR(4,"Invalid attribute mask "
+		ehca_err(qp->device,"Invalid attribute mask "
 			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
 			 my_qp, qp->qp_num, qp_attr_mask);
-		goto query_qp_exit0;
+		return -EINVAL;
 	}
 
 	qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL );
 	if (!qpcb) {
-		ret = -ENOMEM;
-		EDEB_ERR(4,"Out of memory for qpcb "
+		ehca_err(qp->device,"Out of memory for qpcb "
 			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
-		goto query_qp_exit0;
+		return -ENOMEM;
 	}
 
 	h_ret = hipz_h_query_qp(adapter_handle,
@@ -1348,7 +1290,7 @@ int ehca_query_qp(struct ib_qp *qp,
 
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
-		EDEB_ERR(4,"hipz_h_query_qp() failed "
+		ehca_err(qp->device,"hipz_h_query_qp() failed "
 			 "ehca_qp=%p qp_num=%x h_ret=%lx",
 			 my_qp, qp->qp_num, h_ret);
 		goto query_qp_exit1;
@@ -1356,9 +1298,10 @@ int ehca_query_qp(struct ib_qp *qp,
 
 	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
 	qp_attr->qp_state = qp_attr->cur_qp_state;
+
 	if (qp_attr->cur_qp_state == -EINVAL) {
 		ret = -EINVAL;
-		EDEB_ERR(4,"Got invalid ehca_qp_state=%x "
+		ehca_err(qp->device,"Got invalid ehca_qp_state=%x "
 			 "ehca_qp=%p qp_num=%x",
 			 qpcb->qp_state, my_qp, qp->qp_num);
 		goto query_qp_exit1;
@@ -1453,54 +1396,33 @@ int ehca_query_qp(struct ib_qp *qp,
 	if (qp_init_attr)
 		*qp_init_attr = my_qp->init_attr;
 
-	EDEB(7,	"ehca_qp=%p qp_number=%x dest_qp_number=%x "
-	     "dlid=%x path_mtu=%x dest_gid=%lx_%lx "
-	     "service_level=%x qp_state=%x",
-	     my_qp, qpcb->qp_number, qpcb->dest_qp_nr,
-	     qpcb->dlid, qpcb->path_mtu,
-	     qpcb->dest_gid.dw[0], qpcb->dest_gid.dw[1],
-	     qpcb->service_level, qpcb->qp_state);
-
-	EDEB_DMP(7, qpcb, 4*70, "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
+	if (ehca_debug_level)
+		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
 
 query_qp_exit1:
 	kfree(qpcb);
 
-query_qp_exit0:
-	EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x",
-		my_qp, qp->qp_num, ret);
 	return ret;
 }
 
 int ehca_destroy_qp(struct ib_qp *ibqp)
 {
-	extern struct ehca_module ehca_module;
-	struct ehca_qp *my_qp = NULL;
-	struct ehca_shca *shca = NULL;
-	struct ehca_pfqp *qp_pf = NULL;
-	struct ehca_pd *my_pd = NULL;
+	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+					      ib_device);
+	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+					     ib_pd);
 	u32 cur_pid = current->tgid;
-	u32 qp_num = 0;
-	int ret = 0;
-	u64 h_ret = H_SUCCESS;
-	u8 port_num = 0;
+	u32 qp_num = ibqp->qp_num;
+	int ret;
+	u64 h_ret;
+	u8 port_num;
 	enum ib_qp_type	qp_type;
 	unsigned long flags;
 
-	EHCA_CHECK_ADR(ibqp);
-
-	my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-	qp_num = ibqp->qp_num;
-	qp_pf = &my_qp->pf;
-
-	shca = container_of(ibqp->device, struct ehca_shca, ib_device);
-
-	EDEB_EN(7, "ehca_qp=%p qp_num=%x", my_qp, ibqp->qp_num);
-
-	my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, ib_pd);
 	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
 	    my_pd->ownpid != cur_pid) {
-		EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+		ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
 			 cur_pid, my_pd->ownpid);
 		return -EINVAL;
 	}
@@ -1509,11 +1431,10 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
 		ret = ehca_cq_unassign_qp(my_qp->send_cq,
 					      my_qp->real_qp_num);
 		if (ret) {
-			EDEB_ERR(4, "Couldn't unassign qp from send_cq "
-				 "ret=%x qp_num=%x cq_num=%x",
-				 ret, my_qp->ib_qp.qp_num,
-				 my_qp->send_cq->cq_number);
-			goto destroy_qp_exit0;
+			ehca_err(ibqp->device, "Couldn't unassign qp from "
+				 "send_cq ret=%x qp_num=%x cq_num=%x", ret,
+				 my_qp->ib_qp.qp_num, my_qp->send_cq->cq_number);
+			return ret;
 		}
 	}
 
@@ -1525,17 +1446,25 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
 	if (my_qp->uspace_rqueue) {
 		ret = ehca_munmap(my_qp->uspace_rqueue,
 				  my_qp->ipz_rqueue.queue_length);
+		if (ret)
+			ehca_err(ibqp->device, "Could not munmap rqueue "
+				 "qp_num=%x", qp_num);
 		ret = ehca_munmap(my_qp->uspace_squeue,
 				  my_qp->ipz_squeue.queue_length);
-		ret = ehca_munmap(my_qp->uspace_fwh, 4096);
+		if (ret)
+			ehca_err(ibqp->device, "Could not munmap squeue "
+				 "qp_num=%x", qp_num);
+		ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
+		if (ret)
+			ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
+				 qp_num);
 	}
 
 	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 	if (h_ret != H_SUCCESS) {
-		EDEB_ERR(4, "hipz_h_destroy_qp() failed "
-			 "rc=%lx ehca_qp=%p qp_num=%x",
-			 h_ret, qp_pf, qp_num);
-		goto destroy_qp_exit0;
+		ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
+			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
+		return ehca2ib_return_code(h_ret);
 	}
 
 	port_num = my_qp->init_attr.port_num;
@@ -1544,9 +1473,8 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
 	/* no support for IB_QPT_SMI yet */
 	if (qp_type == IB_QPT_GSI) {
 		struct ib_event event;
-
-		EDEB(4, "device %s: port %x is inactive.",
-		     shca->ib_device.name, port_num);
+		ehca_info(ibqp->device, "device %s: port %x is inactive.",
+			  shca->ib_device.name, port_num);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ERR;
 		event.element.port_num = port_num;
@@ -1556,10 +1484,23 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
 
 	ipz_queue_dtor(&my_qp->ipz_rqueue);
 	ipz_queue_dtor(&my_qp->ipz_squeue);
-	kmem_cache_free(ehca_module.cache_qp, my_qp);
+	kmem_cache_free(qp_cache, my_qp);
+	return 0;
+}
 
-destroy_qp_exit0:
-	ret = ehca2ib_return_code(h_ret);
-	EDEB_EX(7,"ret=%x", ret);
-	return ret;
+int ehca_init_qp_cache(void)
+{
+	qp_cache = kmem_cache_create("ehca_cache_qp",
+				     sizeof(struct ehca_qp), 0,
+				     SLAB_HWCACHE_ALIGN,
+				     NULL, NULL);
+	if (!qp_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void ehca_cleanup_qp_cache(void)
+{
+	if (qp_cache)
+		kmem_cache_destroy(qp_cache);
 }
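
For reference, the rewritten ehca_destroy_qp() above recovers every
driver-private structure (ehca_qp, ehca_shca, ehca_pd) from the embedded
ib_* members via container_of() instead of carrying extra pointers and
paranoia NULL checks.  A minimal standalone sketch of that idiom follows;
it is userspace-compilable, and the struct names are illustrative
stand-ins, not the driver's real types:

#include <stddef.h>
#include <stdio.h>

/* same definition the kernel uses: recover the enclosing struct
 * from a pointer to one of its members */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_qp { int qp_num; };		/* stand-in for the IB core type */
struct my_qp {				/* stand-in for struct ehca_qp */
	int private_state;
	struct ib_qp ib_qp;		/* embedded, as in the driver */
};

int main(void)
{
	struct my_qp qp = { .private_state = 42, .ib_qp = { .qp_num = 7 } };
	struct ib_qp *ibqp = &qp.ib_qp;	/* what the IB core hands back */
	struct my_qp *mine = container_of(ibqp, struct my_qp, ib_qp);

	printf("qp_num=%d private_state=%d\n",
	       ibqp->qp_num, mine->private_state);
	return 0;
}
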
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_reqs.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_reqs.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_reqs.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_reqs.c	2006-10-17 10:15:06.000000000 -0400
@@ -41,8 +41,7 @@
  */
 
 
-#define DEB_PREFIX "reqs"
-
+#include <asm-powerpc/system.h>
 #include "ehca_classes.h"
 #include "ehca_tools.h"
 #include "ehca_qes.h"
@@ -57,7 +56,7 @@ static inline int ehca_write_rwqe(struct
 	u8 cnt_ds;
 	if (unlikely((recv_wr->num_sge < 0) ||
 		     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
-		EDEB_ERR(4, "Invalid number of WQE SGE. "
+		ehca_gen_err("Invalid number of WQE SGE. "
 			 "num_sqe=%x max_nr_of_sg=%x",
 			 recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
 		return -EINVAL; /* invalid SG list length */
@@ -66,21 +65,21 @@ static inline int ehca_write_rwqe(struct
 	/* clear wqe header until sglist */
 	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
 
-	wqe_p->work_request_id = be64_to_cpu(recv_wr->wr_id);
+	wqe_p->work_request_id = recv_wr->wr_id;
 	wqe_p->nr_of_data_seg = recv_wr->num_sge;
 
 	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
 		wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
-		    be64_to_cpu(recv_wr->sg_list[cnt_ds].addr);
+			recv_wr->sg_list[cnt_ds].addr;
 		wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
-		    ntohl(recv_wr->sg_list[cnt_ds].lkey);
+			recv_wr->sg_list[cnt_ds].lkey;
 		wqe_p->u.all_rcv.sg_list[cnt_ds].length =
-		    ntohl(recv_wr->sg_list[cnt_ds].length);
+			recv_wr->sg_list[cnt_ds].length;
 	}
 
-	if (IS_EDEB_ON(7)) {
-		EDEB(7, "RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
-		EDEB_DMP(7, wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
+	if (ehca_debug_level) {
+		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
+		ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
 	}
 
 	return 0;
@@ -93,31 +92,35 @@ static inline int ehca_write_rwqe(struct
 
 static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
 {
-	int idx = 0;
-	int j = 0;
+	int idx;
+	int j;
 	while (send_wr) {
 		struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
 		struct ib_sge *sge = send_wr->sg_list;
-		EDEB(4, "send_wr#%x wr_id=%lx num_sge=%x "
-		     "send_flags=%x opcode=%x",idx, send_wr->wr_id,
-		     send_wr->num_sge, send_wr->send_flags, send_wr->opcode);
+		ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
+			     "send_flags=%x opcode=%x",idx, send_wr->wr_id,
+			     send_wr->num_sge, send_wr->send_flags,
+			     send_wr->opcode);
 		if (mad_hdr) {
-			EDEB(4, "send_wr#%x mad_hdr base_version=%x "
-			     "mgmt_class=%x class_version=%x method=%x "
-			     "status=%x class_specific=%x tid=%lx attr_id=%x "
-			     "resv=%x attr_mod=%x",
-			     idx, mad_hdr->base_version, mad_hdr->mgmt_class,
-			     mad_hdr->class_version, mad_hdr->method,
-			     mad_hdr->status, mad_hdr->class_specific,
-			     mad_hdr->tid, mad_hdr->attr_id, mad_hdr->resv,
-			     mad_hdr->attr_mod);
+			ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
+				     "mgmt_class=%x class_version=%x method=%x "
+				     "status=%x class_specific=%x tid=%lx "
+				     "attr_id=%x resv=%x attr_mod=%x",
+				     idx, mad_hdr->base_version,
+				     mad_hdr->mgmt_class,
+				     mad_hdr->class_version, mad_hdr->method,
+				     mad_hdr->status, mad_hdr->class_specific,
+				     mad_hdr->tid, mad_hdr->attr_id,
+				     mad_hdr->resv,
+				     mad_hdr->attr_mod);
 		}
 		for (j = 0; j < send_wr->num_sge; j++) {
 			u8 *data = (u8 *) abs_to_virt(sge->addr);
-			EDEB(4, "send_wr#%x sge#%x addr=%p length=%x lkey=%x",
-			     idx, j, data, sge->length, sge->lkey);
+			ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
+				     "lkey=%x",
+				     idx, j, data, sge->length, sge->lkey);
 			/* assume length is n*16 */
-			EDEB_DMP(4, data, sge->length, "send_wr#%x sge#%x",
+			ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
 				 idx, j);
 			sge++;
 		} /* eof for j */
@@ -139,7 +142,7 @@ static inline int ehca_write_swqe(struct
 
 	if (unlikely((send_wr->num_sge < 0) ||
 		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
-		EDEB_ERR(4, "Invalid number of WQE SGE. "
+		ehca_gen_err("Invalid number of WQE SGE. "
 			 "num_sqe=%x max_nr_of_sg=%x",
 			 send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
 		return -EINVAL; /* invalid SG list length */
@@ -148,7 +151,7 @@ static inline int ehca_write_swqe(struct
 	/* clear wqe header until sglist */
 	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
 
-	wqe_p->work_request_id = be64_to_cpu(send_wr->wr_id);
+	wqe_p->work_request_id = send_wr->wr_id;
 
 	switch (send_wr->opcode) {
 	case IB_WR_SEND:
@@ -163,7 +166,7 @@ static inline int ehca_write_swqe(struct
 		wqe_p->optype = WQE_OPTYPE_RDMAREAD;
 		break;
 	default:
-		EDEB_ERR(4, "Invalid opcode=%x", send_wr->opcode);
+		ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
 		return -EINVAL; /* invalid opcode */
 	}
 
@@ -177,7 +180,7 @@ static inline int ehca_write_swqe(struct
 	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
 	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		/* this might not work as long as HW does not support it */
-		wqe_p->immediate_data = send_wr->imm_data;
+		wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
 		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
 	}
 
@@ -192,32 +195,32 @@ static inline int ehca_write_swqe(struct
 		if (send_wr->wr.ud.remote_qkey & 0x80000000)
 			remote_qkey = qp->qkey;
 
-		wqe_p->destination_qp_number =
-		    ntohl(send_wr->wr.ud.remote_qpn << 8);
-		wqe_p->local_ee_context_qkey = ntohl(remote_qkey);
+		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
+		wqe_p->local_ee_context_qkey = remote_qkey;
 		if (!send_wr->wr.ud.ah) {
-			EDEB_ERR(4, "wr.ud.ah is NULL. qp=%p", qp);
+			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
 			return -EINVAL;
 		}
 		my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
 		wqe_p->u.ud_av.ud_av = my_av->av;
 
-		/* omitted check of IB_SEND_INLINE
-		   since HW does not support it */
+		/*
+		 * omitted check of IB_SEND_INLINE
+		 * since HW does not support it
+		 */
 		for (idx = 0; idx < send_wr->num_sge; idx++) {
 			wqe_p->u.ud_av.sg_list[idx].vaddr =
-			    be64_to_cpu(send_wr->sg_list[idx].addr);
+				send_wr->sg_list[idx].addr;
 			wqe_p->u.ud_av.sg_list[idx].lkey =
-			    ntohl(send_wr->sg_list[idx].lkey);
+				send_wr->sg_list[idx].lkey;
 			wqe_p->u.ud_av.sg_list[idx].length =
-			    ntohl(send_wr->sg_list[idx].length);
+				send_wr->sg_list[idx].length;
 		} /* eof for idx */
 		if (qp->qp_type == IB_QPT_SMI ||
 		    qp->qp_type == IB_QPT_GSI)
 			wqe_p->u.ud_av.ud_av.pmtu = 1;
 		if (qp->qp_type == IB_QPT_GSI) {
-			wqe_p->pkeyi =
-			    ntohs(send_wr->wr.ud.pkey_index);
+			wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
 #ifdef DEBUG_GSI_SEND_WR
 			trace_send_wr_ud(send_wr);
 #endif /* DEBUG_GSI_SEND_WR */
@@ -231,39 +234,40 @@ static inline int ehca_write_swqe(struct
 	case IB_QPT_RC:
 		/* TODO: atomic not implemented */
 		wqe_p->u.nud.remote_virtual_adress =
-		    be64_to_cpu(send_wr->wr.rdma.remote_addr);
-		wqe_p->u.nud.rkey = ntohl(send_wr->wr.rdma.rkey);
+			send_wr->wr.rdma.remote_addr;
+		wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;
 
-		/* omitted checking of IB_SEND_INLINE
-		   since HW does not support it */
+		/*
+		 * omitted checking of IB_SEND_INLINE
+		 * since HW does not support it
+		 */
 		dma_length = 0;
 		for (idx = 0; idx < send_wr->num_sge; idx++) {
 			wqe_p->u.nud.sg_list[idx].vaddr =
-			    be64_to_cpu(send_wr->sg_list[idx].addr);
+				send_wr->sg_list[idx].addr;
 			wqe_p->u.nud.sg_list[idx].lkey =
-			    ntohl(send_wr->sg_list[idx].lkey);
+				send_wr->sg_list[idx].lkey;
 			wqe_p->u.nud.sg_list[idx].length =
-			    ntohl(send_wr->sg_list[idx].length);
+				send_wr->sg_list[idx].length;
 			dma_length += send_wr->sg_list[idx].length;
 		} /* eof idx */
-		wqe_p->u.nud.atomic_1st_op_dma_len = be64_to_cpu(dma_length);
+		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
 
 		break;
 
 	default:
-		EDEB_ERR(4, "Invalid qptype=%x", qp->qp_type);
+		ehca_gen_err("Invalid qptype=%x", qp->qp_type);
 		return -EINVAL;
 	}
 
-	if (IS_EDEB_ON(7)) {
-		EDEB(7, "SEND WQE written into queue qp=%p ", qp);
-		EDEB_DMP(7, wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
+	if (ehca_debug_level) {
+		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
+		ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
 	}
 	return 0;
 }
 
-/** map_ib_wc_status - convert raw cqe_status to ib_wc_status
- */
+/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
 static inline void map_ib_wc_status(u32 cqe_status,
 				    enum ib_wc_status *wc_status)
 {
@@ -296,8 +300,10 @@ static inline void map_ib_wc_status(u32 
 			switch ((cqe_status
 				 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
 			case 0x0:
-				/* PSN Sequence Error!
-				   couldn't find a matching status! */
+				/*
+				 * PSN Sequence Error!
+				 * couldn't find a matching status!
+				 */
 				*wc_status = IB_WC_GENERAL_ERR;
 				break;
 			case 0x1:
@@ -351,19 +357,12 @@ int ehca_post_send(struct ib_qp *qp,
 		   struct ib_send_wr *send_wr,
 		   struct ib_send_wr **bad_send_wr)
 {
-	struct ehca_qp *my_qp = NULL;
-	struct ib_send_wr *cur_send_wr = NULL;
-	struct ehca_wqe *wqe_p = NULL;
+	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+	struct ib_send_wr *cur_send_wr;
+	struct ehca_wqe *wqe_p;
 	int wqe_cnt = 0;
 	int ret = 0;
-	unsigned long spl_flags = 0;
-
-	EHCA_CHECK_ADR(qp);
-	my_qp = container_of(qp, struct ehca_qp, ib_qp);
-	EHCA_CHECK_QP(my_qp);
-	EHCA_CHECK_ADR(send_wr);
-	EDEB_EN(7, "ehca_qp=%p qp_num=%x send_wr=%p bad_send_wr=%p",
-		my_qp, qp->qp_num, send_wr, bad_send_wr);
+	unsigned long spl_flags;
 
 	/* LOCK the QUEUE */
 	spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
@@ -380,29 +379,30 @@ int ehca_post_send(struct ib_qp *qp,
 				*bad_send_wr = cur_send_wr;
 			if (wqe_cnt == 0) {
 				ret = -ENOMEM;
-				EDEB_ERR(4, "Too many posted WQEs qp_num=%x",
-					 qp->qp_num);
+				ehca_err(qp->device, "Too many posted WQEs "
+					 "qp_num=%x", qp->qp_num);
 			}
 			goto post_send_exit0;
 		}
 		/* write a SEND WQE into the QUEUE */
 		ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
-		/* if something failed,
-		   reset the free entry pointer to the start value
-		*/
+		/*
+		 * if something failed,
+		 * reset the free entry pointer to the start value
+		 */
 		if (unlikely(ret)) {
 			my_qp->ipz_squeue.current_q_offset = start_offset;
 			*bad_send_wr = cur_send_wr;
 			if (wqe_cnt == 0) {
 				ret = -EINVAL;
-				EDEB_ERR(4, "Could not write WQE qp_num=%x",
-					 qp->qp_num);
+				ehca_err(qp->device, "Could not write WQE "
+					 "qp_num=%x", qp->qp_num);
 			}
 			goto post_send_exit0;
 		}
 		wqe_cnt++;
-		EDEB(7, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
-		     my_qp, qp->qp_num, wqe_cnt);
+		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+			 my_qp, qp->qp_num, wqe_cnt);
 	} /* eof for cur_send_wr */
 
 post_send_exit0:
@@ -410,8 +410,6 @@ post_send_exit0:
 	spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
 	iosync(); /* serialize GAL register access */
 	hipz_update_sqa(my_qp, wqe_cnt);
-	EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x wqe_cnt=%d",
-		my_qp, qp->qp_num, ret, wqe_cnt);
 	return ret;
 }
 
@@ -419,19 +417,12 @@ int ehca_post_recv(struct ib_qp *qp,
 		   struct ib_recv_wr *recv_wr,
 		   struct ib_recv_wr **bad_recv_wr)
 {
-	struct ehca_qp *my_qp = NULL;
-	struct ib_recv_wr *cur_recv_wr = NULL;
-	struct ehca_wqe *wqe_p = NULL;
+	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+	struct ib_recv_wr *cur_recv_wr;
+	struct ehca_wqe *wqe_p;
 	int wqe_cnt = 0;
 	int ret = 0;
-	unsigned long spl_flags = 0;
-
-	EHCA_CHECK_ADR(qp);
-	my_qp = container_of(qp, struct ehca_qp, ib_qp);
-	EHCA_CHECK_QP(my_qp);
-	EHCA_CHECK_ADR(recv_wr);
-	EDEB_EN(7, "ehca_qp=%p qp_num=%x recv_wr=%p bad_recv_wr=%p",
-		my_qp, qp->qp_num, recv_wr, bad_recv_wr);
+	unsigned long spl_flags;
 
 	/* LOCK the QUEUE */
 	spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);
@@ -448,29 +439,29 @@ int ehca_post_recv(struct ib_qp *qp,
 				*bad_recv_wr = cur_recv_wr;
 			if (wqe_cnt == 0) {
 				ret = -ENOMEM;
-				EDEB_ERR(4, "Too many posted WQEs qp_num=%x",
-					 qp->qp_num);
+				ehca_err(qp->device, "Too many posted WQEs "
+					 "qp_num=%x", qp->qp_num);
 			}
 			goto post_recv_exit0;
 		}
 		/* write a RECV WQE into the QUEUE */
-		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p,
-					  cur_recv_wr);
-		/* if something failed,
-		   reset the free entry pointer to the start value
-		*/
+		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
+		/*
+		 * if something failed,
+		 * reset the free entry pointer to the start value
+		 */
 		if (unlikely(ret)) {
 			my_qp->ipz_rqueue.current_q_offset = start_offset;
 			*bad_recv_wr = cur_recv_wr;
 			if (wqe_cnt == 0) {
 				ret = -EINVAL;
-				EDEB_ERR(4, "Could not write WQE qp_num=%x",
-					 qp->qp_num);
+				ehca_err(qp->device, "Could not write WQE "
+					 "qp_num=%x", qp->qp_num);
 			}
 			goto post_recv_exit0;
 		}
 		wqe_cnt++;
-		EDEB(7, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+		ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
 		     my_qp, qp->qp_num, wqe_cnt);
 	} /* eof for cur_recv_wr */
 
@@ -478,13 +469,11 @@ post_recv_exit0:
 	spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
 	iosync(); /* serialize GAL register access */
 	hipz_update_rqa(my_qp, wqe_cnt);
-	EDEB_EX(7, "ehca_qp=%p qp_num=%x ret=%x wqe_cnt=%d",
-		my_qp, qp->qp_num, ret, wqe_cnt);
 	return ret;
 }
 
-/**
- * ib_wc_opcode - Table converts ehca wc opcode to ib
+/*
+ * ib_wc_opcode table converts ehca wc opcode to ib
  * Since we use zero to indicate invalid opcode, the actual ib opcode must
  * be decremented!!!
  */
@@ -499,37 +488,37 @@ static const u8 ib_wc_opcode[255] = {
 	[0x80] = IB_WC_SEND+1
 };
 
-/**
- * internal function to poll one entry of cq
- */
+/* internal function to poll one entry of cq */
 static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
 {
 	int ret = 0;
 	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	struct ehca_cqe *cqe = NULL;
+	struct ehca_cqe *cqe;
 	int cqe_count = 0;
 
-	EDEB_EN(7, "ehca_cq=%p cq_num=%x wc=%p", my_cq, my_cq->cq_number, wc);
-
 poll_cq_one_read_cqe:
 	cqe = (struct ehca_cqe *)
 		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
 	if (!cqe) {
 		ret = -EAGAIN;
-		EDEB(7, "Completion queue is empty ehca_cq=%p cq_num=%x "
-		     "ret=%x", my_cq, my_cq->cq_number, ret);
+		ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
+			 "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret);
 		goto  poll_cq_one_exit0;
 	}
+
+	/* prevents loads being reordered across this point */
+	rmb();
+
 	cqe_count++;
 	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
 		struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
-		int purgeflag = 0;
-		unsigned long spl_flags = 0;
+		int purgeflag;
+		unsigned long spl_flags;
 		if (!qp) {
-			EDEB_ERR(4, "cq_num=%x qp_num=%x "
+			ehca_err(cq->device, "cq_num=%x qp_num=%x "
 				 "could not find qp -> ignore cqe",
 				 my_cq->cq_number, cqe->local_qp_number);
-			EDEB_DMP(4, cqe, 64, "cq_num=%x qp_num=%x",
+			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
 				 my_cq->cq_number, cqe->local_qp_number);
 			/* ignore this purged cqe */
 			goto poll_cq_one_read_cqe;
@@ -539,25 +528,32 @@ poll_cq_one_read_cqe:
 		spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);
 
 		if (purgeflag) {
-			EDEB(6, "Got CQE with purged bit qp_num=%x src_qp=%x",
-			     cqe->local_qp_number, cqe->remote_qp_number);
-			EDEB_DMP(6, cqe, 64, "qp_num=%x src_qp=%x",
+			ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
+				 "src_qp=%x",
 				 cqe->local_qp_number, cqe->remote_qp_number);
-			/* ignore this to avoid double cqes of bad wqe
-			   that caused sqe and turn off purge flag */
+			if (ehca_debug_level)
+				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
+					 cqe->local_qp_number,
+					 cqe->remote_qp_number);
+			/*
+			 * ignore this to avoid double cqes of bad wqe
+			 * that caused sqe and turn off purge flag
+			 */
 			qp->sqerr_purgeflag = 0;
 			goto poll_cq_one_read_cqe;
 		}
 	}
 
 	/* tracing cqe */
-	if (IS_EDEB_ON(7)) {
-		EDEB(7, "Received COMPLETION ehca_cq=%p cq_num=%x -----",
-		     my_cq, my_cq->cq_number);
-		EDEB_DMP(7, cqe, 64, "ehca_cq=%p cq_num=%x",
+	if (ehca_debug_level) {
+		ehca_dbg(cq->device,
+			 "Received COMPLETION ehca_cq=%p cq_num=%x -----",
+			 my_cq, my_cq->cq_number);
+		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
+			 my_cq, my_cq->cq_number);
+		ehca_dbg(cq->device,
+			 "ehca_cq=%p cq_num=%x -------------------------",
 			 my_cq, my_cq->cq_number);
-		EDEB(7, "ehca_cq=%p cq_num=%x -------------------------",
-		     my_cq, my_cq->cq_number);
 	}
 
 	/* we got a completion! */
@@ -566,76 +562,63 @@ poll_cq_one_read_cqe:
 	/* eval ib_wc_opcode */
 	wc->opcode = ib_wc_opcode[cqe->optype]-1;
 	if (unlikely(wc->opcode == -1)) {
-		EDEB_ERR(4, "Invalid cqe->OPType=%x cqe->status=%x "
+		ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
 			 "ehca_cq=%p cq_num=%x",
 			 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
 		/* dump cqe for other infos */
-		EDEB_DMP(4, cqe, 64, "ehca_cq=%p cq_num=%x",
+		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
 			 my_cq, my_cq->cq_number);
 		/* update also queue adder to throw away this entry!!! */
 		goto poll_cq_one_exit0;
 	}
 	/* eval ib_wc_status */
-	if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) { /* complete with errors */
+	if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
+		/* complete with errors */
 		map_ib_wc_status(cqe->status, &wc->status);
 		wc->vendor_err = wc->status;
 	} else
 		wc->status = IB_WC_SUCCESS;
 
 	wc->qp_num = cqe->local_qp_number;
-	wc->byte_len = ntohl(cqe->nr_bytes_transferred);
+	wc->byte_len = cqe->nr_bytes_transferred;
 	wc->pkey_index = cqe->pkey_index;
 	wc->slid = cqe->rlid;
 	wc->dlid_path_bits = cqe->dlid;
 	wc->src_qp = cqe->remote_qp_number;
 	wc->wc_flags = cqe->w_completion_flags;
-	wc->imm_data = cqe->immediate_data;
+	wc->imm_data = cpu_to_be32(cqe->immediate_data);
 	wc->sl = cqe->service_level;
 
 	if (wc->status != IB_WC_SUCCESS)
-		EDEB(6, "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
-		     "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx cqe=%p",
-		     my_cq, my_cq->cq_number, cqe->optype, cqe->status,
-		     cqe->local_qp_number, cqe->remote_qp_number,
-		     cqe->work_request_id, cqe);
+		ehca_dbg(cq->device,
+			 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
+			 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
+			 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
+			 cqe->status, cqe->local_qp_number,
+			 cqe->remote_qp_number, cqe->work_request_id, cqe);
 
 poll_cq_one_exit0:
 	if (cqe_count > 0)
 		hipz_update_feca(my_cq, cqe_count);
 
-	EDEB_EX(7, "ret=%x ehca_cq=%p cq_number=%x wc=%p "
-		"status=%x opcode=%x qp_num=%x byte_len=%x",
-		ret, my_cq, my_cq->cq_number, wc, wc->status,
-		wc->opcode, wc->qp_num, wc->byte_len);
-
 	return ret;
 }
 
 int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 {
-	struct ehca_cq *my_cq = NULL;
-	int nr = 0;
-	struct ib_wc *current_wc = NULL;
+	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+	int nr;
+	struct ib_wc *current_wc = wc;
 	int ret = 0;
-	unsigned long spl_flags = 0;
-
-	EHCA_CHECK_CQ(cq);
-	EHCA_CHECK_ADR(wc);
-
-	my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	EHCA_CHECK_CQ(my_cq);
-
-	EDEB_EN(7, "ehca_cq=%p cq_num=%x num_entries=%d wc=%p",
-		my_cq, my_cq->cq_number, num_entries, wc);
+	unsigned long spl_flags;
 
 	if (num_entries < 1) {
-		EDEB_ERR(4, "Invalid num_entries=%d ehca_cq=%p cq_num=%x",
-			 num_entries, my_cq, my_cq->cq_number);
+		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
+			 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
 		ret = -EINVAL;
 		goto poll_cq_exit0;
 	}
 
-	current_wc = wc;
 	spin_lock_irqsave(&my_cq->spinlock, spl_flags);
 	for (nr = 0; nr < num_entries; nr++) {
 		ret = ehca_poll_cq_one(cq, current_wc);
@@ -648,22 +631,12 @@ int ehca_poll_cq(struct ib_cq *cq, int n
 		ret = nr;
 
 poll_cq_exit0:
-	EDEB_EX(7, "ehca_cq=%p cq_num=%x ret=%x wc=%p nr_entries=%d",
-		my_cq, my_cq->cq_number, ret, wc, nr);
-
 	return ret;
 }
 
 int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
 {
-	struct ehca_cq *my_cq = NULL;
-	int ret = 0;
-
-	EHCA_CHECK_CQ(cq);
-	my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	EHCA_CHECK_CQ(my_cq);
-	EDEB_EN(7, "ehca_cq=%p cq_num=%x cq_notif=%x",
-		my_cq, my_cq->cq_number, cq_notify);
+	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
 
 	switch (cq_notify) {
 	case IB_CQ_SOLICITED:
@@ -676,8 +649,5 @@ int ehca_req_notify_cq(struct ib_cq *cq,
 		return -EINVAL;
 	}
 
-	EDEB_EX(7, "ehca_cq=%p cq_num=%x ret=%x",
-		my_cq, my_cq->cq_number, ret);
-
-	return ret;
+	return 0;
 }
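
Most of the ehca_reqs.c delta above removes byte swaps
(be64_to_cpu/ntohl) on WQE fields that are kept in CPU byte order, while
immediate data keeps an explicit cpu_to_be32/be32_to_cpu pair because the
verbs interface hands imm_data around in network byte order.  The old
spurious swaps only appeared to work because ppc64 is big endian, where
these conversions are no-ops.  A quick userspace illustration of the
convention, with htonl/ntohl standing in for cpu_to_be32/be32_to_cpu:

#include <arpa/inet.h>	/* htonl/ntohl */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cpu_val = 0x11223344;		/* CPU byte order */
	uint32_t wire_val = htonl(cpu_val);	/* big endian, as on the wire */

	/* On a big-endian host (the ppc64 boxes eHCA lives in) htonl() is
	 * the identity, so a spurious swap goes unnoticed there; on a
	 * little-endian host it would corrupt every field that is really
	 * kept in CPU order. */
	printf("cpu=%08x wire=%08x back=%08x\n",
	       cpu_val, wire_val, ntohl(wire_val));
	return 0;
}
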
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_sqp.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_sqp.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_sqp.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_sqp.c	2006-10-17 10:15:06.000000000 -0400
@@ -40,8 +40,6 @@
  */
 
 
-#define DEB_PREFIX "e_qp"
-
 #include <linux/module.h>
 #include <linux/err.h>
 #include "ehca_classes.h"
@@ -51,11 +49,6 @@
 #include "hcp_if.h"
 
 
-extern int ehca_create_aqp1(struct ehca_shca *shca, struct ehca_sport *sport);
-extern int ehca_destroy_aqp1(struct ehca_sport *sport);
-
-extern int ehca_port_act_time;
-
 /**
  * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
  * pair is created successfully, the corresponding port gets active.
@@ -69,15 +62,10 @@ u64 ehca_define_sqp(struct ehca_shca *sh
 		    struct ehca_qp *ehca_qp,
 		    struct ib_qp_init_attr *qp_init_attr)
 {
-
-	u32 pma_qp_nr = 0;
-	u32 bma_qp_nr = 0;
-	u64 ret = H_SUCCESS;
+	u32 pma_qp_nr, bma_qp_nr;
+	u64 ret;
 	u8 port = qp_init_attr->port_num;
-	int counter = 0;
-
-	EDEB_EN(7, "port=%x qp_type=%x",
-		port, qp_init_attr->qp_type);
+	int counter;
 
 	shca->sport[port - 1].port_state = IB_PORT_DOWN;
 
@@ -93,31 +81,31 @@ u64 ehca_define_sqp(struct ehca_shca *sh
 					 &pma_qp_nr, &bma_qp_nr);
 
 		if (ret != H_SUCCESS) {
-			EDEB_ERR(4, "Can't define AQP1 for port %x. rc=%lx",
-				    port, ret);
-			goto ehca_define_aqp1;
+			ehca_err(&shca->ib_device,
+				 "Can't define AQP1 for port %x. rc=%lx",
+				 port, ret);
+			return ret;
 		}
 		break;
 	default:
-		ret = H_PARAMETER;
-		goto ehca_define_aqp1;
+		ehca_err(&shca->ib_device, "invalid qp_type=%x",
+			 qp_init_attr->qp_type);
+		return H_PARAMETER;
 	}
 
-	while ((shca->sport[port - 1].port_state != IB_PORT_ACTIVE) &&
-	       (counter < ehca_port_act_time)) {
-		EDEB(6, "... wait until port %x is active",
-			port);
+	for (counter = 0;
+	     shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
+		     counter < ehca_port_act_time;
+	     counter++) {
+		ehca_dbg(&shca->ib_device, "... wait until port %x is active",
+			 port);
 		msleep_interruptible(1000);
-		counter++;
 	}
 
 	if (counter == ehca_port_act_time) {
-		EDEB_ERR(4, "Port %x is not active.", port);
-		ret = H_HARDWARE;
+		ehca_err(&shca->ib_device, "Port %x is not active.", port);
+		return H_HARDWARE;
 	}
 
-ehca_define_aqp1:
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return H_SUCCESS;
 }
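
The ehca_define_sqp() change folds the port-activation wait into one
bounded for loop: poll the condition up to ehca_port_act_time times,
sleep between attempts, and treat an exhausted counter as failure.  A
self-contained sketch of the same pattern; here port_is_active() is a
hypothetical stand-in for the sport state check and nanosleep() for
msleep_interruptible():

#include <stdio.h>
#include <time.h>

static int port_is_active(void)		/* hypothetical condition */
{
	static int calls;
	return ++calls >= 3;		/* pretend it goes active on try 3 */
}

int main(void)
{
	const int max_tries = 10;	/* analogous to ehca_port_act_time */
	int counter;

	for (counter = 0; !port_is_active() && counter < max_tries; counter++) {
		struct timespec ts = { 0, 100 * 1000 * 1000 };	/* 100ms */
		nanosleep(&ts, NULL);	/* stand-in for msleep_interruptible */
	}

	if (counter == max_tries) {	/* budget exhausted, never active */
		fprintf(stderr, "port never went active\n");
		return 1;
	}
	printf("active after %d wait(s)\n", counter);
	return 0;
}
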
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_tools.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_tools.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_tools.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_tools.h	2006-10-19 12:41:11.000000000 -0400
@@ -55,345 +55,105 @@
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
 #include <linux/version.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
 
 #include <asm/abs_addr.h>
 #include <asm/ibmebus.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 
-#define EHCA_EDEB_TRACE_MASK_SIZE 32
-extern u8 ehca_edeb_mask[EHCA_EDEB_TRACE_MASK_SIZE];
-#define EDEB_ID_TO_U32(str4) (str4[3] | (str4[2] << 8) | (str4[1] << 16) | \
-			      (str4[0] << 24))
+extern int ehca_debug_level;
 
-static inline u64 ehca_edeb_filter(const u32 level,
-				   const u32 id, const u32 line)
-{
-	u64 ret = 0;
-	u32 filenr = 0;
-	u32 filter_level = 9;
-	u32 dynamic_level = 0;
-
-	/* This is code written for the gcc -O2 optimizer which should colapse
-	 * to two single ints filter_level is the first level kicked out by
-	 * compiler means trace everythin below 6. */
-	if (id == EDEB_ID_TO_U32("ehav")) {
-		filenr = 0x01;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("clas")) {
-		filenr = 0x02;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("cqeq")) {
-		filenr = 0x03;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("shca")) {
-		filenr = 0x05;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("eirq")) {
-		filenr = 0x06;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("lMad")) {
-		filenr = 0x07;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("mcas")) {
-		filenr = 0x08;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("mrmw")) {
-		filenr = 0x09;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("vpd ")) {
-		filenr = 0x0a;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("e_qp")) {
-		filenr = 0x0b;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("uqes")) {
-		filenr = 0x0c;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("PHYP")) {
-		filenr = 0x0d;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("hcpi")) {
-		filenr = 0x0e;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("iptz")) {
-		filenr = 0x0f;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("spta")) {
-		filenr = 0x10;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("simp")) {
-		filenr = 0x11;
-		filter_level = 8;
-	}
-	if (id == EDEB_ID_TO_U32("reqs")) {
-		filenr = 0x12;
-		filter_level = 8;
-	}
-
-	if ((filenr - 1) > sizeof(ehca_edeb_mask)) {
-		filenr = 0;
-	}
-
-	if (filenr == 0) {
-		filter_level = 9;
-	} /* default */
-	ret = filenr * 0x10000 + line;
-	if (filter_level <= level) {
-		return ret | 0x100000000L; /* this is the flag to not trace */
-	}
-	dynamic_level = ehca_edeb_mask[filenr];
-	if (likely(dynamic_level <= level)) {
-		ret = ret | 0x100000000L;
-	};
-	return ret;
-}
-
-#ifdef EHCA_USE_HCALL_KERNEL
-#ifdef CONFIG_PPC_PSERIES
-
-#include <asm/paca.h>
-
-/**
- * IS_EDEB_ON - Checks if debug is on for the given level.
- */
-#define IS_EDEB_ON(level) \
-    ((ehca_edeb_filter(level, EDEB_ID_TO_U32(DEB_PREFIX), __LINE__) & 0x100000000L)==0)
+#define ehca_dbg(ib_dev, format, arg...) \
+	do { \
+		if (unlikely(ehca_debug_level)) \
+			dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
+				   "PU%04x EHCA_DBG:%s " format "\n", \
+				   get_paca()->paca_index, __FUNCTION__, \
+				   ## arg); \
+	} while (0)
 
-#define EDEB_P_GENERIC(level,idstring,format,args...) \
-do { \
-	u64 ehca_edeb_filterresult =					\
-		ehca_edeb_filter(level, EDEB_ID_TO_U32(DEB_PREFIX), __LINE__);\
-	if ((ehca_edeb_filterresult & 0x100000000L) == 0)		\
-		printk("PU%04x %08x:%s " idstring " "format "\n",	\
-		       get_paca()->paca_index, (u32)(ehca_edeb_filterresult), \
-		       __func__,  ##args);				\
-} while (1 == 0)
-
-#elif REAL_HCALL
-
-#define EDEB_P_GENERIC(level,idstring,format,args...) \
-do { \
-	u64 ehca_edeb_filterresult =					\
-		ehca_edeb_filter(level, EDEB_ID_TO_U32(DEB_PREFIX), __LINE__); \
-	if ((ehca_edeb_filterresult & 0x100000000L) == 0)		\
-		printk("%08x:%s " idstring " "format "\n",	\
-			(u32)(ehca_edeb_filterresult), \
-			__func__,  ##args); \
-} while (1 == 0)
-
-#endif
-#else
-
-#define IS_EDEB_ON(level) (1)
-
-#define EDEB_P_GENERIC(level,idstring,format,args...) \
-do { \
-	printk("%s " idstring " "format "\n",	\
-	       __func__,  ##args);		\
-} while (1 == 0)
+#define ehca_info(ib_dev, format, arg...) \
+	dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
+		 get_paca()->paca_index, __FUNCTION__, ## arg)
+
+#define ehca_warn(ib_dev, format, arg...) \
+	dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
+		 get_paca()->paca_index, __FUNCTION__, ## arg)
+
+#define ehca_err(ib_dev, format, arg...) \
+	dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
+		get_paca()->paca_index, __FUNCTION__, ## arg)
+
+/* use this one only if no ib_dev available */
+#define ehca_gen_dbg(format, arg...) \
+	do { \
+		if (unlikely(ehca_debug_level)) \
+			printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\
+			       get_paca()->paca_index, __FUNCTION__, ## arg); \
+	} while (0)
 
-#endif
+#define ehca_gen_warn(format, arg...) \
+	do { \
+		if (unlikely(ehca_debug_level)) \
+			printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\
+			       get_paca()->paca_index, __FUNCTION__, ## arg); \
+	} while (0)
 
-/**
- * EDEB - Trace output macro.
- * @level tracelevel
- * @format optional format string, use "" if not desired
- * @args printf like arguments for trace, use %Lx for u64, %x for u32
- *       %p for pointer
- */
-#define EDEB(level,format,args...) \
-	EDEB_P_GENERIC(level,"",format,##args)
-#define EDEB_ERR(level,format,args...) \
-	EDEB_P_GENERIC(level,"HCAD_ERROR ",format,##args)
-#define EDEB_EN(level,format,args...) \
-	EDEB_P_GENERIC(level,">>>",format,##args)
-#define EDEB_EX(level,format,args...) \
-	EDEB_P_GENERIC(level,"<<<",format,##args)
+#define ehca_gen_err(format, arg...) \
+	printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
+		get_paca()->paca_index, __FUNCTION__, ## arg)
 
 /**
- * EDEB macro to dump a memory block, whose length is n*8 bytes.
+ * ehca_dmp - printk a memory block, whose length is n*8 bytes.
  * Each line has the following layout:
  * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
  */
-#define EDEB_DMP(level,adr,len,format,args...) \
+#define ehca_dmp(adr, len, format, args...) \
 	do {				       \
 		unsigned int x;			      \
 		unsigned int l = (unsigned int)(len); \
 		unsigned char *deb = (unsigned char*)(adr);	\
 		for (x = 0; x < l; x += 16) { \
-		        EDEB(level, format " adr=%p ofs=%04x %016lx %016lx", \
-			     ##args, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
+			printk("EHCA_DMP:%s " format \
+			       " adr=%p ofs=%04x %016lx %016lx\n", \
+			       __FUNCTION__, ##args, deb, x, \
+			       *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
 			deb += 16; \
 		} \
 	} while (0)
 
 /* define a bitmask, little endian version */
 #define EHCA_BMASK(pos,length) (((pos)<<16)+(length))
+
 /* define a bitmask, the ibm way... */
 #define EHCA_BMASK_IBM(from,to) (((63-to)<<16)+((to)-(from)+1))
+
 /* internal function, don't use */
 #define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff)
+
 /* internal function, don't use */
 #define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff))
-/* return value shifted and masked by mask\n
- * variable|=HCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable\n
- * variable&=~HCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
+
+/**
+ * EHCA_BMASK_SET - return value shifted and masked by mask
+ * variable|=EHCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable
+ * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
  * in variable
  */
 #define EHCA_BMASK_SET(mask,value) \
 	((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask))
-/* extract a parameter from value by mask\n
- * param=EHCA_BMASK_GET(MY_MASK,value)
- */
-#define EHCA_BMASK_GET(mask,value) \
-	( EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))
-
-#define PARANOIA_MODE
-#ifdef PARANOIA_MODE
-
-#define EHCA_CHECK_ADR_P(adr)					\
-	if (unlikely(adr == 0)) {					\
-		EDEB_ERR(4, "adr=%p check failed line %i", adr,	\
-			 __LINE__);				\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_ADR(adr)					\
-	if (unlikely(adr == 0)) {					\
-		EDEB_ERR(4, "adr=%p check failed line %i", adr,	\
-			 __LINE__);				\
-		return -EFAULT; }
-
-#define EHCA_CHECK_DEVICE_P(device)				\
-	if (unlikely(device == 0)) {				\
-		EDEB_ERR(4, "device=%p check failed", device);	\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_DEVICE(device)				\
-	if (unlikely(device == 0)) {				\
-		EDEB_ERR(4, "device=%p check failed", device);	\
-		return -EFAULT; }
-
-#define EHCA_CHECK_PD(pd)				\
-	if (unlikely(pd == 0)) {				\
-		EDEB_ERR(4, "pd=%p check failed", pd);	\
-		return -EFAULT; }
-
-#define EHCA_CHECK_PD_P(pd)				\
-	if (unlikely(pd == 0)) {				\
-		EDEB_ERR(4, "pd=%p check failed", pd);	\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_AV(av)				\
-	if (unlikely(av == 0)) {				\
-		EDEB_ERR(4, "av=%p check failed", av);	\
-		return -EFAULT; }
-
-#define EHCA_CHECK_AV_P(av)				\
-	if (unlikely(av == 0)) {				\
-		EDEB_ERR(4, "av=%p check failed", av);	\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_CQ(cq)				\
-	if (unlikely(cq == 0)) {				\
-		EDEB_ERR(4, "cq=%p check failed", cq);	\
-		return -EFAULT; }
-
-#define EHCA_CHECK_CQ_P(cq)				\
-	if (unlikely(cq == 0)) {				\
-		EDEB_ERR(4, "cq=%p check failed", cq);	\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_EQ(eq)				\
-	if (unlikely(eq == 0)) {				\
-		EDEB_ERR(4, "eq=%p check failed", eq);	\
-		return -EFAULT; }
-
-#define EHCA_CHECK_EQ_P(eq)				\
-	if (unlikely(eq == 0)) {				\
-		EDEB_ERR(4, "eq=%p check failed", eq);	\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_QP(qp)				\
-	if (unlikely(qp == 0)) {				\
-		EDEB_ERR(4, "qp=%p check failed", qp);	\
-		return -EFAULT; }
-
-#define EHCA_CHECK_QP_P(qp)				\
-	if (unlikely(qp == 0)) {				\
-		EDEB_ERR(4, "qp=%p check failed", qp);	\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_MR(mr)				\
-	if (unlikely(mr == 0)) {				\
-		EDEB_ERR(4, "mr=%p check failed", mr);	\
-		return -EFAULT; }
-
-#define EHCA_CHECK_MR_P(mr)				\
-	if (unlikely(mr == 0)) {				\
-		EDEB_ERR(4, "mr=%p check failed", mr);	\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_MW(mw)				\
-	if (unlikely(mw == 0)) {				\
-		EDEB_ERR(4, "mw=%p check failed", mw);	\
-		return -EFAULT; }
-
-#define EHCA_CHECK_MW_P(mw)				\
-	if (unlikely(mw == 0)) {				\
-		EDEB_ERR(4, "mw=%p check failed", mw);	\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_CHECK_FMR(fmr)					\
-	if (unlikely(fmr == 0)) {					\
-		EDEB_ERR(4, "fmr=%p check failed", fmr);	\
-		return -EFAULT; }
-
-#define EHCA_CHECK_FMR_P(fmr)					\
-	if (unlikely(fmr == 0)) {					\
-		EDEB_ERR(4, "fmr=%p check failed", fmr);	\
-		return ERR_PTR(-EFAULT); }
-
-#define EHCA_REGISTER_PD(device,pd)
-#define EHCA_REGISTER_AV(pd,av)
-#define EHCA_DEREGISTER_PD(PD)
-#define EHCA_DEREGISTER_AV(av)
-#else
-#define EHCA_CHECK_DEVICE_P(device)
-
-#define EHCA_CHECK_PD(pd)
-#define EHCA_REGISTER_PD(device,pd)
-#define EHCA_DEREGISTER_PD(PD)
-#endif
 
 /**
- * ehca_adr_bad - Handle to be used for adress translation mechanisms,
- * currently a placeholder.
+ * EHCA_BMASK_GET - extract a parameter from value by mask
  */
-static inline int ehca_adr_bad(void *adr)
-{
-	return !adr;
-}
+#define EHCA_BMASK_GET(mask,value) \
+	(EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))
 
-/**
- * ehca2ib_return_code - Returns ib return code corresponding to the given
- * ehca return code.
- */
+
+/* Converts ehca to ib return code */
 static inline int ehca2ib_return_code(u64 ehca_rc)
 {
 	switch (ehca_rc) {
@@ -408,4 +168,5 @@ static inline int ehca2ib_return_code(u6
 	}
 }
 
+
 #endif /* EHCA_TOOLS_H */
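
The EHCA_BMASK family kept in ehca_tools.h packs a shift position and a
field width into one integer so that SET/GET can be written generically;
EHCA_BMASK_IBM(from,to) uses IBM bit numbering, where bit 0 is the most
significant bit of the 64-bit word.  A standalone worked example (the
macros are copied from the header above; u64 is typedef'd locally so it
compiles in userspace):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* copied from ehca_tools.h */
#define EHCA_BMASK(pos,length)    (((pos)<<16)+(length))
#define EHCA_BMASK_IBM(from,to)   (((63-to)<<16)+((to)-(from)+1))
#define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff)
#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff))
#define EHCA_BMASK_SET(mask,value) \
	((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask))
#define EHCA_BMASK_GET(mask,value) \
	(EHCA_BMASK_MASK(mask) & (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))

/* IBM bits 9..11 sit 52 positions above the LSB: width 3, shift 52 */
#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9,11)

int main(void)
{
	u64 reg = 0;

	reg |= EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, 5);	/* 0b101 */

	/* prints: reg=0050000000000000 field=5 */
	printf("reg=%016llx field=%llu\n",
	       (unsigned long long)reg,
	       (unsigned long long)EHCA_BMASK_GET(H_ALL_RES_QP_ENHANCED_OPS,
						  reg));
	return 0;
}
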
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_uverbs.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_uverbs.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ehca_uverbs.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ehca_uverbs.c	2006-10-17 10:15:06.000000000 -0400
@@ -40,9 +40,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#undef DEB_PREFIX
-#define DEB_PREFIX "uver"
-
 #include <asm/current.h>
 
 #include "ehca_classes.h"
@@ -54,30 +51,20 @@
 struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
 					struct ib_udata *udata)
 {
-	struct ehca_ucontext *my_context = NULL;
-
-	EHCA_CHECK_ADR_P(device);
-	EDEB_EN(7, "device=%p name=%s", device, device->name);
+	struct ehca_ucontext *my_context;
 
 	my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
 	if (!my_context) {
-		EDEB_ERR(4, "Out of memory device=%p", device);
+		ehca_err(device, "Out of memory device=%p", device);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	EDEB_EX(7, "device=%p ucontext=%p", device, my_context);
-
 	return &my_context->ib_ucontext;
 }
 
 int ehca_dealloc_ucontext(struct ib_ucontext *context)
 {
-	struct ehca_ucontext *my_context = NULL;
-	EHCA_CHECK_ADR(context);
-	EDEB_EN(7, "ucontext=%p", context);
-	my_context = container_of(context, struct ehca_ucontext, ib_ucontext);
-	kfree(my_context);
-	EDEB_EN(7, "ucontext=%p", context);
+	kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
 	return 0;
 }
 
@@ -91,83 +78,88 @@ struct page *ehca_nopage(struct vm_area_
 	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
 	u32 cur_pid = current->tgid;
 	unsigned long flags;
+	struct ehca_cq *cq;
+	struct ehca_qp *qp;
+	struct ehca_pd *pd;
+	u64 offset;
+	void *vaddr;
 
-	EDEB_EN(7, "vm_start=%lx vm_end=%lx vm_page_prot=%lx vm_fileoff=%lx "
-		"address=%lx",
-		vma->vm_start, vma->vm_end, vma->vm_page_prot, fileoffset,
-		address);
-
-	if (q_type == 1) { /* CQ */
-		struct ehca_cq *cq = NULL;
-		u64 offset;
-		void *vaddr = NULL;
-
+	switch (q_type) {
+	case 1: /* CQ */
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 		cq = idr_find(&ehca_cq_idr, idr_handle);
 		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
-		if (cq->ownpid != cur_pid) {
-			EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
-				 cur_pid, cq->ownpid);
+		/* make sure this mmap really belongs to the authorized user */
+		if (!cq) {
+			ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
 			return NOPAGE_SIGBUS;
 		}
 
-		/* make sure this mmap really belongs to the authorized user */
-		if (!cq) {
-			EDEB_ERR(4, "cq is NULL ret=NOPAGE_SIGBUS");
+		if (cq->ownpid != cur_pid) {
+			ehca_err(cq->ib_cq.device,
+				 "Invalid caller pid=%x ownpid=%x",
+				 cur_pid, cq->ownpid);
 			return NOPAGE_SIGBUS;
 		}
+
 		if (rsrc_type == 2) {
-			EDEB(6, "cq=%p cq queuearea", cq);
+			ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
 			offset = address - vma->vm_start;
 			vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
-			EDEB(6, "offset=%lx vaddr=%p", offset, vaddr);
+			ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
+				 offset, vaddr);
 			mypage = virt_to_page(vaddr);
 		}
-	} else if (q_type == 2) { /* QP */
-		struct ehca_qp *qp = NULL;
-		struct ehca_pd *pd = NULL;
-		u64 offset;
-		void *vaddr = NULL;
+		break;
 
+	case 2: /* QP */
 		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
 		qp = idr_find(&ehca_qp_idr, idr_handle);
 		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
+		/* make sure this mmap really belongs to the authorized user */
+		if (!qp) {
+			ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
+			return NOPAGE_SIGBUS;
+		}
 
 		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
 		if (pd->ownpid != cur_pid) {
-			EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+			ehca_err(qp->ib_qp.device,
+				 "Invalid caller pid=%x ownpid=%x",
 				 cur_pid, pd->ownpid);
 			return NOPAGE_SIGBUS;
 		}
 
-		/* make sure this mmap really belongs to the authorized user */
-		if (!qp) {
-			EDEB_ERR(4, "qp is NULL ret=NOPAGE_SIGBUS");
-			return NOPAGE_SIGBUS;
-		}
 		if (rsrc_type == 2) {	/* rqueue */
-			EDEB(6, "qp=%p qp rqueuearea", qp);
+			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
 			offset = address - vma->vm_start;
 			vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
-			EDEB(6, "offset=%lx vaddr=%p", offset, vaddr);
+			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
+				 offset, vaddr);
 			mypage = virt_to_page(vaddr);
 		} else if (rsrc_type == 3) {	/* squeue */
-			EDEB(6, "qp=%p qp squeuearea", qp);
+			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
 			offset = address - vma->vm_start;
 			vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
-			EDEB(6, "offset=%lx vaddr=%p", offset, vaddr);
+			ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
+				 offset, vaddr);
 			mypage = virt_to_page(vaddr);
 		}
+		break;
+
+	default:
+		ehca_gen_err("bad queue type %x", q_type);
+		return NOPAGE_SIGBUS;
 	}
 
 	if (!mypage) {
-		EDEB_ERR(4, "Invalid page adr==NULL ret=NOPAGE_SIGBUS");
+		ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
 		return NOPAGE_SIGBUS;
 	}
 	get_page(mypage);
-	EDEB_EX(7, "page adr=%p", mypage);
+
 	return mypage;
 }
 
@@ -181,201 +173,210 @@ int ehca_mmap(struct ib_ucontext *contex
 	u32 idr_handle = fileoffset >> 32;
 	u32 q_type = (fileoffset >> 28) & 0xF;	  /* CQ, QP,...        */
 	u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
-	u32 ret = -EFAULT;	/* assume the worst             */
-	u64 vsize = 0;		/* must be calculated/set below */
-	u64 physical = 0;	/* must be calculated/set below */
 	u32 cur_pid = current->tgid;
+	u32 ret;
+	u64 vsize, physical;
 	unsigned long flags;
+	struct ehca_cq *cq;
+	struct ehca_qp *qp;
+	struct ehca_pd *pd;
 
-	EDEB_EN(7, "vm_start=%lx vm_end=%lx vm_page_prot=%lx vm_fileoff=%lx",
-		vma->vm_start, vma->vm_end, vma->vm_page_prot, fileoffset);
-
-	if (q_type == 1) { /* CQ */
-		struct ehca_cq *cq;
-
+	switch (q_type) {
+	case  1: /* CQ */
 		spin_lock_irqsave(&ehca_cq_idr_lock, flags);
 		cq = idr_find(&ehca_cq_idr, idr_handle);
 		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
 
+		/* make sure this mmap really belongs to the authorized user */
+		if (!cq)
+			return -EINVAL;
+
 		if (cq->ownpid != cur_pid) {
-			EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+			ehca_err(cq->ib_cq.device,
+				 "Invalid caller pid=%x ownpid=%x",
 				 cur_pid, cq->ownpid);
 			return -ENOMEM;
 		}
 
-		/* make sure this mmap really belongs to the authorized user */
-		if (!cq)
-			return -EINVAL;
-		if (!cq->ib_cq.uobject)
+		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
 			return -EINVAL;
-		if (cq->ib_cq.uobject->context != context)
-			return -EINVAL;
-		if (rsrc_type == 1) {	/* galpa fw handle */
-			EDEB(6, "cq=%p cq triggerarea", cq);
+
+		switch (rsrc_type) {
+		case 1: /* galpa fw handle */
+			ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
 			vma->vm_flags |= VM_RESERVED;
 			vsize = vma->vm_end - vma->vm_start;
 			if (vsize != EHCA_PAGESIZE) {
-				EDEB_ERR(4, "invalid vsize=%lx",
+				ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
 					 vma->vm_end - vma->vm_start);
-				ret = -EINVAL;
-				goto mmap_exit0;
+				return -EINVAL;
 			}
 
 			physical = cq->galpas.user.fw_handle;
 			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 			vma->vm_flags |= VM_IO | VM_RESERVED;
 
-			EDEB(6, "vsize=%lx physical=%lx", vsize, physical);
+			ehca_dbg(cq->ib_cq.device,
+				 "vsize=%lx physical=%lx", vsize, physical);
 			ret = remap_pfn_range(vma, vma->vm_start,
 					      physical >> PAGE_SHIFT, vsize,
 					      vma->vm_page_prot);
 			if (ret) {
-				EDEB_ERR(4, "remap_pfn_range() failed ret=%x",
+				ehca_err(cq->ib_cq.device,
+					 "remap_pfn_range() failed ret=%x",
 					 ret);
-				ret = -ENOMEM;
+				return -ENOMEM;
 			}
-			goto mmap_exit0;
-		} else if (rsrc_type == 2) {	/* cq queue_addr */
-			EDEB(6, "cq=%p cq q_addr", cq);
-			/* vma->vm_page_prot =
-			 * pgprot_noncached(vma->vm_page_prot); */
+			break;
+
+		case 2: /* cq queue_addr */
+			ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
 			vma->vm_flags |= VM_RESERVED;
 			vma->vm_ops = &ehcau_vm_ops;
-			ret = 0;
-			goto mmap_exit0;
-		} else {
-			EDEB_ERR(6, "bad resource type %x", rsrc_type);
-			ret = -EINVAL;
-			goto mmap_exit0;
+			break;
+
+		default:
+			ehca_err(cq->ib_cq.device, "bad resource type %x",
+				 rsrc_type);
+			return -EINVAL;
 		}
-	} else if (q_type == 2) { /* QP */
-		struct ehca_qp *qp = NULL;
-		struct ehca_pd *pd = NULL;
+		break;
 
+	case 2: /* QP */
 		spin_lock_irqsave(&ehca_qp_idr_lock, flags);
 		qp = idr_find(&ehca_qp_idr, idr_handle);
 		spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
+		/* make sure this mmap really belongs to the authorized user */
+		if (!qp)
+			return -EINVAL;
+
 		pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
 		if (pd->ownpid != cur_pid) {
-			EDEB_ERR(4, "Invalid caller pid=%x ownpid=%x",
+			ehca_err(qp->ib_qp.device,
+				 "Invalid caller pid=%x ownpid=%x",
 				 cur_pid, pd->ownpid);
 			return -ENOMEM;
 		}
 
-		/* make sure this mmap really belongs to the authorized user */
-		if (!qp || !qp->ib_qp.uobject ||
-		    qp->ib_qp.uobject->context != context) {
-			EDEB(6, "qp=%p, uobject=%p, context=%p",
-			     qp, qp->ib_qp.uobject, qp->ib_qp.uobject->context);
-			ret = -EINVAL;
-			goto mmap_exit0;
-		}
-		if (rsrc_type == 1) {	/* galpa fw handle */
-			EDEB(6, "qp=%p qp triggerarea", qp);
+		if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
+			return -EINVAL;
+
+		switch (rsrc_type) {
+		case 1: /* galpa fw handle */
+			ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
 			vma->vm_flags |= VM_RESERVED;
 			vsize = vma->vm_end - vma->vm_start;
 			if (vsize != EHCA_PAGESIZE) {
-				EDEB_ERR(4, "invalid vsize=%lx",
+				ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
 					 vma->vm_end - vma->vm_start);
-				ret = -EINVAL;
-				goto mmap_exit0;
+				return -EINVAL;
 			}
 
 			physical = qp->galpas.user.fw_handle;
 			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 			vma->vm_flags |= VM_IO | VM_RESERVED;
 
-			EDEB(6, "vsize=%lx physical=%lx", vsize, physical);
+			ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
+				 vsize, physical);
 			ret = remap_pfn_range(vma, vma->vm_start,
 					      physical >> PAGE_SHIFT, vsize,
 					      vma->vm_page_prot);
 			if (ret) {
-				EDEB_ERR(4, "remap_pfn_range() failed ret=%x",
+				ehca_err(qp->ib_qp.device,
+					 "remap_pfn_range() failed ret=%x",
 					 ret);
-				ret = -ENOMEM;
+				return -ENOMEM;
 			}
-			goto mmap_exit0;
-		} else if (rsrc_type == 2) {	/* qp rqueue_addr */
-			EDEB(6, "qp=%p qp rqueue_addr", qp);
+			break;
+
+		case 2: /* qp rqueue_addr */
+			ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
 			vma->vm_flags |= VM_RESERVED;
 			vma->vm_ops = &ehcau_vm_ops;
-			ret = 0;
-			goto mmap_exit0;
-		} else if (rsrc_type == 3) {	/* qp squeue_addr */
-			EDEB(6, "qp=%p qp squeue_addr", qp);
+			break;
+
+		case 3: /* qp squeue_addr */
+			ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
 			vma->vm_flags |= VM_RESERVED;
 			vma->vm_ops = &ehcau_vm_ops;
-			ret = 0;
-			goto mmap_exit0;
-		} else {
-			EDEB_ERR(4, "bad resource type %x", rsrc_type);
-			ret = -EINVAL;
-			goto mmap_exit0;
+			break;
+
+		default:
+			ehca_err(qp->ib_qp.device, "bad resource type %x",
+				 rsrc_type);
+			return -EINVAL;
 		}
-	} else {
-		EDEB_ERR(4, "bad queue type %x", q_type);
-		ret = -EINVAL;
-		goto mmap_exit0;
+		break;
+
+	default:
+		ehca_gen_err("bad queue type %x", q_type);
+		return -EINVAL;
 	}
 
-mmap_exit0:
-	EDEB_EX(7, "ret=%x", ret);
-	return ret;
+	return 0;
 }
 
-int ehca_mmap_nopage(u64 foffset, u64 length, void ** mapped,
-		     struct vm_area_struct ** vma)
+int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
+		     struct vm_area_struct **vma)
 {
-	EDEB_EN(7, "foffset=%lx length=%lx", foffset, length);
 	down_write(&current->mm->mmap_sem);
-	*mapped = (void*)
-		do_mmap(NULL,0,
-			length,
-			PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-			foffset);
+	*mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE,
+				 MAP_SHARED | MAP_ANONYMOUS,
+				 foffset);
 	up_write(&current->mm->mmap_sem);
-	if (*mapped) {
-		*vma = find_vma(current->mm,(u64)*mapped);
-		if (*vma) {
-			(*vma)->vm_flags |= VM_RESERVED;
-			(*vma)->vm_ops = &ehcau_vm_ops;
-		} else
-			EDEB_ERR(4, "couldn't find queue vma queue=%p", *mapped);
-	} else
-		EDEB_ERR(4, "couldn't create mmap length=%lx", length);
-	EDEB_EX(7, "mapped=%p", *mapped);
+	if (!(*mapped)) {
+		ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
+			     foffset, length);
+		return -EINVAL;
+	}
+
+	*vma = find_vma(current->mm, (u64)*mapped);
+	if (!(*vma)) {
+		down_write(&current->mm->mmap_sem);
+		do_munmap(current->mm, 0, length);
+		up_write(&current->mm->mmap_sem);
+		ehca_gen_err("couldn't find vma queue=%p", *mapped);
+		return -EINVAL;
+	}
+	(*vma)->vm_flags |= VM_RESERVED;
+	(*vma)->vm_ops = &ehcau_vm_ops;
+
 	return 0;
 }
 
-int ehca_mmap_register(u64 physical, void ** mapped,
-		       struct vm_area_struct ** vma)
+int ehca_mmap_register(u64 physical, void **mapped,
+		       struct vm_area_struct **vma)
 {
-	int ret = 0;
+	int ret;
 	unsigned long vsize;
 	/* ehca hw supports only 4k page */
-	ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
+	ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
+	if (ret) {
+		ehca_gen_err("could'nt mmap physical=%lx", physical);
+		return ret;
+	}
+
 	(*vma)->vm_flags |= VM_RESERVED;
 	vsize = (*vma)->vm_end - (*vma)->vm_start;
 	if (vsize != EHCA_PAGESIZE) {
-		EDEB_ERR(4, "invalid vsize=%lx",
-			 (*vma)->vm_end - (*vma)->vm_start);
-		ret = -EINVAL;
-		return ret;
+		ehca_gen_err("invalid vsize=%lx",
+			     (*vma)->vm_end - (*vma)->vm_start);
+		return -EINVAL;
 	}
 
 	(*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
 	(*vma)->vm_flags |= VM_IO | VM_RESERVED;
 
-	EDEB(6, "vsize=%lx physical=%lx", vsize, physical);
 	ret = remap_pfn_range((*vma), (*vma)->vm_start,
 			      physical >> PAGE_SHIFT, vsize,
 			      (*vma)->vm_page_prot);
 	if (ret) {
-		EDEB_ERR(4, "remap_pfn_range() failed ret=%x", ret);
-		ret = -ENOMEM;
+		ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
+		return -ENOMEM;
 	}
-	return ret;
+
+	return 0;
 
 }
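
Both ehca_nopage() and ehca_mmap() above route every userspace mapping
through a file offset that packs the object handle and type fields into
a single 64-bit value: the idr handle in the high 32 bits, the queue
type in bits 28-31, and the resource type in bits 24-27.  A quick
standalone decode sketch mirroring those shifts (the sample offset is a
made-up value, not one the driver hands out):

#include <stdint.h>
#include <stdio.h>

/* field layout used by ehca_mmap()/ehca_nopage():
 *   [63:32] idr handle   [31:28] queue type   [27:24] resource type */
static void decode_fileoffset(uint64_t fileoffset)
{
	uint32_t idr_handle = fileoffset >> 32;
	uint32_t q_type     = (fileoffset >> 28) & 0xF;	/* 1=CQ, 2=QP */
	uint32_t rsrc_type  = (fileoffset >> 24) & 0xF;	/* 1=galpa fw handle,
							   2=rqueue, 3=squeue */

	printf("handle=%u q_type=%u rsrc_type=%u\n",
	       idr_handle, q_type, rsrc_type);
}

int main(void)
{
	/* hypothetical offset: handle 7, QP (2), send queue (3) */
	uint64_t off = ((uint64_t)7 << 32) | (2u << 28) | (3u << 24);

	decode_fileoffset(off);	/* prints: handle=7 q_type=2 rsrc_type=3 */
	return 0;
}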
 
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hcp_if.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hcp_if.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hcp_if.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hcp_if.c	2006-10-17 10:15:06.000000000 -0400
@@ -41,13 +41,12 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define DEB_PREFIX "hcpi"
-
 #include <asm/hvcall.h>
 #include "ehca_tools.h"
 #include "hcp_if.h"
 #include "hcp_phyp.h"
 #include "hipz_fns.h"
+#include "ipz_pt_fn.h"
 
 #define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9,11)
 #define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12,12)
@@ -112,12 +111,12 @@ static long ehca_hcall_7arg_7ret(unsigne
 				 unsigned long *out6,
 				 unsigned long *out7)
 {
-	long ret = H_SUCCESS;
+	long ret;
 	int i, sleep_msecs;
 
-	EDEB_EN(7, "opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx arg5=%lx"
-	        " arg6=%lx arg7=%lx", opcode, arg1, arg2, arg3, arg4, arg5,
-		arg6, arg7);
+	ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx arg5=%lx "
+		     "arg6=%lx arg7=%lx", opcode, arg1, arg2, arg3, arg4, arg5,
+		     arg6, arg7);
 
 	for (i = 0; i < 5; i++) {
 		ret = plpar_hcall_7arg_7ret(opcode,
@@ -133,26 +132,24 @@ static long ehca_hcall_7arg_7ret(unsigne
 		}
 
 		if (ret < H_SUCCESS)
-			EDEB_ERR(4, "opcode=%lx ret=%lx"
-				 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
-				 " arg5=%lx arg6=%lx arg7=%lx"
-				 " out1=%lx out2=%lx out3=%lx out4=%lx"
-				 " out5=%lx out6=%lx out7=%lx",
-				 opcode, ret,
-				 arg1, arg2, arg3, arg4,
-				 arg5, arg6, arg7,
-				 *out1, *out2, *out3, *out4,
-				 *out5, *out6, *out7);
-
-		EDEB_EX(7, "opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
-			"out4=%lx out5=%lx out6=%lx out7=%lx",
-			opcode, ret, *out1, *out2, *out3, *out4, *out5,
-			*out6, *out7);
+			ehca_gen_err("opcode=%lx ret=%lx"
+				     " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+				     " arg5=%lx arg6=%lx arg7=%lx"
+				     " out1=%lx out2=%lx out3=%lx out4=%lx"
+				     " out5=%lx out6=%lx out7=%lx",
+				     opcode, ret,
+				     arg1, arg2, arg3, arg4,
+				     arg5, arg6, arg7,
+				     *out1, *out2, *out3, *out4,
+				     *out5, *out6, *out7);
+
+		ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
+			     "out4=%lx out5=%lx out6=%lx out7=%lx",
+			     opcode, ret, *out1, *out2, *out3, *out4, *out5,
+			     *out6, *out7);
 		return ret;
 	}
 
-	EDEB_EX(7, "opcode=%lx ret=H_BUSY", opcode);
-
 	return H_BUSY;
 }
 
@@ -176,14 +173,13 @@ static long ehca_hcall_9arg_9ret(unsigne
 				 unsigned long *out8,
 				 unsigned long *out9)
 {
-	long ret = H_SUCCESS;
+	long ret;
 	int i, sleep_msecs;
 
-	EDEB_EN(7, "opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
-		"arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
-		opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
-		arg8, arg9);
-
+	ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
+		     "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
+		     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
+		     arg8, arg9);
 
 	for (i = 0; i < 5; i++) {
 		ret = plpar_hcall_9arg_9ret(opcode,
@@ -201,30 +197,29 @@ static long ehca_hcall_9arg_9ret(unsigne
 		}
 
 		if (ret < H_SUCCESS)
-			EDEB_ERR(4, "opcode=%lx ret=%lx"
-				 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
-				 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
-				 " arg9=%lx"
-				 " out1=%lx out2=%lx out3=%lx out4=%lx"
-				 " out5=%lx out6=%lx out7=%lx out8=%lx"
-				 " out9=%lx",
-				 opcode, ret,
-				 arg1, arg2, arg3, arg4,
-				 arg5, arg6, arg7, arg8,
-				 arg9,
-				 *out1, *out2, *out3, *out4,
-				 *out5, *out6, *out7, *out8,
-				 *out9);
-
-		EDEB_EX(7, "opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
-			"out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx out9=%lx",
-			opcode, ret,*out1, *out2, *out3, *out4, *out5, *out6,
-			*out7, *out8, *out9);
+			ehca_gen_err("opcode=%lx ret=%lx"
+				     " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+				     " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+				     " arg9=%lx"
+				     " out1=%lx out2=%lx out3=%lx out4=%lx"
+				     " out5=%lx out6=%lx out7=%lx out8=%lx"
+				     " out9=%lx",
+				     opcode, ret,
+				     arg1, arg2, arg3, arg4,
+				     arg5, arg6, arg7, arg8,
+				     arg9,
+				     *out1, *out2, *out3, *out4,
+				     *out5, *out6, *out7, *out8,
+				     *out9);
+
+		ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
+			     "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx "
+			     "out9=%lx", opcode, ret,*out1, *out2, *out3, *out4,
+			     *out5, *out6, *out7, *out8, *out9);
 		return ret;
 
 	}
 
-	EDEB_EX(7, "opcode=%lx ret=H_BUSY", opcode);
 	return H_BUSY;
 }
 
@@ -237,18 +232,10 @@ u64 hipz_h_alloc_resource_eq(const struc
 			     u32 * act_pages,
 			     u32 * eq_ist)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u64 act_nr_of_entries_out = 0;
-	u64 act_pages_out         = 0;
-	u64 eq_ist_out            = 0;
-	u64 allocate_controls     = 0;
-	u32 x = (u64)(&x);
-
-	EDEB_EN(7, "pfeq=%p adapter_handle=%lx  new_control=%x"
-		" number_of_entries=%x",
-		pfeq, adapter_handle.handle, neq_control,
-		number_of_entries);
+	u64 allocate_controls;
+	u64 act_nr_of_entries_out, act_pages_out, eq_ist_out;
 
 	/* resource type */
 	allocate_controls = 3ULL;
@@ -277,10 +264,7 @@ u64 hipz_h_alloc_resource_eq(const struc
 	*eq_ist            = (u32)eq_ist_out;
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		EDEB_ERR(4, "Not enough resource - ret=%lx ", ret);
-
-	EDEB_EX(7, "act_nr_of_entries=%x act_pages=%x eq_ist=%x",
-		*act_nr_of_entries, *act_pages, *eq_ist);
+		ehca_gen_err("Not enough resource - ret=%lx ", ret);
 
 	return ret;
 }
@@ -289,45 +273,30 @@ u64 hipz_h_reset_event(const struct ipz_
 		       struct ipz_eq_handle eq_handle,
 		       const u64 event_mask)
 {
-	u64 ret = H_SUCCESS;
 	u64 dummy;
 
-	EDEB_EN(7, "eq_handle=%lx, adapter_handle=%lx  event_mask=%lx",
-		eq_handle.handle, adapter_handle.handle, event_mask);
-
-	ret = ehca_hcall_7arg_7ret(H_RESET_EVENTS,
-				   adapter_handle.handle, /* r4 */
-				   eq_handle.handle,      /* r5 */
-				   event_mask,	          /* r6 */
-				   0, 0, 0, 0,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
-
-	EDEB(7, "ret=%lx", ret);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_RESET_EVENTS,
+				    adapter_handle.handle, /* r4 */
+				    eq_handle.handle,      /* r5 */
+				    event_mask,	           /* r6 */
+				    0, 0, 0, 0,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
 
 u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_cq *cq,
 			     struct ehca_alloc_cq_parms *param)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u64 act_nr_of_entries_out;
-	u64 act_pages_out;
-	u64 g_la_privileged_out;
-	u64 g_la_user_out;
-
-	EDEB_EN(7, "Adapter_handle=%lx eq_handle=%lx cq_token=%x"
-		" cq_number_of_entries=%x",
-		adapter_handle.handle, param->eq_handle.handle,
-		cq->token, param->nr_cqe);
+	u64 act_nr_of_entries_out, act_pages_out;
+	u64 g_la_privileged_out, g_la_user_out;
 
 	ret = ehca_hcall_7arg_7ret(H_ALLOC_RESOURCE,
 				   adapter_handle.handle,     /* r4  */
@@ -351,10 +320,7 @@ u64 hipz_h_alloc_resource_cq(const struc
 		hcp_galpas_ctor(&cq->galpas, g_la_privileged_out, g_la_user_out);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		EDEB_ERR(4, "Not enough resources. ret=%lx", ret);
-
-	EDEB_EX(7, "cq_handle=%lx act_nr_of_entries=%x act_pages=%x",
-		cq->ipz_cq_handle.handle, param->act_nr_of_entries, param->act_pages);
+		ehca_gen_err("Not enough resources. ret=%lx", ret);
 
 	return ret;
 }
@@ -363,32 +329,13 @@ u64 hipz_h_alloc_resource_qp(const struc
 			     struct ehca_qp *qp,
 			     struct ehca_alloc_qp_parms *parms)
 {
-	u64 ret = H_SUCCESS;
-	u64 allocate_controls;
-	u64 max_r10_reg;
-	u64 dummy         = 0;
-	u64 qp_nr_out     = 0;
-	u64 r6_out        = 0;
-	u64 r7_out        = 0;
-	u64 r8_out        = 0;
-	u64 g_la_user_out = 0;
-	u64 r11_out       = 0;
+	u64 ret;
+	u64 dummy, allocate_controls, max_r10_reg;
+	u64 qp_nr_out, r6_out, r7_out, r8_out, g_la_user_out, r11_out;
 	u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
 	u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
 	int daqp_ctrl = parms->daqp_ctrl;
 
-	EDEB_EN(7, "Adapter_handle=%lx servicetype=%x signalingtype=%x"
-		" ud_av_l_key=%x send_cq_handle=%lx receive_cq_handle=%lx"
-		" async_eq_handle=%lx qp_token=%x pd=%x max_nr_send_wqes=%x"
-		" max_nr_receive_wqes=%x max_nr_send_sges=%x"
-		" max_nr_receive_sges=%x ud_av_l_key=%x galpa.pid=%x",
-		adapter_handle.handle, parms->servicetype, parms->sigtype,
-		parms->ud_av_l_key_ctl, qp->send_cq->ipz_cq_handle.handle,
-		qp->recv_cq->ipz_cq_handle.handle, parms->ipz_eq_handle.handle,
-		qp->token, parms->pd.value, max_nr_send_wqes,
-		max_nr_receive_wqes, parms->max_send_sge, parms->max_recv_sge,
-		parms->ud_av_l_key_ctl, qp->galpas.pid);
-
 	allocate_controls =
 		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
 			       (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
@@ -454,17 +401,7 @@ u64 hipz_h_alloc_resource_qp(const struc
 		hcp_galpas_ctor(&qp->galpas, g_la_user_out, g_la_user_out);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		EDEB_ERR(4, "Not enough resources. ret=%lx",ret);
-
-	EDEB_EX(7, "qp_nr=%x act_nr_send_wqes=%x"
-		" act_nr_receive_wqes=%x act_nr_send_sges=%x"
-		" act_nr_receive_sges=%x nr_sq_pages=%x"
-		" nr_rq_pages=%x galpa.user=%lx galpa.kernel=%lx",
-		qp->real_qp_num, parms->act_nr_send_wqes,
-		parms->act_nr_recv_wqes, parms->act_nr_send_sges,
-		parms->act_nr_recv_sges, parms->nr_sq_pages,
-		parms->nr_rq_pages, qp->galpas.user.fw_handle,
-		qp->galpas.kernel.fw_handle);
+		ehca_gen_err("Not enough resources. ret=%lx",ret);
 
 	return ret;
 }
@@ -473,21 +410,15 @@ u64 hipz_h_query_port(const struct ipz_a
 		      const u8 port_id,
 		      struct hipz_query_port *query_port_response_block)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u64 r_cb;
+	u64 r_cb = virt_to_abs(query_port_response_block);
 
-	EDEB_EN(7, "adapter_handle=%lx port_id %x",
-		adapter_handle.handle, port_id);
-
-	if (((u64)query_port_response_block) & 0xfff) {
-		EDEB_ERR(4, "response block not page aligned");
-		ret = H_PARAMETER;
-		return ret;
+	if (r_cb & (EHCA_PAGESIZE-1)) {
+		ehca_gen_err("response block not page aligned");
+		return H_PARAMETER;
 	}
 
-	r_cb = virt_to_abs(query_port_response_block);
-
 	ret = ehca_hcall_7arg_7ret(H_QUERY_PORT,
 				   adapter_handle.handle, /* r4 */
 				   port_id,	          /* r5 */
@@ -501,19 +432,8 @@ u64 hipz_h_query_port(const struct ipz_a
 				   &dummy,
 				   &dummy);
 
-	EDEB_DMP(7, query_port_response_block, 64, "query_port_response_block");
-	EDEB(7, "offset31=%x offset35=%x offset36=%x",
-	     ((u32*)query_port_response_block)[32],
-	     ((u32*)query_port_response_block)[36],
-	     ((u32*)query_port_response_block)[37]);
-	EDEB(7, "offset200=%x offset201=%x offset202=%x "
-	     "offset203=%x",
-	     ((u32*)query_port_response_block)[0x200],
-	     ((u32*)query_port_response_block)[0x201],
-	     ((u32*)query_port_response_block)[0x202],
-	     ((u32*)query_port_response_block)[0x203]);
-
-	EDEB_EX(7, "ret=%lx", ret);
+	if (ehca_debug_level)
+		ehca_dmp(query_port_response_block, 64, "response_block");
 
 	return ret;
 }
@@ -521,62 +441,26 @@ u64 hipz_h_query_port(const struct ipz_a
 u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
 		     struct hipz_query_hca *query_hca_rblock)
 {
-	u64 ret = H_SUCCESS;
 	u64 dummy;
-	u64 r_cb;
-	EDEB_EN(7, "adapter_handle=%lx", adapter_handle.handle);
+	u64 r_cb = virt_to_abs(query_hca_rblock);
 
-	if (((u64)query_hca_rblock) & 0xfff) {
-		EDEB_ERR(4, "response block not page aligned");
-		ret = H_PARAMETER;
-		return ret;
+	if (r_cb & (EHCA_PAGESIZE-1)) {
+		ehca_gen_err("response_block=%p not page aligned",
+			     query_hca_rblock);
+		return H_PARAMETER;
 	}
 
-	r_cb = virt_to_abs(query_hca_rblock);
-
-	ret = ehca_hcall_7arg_7ret(H_QUERY_HCA,
-				   adapter_handle.handle, /* r4 */
-				   r_cb,                  /* r5 */
-				   0, 0, 0, 0, 0,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
-
-	EDEB(7, "offset0=%x offset1=%x offset2=%x offset3=%x",
-	     ((u32*)query_hca_rblock)[0],
-	     ((u32*)query_hca_rblock)[1],
-	     ((u32*)query_hca_rblock)[2], ((u32*)query_hca_rblock)[3]);
-	EDEB(7, "offset4=%x offset5=%x offset6=%x offset7=%x",
-	     ((u32*)query_hca_rblock)[4],
-	     ((u32*)query_hca_rblock)[5],
-	     ((u32*)query_hca_rblock)[6], ((u32*)query_hca_rblock)[7]);
-	EDEB(7, "offset8=%x offset9=%x offseta=%x offsetb=%x",
-	     ((u32*)query_hca_rblock)[8],
-	     ((u32*)query_hca_rblock)[9],
-	     ((u32*)query_hca_rblock)[10], ((u32*)query_hca_rblock)[11]);
-	EDEB(7, "offsetc=%x offsetd=%x offsete=%x offsetf=%x",
-	     ((u32*)query_hca_rblock)[12],
-	     ((u32*)query_hca_rblock)[13],
-	     ((u32*)query_hca_rblock)[14], ((u32*)query_hca_rblock)[15]);
-	EDEB(7, "offset136=%x offset192=%x offset204=%x",
-	     ((u32*)query_hca_rblock)[32],
-	     ((u32*)query_hca_rblock)[48], ((u32*)query_hca_rblock)[51]);
-	EDEB(7, "offset231=%x offset235=%x",
-	     ((u32*)query_hca_rblock)[57], ((u32*)query_hca_rblock)[58]);
-	EDEB(7, "offset200=%x offset201=%x offset202=%x offset203=%x",
-	     ((u32*)query_hca_rblock)[0x201],
-	     ((u32*)query_hca_rblock)[0x202],
-	     ((u32*)query_hca_rblock)[0x203],
-	     ((u32*)query_hca_rblock)[0x204]);
-
-	EDEB_EX(7, "ret=%lx adapter_handle=%lx",
-		ret, adapter_handle.handle);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_QUERY_HCA,
+				    adapter_handle.handle, /* r4 */
+				    r_cb,                  /* r5 */
+				    0, 0, 0, 0, 0,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
 
 u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
@@ -586,32 +470,22 @@ u64 hipz_h_register_rpage(const struct i
 			  const u64 logical_address_of_page,
 			  u64 count)
 {
-	u64 ret = H_SUCCESS;
 	u64 dummy;
 
-	EDEB_EN(7, "adapter_handle=%lx pagesize=%x queue_type=%x"
-		" resource_handle=%lx logical_address_of_page=%lx count=%lx",
-		adapter_handle.handle, pagesize, queue_type,
-		resource_handle, logical_address_of_page, count);
-
-	ret = ehca_hcall_7arg_7ret(H_REGISTER_RPAGES,
-				   adapter_handle.handle,      /* r4  */
-				   queue_type | pagesize << 8, /* r5  */
-				   resource_handle,	       /* r6  */
-				   logical_address_of_page,    /* r7  */
-				   count,	               /* r8  */
-				   0, 0,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
-
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_REGISTER_RPAGES,
+				    adapter_handle.handle,      /* r4  */
+				    queue_type | pagesize << 8, /* r5  */
+				    resource_handle,	        /* r6  */
+				    logical_address_of_page,    /* r7  */
+				    count,	                /* r8  */
+				    0, 0,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
 
 u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
@@ -622,34 +496,22 @@ u64 hipz_h_register_rpage_eq(const struc
 			     const u64 logical_address_of_page,
 			     const u64 count)
 {
-	u64 ret = H_SUCCESS;
-
-	EDEB_EN(7, "pfeq=%p adapter_handle=%lx eq_handle=%lx pagesize=%x"
-		" queue_type=%x logical_address_of_page=%lx count=%lx",
-		pfeq, adapter_handle.handle, eq_handle.handle, pagesize,
-		queue_type,logical_address_of_page, count);
-
 	if (count != 1) {
-		EDEB_ERR(4, "Ppage counter=%lx", count);
+		ehca_gen_err("Ppage counter=%lx", count);
 		return H_PARAMETER;
 	}
-	ret = hipz_h_register_rpage(adapter_handle,
-				    pagesize,
-				    queue_type,
-				    eq_handle.handle,
-				    logical_address_of_page, count);
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return hipz_h_register_rpage(adapter_handle,
+				     pagesize,
+				     queue_type,
+				     eq_handle.handle,
+				     logical_address_of_page, count);
 }
 
 u32 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
 			   u32 ist)
 {
-	u32 ret = H_SUCCESS;
-	u64 dummy = 0;
-
-	EDEB_EN(7, "ist=%x", ist);
+	u32 ret;
+	u64 dummy;
 
 	ret = ehca_hcall_7arg_7ret(H_QUERY_INT_STATE,
 				   adapter_handle.handle, /* r4 */
@@ -664,9 +526,7 @@ u32 hipz_h_query_int_state(const struct 
 				   &dummy);
 
 	if (ret != H_SUCCESS && ret != H_BUSY)
-		EDEB_ERR(4, "Could not query interrupt state.");
-
-	EDEB_EX(7, "interrupt state: %x", ret);
+		ehca_gen_err("Could not query interrupt state.");
 
 	return ret;
 }
@@ -680,24 +540,14 @@ u64 hipz_h_register_rpage_cq(const struc
 			     const u64 count,
 			     const struct h_galpa gal)
 {
-	u64 ret = H_SUCCESS;
-
-	EDEB_EN(7, "pfcq=%p adapter_handle=%lx cq_handle=%lx pagesize=%x"
-		" queue_type=%x logical_address_of_page=%lx count=%lx",
-		pfcq, adapter_handle.handle, cq_handle.handle, pagesize,
-		queue_type, logical_address_of_page, count);
-
 	if (count != 1) {
-		EDEB_ERR(4, "Page counter=%lx", count);
+		ehca_gen_err("Page counter=%lx", count);
 		return H_PARAMETER;
 	}
 
-	ret = hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
-				    cq_handle.handle, logical_address_of_page,
-				    count);
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
+				     cq_handle.handle, logical_address_of_page,
+				     count);
 }
 
 u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
@@ -709,24 +559,14 @@ u64 hipz_h_register_rpage_qp(const struc
 			     const u64 count,
 			     const struct h_galpa galpa)
 {
-	u64 ret = H_SUCCESS;
-
-	EDEB_EN(7, "pfqp=%p adapter_handle=%lx qp_handle=%lx pagesize=%x"
-		" queue_type=%x logical_address_of_page=%lx count=%lx",
-		pfqp, adapter_handle.handle, qp_handle.handle, pagesize,
-		queue_type, logical_address_of_page, count);
-
 	if (count != 1) {
-		EDEB_ERR(4, "Page counter=%lx", count);
+		ehca_gen_err("Page counter=%lx", count);
 		return H_PARAMETER;
 	}
 
-	ret = hipz_h_register_rpage(adapter_handle,pagesize,queue_type,
-				    qp_handle.handle,logical_address_of_page,
-				    count);
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
+				     qp_handle.handle, logical_address_of_page,
+				     count);
 }
 
 u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
@@ -736,36 +576,25 @@ u64 hipz_h_disable_and_get_wqe(const str
 			       void **log_addr_next_rq_wqe2processed,
 			       int dis_and_get_function_code)
 {
-	u64 ret = H_SUCCESS;
-	u8 function_code = 1;
 	u64 dummy, dummy1, dummy2;
 
-	EDEB_EN(7, "pfqp=%p adapter_handle=%lx function=%x qp_handle=%lx",
-		pfqp, adapter_handle.handle, function_code, qp_handle.handle);
-
 	if (!log_addr_next_sq_wqe2processed)
 		log_addr_next_sq_wqe2processed = (void**)&dummy1;
 	if (!log_addr_next_rq_wqe2processed)
 		log_addr_next_rq_wqe2processed = (void**)&dummy2;
 
-	ret = ehca_hcall_7arg_7ret(H_DISABLE_AND_GETC,
-				   adapter_handle.handle,     /* r4 */
-				   dis_and_get_function_code, /* r5 */
-				   qp_handle.handle,	      /* r6 */
-				   0, 0, 0, 0,
-				   (void*)log_addr_next_sq_wqe2processed,
-				   (void*)log_addr_next_rq_wqe2processed,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
-	EDEB_EX(7, "ret=%lx ladr_next_rq_wqe_out=%p"
-		" ladr_next_sq_wqe_out=%p", ret,
-		*log_addr_next_sq_wqe2processed,
-		*log_addr_next_rq_wqe2processed);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_DISABLE_AND_GETC,
+				    adapter_handle.handle,     /* r4 */
+				    dis_and_get_function_code, /* r5 */
+				    qp_handle.handle,	       /* r6 */
+				    0, 0, 0, 0,
+				    (void*)log_addr_next_sq_wqe2processed,
+				    (void*)log_addr_next_rq_wqe2processed,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
 
 u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
@@ -775,22 +604,15 @@ u64 hipz_h_modify_qp(const struct ipz_ad
 		     struct hcp_modify_qp_control_block *mqpcb,
 		     struct h_galpa gal)
 {
-	u64 ret = H_SUCCESS;
-	u64 invalid_attribute_identifier = 0;
-	u64 rc_attrib_mask = 0;
-	u64 dummy;
-	u64 r_cb;
-	EDEB_EN(7, "pfqp=%p adapter_handle=%lx qp_handle=%lx"
-		" update_mask=%lx qp_state=%x mqpcb=%p",
-		pfqp, adapter_handle.handle, qp_handle.handle,
-		update_mask, mqpcb->qp_state, mqpcb);
+	u64 ret;
+	u64 dummy;
+	u64 invalid_attribute_identifier, rc_attrib_mask;
 
-	r_cb = virt_to_abs(mqpcb);
 	ret = ehca_hcall_7arg_7ret(H_MODIFY_QP,
 				   adapter_handle.handle,         /* r4 */
 				   qp_handle.handle,	          /* r5 */
 				   update_mask,	                  /* r6 */
-				   r_cb,	                  /* r7 */
+				   virt_to_abs(mqpcb),	          /* r7 */
 				   0, 0, 0,
 				   &invalid_attribute_identifier, /* r4 */
 				   &dummy,	                  /* r5 */
@@ -799,12 +621,9 @@ u64 hipz_h_modify_qp(const struct ipz_ad
 				   &dummy,	                  /* r8 */
 				   &rc_attrib_mask,               /* r9 */
 				   &dummy);
-	if (ret == H_NOT_ENOUGH_RESOURCES)
-		EDEB_ERR(4, "Insufficient resources ret=%lx", ret);
 
-	EDEB_EX(7, "ret=%lx invalid_attribute_identifier=%lx"
-		" invalid_attribute_MASK=%lx", ret,
-		invalid_attribute_identifier, rc_attrib_mask);
+	if (ret == H_NOT_ENOUGH_RESOURCES)
+		ehca_gen_err("Insufficient resources ret=%lx", ret);
 
 	return ret;
 }
@@ -815,47 +634,32 @@ u64 hipz_h_query_qp(const struct ipz_ada
 		    struct hcp_modify_qp_control_block *qqpcb,
 		    struct h_galpa gal)
 {
-	u64 ret = H_SUCCESS;
 	u64 dummy;
-	u64 r_cb;
-	EDEB_EN(7, "adapter_handle=%lx qp_handle=%lx",
-		adapter_handle.handle, qp_handle.handle);
-
-	r_cb = virt_to_abs(qqpcb);
-	EDEB(7, "r_cb=%lx", r_cb);
-
-	ret = ehca_hcall_7arg_7ret(H_QUERY_QP,
-				   adapter_handle.handle, /* r4 */
-				   qp_handle.handle,      /* r5 */
-				   r_cb,	          /* r6 */
-				   0, 0, 0, 0,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
 
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_QUERY_QP,
+				    adapter_handle.handle, /* r4 */
+				    qp_handle.handle,      /* r5 */
+				    virt_to_abs(qqpcb),	   /* r6 */
+				    0, 0, 0, 0,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
 
 u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
 		      struct ehca_qp *qp)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u64 ladr_next_sq_wqe_out;
-	u64 ladr_next_rq_wqe_out;
-
-	EDEB_EN(7, "qp=%p ipz_qp_handle=%lx adapter_handle=%lx",
-		qp, qp->ipz_qp_handle.handle, adapter_handle.handle);
+	u64 ladr_next_sq_wqe_out, ladr_next_rq_wqe_out;
 
 	ret = hcp_galpas_dtor(&qp->galpas);
 	if (ret) {
-		EDEB_ERR(4, "Could not destruct qp->galpas");
+		ehca_gen_err("Could not destruct qp->galpas");
 		return H_RESOURCE;
 	}
 	ret = ehca_hcall_7arg_7ret(H_DISABLE_AND_GETC,
@@ -872,7 +676,7 @@ u64 hipz_h_destroy_qp(const struct ipz_a
 				   &dummy,
 				   &dummy);
 	if (ret == H_HARDWARE)
-		EDEB_ERR(4, "HCA not operational. ret=%lx", ret);
+		ehca_gen_err("HCA not operational. ret=%lx", ret);
 
 	ret = ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
 				   adapter_handle.handle,     /* r4 */
@@ -887,9 +691,7 @@ u64 hipz_h_destroy_qp(const struct ipz_a
 				   &dummy);
 
 	if (ret == H_RESOURCE)
-		EDEB_ERR(4, "Resource still in use. ret=%lx", ret);
-
-	EDEB_EX(7, "ret=%lx", ret);
+		ehca_gen_err("Resource still in use. ret=%lx", ret);
 
 	return ret;
 }
@@ -899,28 +701,20 @@ u64 hipz_h_define_aqp0(const struct ipz_
 		       struct h_galpa gal,
 		       u32 port)
 {
-	u64 ret = H_SUCCESS;
 	u64 dummy;
 
-	EDEB_EN(7, "port=%x ipz_qp_handle=%lx adapter_handle=%lx",
-		port, qp_handle.handle, adapter_handle.handle);
-
-	ret = ehca_hcall_7arg_7ret(H_DEFINE_AQP0,
-				   adapter_handle.handle, /* r4 */
-				   qp_handle.handle,      /* r5 */
-				   port,                  /* r6 */
-				   0, 0, 0, 0,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
-
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_DEFINE_AQP0,
+				    adapter_handle.handle, /* r4 */
+				    qp_handle.handle,      /* r5 */
+				    port,                  /* r6 */
+				    0, 0, 0, 0,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
 
 u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
@@ -929,13 +723,9 @@ u64 hipz_h_define_aqp1(const struct ipz_
 		       u32 port, u32 * pma_qp_nr,
 		       u32 * bma_qp_nr)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u64 pma_qp_nr_out;
-	u64 bma_qp_nr_out;
-
-	EDEB_EN(7, "port=%x qp_handle=%lx adapter_handle=%lx",
-		port, qp_handle.handle, adapter_handle.handle);
+	u64 pma_qp_nr_out, bma_qp_nr_out;
 
 	ret = ehca_hcall_7arg_7ret(H_DEFINE_AQP1,
 				   adapter_handle.handle, /* r4 */
@@ -954,10 +744,7 @@ u64 hipz_h_define_aqp1(const struct ipz_
 	*bma_qp_nr = (u32)bma_qp_nr_out;
 
 	if (ret == H_ALIAS_EXIST)
-		EDEB_ERR(4, "AQP1 already exists. ret=%lx", ret);
-
-	EDEB_EX(7, "ret=%lx pma_qp_nr=%i bma_qp_nr=%i",
-		ret, (int)*pma_qp_nr, (int)*bma_qp_nr);
+		ehca_gen_err("AQP1 already exists. ret=%lx", ret);
 
 	return ret;
 }
@@ -968,23 +755,8 @@ u64 hipz_h_attach_mcqp(const struct ipz_
 		       u16 mcg_dlid,
 		       u64 subnet_prefix, u64 interface_id)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u8 *dgid_sp = (u8*)&subnet_prefix;
-	u8 *dgid_ii = (u8*)&interface_id;
-
-	EDEB_EN(7, "qp_handle=%lx adapter_handle=%lx\nMCG_DGID ="
-		" %d.%d.%d.%d.%d.%d.%d.%d."
-		" %d.%d.%d.%d.%d.%d.%d.%d",
-		qp_handle.handle, adapter_handle.handle,
-		dgid_sp[0], dgid_sp[1],
-		dgid_sp[2], dgid_sp[3],
-		dgid_sp[4], dgid_sp[5],
-		dgid_sp[6], dgid_sp[7],
-		dgid_ii[0], dgid_ii[1],
-		dgid_ii[2], dgid_ii[3],
-		dgid_ii[4], dgid_ii[5],
-		dgid_ii[6], dgid_ii[7]);
 
 	ret = ehca_hcall_7arg_7ret(H_ATTACH_MCQP,
 				   adapter_handle.handle,     /* r4 */
@@ -1002,9 +774,7 @@ u64 hipz_h_attach_mcqp(const struct ipz_
 				   &dummy);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		EDEB_ERR(4, "Not enough resources. ret=%lx", ret);
-
-	EDEB_EX(7, "ret=%lx", ret);
+		ehca_gen_err("Not enough resources. ret=%lx", ret);
 
 	return ret;
 }
@@ -1015,56 +785,34 @@ u64 hipz_h_detach_mcqp(const struct ipz_
 		       u16 mcg_dlid,
 		       u64 subnet_prefix, u64 interface_id)
 {
-	u64 ret = H_SUCCESS;
 	u64 dummy;
-	u8 *dgid_sp = (u8*)&subnet_prefix;
-	u8 *dgid_ii = (u8*)&interface_id;
 
-	EDEB_EN(7, "qp_handle=%lx adapter_handle=%lx\nMCG_DGID ="
-		" %d.%d.%d.%d.%d.%d.%d.%d."
-		" %d.%d.%d.%d.%d.%d.%d.%d",
-		qp_handle.handle, adapter_handle.handle,
-		dgid_sp[0], dgid_sp[1],
-		dgid_sp[2], dgid_sp[3],
-		dgid_sp[4], dgid_sp[5],
-		dgid_sp[6], dgid_sp[7],
-		dgid_ii[0], dgid_ii[1],
-		dgid_ii[2], dgid_ii[3],
-		dgid_ii[4], dgid_ii[5],
-		dgid_ii[6], dgid_ii[7]);
-	ret = ehca_hcall_7arg_7ret(H_DETACH_MCQP,
-				   adapter_handle.handle, /* r4 */
-				   qp_handle.handle,	  /* r5 */
-				   mcg_dlid,	          /* r6 */
-				   interface_id,          /* r7 */
-				   subnet_prefix,         /* r8 */
-				   0, 0,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
-
-	EDEB(7, "ret=%lx", ret);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_DETACH_MCQP,
+				    adapter_handle.handle, /* r4 */
+				    qp_handle.handle,	   /* r5 */
+				    mcg_dlid,	           /* r6 */
+				    interface_id,          /* r7 */
+				    subnet_prefix,         /* r8 */
+				    0, 0,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
 
 u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
 		      struct ehca_cq *cq,
 		      u8 force_flag)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
 
-	EDEB_EN(7, "cq->pf=%p cq=.%p ipz_cq_handle=%lx adapter_handle=%lx",
-		&cq->pf, cq, cq->ipz_cq_handle.handle, adapter_handle.handle);
-
 	ret = hcp_galpas_dtor(&cq->galpas);
 	if (ret) {
-		EDEB_ERR(4, "Could not destruct cp->galpas");
+		ehca_gen_err("Could not destruct cp->galpas");
 		return H_RESOURCE;
 	}
 
@@ -1082,9 +830,7 @@ u64 hipz_h_destroy_cq(const struct ipz_a
 				   &dummy);
 
 	if (ret == H_RESOURCE)
-		EDEB(4, "ret=%lx ", ret);
-
-	EDEB_EX(7, "ret=%lx", ret);
+		ehca_gen_err("H_FREE_RESOURCE failed ret=%lx ", ret);
 
 	return ret;
 }
@@ -1092,16 +838,12 @@ u64 hipz_h_destroy_cq(const struct ipz_a
 u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
 		      struct ehca_eq *eq)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
 
-	EDEB_EN(7, "eq->pf=%p eq=%p ipz_eq_handle=%lx adapter_handle=%lx",
-		&eq->pf, eq, eq->ipz_eq_handle.handle,
-		adapter_handle.handle);
-
 	ret = hcp_galpas_dtor(&eq->galpas);
 	if (ret) {
-		EDEB_ERR(4, "Could not destruct eq->galpas");
+		ehca_gen_err("Could not destruct eq->galpas");
 		return H_RESOURCE;
 	}
 
@@ -1119,9 +861,7 @@ u64 hipz_h_destroy_eq(const struct ipz_a
 
 
 	if (ret == H_RESOURCE)
-		EDEB_ERR(4, "Resource in use. ret=%lx ", ret);
-
-	EDEB_EX(7, "ret=%lx", ret);
+		ehca_gen_err("Resource in use. ret=%lx ", ret);
 
 	return ret;
 }
@@ -1134,16 +874,11 @@ u64 hipz_h_alloc_resource_mr(const struc
 			     const struct ipz_pd pd,
 			     struct ehca_mr_hipzout_parms *outparms)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
 	u64 lkey_out;
 	u64 rkey_out;
 
-	EDEB_EN(7, "adapter_handle=%lx mr=%p vaddr=%lx length=%lx"
-		" access_ctrl=%x pd=%x",
-		adapter_handle.handle, mr, vaddr, length, access_ctrl,
-		pd.value);
-
 	ret = ehca_hcall_7arg_7ret(H_ALLOC_RESOURCE,
 				   adapter_handle.handle,            /* r4 */
 				   5,                                /* r5 */
@@ -1162,9 +897,6 @@ u64 hipz_h_alloc_resource_mr(const struc
 	outparms->lkey = (u32)lkey_out;
 	outparms->rkey = (u32)rkey_out;
 
-	EDEB_EX(7, "ret=%lx mr_handle=%lx lkey=%x rkey=%x",
-		ret, outparms->handle.handle, outparms->lkey, outparms->rkey);
-
 	return ret;
 }
 
@@ -1175,27 +907,22 @@ u64 hipz_h_register_rpage_mr(const struc
 			     const u64 logical_address_of_page,
 			     const u64 count)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 
-	EDEB_EN(7, "adapter_handle=%lx mr=%p mr_handle=%lx pagesize=%x"
-		" queue_type=%x logical_address_of_page=%lx count=%lx",
-		adapter_handle.handle, mr, mr->ipz_mr_handle.handle, pagesize,
-		queue_type, logical_address_of_page, count);
-
-	if ((count > 1) && (logical_address_of_page & 0xfff)) {
-		EDEB_ERR(4, "logical_address_of_page not on a 4k boundary "
-			 "adapter_handle=%lx mr=%p mr_handle=%lx "
-			 "pagesize=%x queue_type=%x logical_address_of_page=%lx"
-			 " count=%lx",
-			 adapter_handle.handle, mr, mr->ipz_mr_handle.handle,
-			 pagesize, queue_type, logical_address_of_page, count);
+	if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
+		ehca_gen_err("logical_address_of_page not on a 4k boundary "
+			     "adapter_handle=%lx mr=%p mr_handle=%lx "
+			     "pagesize=%x queue_type=%x "
+			     "logical_address_of_page=%lx count=%lx",
+			     adapter_handle.handle, mr,
+			     mr->ipz_mr_handle.handle, pagesize, queue_type,
+			     logical_address_of_page, count);
 		ret = H_PARAMETER;
 	} else
 		ret = hipz_h_register_rpage(adapter_handle, pagesize,
 					    queue_type,
 					    mr->ipz_mr_handle.handle,
 					    logical_address_of_page, count);
-	EDEB_EX(7, "ret=%lx", ret);
 
 	return ret;
 }
@@ -1204,15 +931,9 @@ u64 hipz_h_query_mr(const struct ipz_ada
 		    const struct ehca_mr *mr,
 		    struct ehca_mr_hipzout_parms *outparms)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u64 remote_len_out;
-	u64 remote_vaddr_out;
-	u64 acc_ctrl_pd_out;
-	u64 r9_out;
-
-	EDEB_EN(7, "adapter_handle=%lx mr=%p mr_handle=%lx",
-		adapter_handle.handle, mr, mr->ipz_mr_handle.handle);
+	u64 remote_len_out, remote_vaddr_out, acc_ctrl_pd_out, r9_out;
 
 	ret = ehca_hcall_7arg_7ret(H_QUERY_MR,
 				   adapter_handle.handle,     /* r4 */
@@ -1230,38 +951,25 @@ u64 hipz_h_query_mr(const struct ipz_ada
 	outparms->lkey = (u32)(r9_out >> 32);
 	outparms->rkey = (u32)(r9_out & (0xffffffff));
 
-	EDEB_EX(7, "ret=%lx mr_local_length=%lx mr_local_vaddr=%lx "
-		"mr_remote_length=%lx mr_remote_vaddr=%lx access_ctrl=%x "
-		"pd=%x lkey=%x rkey=%x", ret, outparms->len,
-		outparms->vaddr, remote_len_out, remote_vaddr_out,
-		outparms->acl, outparms->acl, outparms->lkey, outparms->rkey);
-
 	return ret;
 }
 
 u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
 			    const struct ehca_mr *mr)
 {
-	u64 ret = H_SUCCESS;
 	u64 dummy;
 
-	EDEB_EN(7, "adapter_handle=%lx mr=%p mr_handle=%lx",
-		adapter_handle.handle, mr, mr->ipz_mr_handle.handle);
-
-	ret = ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
-				   adapter_handle.handle,    /* r4 */
-				   mr->ipz_mr_handle.handle, /* r5 */
-				   0, 0, 0, 0, 0,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
+				    adapter_handle.handle,    /* r4 */
+				    mr->ipz_mr_handle.handle, /* r5 */
+				    0, 0, 0, 0, 0,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
 
 u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
@@ -1273,15 +981,9 @@ u64 hipz_h_reregister_pmr(const struct i
 			  const u64 mr_addr_cb,
 			  struct ehca_mr_hipzout_parms *outparms)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u64 lkey_out;
-	u64 rkey_out;
-
-	EDEB_EN(7, "adapter_handle=%lx mr=%p mr_handle=%lx vaddr_in=%lx "
-		"length=%lx access_ctrl=%x pd=%x mr_addr_cb=%lx",
-		adapter_handle.handle, mr, mr->ipz_mr_handle.handle, vaddr_in,
-		length, access_ctrl, pd.value, mr_addr_cb);
+	u64 lkey_out, rkey_out;
 
 	ret = ehca_hcall_7arg_7ret(H_REREGISTER_PMR,
 				   adapter_handle.handle,    /* r4 */
@@ -1303,8 +1005,6 @@ u64 hipz_h_reregister_pmr(const struct i
 	outparms->lkey = (u32)lkey_out;
 	outparms->rkey = (u32)rkey_out;
 
-	EDEB_EX(7, "ret=%lx vaddr=%lx lkey=%x rkey=%x",
-		ret, outparms->vaddr, outparms->lkey, outparms->rkey);
 	return ret;
 }
 
@@ -1316,16 +1016,9 @@ u64 hipz_h_register_smr(const struct ipz
 			const struct ipz_pd pd,
 			struct ehca_mr_hipzout_parms *outparms)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u64 lkey_out;
-	u64 rkey_out;
-
-	EDEB_EN(7, "adapter_handle=%lx orig_mr=%p orig_mr_handle=%lx "
-		"vaddr_in=%lx access_ctrl=%x pd=%x", adapter_handle.handle,
-		orig_mr, orig_mr->ipz_mr_handle.handle, vaddr_in, access_ctrl,
-		pd.value);
-
+	u64 lkey_out, rkey_out;
 
 	ret = ehca_hcall_7arg_7ret(H_REGISTER_SMR,
 				   adapter_handle.handle,            /* r4 */
@@ -1344,9 +1037,6 @@ u64 hipz_h_register_smr(const struct ipz
 	outparms->lkey = (u32)lkey_out;
 	outparms->rkey = (u32)rkey_out;
 
-	EDEB_EX(7, "ret=%lx mr_handle=%lx lkey=%x rkey=%x",
-		ret, outparms->handle.handle, outparms->lkey, outparms->rkey);
-
 	return ret;
 }
 
@@ -1355,13 +1045,10 @@ u64 hipz_h_alloc_resource_mw(const struc
 			     const struct ipz_pd pd,
 			     struct ehca_mw_hipzout_parms *outparms)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
 	u64 rkey_out;
 
-	EDEB_EN(7, "adapter_handle=%lx mw=%p pd=%x",
-		adapter_handle.handle, mw, pd.value);
-
 	ret = ehca_hcall_7arg_7ret(H_ALLOC_RESOURCE,
 				   adapter_handle.handle,      /* r4 */
 				   6,                          /* r5 */
@@ -1377,8 +1064,6 @@ u64 hipz_h_alloc_resource_mw(const struc
 
 	outparms->rkey = (u32)rkey_out;
 
-	EDEB_EX(7, "ret=%lx mw_handle=%lx rkey=%x",
-		ret, outparms->handle.handle, outparms->rkey);
 	return ret;
 }
 
@@ -1386,13 +1071,9 @@ u64 hipz_h_query_mw(const struct ipz_ada
 		    const struct ehca_mw *mw,
 		    struct ehca_mw_hipzout_parms *outparms)
 {
-	u64 ret = H_SUCCESS;
+	u64 ret;
 	u64 dummy;
-	u64 pd_out;
-	u64 rkey_out;
-
-	EDEB_EN(7, "adapter_handle=%lx mw=%p mw_handle=%lx",
-		adapter_handle.handle, mw, mw->ipz_mw_handle.handle);
+	u64 pd_out, rkey_out;
 
 	ret = ehca_hcall_7arg_7ret(H_QUERY_MW,
 				   adapter_handle.handle,    /* r4 */
@@ -1407,34 +1088,25 @@ u64 hipz_h_query_mw(const struct ipz_ada
 				   &dummy);
 	outparms->rkey = (u32)rkey_out;
 
-	EDEB_EX(7, "ret=%lx rkey=%x pd=%lx", ret, outparms->rkey, pd_out);
-
 	return ret;
 }
 
 u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
 			    const struct ehca_mw *mw)
 {
-	u64 ret = H_SUCCESS;
 	u64 dummy;
 
-	EDEB_EN(7, "adapter_handle=%lx mw=%p mw_handle=%lx",
-		adapter_handle.handle, mw, mw->ipz_mw_handle.handle);
-
-	ret = ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
-				   adapter_handle.handle,    /* r4 */
-				   mw->ipz_mw_handle.handle, /* r5 */
-				   0, 0, 0, 0, 0,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_FREE_RESOURCE,
+				    adapter_handle.handle,    /* r4 */
+				    mw->ipz_mw_handle.handle, /* r5 */
+				    0, 0, 0, 0, 0,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
 
 u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
@@ -1442,35 +1114,24 @@ u64 hipz_h_error_data(const struct ipz_a
 		      void *rblock,
 		      unsigned long *byte_count)
 {
-	u64 ret = H_SUCCESS;
 	u64 dummy;
-	u64 r_cb;
-
-	EDEB_EN(7, "adapter_handle=%lx ressource_handle=%lx rblock=%p",
-		adapter_handle.handle, ressource_handle, rblock);
+	u64 r_cb = virt_to_abs(rblock);
 
-	if (((u64)rblock) & 0xfff) {
-		EDEB_ERR(4, "rblock not page aligned.");
-		ret = H_PARAMETER;
-		return ret;
+	if (r_cb & (EHCA_PAGESIZE-1)) {
+		ehca_gen_err("rblock not page aligned.");
+		return H_PARAMETER;
 	}
 
-	r_cb = virt_to_abs(rblock);
-
-	ret = ehca_hcall_7arg_7ret(H_ERROR_DATA,
-				   adapter_handle.handle,
-				   ressource_handle,
-				   r_cb,
-				   0, 0, 0, 0,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy,
-				   &dummy);
-
-	EDEB_EX(7, "ret=%lx", ret);
-
-	return ret;
+	return ehca_hcall_7arg_7ret(H_ERROR_DATA,
+				    adapter_handle.handle,
+				    ressource_handle,
+				    r_cb,
+				    0, 0, 0, 0,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy,
+				    &dummy);
 }
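
A note on the alignment checks in this file: the rewritten code tests
response blocks with r_cb & (EHCA_PAGESIZE-1) instead of the magic
& 0xfff.  For any power-of-two size, masking with size-1 yields the
offset into the page, so a non-zero result means the buffer is
misaligned.  A minimal userspace sketch of the idiom, assuming the 4K
page size from ipz_pt_fn.h (page_misaligned() is an illustrative name,
not a driver function):

#include <stdint.h>
#include <stdio.h>

#define EHCA_PAGESIZE 4096UL	/* same value ipz_pt_fn.h defines */

/* non-zero iff addr is not aligned to the power-of-two page size */
static inline uint64_t page_misaligned(uint64_t addr)
{
	return addr & (EHCA_PAGESIZE - 1);
}

int main(void)
{
	printf("%d\n", page_misaligned(0x1000) != 0);	/* 0: aligned */
	printf("%d\n", page_misaligned(0x1008) != 0);	/* 1: misaligned */
	return 0;
}
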
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hcp_if.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hcp_if.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hcp_if.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hcp_if.h	2006-10-17 10:15:06.000000000 -0400
@@ -48,14 +48,9 @@
 #include "ehca_tools.h"
 #include "hipz_hw.h"
 
-/**
- * hipz_h_alloc_resource_eq - Allocate EQ resources in HW and FW, initalize
+/*
+ * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initialize
  * resources, create the empty EQPT (ring).
- *
- * @eq_handle:         eq handle for this queue
- * @act_nr_of_entries: actual number of queue entries
- * @act_pages:         actual number of queue pages
- * @eq_ist:            used by hcp_H_XIRR() call
  */
 u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_pfeq *pfeq,
@@ -69,27 +64,18 @@ u64 hipz_h_alloc_resource_eq(const struc
 u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
 		       struct ipz_eq_handle eq_handle,
 		       const u64 event_mask);
-/**
- * hipz_h_allocate_resource_cq - Allocate CQ resources in HW and FW, initialize
+/*
+ * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initialize
  * resources, create the empty CQPT (ring).
- *
- * @eq_handle:         eq handle to use for this cq
- * @cq_handle:         cq handle for this queue
- * @act_nr_of_entries: actual number of queue entries
- * @act_pages:         actual number of queue pages
- * @galpas:            contain logical adress of priv. storage and
- *                     log_user_storage
  */
 u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_cq *cq,
 			     struct ehca_alloc_cq_parms *param);
 
 
-/**
- * hipz_h_alloc_resource_qp - Allocate QP resources in HW and FW,
+/*
+ * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
  * initialize resources, create empty QPPTs (2 rings).
- *
- * @h_galpas to access HCA resident QP attributes
  */
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_qp *qp,
@@ -102,11 +88,9 @@ u64 hipz_h_query_port(const struct ipz_a
 u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
 		     struct hipz_query_hca *query_hca_rblock);
 
-/**
- * hipz_h_register_rpage - hcp_if.h internal function for all
+/*
+ * hipz_h_register_rpage is an internal function in hcp_if.h for all
  * hcp_H_REGISTER_RPAGE calls.
- *
- * @logical_address_of_page: kv transformation to GX address in this routine
  */
 u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
 			  const u8 pagesize,
@@ -203,16 +187,9 @@ u64 hipz_h_destroy_cq(const struct ipz_a
 u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
 		      struct ehca_eq *eq);
 
-/**
- * hipz_h_alloc_resource_mr - Allocate MR resources in HW and FW, initialize
+/*
+ * hipz_h_alloc_resource_mr allocates MR resources in HW and FW, initializes
  * resources.
- *
- * @mr:          ehca MR
- * @vaddr:       Memory Region I/O Virtual Address
- * @length:      Memory Region Length
- * @access_ctrl: Memory Region Access Controls
- * @pd:          Protection Domain
- * @outparms:    output parameters
  */
 u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
 			     const struct ehca_mr *mr,
@@ -222,12 +199,7 @@ u64 hipz_h_alloc_resource_mr(const struc
 			     const struct ipz_pd pd,
 			     struct ehca_mr_hipzout_parms *outparms);
 
-/**
- * hipz_h_register_rpage_mr - Register MR resource page in HW and FW .
- *
- * @mr:         ehca MR
- * @queue_type: must be zero for MR
- */
+/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
 u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
 			     const struct ehca_mr *mr,
 			     const u8 pagesize,
@@ -235,35 +207,16 @@ u64 hipz_h_register_rpage_mr(const struc
 			     const u64 logical_address_of_page,
 			     const u64 count);
 
-/**
- * hipz_h_query_mr - Query MR in HW and FW.
- *
- * @mr:       ehca MR
- * @outparms: output parameters
- */
+/* hipz_h_query_mr queries MR in HW and FW */
 u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
 		    const struct ehca_mr *mr,
 		    struct ehca_mr_hipzout_parms *outparms);
 
-/**
- * hipz_h_free_resource_mr - Free MR resources in HW and FW.
- *
- * @mr: ehca MR
- */
+/* hipz_h_free_resource_mr frees MR resources in HW and FW */
 u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
 			    const struct ehca_mr *mr);
 
-/**
- * hipz_h_reregister_pmr - Reregister MR in HW and FW.
- *
- * @mr:          ehca MR
- * @vaddr_in:    Memory Region I/O Virtual Address
- * @length:      Memory Region Length
- * @access_ctrl: Memory Region Access Controls
- * @pd:          Protection Domain
- * @mr_addr_cb:  Logical Address of MR Control Block
- * @outparms:    output parameters
- */
+/* hipz_h_reregister_pmr reregisters MR in HW and FW */
 u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
 			  const struct ehca_mr *mr,
 			  const u64 vaddr_in,
@@ -273,16 +226,7 @@ u64 hipz_h_reregister_pmr(const struct i
 			  const u64 mr_addr_cb,
 			  struct ehca_mr_hipzout_parms *outparms);
 
-/**
- * hipz_h_register_smr - Register shared MR in HW and FW.
- *
- * @mr:          ehca MR
- * @orig_mr:     original ehca MR
- * @vaddr_in:    Memory Region I/O Virtual Address of new shared MR
- * @access_ctrl: Memory Region Access Controls of new shared MR
- * @pd:          Protection Domain of new shared MR
- * @outparms:    output parameters
- */
+/* hipz_h_register_smr registers a shared MR in HW and FW */
 u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
 			const struct ehca_mr *mr,
 			const struct ehca_mr *orig_mr,
@@ -291,34 +235,21 @@ u64 hipz_h_register_smr(const struct ipz
 			const struct ipz_pd pd,
 			struct ehca_mr_hipzout_parms *outparms);
 
-/**
- * hipz_h_alloc_resource_mw - Allocate MR resources in HW and FW, initialize
+/*
+ * hipz_h_alloc_resource_mw allocates MW resources in HW and FW, initializes
  * resources.
- *
- * @mw:       ehca MW
- * @pd:       Protection Domain
- * @outparms: output parameters
  */
 u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
 			     const struct ehca_mw *mw,
 			     const struct ipz_pd pd,
 			     struct ehca_mw_hipzout_parms *outparms);
 
-/**
- * hipz_h_query_mw - Query MW in HW and FW.
- *
- * @mw:       ehca MW
- * @outparms: output parameters
- */
+/* hipz_h_query_mw queries MW in HW and FW */
 u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
 		    const struct ehca_mw *mw,
 		    struct ehca_mw_hipzout_parms *outparms);
 
-/**
- * hipz_h_free_resource_mw - Free MW resources in HW and FW.
- *
- * @mw: ehca MW
- */
+/* hipz_h_free_resource_mw frees MW resources in HW and FW */
 u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
 			    const struct ehca_mw *mw);
 
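
The comment changes in this header are not just churn: a /** opener
marks a block for the kernel-doc tool (scripts/kernel-doc), which
expects an @name: line per parameter and warns when they are missing.
Since the parameter descriptions were dropped, the openers are
downgraded to plain /*.  Roughly, the two styles compare like this
(my_func() and my_other_func() are made-up names for illustration):

/**
 * my_func - short description, parsed by scripts/kernel-doc
 * @arg: each parameter needs a matching @name: line
 */
int my_func(int arg);

/* plain comment: invisible to kernel-doc, fine for terse notes */
int my_other_func(int arg);
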
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hcp_phyp.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hcp_phyp.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hcp_phyp.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hcp_phyp.c	2006-10-17 10:15:06.000000000 -0400
@@ -39,22 +39,17 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define DEB_PREFIX "PHYP"
-
 #include "ehca_classes.h"
 #include "hipz_hw.h"
 
 int hcall_map_page(u64 physaddr, u64 *mapaddr)
 {
 	*mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE));
-
-	EDEB(7, "ioremap physaddr=%lx mapaddr=%lx", physaddr, *mapaddr);
 	return 0;
 }
 
 int hcall_unmap_page(u64 mapaddr)
 {
-	EDEB(7, "mapaddr=%lx", mapaddr);
 	iounmap((volatile void __iomem*)mapaddr);
 	return 0;
 }
@@ -68,25 +63,18 @@ int hcp_galpas_ctor(struct h_galpas *gal
 
 	galpas->user.fw_handle = paddr_user;
 
-	EDEB(7, "paddr_kernel=%lx paddr_user=%lx galpas->kernel=%lx"
-	     " galpas->user=%lx",
-	     paddr_kernel, paddr_user, galpas->kernel.fw_handle,
-	     galpas->user.fw_handle);
-
-	return ret;
+	return 0;
 }
 
 int hcp_galpas_dtor(struct h_galpas *galpas)
 {
-	int ret = 0;
-
-	if (galpas->kernel.fw_handle)
-		ret = hcall_unmap_page(galpas->kernel.fw_handle);
-
-	if (ret)
-		return ret;
+	if (galpas->kernel.fw_handle) {
+		int ret = hcall_unmap_page(galpas->kernel.fw_handle);
+		if (ret)
+			return ret;
+	}
 
 	galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
 
-	return ret;
+	return 0;
 }
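
For context, hcall_map_page()/hcall_unmap_page() wrap the standard
ioremap()/iounmap() pairing that maps a physical register page into
kernel virtual address space; note that even after this cleanup
hcall_map_page() still returns 0 unconditionally instead of checking
ioremap() for failure.  A hedged sketch of the pairing with the check
added (demo_map()/demo_unmap() are illustrative names, not driver
functions):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int demo_map(u64 physaddr, u64 *mapaddr)
{
	void __iomem *p = ioremap(physaddr, 4096);

	if (!p)
		return -ENOMEM;	/* hcall_map_page() skips this check */
	*mapaddr = (u64)p;
	return 0;
}

static void demo_unmap(u64 mapaddr)
{
	iounmap((volatile void __iomem *)mapaddr);
}
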
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hcp_phyp.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hcp_phyp.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hcp_phyp.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hcp_phyp.h	2006-10-17 10:15:06.000000000 -0400
@@ -45,8 +45,9 @@
 #define __HCP_PHYP_H__
 
 
-/* eHCA page (mapped into memory)
-    resource to access eHCA register pages in CPU address space
+/*
+ * eHCA page (mapped into memory)
+ * resource to access eHCA register pages in CPU address space
 */
 struct h_galpa {
 	u64 fw_handle;
@@ -55,8 +56,8 @@ struct h_galpa {
 };
 
 /*
-   resource to access eHCA address space registers, all types
-*/
+ * resource to access eHCA address space registers, all types
+ */
 struct h_galpas {
 	u32 pid;		/*PID of userspace galpa checking */
 	struct h_galpa user;	/* user space accessible resource,
@@ -68,19 +69,13 @@ struct h_galpas {
 static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
 {
 	u64 addr = galpa.fw_handle + offset;
-	u64 out;
-	EDEB_EN(7, "addr=%lx offset=%x ", addr, offset);
-	out = *(u64 *) addr;
-	EDEB_EX(7, "addr=%lx value=%lx", addr, out);
-	return out;
+	return *(volatile u64 __force *)addr;
 }
 
 static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
 {
 	u64 addr = galpa.fw_handle + offset;
-	EDEB(7, "addr=%lx offset=%x value=%lx", addr,
-	     offset, value);
-	*(u64 *) addr = value;
+	*(volatile u64 __force *)addr = value;
 }
 
 int hcp_galpas_ctor(struct h_galpas *galpas,
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hipz_fns_core.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hipz_fns_core.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hipz_fns_core.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hipz_fns_core.h	2006-10-17 10:15:06.000000000 -0400
@@ -60,63 +60,41 @@
 
 static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
 {
-	struct h_galpa gal;
-
-	EDEB_EN(7, "qp=%p", qp);
-	gal = qp->galpas.kernel;
 	/*  ringing doorbell :-) */
-	hipz_galpa_store_qp(gal, qpx_sqa, EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
-	EDEB_EX(7, "qp=%p QPx_SQA = %i", qp, nr_wqes);
+	hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
+			    EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
 }
 
 static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
 {
-	struct h_galpa gal;
-
-	EDEB_EN(7, "qp=%p", qp);
-	gal = qp->galpas.kernel;
 	/*  ringing doorbell :-) */
-	hipz_galpa_store_qp(gal, qpx_rqa, EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
-	EDEB_EX(7, "qp=%p QPx_RQA = %i", qp, nr_wqes);
+	hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
+			    EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
 }
 
 static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
 {
-	struct h_galpa gal;
-
-	EDEB_EN(7, "cq=%p", cq);
-	gal = cq->galpas.kernel;
-	hipz_galpa_store_cq(gal, cqx_feca,
+	hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
 			    EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
-	EDEB_EX(7, "cq=%p CQx_FECA = %i", cq, nr_cqes);
 }
 
 static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
 {
-	struct h_galpa gal;
-	u64 CQx_N0_reg = 0;
+	u64 cqx_n0_reg;
 
-	EDEB_EN(7, "cq=%p event on solicited completion -- write CQx_N0", cq);
-	gal = cq->galpas.kernel;
-	hipz_galpa_store_cq(gal, cqx_n0,
+	hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
 			    EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
 					   value));
-	CQx_N0_reg = hipz_galpa_load_cq(gal, cqx_n0);
-	EDEB_EX(7, "cq=%p loaded CQx_N0=%lx", cq, (unsigned long)CQx_N0_reg);
+	cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
 }
 
 static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
 {
-	struct h_galpa gal;
-	u64 CQx_N1_reg = 0;
+	u64 cqx_n1_reg;
 
-	EDEB_EN(7, "cq=%p event on completion -- write CQx_N1",
-		cq);
-	gal = cq->galpas.kernel;
-	hipz_galpa_store_cq(gal, cqx_n1,
+	hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
 			    EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
-	CQx_N1_reg = hipz_galpa_load_cq(gal, cqx_n1);
-	EDEB_EX(7, "cq=%p loaded CQx_N1=%lx", cq, (unsigned long)CQx_N1_reg);
+	cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
 }
 
 #endif /* __HIPZ_FNC_CORE_H__ */
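
One subtlety in these doorbell helpers: hipz_set_cqx_n0()/n1() store
the new value and then immediately load the register back into a
variable that is never used.  With hipz_galpa_load()/store() reduced
to plain volatile accesses (see hcp_phyp.h above), that readback is
presumably what forces the posted MMIO write out to the adapter before
the function returns.  A sketch of the idiom, assuming the accessors
from hcp_phyp.h (demo_ring() is an illustrative name):

static inline void demo_ring(struct h_galpa gal, u32 offset, u64 value)
{
	u64 flush;

	hipz_galpa_store(gal, offset, value);	/* posted MMIO write */
	flush = hipz_galpa_load(gal, offset);	/* readback flushes it */
	(void)flush;	/* the value itself is not needed */
}
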
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hipz_hw.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hipz_hw.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/hipz_hw.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/hipz_hw.h	2006-10-17 10:15:06.000000000 -0400
@@ -45,8 +45,7 @@
 
 #include "ehca_tools.h"
 
-/** QP Table Entry Memory Map
- */
+/* QP Table Entry Memory Map */
 struct hipz_qptemm {
 	u64 qpx_hcr;
 	u64 qpx_c;
@@ -118,7 +117,6 @@ struct hipz_qptemm {
 	u64 qpx_sigt;
 	u64 qpx_wqecnt;
 /* 0x1c0*/
-
 	u64 qpx_rqhp;
 	u64 qpx_rqptp;
 	u64 qpx_rqsize;
@@ -166,8 +164,7 @@ struct hipz_qptemm {
 
 #define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x)
 
-/** MRMWPT Entry Memory Map
- */
+/* MRMWPT Entry Memory Map */
 struct hipz_mrmwmm {
 	/* 0x00 */
 	u64 mrx_hcr;
@@ -240,10 +237,9 @@ struct hipz_qpedmm {
 	u64 qpedx_rrva3;
 };
 
-#define QPEDMM_OFFSET(x) offsetof(struct hipz_QPEDMM,x)
+#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x)
 
-/** CQ Table Entry Memory Map
- */
+/* CQ Table Entry Memory Map */
 struct hipz_cqtemm {
 	u64 cqx_hcr;
 	u64 cqx_c;
@@ -273,8 +269,7 @@ struct hipz_cqtemm {
 
 #define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x)
 
-/** EQ Table Entry Memory Map
- */
+/* EQ Table Entry Memory Map */
 struct hipz_eqtemm {
 	u64 eqx_hcr;
 	u64 eqx_c;
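
Worth calling out: the QPEDMM_OFFSET change above is a genuine bug
fix, not comment cleanup.  C is case-sensitive, so
offsetof(struct hipz_QPEDMM, x) named a type that does not exist and
would break the build wherever the macro was expanded.  A minimal
standalone example of the offsetof pattern these register maps rely on
(struct demo_regs and DEMO_OFFSET() are made up for illustration):

#include <stddef.h>
#include <stdio.h>

struct demo_regs {
	unsigned long long rx_hcr;
	unsigned long long rx_c;
};

#define DEMO_OFFSET(x) offsetof(struct demo_regs, x)

int main(void)
{
	/* prints the byte offsets used as register offsets: 0 and 8 */
	printf("%zu %zu\n", DEMO_OFFSET(rx_hcr), DEMO_OFFSET(rx_c));
	return 0;
}
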
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ipz_pt_fn.c linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ipz_pt_fn.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ipz_pt_fn.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ipz_pt_fn.c	2006-10-17 10:15:06.000000000 -0400
@@ -38,13 +38,9 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define DEB_PREFIX "iptz"
-
 #include "ehca_tools.h"
 #include "ipz_pt_fn.h"
 
-extern int ehca_hwlevel;
-
 void *ipz_qpageit_get_inc(struct ipz_queue *queue)
 {
 	void *ret = ipz_qeit_get(queue);
@@ -54,10 +50,9 @@ void *ipz_qpageit_get_inc(struct ipz_que
 		ret = NULL;
 	}
 	if (((u64)ret) % EHCA_PAGESIZE) {
-		EDEB(4, "ERROR!! not at PAGE-Boundary");
+		ehca_gen_err("ERROR!! not at PAGE-Boundary");
 		return NULL;
 	}
-	EDEB(7, "queue=%p ret=%p", queue, ret);
 	return ret;
 }
 
@@ -65,15 +60,13 @@ void *ipz_qeit_eq_get_inc(struct ipz_que
 {
 	void *ret = ipz_qeit_get(queue);
 	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+
 	queue->current_q_offset += queue->qe_size;
 	if (queue->current_q_offset > last_entry_in_q) {
 		queue->current_q_offset = 0;
 		queue->toggle_state = (~queue->toggle_state) & 1;
 	}
 
-	EDEB(7, "queue=%p ret=%p new current_q_offset=%lx qe_size=%x",
-	     queue, ret, queue->current_q_offset, queue->qe_size);
-
 	return ret;
 }
 
@@ -84,48 +77,35 @@ int ipz_queue_ctor(struct ipz_queue *que
 	int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
 	int f;
 
-	EDEB_EN(7, "nr_of_pages=%x pagesize=%x qe_size=%x",
-		nr_of_pages, pagesize, qe_size);
 	if (pagesize > PAGE_SIZE) {
-		EDEB_ERR(4, "FATAL ERROR: pagesize=%x is greater than "
-			 "kernel page size", pagesize);
+		ehca_gen_err("FATAL ERROR: pagesize=%x is greater "
+			     "than kernel page size", pagesize);
 		return 0;
 	}
 	if (!pages_per_kpage) {
-		EDEB_ERR(4, "FATAL ERROR: invalid kernel page size. "
-			"pages_per_kpage=%x", pages_per_kpage);
+		ehca_gen_err("FATAL ERROR: invalid kernel page size. "
+			     "pages_per_kpage=%x", pages_per_kpage);
 		return 0;
 	}
 	queue->queue_length = nr_of_pages * pagesize;
 	queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
 	if (!queue->queue_pages) {
-		EDEB(4, "ERROR!! didn't get the memory");
+		ehca_gen_err("ERROR!! didn't get the memory");
 		return 0;
 	}
 	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
-	/* allocate pages for queue:
-	   while loop allocates whole kernel pages
-	   if cond allocates so much mem needed for the rest of queue pages,
-	   which is nr_of_pages % pages_per_kpage
+	/*
+	 * allocate pages for queue:
+	 * outer loop allocates whole kernel pages (page aligned) and
+	 * inner loop divides a kernel page into smaller hca queue pages
 	 */
 	f = 0;
-	while (f + pages_per_kpage <= nr_of_pages) {
-		u8 *kpage = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	while (f < nr_of_pages) {
+		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
 		int k;
 		if (!kpage)
 			goto ipz_queue_ctor_exit0; /*NOMEM*/
-		for (k = 0; k < pages_per_kpage; k++) {
-			(queue->queue_pages)[f] = (struct ipz_page *)kpage;
-			kpage += EHCA_PAGESIZE;
-			f++;
-		}
-	}
-	if (f < nr_of_pages) {
-		u8 *kpage = kzalloc((nr_of_pages - f) * EHCA_PAGESIZE,
-				      GFP_KERNEL);
-		if (!kpage)
-			goto ipz_queue_ctor_exit0; /*NOMEM*/
-		while (f < nr_of_pages) {
+		for (k = 0; k < pages_per_kpage && f < nr_of_pages; k++) {
 			(queue->queue_pages)[f] = (struct ipz_page *)kpage;
 			kpage += EHCA_PAGESIZE;
 			f++;
@@ -137,18 +117,15 @@ int ipz_queue_ctor(struct ipz_queue *que
 	queue->act_nr_of_sg = nr_of_sg;
 	queue->pagesize = pagesize;
 	queue->toggle_state = 1;
-	EDEB_EX(7, "queue_length=%x queue_pages=%p qe_size=%x"
-		" act_nr_of_sg=%x", queue->queue_length, queue->queue_pages,
-		queue->qe_size, queue->act_nr_of_sg);
 	return 1;
 
  ipz_queue_ctor_exit0:
-	EDEB_ERR(4, "Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x",
-		 queue, f, nr_of_pages);
+	ehca_gen_err("Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x",
+		     queue, f, nr_of_pages);
 	for (f = 0; f < nr_of_pages; f += pages_per_kpage) {
 		if (!(queue->queue_pages)[f])
 			break;
-		kfree((queue->queue_pages)[f]);
+		free_page((unsigned long)(queue->queue_pages)[f]);
 	}
 	return 0;
 }
@@ -159,19 +136,14 @@ int ipz_queue_dtor(struct ipz_queue *que
 	int g;
 	int nr_pages;
 
-	EDEB_EN(7, "ipz_queue pointer=%p", queue);
 	if (!queue || !queue->queue_pages) {
-		EDEB_ERR(4, "queue or queue_pages is NULL");
+		ehca_gen_dbg("queue or queue_pages is NULL");
 		return 0;
 	}
-	EDEB(7, "destructing a queue with the following "
-	     "properties:\n nr_of_pages=%x pagesize=%x qe_size=%x",
-	     queue->act_nr_of_sg, queue->pagesize, queue->qe_size);
 	nr_pages = queue->queue_length / queue->pagesize;
 	for (g = 0; g < nr_pages; g += pages_per_kpage)
-		kfree((queue->queue_pages)[g]);
+		free_page((unsigned long)(queue->queue_pages)[g]);
 	vfree(queue->queue_pages);
 
-	EDEB_EX(7, "queue freed!");
 	return 1;
 }
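
The ipz_queue_ctor() rework above deserves a word.  The old code had
two allocation paths: a loop of kzalloc(PAGE_SIZE) for whole kernel
pages plus one trailing kzalloc() for the remainder, and kzalloc()
gives no page-alignment guarantee for that trailing chunk.  The new
single loop always pulls a full, naturally aligned kernel page from
get_zeroed_page() and carves it into EHCA_PAGESIZE pieces, which also
matches the free_page() calls now used in the destructor and the error
path.  A condensed sketch of the loop (demo_alloc() is an illustrative
name; error unwinding is left to the caller):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

#define EHCA_PAGESHIFT 12	/* as in ipz_pt_fn.h */
#define EHCA_PAGESIZE  4096UL

static int demo_alloc(void **pages, int nr_of_pages)
{
	int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
	int f = 0;

	while (f < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		int k;

		if (!kpage)
			return -ENOMEM;	/* caller frees pages[0..f) */
		/* split one kernel page into pages_per_kpage hca pages */
		for (k = 0; k < pages_per_kpage && f < nr_of_pages; k++) {
			pages[f++] = kpage;
			kpage += EHCA_PAGESIZE;
		}
	}
	return 0;
}
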
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ipz_pt_fn.h linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ipz_pt_fn.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/ipz_pt_fn.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/ipz_pt_fn.h	2006-10-17 10:15:06.000000000 -0400
@@ -43,22 +43,20 @@
 #ifndef __IPZ_PT_FN_H__
 #define __IPZ_PT_FN_H__
 
-#include "ehca_qes.h"
 #define EHCA_PAGESHIFT   12
 #define EHCA_PAGESIZE   4096UL
+#define EHCA_PAGEMASK   (~(EHCA_PAGESIZE-1))
 #define EHCA_PT_ENTRIES 512UL
 
 #include "ehca_tools.h"
 #include "ehca_qes.h"
 
-/* struct generic ehca page
- */
+/* struct generic ehca page */
 struct ipz_page {
 	u8 entries[EHCA_PAGESIZE];
 };
 
-/* struct generic queue in linux kernel virtual memory (kv)
- */
+/* struct generic queue in linux kernel virtual memory (kv) */
 struct ipz_queue {
 	u64 current_q_offset;	/* current queue entry */
 
@@ -71,45 +69,47 @@ struct ipz_queue {
 	u32 dummy3;		/* 64 bit alignment */
 };
 
-/*  return current Queue Entry for a certain q_offset
- *   returns address (kv) of Queue Entry
+/*
+ * return current Queue Entry for a certain q_offset
+ * returns address (kv) of Queue Entry
  */
 static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
 {
-	struct ipz_page *current_page = NULL;
+	struct ipz_page *current_page;
 	if (q_offset >= queue->queue_length)
 		return NULL;
 	current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
 	return  &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
 }
 
-/*  return current Queue Entry
- *   returns address (kv) of Queue Entry
+/*
+ * return current Queue Entry
+ * returns address (kv) of Queue Entry
  */
 static inline void *ipz_qeit_get(struct ipz_queue *queue)
 {
 	return ipz_qeit_calc(queue, queue->current_q_offset);
 }
 
-/*  return current Queue Page , increment Queue Page iterator from
- *   page to page in struct ipz_queue, last increment will return 0! and
- *   NOT wrap
- *   returns address (kv) of Queue Page
- *   warning don't use in parallel with ipz_QE_get_inc()
+/*
+ * return current Queue Page, increment Queue Page iterator from
+ * page to page in struct ipz_queue, last increment will return 0! and
+ * NOT wrap
+ * returns address (kv) of Queue Page
+ * warning don't use in parallel with ipz_QE_get_inc()
  */
 void *ipz_qpageit_get_inc(struct ipz_queue *queue);
 
-/*  return current Queue Entry, increment Queue Entry iterator by one
- *   step in struct ipz_queue, will wrap in ringbuffer
- *   @returns address (kv) of Queue Entry BEFORE increment
- *   warning don't use in parallel with ipz_qpageit_get_inc()
- *   warning unpredictable results may occur if steps>act_nr_of_queue_entries
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
  */
 static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
 {
-	void *ret = NULL;
-
-	ret = ipz_qeit_get(queue);
+	void *ret = ipz_qeit_get(queue);
 	queue->current_q_offset += queue->qe_size;
 	if (queue->current_q_offset >= queue->queue_length) {
 		queue->current_q_offset = 0;
@@ -117,18 +117,16 @@ static inline void *ipz_qeit_get_inc(str
 		queue->toggle_state = (~queue->toggle_state) & 1;
 	}
 
-	EDEB(7, "queue=%p ret=%p new current_q_addr=%lx qe_size=%x",
-	     queue, ret, queue->current_q_offset, queue->qe_size);
-
 	return ret;
 }
 
-/*  return current Queue Entry, increment Queue Entry iterator by one
- *   step in struct ipz_queue, will wrap in ringbuffer
- *   returns address (kv) of Queue Entry BEFORE increment
- *   returns 0 and does not increment, if wrong valid state
- *   warning don't use in parallel with ipz_qpageit_get_inc()
- *   warning unpredictable results may occur if steps>act_nr_of_queue_entries
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * returns 0 and does not increment, if wrong valid state
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
  */
 static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
 {
@@ -142,8 +140,9 @@ static inline void *ipz_qeit_get_inc_val
 	return cqe;
 }
 
-/*  returns and resets Queue Entry iterator
- *   returns address (kv) of first Queue Entry
+/*
+ * returns and resets Queue Entry iterator
+ * returns address (kv) of first Queue Entry
  */
 static inline void *ipz_qeit_reset(struct ipz_queue *queue)
 {
@@ -151,83 +150,82 @@ static inline void *ipz_qeit_reset(struc
 	return ipz_qeit_get(queue);
 }
 
-/** struct generic page table
- */
+/* struct generic page table */
 struct ipz_pt {
 	u64 entries[EHCA_PT_ENTRIES];
 };
 
-/* struct page table for a queue, only to be used in pf
- */
+/* struct page table for a queue, only to be used in pf */
 struct ipz_qpt {
 	/* queue page tables (kv), use u64 because we know the element length */
 	u64 *qpts;
-	u32 allocated_qpts_entries;
-	u32 nr_of_PTEs;		/*  number of page table entries PTE iterators */
+	u32 n_qpts;
+	u32 n_ptes;       /*  number of page table entries */
 	u64 *current_pte_addr;
 };
 
-/* constructor for a ipz_queue_t, placement new for ipz_queue_t,
- *  new for all dependent datastructors
- *
- *  all QP Tables are the same
- *  flow:
- *     allocate+pin queue
- *  see ipz_qpt_ctor()
- *  returns true if ok, false if out of memory
+/*
+ * constructor for a ipz_queue_t, placement new for ipz_queue_t,
+ * new for all dependent data structures
+ * all QP Tables are the same
+ * flow:
+ *    allocate+pin queue
+ * see ipz_qpt_ctor()
+ * returns true if ok, false if out of memory
  */
 int ipz_queue_ctor(struct ipz_queue *queue, const u32 nr_of_pages,
 		   const u32 pagesize, const u32 qe_size,
 		   const u32 nr_of_sg);
 
-/* destructor for a ipz_queue_t
+/*
+ * destructor for a ipz_queue_t
  *  -# free queue
  *  see ipz_queue_ctor()
 *  returns true if ok, false if queue was NULL-ptr or free failed
  */
 int ipz_queue_dtor(struct ipz_queue *queue);
 
-/* constructor for a ipz_qpt_t,
+/*
+ * constructor for a ipz_qpt_t,
 * placement new for struct ipz_queue, new for all dependent data structures
- *
- *  all QP Tables are the same,
- *  flow:
- *  -# allocate+pin queue
- *  -# initialise ptcb
- *  -# allocate+pin PTs
- *  -# link PTs to a ring, according to HCA Arch, set bit62 id needed
- *  -# the ring must have room for exactly nr_of_PTEs
- *  see ipz_qpt_ctor()
+ * all QP Tables are the same,
+ * flow:
+ * -# allocate+pin queue
+ * -# initialise ptcb
+ * -# allocate+pin PTs
+ * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
+ * -# the ring must have room for exactly nr_of_PTEs
+ * see ipz_qpt_ctor()
  */
 void ipz_qpt_ctor(struct ipz_qpt *qpt,
-		  const u32 nr_of_QEs,
+		  const u32 nr_of_qes,
 		  const u32 pagesize,
 		  const u32 qe_size,
 		  const u8 lowbyte, const u8 toggle,
 		  u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
 
-/*  return current Queue Entry, increment Queue Entry iterator by one
- *   step in struct ipz_queue, will wrap in ringbuffer
- *   returns address (kv) of Queue Entry BEFORE increment
- *   warning don't use in parallel with ipz_qpageit_get_inc()
- *   warning unpredictable results may occur if steps>act_nr_of_queue_entries
- *
- *   fix EQ page problems
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ * fix EQ page problems
  */
 void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
 
-/*  return current Event Queue Entry, increment Queue Entry iterator
- *   by one step in struct ipz_queue if valid, will wrap in ringbuffer
- *   returns address (kv) of Queue Entry BEFORE increment
- *   returns 0 and does not increment, if wrong valid state
- *   warning don't use in parallel with ipz_queue_QPageit_get_inc()
- *   warning unpredictable results may occur if steps>act_nr_of_queue_entries
+/*
+ * return current Event Queue Entry, increment Queue Entry iterator
+ * by one step in struct ipz_queue if valid, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * returns NULL and does not increment if the valid state is wrong
+ * warning don't use in parallel with ipz_queue_QPageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
  */
 static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
 {
 	void *ret = ipz_qeit_get(queue);
 	u32 qe = *(u8 *) ret;
-	EDEB(7, "ipz_QEit_EQ_get_inc_valid qe=%x", qe);
 	if ((qe >> 7) == (queue->toggle_state & 1))
 		ipz_qeit_eq_get_inc(queue); /* this is a good one */
 	else
@@ -235,17 +233,13 @@ static inline void *ipz_eqit_eq_get_inc_
 	return ret;
 }
 
-/*
- *   returns address (GX) of first queue entry
- */
+/* returns address (GX) of first queue entry */
 static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
 {
 	return be64_to_cpu(qpt->qpts[0]);
 }
 
-/*
- *   returns address (kv) of first page of queue page table
- */
+/* returns address (kv) of first page of queue page table */
 static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
 {
 	return qpt->qpts;
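
Note: the ipz queue-entry iterators above all implement one ring-buffer
scheme: a byte offset advances by qe_size per call, and wrapping back to
offset 0 flips a one-bit toggle_state that consumers such as
ipz_eqit_eq_get_inc_valid() compare against the valid bit stored in each
entry, so stale entries left over from the previous pass are rejected
without ever clearing the queue memory.  A minimal standalone model of
that scheme (illustrative C, not driver code):

#include <stddef.h>

struct toy_queue {
	unsigned char *mem;	/* n_entries * qe_size bytes */
	size_t qe_size;		/* bytes per entry */
	size_t queue_length;	/* total size in bytes */
	size_t current;		/* offset of the entry handed out next */
	int toggle_state;	/* flips on every wrap */
};

/* hand out the current entry, then advance; wrapping flips the toggle */
static void *toy_qeit_get_inc(struct toy_queue *q)
{
	void *ret = q->mem + q->current;

	q->current += q->qe_size;
	if (q->current >= q->queue_length) {
		q->current = 0;
		q->toggle_state = (~q->toggle_state) & 1;
	}
	return ret;
}

/* an entry is fresh only if its valid bit matches the current toggle */
static int toy_entry_valid(struct toy_queue *q, unsigned char first_byte)
{
	return (first_byte >> 7) == (q->toggle_state & 1);
}
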
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/Kconfig linux-2.6.18.noarch/drivers/infiniband/hw/ehca/Kconfig
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/Kconfig	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/Kconfig	2006-10-17 10:15:06.000000000 -0400
@@ -1,6 +1,16 @@
 config INFINIBAND_EHCA
-       tristate "eHCA support"
-       depends on IBMEBUS && INFINIBAND
-       ---help---
-       This is a low level device driver for the IBM
-       GX based Host channel adapters (HCAs)
+	tristate "eHCA support"
+	depends on IBMEBUS && INFINIBAND
+	---help---
+	This driver supports the IBM pSeries eHCA InfiniBand adapter.
+
+	To compile the driver as a module, choose M here. The module
+	will be called ib_ehca.
+
+config INFINIBAND_EHCA_SCALING
+	bool "Scaling support (EXPERIMENTAL)"
+	depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL
+	---help---
+	eHCA scaling support schedules the CQ callbacks to different CPUs.
+
+	To enable this feature choose Y here.
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/Makefile linux-2.6.18.noarch/drivers/infiniband/hw/ehca/Makefile
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ehca/Makefile	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ehca/Makefile	2006-10-17 10:15:06.000000000 -0400
@@ -1,5 +1,6 @@
 #  Authors: Heiko J Schick <schickhj@de.ibm.com>
 #           Christoph Raisch <raisch@de.ibm.com>
+#           Joachim Fenkes <fenkes@de.ibm.com>
 #
 #  Copyright (c) 2005 IBM Corporation
 #
@@ -7,10 +8,10 @@
 #
 #  This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
 
-obj-$(CONFIG_INFINIBAND_EHCA) += hcad_mod.o
+obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
 
-hcad_mod-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
+
+ib_ehca-objs  = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
 		ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
-		ehca_uverbs.o hcp_if.o hcp_phyp.o ipz_pt_fn.o
+		ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
 
-CFLAGS += -DEHCA_USE_HCALL -DEHCA_USE_HCALL_KERNEL
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_common.h linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_common.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_common.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_common.h	2006-10-19 12:41:10.000000000 -0400
@@ -106,9 +106,9 @@ struct infinipath_stats {
 	__u64 sps_ether_spkts;
 	/* number of "ethernet" packets received by driver */
 	__u64 sps_ether_rpkts;
-	/* number of SMA packets sent by driver */
+	/* number of SMA packets sent by driver. Obsolete. */
 	__u64 sps_sma_spkts;
-	/* number of SMA packets received by driver */
+	/* number of SMA packets received by driver. Obsolete. */
 	__u64 sps_sma_rpkts;
 	/* number of times all ports rcvhdrq was full and packet dropped */
 	__u64 sps_hdrqfull;
@@ -138,7 +138,7 @@ struct infinipath_stats {
 	__u64 sps_pageunlocks;
 	/*
 	 * Number of packets dropped in kernel other than errors (ether
-	 * packets if ipath not configured, sma/mad, etc.)
+	 * packets if ipath not configured, etc.)
 	 */
 	__u64 sps_krdrops;
 	/* pad for future growth */
@@ -153,8 +153,6 @@ struct infinipath_stats {
 #define IPATH_STATUS_DISABLED      0x2	/* hardware disabled */
 /* Device has been disabled via admin request */
 #define IPATH_STATUS_ADMIN_DISABLED    0x4
-#define IPATH_STATUS_OIB_SMA       0x8	/* ipath_mad kernel SMA running */
-#define IPATH_STATUS_SMA          0x10	/* user SMA running */
 /* Chip has been found and initted */
 #define IPATH_STATUS_CHIP_PRESENT 0x20
 /* IB link is at ACTIVE, usable for data traffic */
@@ -465,12 +463,11 @@ struct __ipath_sendpkt {
 	struct ipath_iovec sps_iov[4];
 };
 
-/* Passed into SMA special file's ->read and ->write methods. */
-struct ipath_sma_pkt
-{
-	__u32 unit;	/* unit on which to send packet */
-	__u64 data;	/* address of payload in userspace */
-	__u32 len;	/* length of payload */
+/* Passed into diag data special file's ->write method. */
+struct ipath_diag_pkt {
+	__u32 unit;
+	__u64 data;
+	__u32 len;
 };
 
 /*
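
Note: the new struct ipath_diag_pkt is the entire userspace ABI of the
diag send path implemented in ipath_diag.c below: one write() of exactly
this descriptor, with data pointing at a payload whose length is a
multiple of four bytes.  A hypothetical userspace sketch (the device
node path and the helper name are illustrative; on success the kernel
side returns sizeof(struct ipath_diag_pkt)):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* normally taken from ipath_common.h */
struct ipath_diag_pkt {
	uint32_t unit;	/* which adapter to send on */
	uint64_t data;	/* userspace address of the payload */
	uint32_t len;	/* payload bytes; must be a multiple of 4 */
};

static int send_diag_pkt(unsigned int unit, const void *payload,
			 uint32_t len)
{
	struct ipath_diag_pkt dp;
	int fd = open("/dev/ipath_diagpkt", O_WRONLY); /* assumed path */

	if (fd < 0)
		return -1;
	memset(&dp, 0, sizeof(dp));
	dp.unit = unit;
	dp.data = (uint64_t) (unsigned long) payload;
	dp.len = len;
	if (write(fd, &dp, sizeof(dp)) != sizeof(dp)) {
		close(fd);
		return -1;
	}
	return close(fd);
}
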
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_cq.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_cq.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_cq.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_cq.c	2006-10-19 12:41:10.000000000 -0400
@@ -163,14 +163,9 @@ struct ib_cq *ipath_create_cq(struct ib_
 	struct ib_wc *wc;
 	struct ib_cq *ret;
 
-	if (entries > ib_ipath_max_cqes) {
+	if (entries < 1 || entries > ib_ipath_max_cqes) {
 		ret = ERR_PTR(-EINVAL);
-		goto bail;
-	}
-
-	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
+		goto done;
 	}
 
 	/*
@@ -180,7 +175,7 @@ struct ib_cq *ipath_create_cq(struct ib_
 	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
 	if (!cq) {
 		ret = ERR_PTR(-ENOMEM);
-		goto bail;
+		goto done;
 	}
 
 	/*
@@ -188,10 +183,20 @@ struct ib_cq *ipath_create_cq(struct ib_
 	 */
 	wc = vmalloc(sizeof(*wc) * (entries + 1));
 	if (!wc) {
-		kfree(cq);
 		ret = ERR_PTR(-ENOMEM);
-		goto bail;
+		goto bail_cq;
+	}
+
+	spin_lock(&dev->n_cqs_lock);
+	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
+		spin_unlock(&dev->n_cqs_lock);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail_wc;
 	}
+
+	dev->n_cqs_allocated++;
+	spin_unlock(&dev->n_cqs_lock);
+
 	/*
 	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
 	 * The number of entries should be >= the number requested or return
@@ -208,9 +213,15 @@ struct ib_cq *ipath_create_cq(struct ib_
 
 	ret = &cq->ibcq;
 
-	dev->n_cqs_allocated++;
+	goto done;
 
-bail:
+bail_wc:
+	vfree(wc);
+
+bail_cq:
+	kfree(cq);
+
+done:
 	return ret;
 }
 
@@ -228,7 +239,9 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
 	struct ipath_cq *cq = to_icq(ibcq);
 
 	tasklet_kill(&cq->comptask);
+	spin_lock(&dev->n_cqs_lock);
 	dev->n_cqs_allocated--;
+	spin_unlock(&dev->n_cqs_lock);
 	vfree(cq->queue);
 	kfree(cq);
 
@@ -268,6 +281,11 @@ int ipath_resize_cq(struct ib_cq *ibcq, 
 	u32 n;
 	int ret;
 
+	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
 	/*
 	 * Need to use vmalloc() if we want to support large #s of entries.
 	 */
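
Note: two things change shape in ipath_create_cq() above.  The
n_cqs_allocated limit check now runs under dev->n_cqs_lock, so the check
and the increment are a single atomic step instead of a racy pair, and
the error path becomes the usual kernel unwind ladder in which each
label releases exactly what was acquired before the failing step.  The
ladder pattern in isolation (toy C with illustrative names):

#include <stdlib.h>

struct thing {
	int *a;
	int *b;
};

static struct thing *make_thing(void)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		goto done;	/* nothing allocated yet */
	t->a = malloc(64);
	if (!t->a)
		goto bail_t;	/* undo the first allocation only */
	t->b = malloc(64);
	if (!t->b)
		goto bail_a;	/* undo the first two */
	goto done;		/* success: skip the unwind */

bail_a:
	free(t->a);
bail_t:
	free(t);
	t = NULL;
done:
	return t;
}
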
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_debug.h linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_debug.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_debug.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_debug.h	2006-10-19 12:41:10.000000000 -0400
@@ -60,7 +60,6 @@
 #define __IPATH_USER_SEND   0x1000	/* use user mode send */
 #define __IPATH_KERNEL_SEND 0x2000	/* use kernel mode send */
 #define __IPATH_EPKTDBG     0x4000	/* print ethernet packet data */
-#define __IPATH_SMADBG      0x8000	/* sma packet debug */
 #define __IPATH_IPATHDBG    0x10000	/* Ethernet (IPATH) gen debug */
 #define __IPATH_IPATHWARN   0x20000	/* Ethernet (IPATH) warnings */
 #define __IPATH_IPATHERR    0x40000	/* Ethernet (IPATH) errors */
@@ -84,7 +83,6 @@
 /* print mmap/nopage stuff, not using VDBG any more */
 #define __IPATH_MMDBG     0x0
 #define __IPATH_EPKTDBG   0x0	/* print ethernet packet data */
-#define __IPATH_SMADBG    0x0   /* process startup (init)/exit messages */
 #define __IPATH_IPATHDBG  0x0	/* Ethernet (IPATH) table dump on */
 #define __IPATH_IPATHWARN 0x0	/* Ethernet (IPATH) warnings on   */
 #define __IPATH_IPATHERR  0x0	/* Ethernet (IPATH) errors on   */
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_diag.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_diag.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_diag.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_diag.c	2006-10-19 12:41:11.000000000 -0400
@@ -41,11 +41,11 @@
  * through the /sys/bus/pci resource mmap interface.
  */
 
+#include <linux/io.h>
 #include <linux/pci.h>
 #include <asm/uaccess.h>
 
 #include "ipath_kernel.h"
-#include "ipath_layer.h"
 #include "ipath_common.h"
 
 int ipath_diag_inuse;
@@ -274,6 +274,161 @@ bail:
 	return ret;
 }
 
+static ssize_t ipath_diagpkt_write(struct file *fp,
+				   const char __user *data,
+				   size_t count, loff_t *off);
+
+static struct file_operations diagpkt_file_ops = {
+	.owner = THIS_MODULE,
+	.write = ipath_diagpkt_write,
+};
+
+static struct cdev *diagpkt_cdev;
+static struct class_device *diagpkt_class_dev;
+static atomic_t diagpkt_count = ATOMIC_INIT(0);
+
+void ipath_diagpkt_add(void)
+{
+	if (atomic_inc_return(&diagpkt_count) == 1)
+		ipath_cdev_init(IPATH_DIAGPKT_MINOR,
+				"ipath_diagpkt", &diagpkt_file_ops,
+				&diagpkt_cdev, &diagpkt_class_dev);
+}
+
+void ipath_diagpkt_remove(void)
+{
+	if (atomic_dec_and_test(&diagpkt_count))
+		ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev);
+}
+
+/**
+ * ipath_diagpkt_write - write an IB packet
+ * @fp: the diag data device file pointer
+ * @data: ipath_diag_pkt structure saying where to get the packet
+ * @count: size of data to write
+ * @off: unused by this code
+ */
+static ssize_t ipath_diagpkt_write(struct file *fp,
+				   const char __user *data,
+				   size_t count, loff_t *off)
+{
+	u32 __iomem *piobuf;
+	u32 plen, clen, pbufn;
+	struct ipath_diag_pkt dp;
+	u32 *tmpbuf = NULL;
+	struct ipath_devdata *dd;
+	ssize_t ret = 0;
+	u64 val;
+
+	if (count < sizeof(dp)) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (copy_from_user(&dp, data, sizeof(dp))) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	/* send count must be an exact number of dwords */
+	if (dp.len & 3) {
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	clen = dp.len >> 2;
+
+	dd = ipath_lookup(dp.unit);
+	if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
+	    !dd->ipath_kregbase) {
+		ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
+			   dp.unit);
+		ret = -ENODEV;
+		goto bail;
+	}
+
+	if (ipath_diag_inuse && !diag_set_link &&
+	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
+		diag_set_link = 1;
+		ipath_cdbg(VERBOSE, "Trying to set to set link active for "
+			   "diag pkt\n");
+		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
+		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
+	}
+
+	if (!(dd->ipath_flags & IPATH_INITTED)) {
+		/* no hardware, freeze, etc. */
+		ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
+		ret = -ENODEV;
+		goto bail;
+	}
+	val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
+	if (val != IPATH_IBSTATE_INIT && val != IPATH_IBSTATE_ARM &&
+	    val != IPATH_IBSTATE_ACTIVE) {
+		ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
+			   dd->ipath_unit, (unsigned long long) val);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/* need total length before first word written */
+	/* +1 word is for the qword padding */
+	plen = sizeof(u32) + dp.len;
+
+	if ((plen + 4) > dd->ipath_ibmaxlen) {
+		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
+			  plen - 4, dd->ipath_ibmaxlen);
+		ret = -EINVAL;
+		goto bail;	/* before writing pbc */
+	}
+	tmpbuf = vmalloc(plen);
+	if (!tmpbuf) {
+		dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
+			 "failing\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	if (copy_from_user(tmpbuf,
+			   (const void __user *) (unsigned long) dp.data,
+			   dp.len)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	piobuf = ipath_getpiobuf(dd, &pbufn);
+	if (!piobuf) {
+		ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
+			   dd->ipath_unit);
+		ret = -EBUSY;
+		goto bail;
+	}
+
+	plen >>= 2;		/* in dwords */
+
+	if (ipath_debug & __IPATH_PKTDBG)
+		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
+			   dd->ipath_unit, plen - 1, pbufn);
+
+	/* we have to flush after the PBC for correctness on some CPUs,
+	 * or the WC buffer can be written out of order */
+	writeq(plen, piobuf);
+	ipath_flush_wc();
+	/* copy all but the trigger word, then flush, so it's written
+	 * to chip before the trigger word, then write the trigger word,
+	 * then flush again, so the packet is sent. */
+	__iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
+	ipath_flush_wc();
+	__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
+	ipath_flush_wc();
+
+	ret = sizeof(dp);
+
+bail:
+	vfree(tmpbuf);
+	return ret;
+}
+
 static int ipath_diag_release(struct inode *in, struct file *fp)
 {
 	mutex_lock(&ipath_mutex);
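
Note: ipath_diagpkt_add()/ipath_diagpkt_remove() above use a small
refcounting idiom so a single shared character device can serve any
number of adapters: only the 0 -> 1 transition creates the device and
only the 1 -> 0 transition destroys it.  The same idiom in isolation
(standalone C11 sketch; create_shared()/destroy_shared() are
placeholders):

#include <stdatomic.h>

static atomic_int users;

static void create_shared(void) { /* placeholder */ }
static void destroy_shared(void) { /* placeholder */ }

void shared_get(void)
{
	/* fetch-and-add returns the old count: 0 means we are first */
	if (atomic_fetch_add(&users, 1) == 0)
		create_shared();
}

void shared_put(void)
{
	/* an old count of 1 means we were the last user */
	if (atomic_fetch_sub(&users, 1) == 1)
		destroy_shared();
}
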
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_driver.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_driver.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_driver.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_driver.c	2006-10-19 12:41:11.000000000 -0400
@@ -39,7 +39,7 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_kernel.h"
-#include "ipath_layer.h"
+#include "ipath_verbs.h"
 #include "ipath_common.h"
 
 static void ipath_update_pio_bufs(struct ipath_devdata *);
@@ -51,8 +51,6 @@ const char *ipath_get_unit_name(int unit
 	return iname;
 }
 
-EXPORT_SYMBOL_GPL(ipath_get_unit_name);
-
 #define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
 #define PFX IPATH_DRV_NAME ": "
 
@@ -60,13 +58,13 @@ EXPORT_SYMBOL_GPL(ipath_get_unit_name);
  * The size has to be longer than this string, so we can append
  * board/chip information to it in the init code.
  */
-const char ipath_core_version[] = IPATH_IDSTR "\n";
+const char ib_ipath_version[] = IPATH_IDSTR "\n";
 
 static struct idr unit_table;
 DEFINE_SPINLOCK(ipath_devs_lock);
 LIST_HEAD(ipath_dev_list);
 
-wait_queue_head_t ipath_sma_state_wait;
+wait_queue_head_t ipath_state_wait;
 
 unsigned ipath_debug = __IPATH_INFO;
 
@@ -116,6 +114,13 @@ static int __devinit ipath_init_one(stru
 #define PCI_DEVICE_ID_INFINIPATH_HT 0xd
 #define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
 
+/*
+ * Number of seconds before we complain about not getting a LID
+ * assignment.
+ */
+
+#define LID_TIMEOUT 60
+
 static const struct pci_device_id ipath_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
@@ -132,6 +137,29 @@ static struct pci_driver ipath_driver = 
 };
 
 
+static void check_link_status(void *data)
+{
+	struct ipath_devdata *dd = data;
+
+	/*
+	 * If we're in the NOCABLE state, try again in another minute.
+	 */
+
+	if (dd->ipath_flags & IPATH_STATUS_IB_NOCABLE) {
+		schedule_delayed_work(&dd->link_task, HZ * LID_TIMEOUT);
+		return;
+	}
+
+	/*
+	 * If we don't have a LID, let the user know and don't bother
+	 * checking again.
+	 */
+
+	if (dd->ipath_lid == 0)
+		dev_info(&dd->pcidev->dev,
+			 "We don't have a LID yet (no subnet manager?)");
+}
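
Note: the LID watchdog added here is the standard delayed-work lifecycle
for this kernel generation, spread over the following hunks: INIT_WORK()
when the device data is allocated, schedule_delayed_work(&dd->link_task,
HZ * LID_TIMEOUT) at the end of probe, a reschedule from the handler
while the link still reports no cable, and cancel_delayed_work() in
remove so the handler cannot fire on freed ipath_devdata.  A condensed
sketch of the same lifecycle (kernel-style C against the three-argument
INIT_WORK() API used in this kernel; all names are illustrative):

#include <linux/workqueue.h>

struct mydev {
	struct work_struct watchdog;
	int configured;
};

static void watchdog_fn(void *data)
{
	struct mydev *d = data;

	if (!d->configured) {
		/* not there yet: look again in a minute */
		schedule_delayed_work(&d->watchdog, 60 * HZ);
		return;
	}
	/* configured: report once and let the work item die */
}

static void mydev_probe(struct mydev *d)
{
	INIT_WORK(&d->watchdog, watchdog_fn, d);
	schedule_delayed_work(&d->watchdog, 60 * HZ);
}

static void mydev_remove(struct mydev *d)
{
	/* a complete teardown would also flush the workqueue */
	cancel_delayed_work(&d->watchdog);
}
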
+
 static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
 			     u32 *bar0, u32 *bar1)
 {
@@ -199,6 +227,8 @@ static struct ipath_devdata *ipath_alloc
 	dd->pcidev = pdev;
 	pci_set_drvdata(pdev, dd);
 
+	INIT_WORK(&dd->link_task, check_link_status, dd);
+
 	list_add(&dd->ipath_list, &ipath_dev_list);
 
 bail_unlock:
@@ -403,10 +433,10 @@ static int __devinit ipath_init_one(stru
 	/* setup the chip-specific functions, as early as possible. */
 	switch (ent->device) {
 	case PCI_DEVICE_ID_INFINIPATH_HT:
-		ipath_init_ht400_funcs(dd);
+		ipath_init_iba6110_funcs(dd);
 		break;
 	case PCI_DEVICE_ID_INFINIPATH_PE800:
-		ipath_init_pe800_funcs(dd);
+		ipath_init_iba6120_funcs(dd);
 		break;
 	default:
 		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
@@ -440,7 +470,13 @@ static int __devinit ipath_init_one(stru
 	}
 	dd->ipath_pcirev = rev;
 
+#if defined(__powerpc__)
+	/* There isn't a generic way to specify writethrough mappings */
+	dd->ipath_kregbase = __ioremap(addr, len,
+		(_PAGE_NO_CACHE|_PAGE_WRITETHRU));
+#else
 	dd->ipath_kregbase = ioremap_nocache(addr, len);
+#endif
 
 	if (!dd->ipath_kregbase) {
 		ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
@@ -503,7 +539,11 @@ static int __devinit ipath_init_one(stru
 	ipathfs_add_device(dd);
 	ipath_user_add(dd);
 	ipath_diag_add(dd);
-	ipath_layer_add(dd);
+	ipath_diagpkt_add();
+	ipath_register_ib_device(dd);
+
+	/* Check that we have a LID in LID_TIMEOUT seconds. */
+	schedule_delayed_work(&dd->link_task, HZ * LID_TIMEOUT);
 
 	goto bail;
 
@@ -532,7 +572,11 @@ static void __devexit ipath_remove_one(s
 		return;
 
 	dd = pci_get_drvdata(pdev);
-	ipath_layer_remove(dd);
+
+	cancel_delayed_work(&dd->link_task);
+
+	ipath_unregister_ib_device(dd->verbs_dev);
+	ipath_diagpkt_remove();
 	ipath_diag_remove(dd);
 	ipath_user_remove(dd);
 	ipathfs_remove_device(dd);
@@ -607,21 +651,23 @@ void ipath_disarm_piobufs(struct ipath_d
  *
  * wait up to msecs milliseconds for IB link state change to occur for
  * now, take the easy polling route.  Currently used only by
- * ipath_layer_set_linkstate.  Returns 0 if state reached, otherwise
+ * ipath_set_linkstate.  Returns 0 if state reached, otherwise
  * -ETIMEDOUT state can have multiple states set, for any of several
  * transitions.
  */
-int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
+static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
+				int msecs)
 {
-	dd->ipath_sma_state_wanted = state;
-	wait_event_interruptible_timeout(ipath_sma_state_wait,
+	dd->ipath_state_wanted = state;
+	wait_event_interruptible_timeout(ipath_state_wait,
 					 (dd->ipath_flags & state),
 					 msecs_to_jiffies(msecs));
-	dd->ipath_sma_state_wanted = 0;
+	dd->ipath_state_wanted = 0;
 
 	if (!(dd->ipath_flags & state)) {
 		u64 val;
-		ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n",
+		ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
+			   " ms\n",
 			   /* test INIT ahead of DOWN, both can be set */
 			   (state & IPATH_LINKINIT) ? "INIT" :
 			   ((state & IPATH_LINKDOWN) ? "DOWN" :
@@ -807,58 +853,6 @@ bail:
 	return skb;
 }
 
-/**
- * ipath_rcv_layer - receive a packet for the layered (ethernet) driver
- * @dd: the infinipath device
- * @etail: the sk_buff number
- * @tlen: the total packet length
- * @hdr: the ethernet header
- *
- * Separate routine for better overall optimization
- */
-static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
-			    u32 tlen, struct ether_header *hdr)
-{
-	u32 elen;
-	u8 pad, *bthbytes;
-	struct sk_buff *skb, *nskb;
-
-	if (dd->ipath_port0_skbs &&
-			hdr->sub_opcode == IPATH_ITH4X_OPCODE_ENCAP) {
-		/*
-		 * Allocate a new sk_buff to replace the one we give
-		 * to the network stack.
-		 */
-		nskb = ipath_alloc_skb(dd, GFP_ATOMIC);
-		if (!nskb) {
-			/* count OK packets that we drop */
-			ipath_stats.sps_krdrops++;
-			return;
-		}
-
-		bthbytes = (u8 *) hdr->bth;
-		pad = (bthbytes[1] >> 4) & 3;
-		/* +CRC32 */
-		elen = tlen - (sizeof(*hdr) + pad + sizeof(u32));
-
-		skb = dd->ipath_port0_skbs[etail];
-		dd->ipath_port0_skbs[etail] = nskb;
-		skb_put(skb, elen);
-
-		dd->ipath_f_put_tid(dd, etail + (u64 __iomem *)
-				    ((char __iomem *) dd->ipath_kregbase
-				     + dd->ipath_rcvegrbase), 0,
-				    virt_to_phys(nskb->data));
-
-		__ipath_layer_rcv(dd, hdr, skb);
-
-		/* another ether packet received */
-		ipath_stats.sps_ether_rpkts++;
-	}
-	else if (hdr->sub_opcode == IPATH_ITH4X_OPCODE_LID_ARP)
-		__ipath_layer_rcv_lid(dd, hdr);
-}
-
 static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
 			     u32 eflags,
 			     u32 l,
@@ -972,26 +966,17 @@ reloop:
 		if (unlikely(eflags))
 			ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
-				int ret = __ipath_verbs_rcv(dd, rc + 1,
-							    ebuf, tlen);
-				if (ret == -ENODEV)
-					ipath_cdbg(VERBOSE,
-						   "received IB packet, "
-						   "not SMA (QP=%x)\n", qp);
-				if (dd->ipath_lli_counter)
-					dd->ipath_lli_counter--;
-
-		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
-			if (qp == IPATH_KD_QP &&
-			    bthbytes[0] == ipath_layer_rcv_opcode &&
-			    ebuf)
-				ipath_rcv_layer(dd, etail, tlen,
-						(struct ether_header *)hdr);
-			else
-				ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
-					   "qp=%x), len %x; ignored\n",
-					   etype, bthbytes[0], qp, tlen);
-		}
+			ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
+			if (dd->ipath_lli_counter)
+				dd->ipath_lli_counter--;
+		}
+		else if (etype == RCVHQ_RCV_TYPE_EAGER)
+			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
+				   "qp=%x), len %x; ignored\n",
+				   etype, bthbytes[0], qp, tlen);
 		else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
 			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
 				  be32_to_cpu(hdr->bth[0]) & 0xff);
@@ -1024,7 +1009,8 @@ reloop:
 		 */
 		if (l == hdrqtail || (i && !(i&0xf))) {
 			u64 lval;
-			if (l == hdrqtail) /* PE-800 interrupt only on last */
+			if (l == hdrqtail)
+				/* request IBA6120 interrupt only on last */
 				lval = dd->ipath_rhdrhead_intr_off | l;
 			else
 				lval = l;
@@ -1038,7 +1024,7 @@ reloop:
 	}
 
 	if (!dd->ipath_rhdrhead_intr_off && !reloop) {
-		/* HT-400 workaround; we can have a race clearing chip
+		/* IBA6110 workaround; we can have a race clearing chip
 		 * interrupt with another interrupt about to be delivered,
 		 * and can clear it before it is delivered on the GPIO
 		 * workaround.  By doing the extra check here for the
@@ -1211,7 +1197,7 @@ int ipath_setrcvhdrsize(struct ipath_dev
  *
  * do appropriate marking as busy, etc.
  * returns buffer number if one found (>=0), negative number is error.
- * Used by ipath_sma_send_pkt and ipath_layer_send
+ * Used by ipath_layer_send
  */
 u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
 {
@@ -1317,13 +1303,6 @@ rescan:
 		goto bail;
 	}
 
-	if (updated)
-		/*
-		 * ran out of bufs, now some (at least this one we just
-		 * got) are now available, so tell the layered driver.
-		 */
-		__ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
-
 	/*
 	 * set next starting place.  Since it's just an optimization,
 	 * it doesn't matter who wins on this, so no locking
@@ -1500,7 +1479,7 @@ int ipath_waitfor_mdio_cmdready(struct i
 	return ret;
 }
 
-void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
+static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
 {
 	static const char *what[4] = {
 		[0] = "DOWN",
@@ -1511,7 +1490,7 @@ void ipath_set_ib_lstate(struct ipath_de
 	int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
 			INFINIPATH_IBCC_LINKCMD_MASK;
 
-	ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
+	ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate "
 		   "is %s\n", dd->ipath_unit,
 		   what[linkcmd],
 		   ipath_ibcstatus_str[
@@ -1520,7 +1499,7 @@ void ipath_set_ib_lstate(struct ipath_de
 			    INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
 			   INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
 	/* flush all queued sends when going to DOWN or INIT, to be sure that
-	 * they don't block SMA and other MAD packets */
+	 * they don't block MAD packets */
 	if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
 				 INFINIPATH_S_ABORT);
@@ -1534,6 +1513,182 @@ void ipath_set_ib_lstate(struct ipath_de
 			 dd->ipath_ibcctrl | which);
 }
 
+int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+{
+	u32 lstate;
+	int ret;
+
+	switch (newstate) {
+	case IPATH_IB_LINKDOWN:
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
+				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case IPATH_IB_LINKDOWN_SLEEP:
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
+				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case IPATH_IB_LINKDOWN_DISABLE:
+		ipath_set_ib_lstate(dd,
+				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
+				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
+	case IPATH_IB_LINKINIT:
+		if (dd->ipath_flags & IPATH_LINKINIT) {
+			ret = 0;
+			goto bail;
+		}
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
+				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		lstate = IPATH_LINKINIT;
+		break;
+
+	case IPATH_IB_LINKARM:
+		if (dd->ipath_flags & IPATH_LINKARMED) {
+			ret = 0;
+			goto bail;
+		}
+		if (!(dd->ipath_flags &
+		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
+				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		/*
+		 * Since the port can transition to ACTIVE by receiving
+		 * a non VL 15 packet, wait for either state.
+		 */
+		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
+		break;
+
+	case IPATH_IB_LINKACTIVE:
+		if (dd->ipath_flags & IPATH_LINKACTIVE) {
+			ret = 0;
+			goto bail;
+		}
+		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
+			ret = -EINVAL;
+			goto bail;
+		}
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
+				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		lstate = IPATH_LINKACTIVE;
+		break;
+
+	default:
+		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
+		ret = -EINVAL;
+		goto bail;
+	}
+	ret = ipath_wait_linkstate(dd, lstate, 2000);
+
+bail:
+	return ret;
+}
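
Note: bringing a port to a usable state is a two-step sequence through
this function, ARM and then ACTIVE, which is exactly what the new
ipath_diagpkt_write() does before sending on a link that is not yet
active.  The ARM case deliberately waits for IPATH_LINKARMED |
IPATH_LINKACTIVE because receiving any non-VL15 packet can promote the
port to ACTIVE on its own.
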
+
+/**
+ * ipath_set_mtu - set the MTU
+ * @dd: the infinipath device
+ * @arg: the new MTU
+ *
+ * we can handle "any" incoming size, the issue here is whether we
+ * need to restrict our outgoing size.   For now, we don't do any
+ * sanity checking on this, and we don't deal with what happens to
+ * programs that are already running when the size changes.
+ * NOTE: changing the MTU will usually cause the IBC to go back to
+ * link initialize (IPATH_IBSTATE_INIT) state...
+ */
+int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
+{
+	u32 piosize;
+	int changed = 0;
+	int ret;
+
+	/*
+	 * mtu is IB data payload max.  It's the largest power of 2 less
+	 * than piosize (or even larger, since it only really controls the
+	 * largest we can receive; we can send the max of the mtu and
+	 * piosize).  We check that it's one of the valid IB sizes.
+	 */
+	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
+	    arg != 4096) {
+		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
+		ret = -EINVAL;
+		goto bail;
+	}
+	if (dd->ipath_ibmtu == arg) {
+		ret = 0;        /* same as current */
+		goto bail;
+	}
+
+	piosize = dd->ipath_ibmaxlen;
+	dd->ipath_ibmtu = arg;
+
+	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
+		/* Only if it's not the initial value (or reset to it) */
+		if (piosize != dd->ipath_init_ibmaxlen) {
+			dd->ipath_ibmaxlen = piosize;
+			changed = 1;
+		}
+	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
+		piosize = arg + IPATH_PIO_MAXIBHDR;
+		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
+			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
+			   arg);
+		dd->ipath_ibmaxlen = piosize;
+		changed = 1;
+	}
+
+	if (changed) {
+		/*
+		 * set the IBC maxpktlength to the size of our pio
+		 * buffers in words
+		 */
+		u64 ibc = dd->ipath_ibcctrl;
+		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
+			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
+
+		piosize = piosize - 2 * sizeof(u32);    /* ignore pbc */
+		dd->ipath_ibmaxlen = piosize;
+		piosize /= sizeof(u32); /* in words */
+		/*
+		 * for ICRC, which we only send in diag test pkt mode, and
+		 * we don't need to worry about that for mtu
+		 */
+		piosize += 1;
+
+		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
+		dd->ipath_ibcctrl = ibc;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+				 dd->ipath_ibcctrl);
+		dd->ipath_f_tidtemplate(dd);
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
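
Note: to make the arithmetic concrete, suppose IPATH_PIO_MAXIBHDR were
128 bytes (an illustrative value, not the real constant) and the
requested MTU is 2048.  Then piosize becomes 2048 + 128 = 2176 bytes;
dropping the two PBC words leaves ipath_ibmaxlen = 2168 bytes; dividing
by sizeof(u32) gives 542 words; and the extra ICRC word makes 543, the
value programmed into the IBC MAXPKTLEN field.
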
+
+int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
+{
+	dd->ipath_lid = arg;
+	dd->ipath_lmc = lmc;
+
+	dev_info(&dd->pcidev->dev, "We got a lid: %u\n", arg);
+
+	return 0;
+}
+
 /**
  * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
  * @dd: the infinipath device
@@ -1637,13 +1792,6 @@ void ipath_shutdown_device(struct ipath_
 	ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
 			    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
 
-	/*
-	 * we are shutting down, so tell the layered driver.  We don't do
-	 * this on just a link state change, much like ethernet, a cable
-	 * unplug, etc. doesn't change driver state
-	 */
-	ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN);
-
 	/* disable IBC */
 	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
@@ -1743,7 +1891,7 @@ static int __init infinipath_init(void)
 {
 	int ret;
 
-	ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ipath_core_version);
+	ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
 
 	/*
 	 * These must be called before the driver is registered with
@@ -1888,6 +2036,8 @@ static void __exit infinipath_cleanup(vo
 	struct ipath_devdata *dd, *tmp;
 	unsigned long flags;
 
+	ipath_diagpkt_remove();
+
 	ipath_exit_ipathfs();
 
 	ipath_driver_remove_group(&ipath_driver.driver);
@@ -1998,5 +2148,22 @@ bail:
 	return ret;
 }
 
+int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
+{
+	u64 val;
+
+	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
+		return -1;
+	if (dd->ipath_rx_pol_inv != new_pol_inv) {
+		dd->ipath_rx_pol_inv = new_pol_inv;
+		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
+			 INFINIPATH_XGXS_RX_POL_SHIFT);
+		val |= ((u64) dd->ipath_rx_pol_inv) <<
+			INFINIPATH_XGXS_RX_POL_SHIFT;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+	}
+	return 0;
+}
 module_init(infinipath_init);
 module_exit(infinipath_cleanup);
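
Note: ipath_set_rx_pol_inv() at the end of this file is a textbook
read-modify-write of a register bitfield: read the whole register, clear
the field through its mask and shift, OR in the new value, and write the
register back.  Factored out as a generic helper (illustrative, not part
of the driver):

#include <stdint.h>

/* replace the field (mask << shift) of reg with val, keep other bits */
static inline uint64_t set_reg_field(uint64_t reg, uint64_t mask,
				     unsigned int shift, uint64_t val)
{
	reg &= ~(mask << shift);	/* clear the old field */
	reg |= (val & mask) << shift;	/* insert the new value */
	return reg;
}

/* e.g.: reg = set_reg_field(reg, INFINIPATH_XGXS_RX_POL_MASK,
 *			     INFINIPATH_XGXS_RX_POL_SHIFT, new_pol_inv); */
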
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_file_ops.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_file_ops.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_file_ops.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_file_ops.c	2006-10-19 12:41:10.000000000 -0400
@@ -39,7 +39,6 @@
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
-#include "ipath_layer.h"
 #include "ipath_common.h"
 
 static int ipath_open(struct inode *, struct file *);
@@ -985,15 +984,17 @@ static int mmap_piobufs(struct vm_area_s
 	 * write combining behavior we want on the PIO buffers!
 	 */
 
-	if (vma->vm_flags & VM_READ) {
-		dev_info(&dd->pcidev->dev,
-			 "Can't map piobufs as readable (flags=%lx)\n",
-			 vma->vm_flags);
-		ret = -EPERM;
-		goto bail;
-	}
+#if defined(__powerpc__)
+	/* There isn't a generic way to specify writethrough mappings */
+	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
+	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
+#endif
 
-	/* don't allow them to later change to readable with mprotect */
+	/*
+	 * don't allow them to later change to readable with mprotect (for when
+	 * not initially mapped readable, as is normally the case)
+	 */
 	vma->vm_flags &= ~VM_MAYREAD;
 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 
@@ -1109,7 +1110,7 @@ static int ipath_mmap(struct file *fp, s
 		ret = mmap_rcvegrbufs(vma, pd);
 	else if (pgaddr == (u64) pd->port_rcvhdrq_phys) {
 		/*
-		 * The rcvhdrq itself; readonly except on HT-400 (so have
+		 * The rcvhdrq itself; readonly except on HT (so have
 		 * to allow writable mapping), multiple pages, contiguous
 		 * from an i/o perspective.
 		 */
@@ -1149,6 +1150,7 @@ static unsigned int ipath_poll(struct fi
 	struct ipath_portdata *pd;
 	u32 head, tail;
 	int bit;
+	unsigned pollflag = 0;
 	struct ipath_devdata *dd;
 
 	pd = port_fp(fp);
@@ -1185,9 +1187,12 @@ static unsigned int ipath_poll(struct fi
 			clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
 			pd->port_rcvwait_to++;
 		}
+		else
+			pollflag = POLLIN | POLLRDNORM;
 	}
 	else {
 		/* it's already happened; don't do wait_event overhead */
+		pollflag = POLLIN | POLLRDNORM;
 		pd->port_rcvnowait++;
 	}
 
@@ -1195,7 +1200,7 @@ static unsigned int ipath_poll(struct fi
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
 
-	return 0;
+	return pollflag;
 }
 
 static int try_alloc_port(struct ipath_devdata *dd, int port,
@@ -1297,14 +1302,14 @@ static int find_best_unit(struct file *f
 	 * This code is present to allow a knowledgeable person to
 	 * specify the layout of processes to processors before opening
 	 * this driver, and then we'll assign the process to the "closest"
-	 * HT-400 to that processor (we assume reasonable connectivity,
+	 * InfiniPath chip to that processor (we assume reasonable connectivity,
 	 * for now).  This code assumes that if affinity has been set
 	 * before this point, that at most one cpu is set; for now this
 	 * is reasonable.  I check for both cpus_empty() and cpus_full(),
 	 * in case some kernel variant sets none of the bits when no
 	 * affinity is set.  2.6.11 and 12 kernels have all present
 	 * cpus set.  Some day we'll have to fix it up further to handle
-	 * a cpu subset.  This algorithm fails for two HT-400's connected
+	 * a cpu subset.  This algorithm fails for two HT chips connected
 	 * in tunnel fashion.  Eventually this needs real topology
 	 * information.  There may be some issues with dual core numbering
 	 * as well.  This needs more work prior to release.
@@ -1815,7 +1820,7 @@ int ipath_user_add(struct ipath_devdata 
 		if (ret < 0) {
 			ipath_dev_err(dd, "Could not create wildcard "
 				      "minor: error %d\n", -ret);
-			goto bail_sma;
+			goto bail_user;
 		}
 
 		atomic_set(&user_setup, 1);
@@ -1831,7 +1836,7 @@ int ipath_user_add(struct ipath_devdata 
 
 	goto bail;
 
-bail_sma:
+bail_user:
 	user_cleanup();
 bail:
 	return ret;
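
Note: the ipath_poll() change deserves a comment: a poll method must
return a mask of ready events, not a status code.  The old code
unconditionally returned 0, so select()/poll() callers were never told
the receive queue was ready even when data had already arrived; now the
function returns POLLIN | POLLRDNORM whenever the port's head and tail
indices show pending data, and 0 only when the caller really has to
wait.
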
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_fs.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_fs.c	2006-10-19 14:44:55.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_fs.c	2006-10-19 12:41:10.000000000 -0400
@@ -61,9 +61,10 @@ static int ipathfs_mknod(struct inode *d
 	inode->i_mode = mode;
 	inode->i_uid = 0;
 	inode->i_gid = 0;
+	inode->i_blksize = PAGE_CACHE_SIZE;
 	inode->i_blocks = 0;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	inode->i_private = data;
+	inode->u.generic_ip = data;
 	if ((mode & S_IFMT) == S_IFDIR) {
 		inode->i_op = &simple_dir_inode_operations;
 		inode->i_nlink++;
@@ -118,7 +119,7 @@ static ssize_t atomic_counters_read(stru
 	u16 i;
 	struct ipath_devdata *dd;
 
-	dd = file->f_dentry->d_inode->i_private;
+	dd = file->f_dentry->d_inode->u.generic_ip;
 
 	for (i = 0; i < NUM_COUNTERS; i++)
 		counters[i] = ipath_snap_cntr(dd, i);
@@ -138,7 +139,7 @@ static ssize_t atomic_node_info_read(str
 	struct ipath_devdata *dd;
 	u64 guid;
 
-	dd = file->f_dentry->d_inode->i_private;
+	dd = file->f_dentry->d_inode->u.generic_ip;
 
 	guid = be64_to_cpu(dd->ipath_guid);
 
@@ -177,7 +178,7 @@ static ssize_t atomic_port_info_read(str
 	u32 tmp, tmp2;
 	struct ipath_devdata *dd;
 
-	dd = file->f_dentry->d_inode->i_private;
+	dd = file->f_dentry->d_inode->u.generic_ip;
 
 	/* so we only initialize non-zero fields. */
 	memset(portinfo, 0, sizeof portinfo);
@@ -190,8 +191,8 @@ static ssize_t atomic_port_info_read(str
 	portinfo[4] = (dd->ipath_lid << 16);
 
 	/*
-	 * Notimpl yet SMLID (should we store this in the driver, in case
-	 * SMA dies?)  CapabilityMask is 0, we don't support any of these
+	 * Notimpl yet SMLID.
+	 * CapabilityMask is 0, we don't support any of these
 	 * DiagCode is 0; we don't store any diag info for now Notimpl yet
 	 * M_KeyLeasePeriod (we don't support M_Key)
 	 */
@@ -324,7 +325,7 @@ static ssize_t flash_read(struct file *f
 		goto bail;
 	}
 
-	dd = file->f_dentry->d_inode->i_private;
+	dd = file->f_dentry->d_inode->u.generic_ip;
 	if (ipath_eeprom_read(dd, pos, tmp, count)) {
 		ipath_dev_err(dd, "failed to read from flash\n");
 		ret = -ENXIO;
@@ -380,7 +381,7 @@ static ssize_t flash_write(struct file *
 		goto bail_tmp;
 	}
 
-	dd = file->f_dentry->d_inode->i_private;
+	dd = file->f_dentry->d_inode->u.generic_ip;
 	if (ipath_eeprom_write(dd, pos, tmp, count)) {
 		ret = -ENXIO;
 		ipath_dev_err(dd, "failed to write to flash\n");
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_ht400.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_ht400.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_ht400.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_ht400.c	1969-12-31 19:00:00.000000000 -0500
@@ -1,1603 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains all of the code that is specific to the InfiniPath
- * HT-400 chip.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-/*
- * This lists the InfiniPath HT400 registers, in the actual chip layout.
- * This structure should never be directly accessed.
- *
- * The names are in InterCap form because they're taken straight from
- * the chip specification.  Since they're only used in this file, they
- * don't pollute the rest of the source.
-*/
-
-struct _infinipath_do_not_use_kernel_regs {
-	unsigned long long Revision;
-	unsigned long long Control;
-	unsigned long long PageAlign;
-	unsigned long long PortCnt;
-	unsigned long long DebugPortSelect;
-	unsigned long long DebugPort;
-	unsigned long long SendRegBase;
-	unsigned long long UserRegBase;
-	unsigned long long CounterRegBase;
-	unsigned long long Scratch;
-	unsigned long long ReservedMisc1;
-	unsigned long long InterruptConfig;
-	unsigned long long IntBlocked;
-	unsigned long long IntMask;
-	unsigned long long IntStatus;
-	unsigned long long IntClear;
-	unsigned long long ErrorMask;
-	unsigned long long ErrorStatus;
-	unsigned long long ErrorClear;
-	unsigned long long HwErrMask;
-	unsigned long long HwErrStatus;
-	unsigned long long HwErrClear;
-	unsigned long long HwDiagCtrl;
-	unsigned long long MDIO;
-	unsigned long long IBCStatus;
-	unsigned long long IBCCtrl;
-	unsigned long long ExtStatus;
-	unsigned long long ExtCtrl;
-	unsigned long long GPIOOut;
-	unsigned long long GPIOMask;
-	unsigned long long GPIOStatus;
-	unsigned long long GPIOClear;
-	unsigned long long RcvCtrl;
-	unsigned long long RcvBTHQP;
-	unsigned long long RcvHdrSize;
-	unsigned long long RcvHdrCnt;
-	unsigned long long RcvHdrEntSize;
-	unsigned long long RcvTIDBase;
-	unsigned long long RcvTIDCnt;
-	unsigned long long RcvEgrBase;
-	unsigned long long RcvEgrCnt;
-	unsigned long long RcvBufBase;
-	unsigned long long RcvBufSize;
-	unsigned long long RxIntMemBase;
-	unsigned long long RxIntMemSize;
-	unsigned long long RcvPartitionKey;
-	unsigned long long ReservedRcv[10];
-	unsigned long long SendCtrl;
-	unsigned long long SendPIOBufBase;
-	unsigned long long SendPIOSize;
-	unsigned long long SendPIOBufCnt;
-	unsigned long long SendPIOAvailAddr;
-	unsigned long long TxIntMemBase;
-	unsigned long long TxIntMemSize;
-	unsigned long long ReservedSend[9];
-	unsigned long long SendBufferError;
-	unsigned long long SendBufferErrorCONT1;
-	unsigned long long SendBufferErrorCONT2;
-	unsigned long long SendBufferErrorCONT3;
-	unsigned long long ReservedSBE[4];
-	unsigned long long RcvHdrAddr0;
-	unsigned long long RcvHdrAddr1;
-	unsigned long long RcvHdrAddr2;
-	unsigned long long RcvHdrAddr3;
-	unsigned long long RcvHdrAddr4;
-	unsigned long long RcvHdrAddr5;
-	unsigned long long RcvHdrAddr6;
-	unsigned long long RcvHdrAddr7;
-	unsigned long long RcvHdrAddr8;
-	unsigned long long ReservedRHA[7];
-	unsigned long long RcvHdrTailAddr0;
-	unsigned long long RcvHdrTailAddr1;
-	unsigned long long RcvHdrTailAddr2;
-	unsigned long long RcvHdrTailAddr3;
-	unsigned long long RcvHdrTailAddr4;
-	unsigned long long RcvHdrTailAddr5;
-	unsigned long long RcvHdrTailAddr6;
-	unsigned long long RcvHdrTailAddr7;
-	unsigned long long RcvHdrTailAddr8;
-	unsigned long long ReservedRHTA[7];
-	unsigned long long Sync;	/* Software only */
-	unsigned long long Dump;	/* Software only */
-	unsigned long long SimVer;	/* Software only */
-	unsigned long long ReservedSW[5];
-	unsigned long long SerdesConfig0;
-	unsigned long long SerdesConfig1;
-	unsigned long long SerdesStatus;
-	unsigned long long XGXSConfig;
-	unsigned long long ReservedSW2[4];
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof(struct \
-    _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
-    struct infinipath_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_ht_kregs = {
-	.kr_control = IPATH_KREG_OFFSET(Control),
-	.kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
-	.kr_debugport = IPATH_KREG_OFFSET(DebugPort),
-	.kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
-	.kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
-	.kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
-	.kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
-	.kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
-	.kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
-	.kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
-	.kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
-	.kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
-	.kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
-	.kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
-	.kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
-	.kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
-	.kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
-	.kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
-	.kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
-	.kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
-	.kr_intclear = IPATH_KREG_OFFSET(IntClear),
-	.kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig),
-	.kr_intmask = IPATH_KREG_OFFSET(IntMask),
-	.kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
-	.kr_mdio = IPATH_KREG_OFFSET(MDIO),
-	.kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
-	.kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
-	.kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
-	.kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
-	.kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
-	.kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
-	.kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
-	.kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
-	.kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
-	.kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
-	.kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
-	.kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
-	.kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
-	.kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
-	.kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
-	.kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
-	.kr_revision = IPATH_KREG_OFFSET(Revision),
-	.kr_scratch = IPATH_KREG_OFFSET(Scratch),
-	.kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
-	.kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
-	.kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
-	.kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
-	.kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
-	.kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
-	.kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
-	.kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
-	.kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
-	.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-	.kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
-	.kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
-	.kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
-	.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-	/*
-	 * These should not be used directly via ipath_read_kreg64(),
-	 * use them with ipath_read_kreg64_port(),
-	 */
-	.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
-	.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
-};
-
-static const struct ipath_cregs ipath_ht_cregs = {
-	.cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
-	.cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
-	.cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
-	.cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
-	.cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
-	.cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
-	.cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
-	.cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
-	.cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
-	.cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
-	.cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
-	.cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
-	/* calc from Reg_CounterRegBase + offset */
-	.cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
-	.cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
-	.cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
-	.cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
-	.cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
-	.cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
-	.cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
-	.cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
-	.cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
-	.cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
-	.cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
-	.cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
-	.cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
-	.cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
-	.cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
-	.cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
-	.cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
-	.cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
-	.cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
-	.cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
-	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK 0x1FF
-#define INFINIPATH_I_RCVAVAIL_MASK 0x1FF
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL
-#define INFINIPATH_HWE_HTCLNKABYTE0CRCERR   0x0000000000800000ULL
-#define INFINIPATH_HWE_HTCLNKABYTE1CRCERR   0x0000000001000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR   0x0000000002000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR   0x0000000004000000ULL
-#define INFINIPATH_HWE_HTCMISCERR4          0x0000000008000000ULL
-#define INFINIPATH_HWE_HTCMISCERR5          0x0000000010000000ULL
-#define INFINIPATH_HWE_HTCMISCERR6          0x0000000020000000ULL
-#define INFINIPATH_HWE_HTCMISCERR7          0x0000000040000000ULL
-#define INFINIPATH_HWE_HTCBUSTREQPARITYERR  0x0000000080000000ULL
-#define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL
-#define INFINIPATH_HWE_HTCBUSIREQPARITYERR  0x0000000200000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_FBSLIP        0x0200000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_RFSLIP        0x0400000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_FBSLIP        0x0800000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_RFSLIP        0x1000000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED      0x2000000000000000ULL
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_CORRECT     0x0000000000008000
-
-/*
- * masks and bits that are different in different chips, or present only
- * in one
- */
-static const ipath_err_t infinipath_hwe_htcmemparityerr_mask =
-    INFINIPATH_HWE_HTCMEMPARITYERR_MASK;
-static const ipath_err_t infinipath_hwe_htcmemparityerr_shift =
-    INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT;
-
-static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr =
-    INFINIPATH_HWE_HTCLNKABYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr =
-    INFINIPATH_HWE_HTCLNKABYTE1CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr =
-    INFINIPATH_HWE_HTCLNKBBYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr =
-    INFINIPATH_HWE_HTCLNKBBYTE1CRCERR;
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA \
-	(1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL \
-	(1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-/* keep the code below somewhat more readonable; not used elsewhere */
-#define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |	\
-				infinipath_hwe_htclnkabyte1crcerr)
-#define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr |	\
-				infinipath_hwe_htclnkbbyte1crcerr)
-#define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |	\
-				infinipath_hwe_htclnkbbyte0crcerr)
-#define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr |	\
-				infinipath_hwe_htclnkbbyte1crcerr)
-
-static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs,
-			  char *msg, size_t msgl)
-{
-	char bitsmsg[64];
-	ipath_err_t crcbits = hwerrs &
-		(_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS);
-	/* don't check if 8bit HT */
-	if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
-		crcbits &= ~infinipath_hwe_htclnkabyte1crcerr;
-	/* don't check if 8bit HT */
-	if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
-		crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr;
-	/*
-	 * we'll want to ignore link errors on link that is
-	 * not in use, if any.  For now, complain about both
-	 */
-	if (crcbits) {
-		u16 ctrl0, ctrl1;
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[HT%s lane %s CRC (%llx); ignore till reload]",
-			 !(crcbits & _IPATH_HTLINK1_CRCBITS) ?
-			 "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS)
-				    ? "1 (B)" : "0+1 (A+B)"),
-			 !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0"
-			 : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" :
-			    "0+1"), (unsigned long long) crcbits);
-		strlcat(msg, bitsmsg, msgl);
-
-		/*
-		 * print extra info for debugging.  slave/primary
-		 * config word 4, 8 (link control 0, 1)
-		 */
-
-		if (pci_read_config_word(dd->pcidev,
-					 dd->ipath_ht_slave_off + 0x4,
-					 &ctrl0))
-			dev_info(&dd->pcidev->dev, "Couldn't read "
-				 "linkctrl0 of slave/primary "
-				 "config block\n");
-		else if (!(ctrl0 & 1 << 6))
-			/* not if EOC bit set */
-			ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0,
-				  ((ctrl0 >> 8) & 7) ? " CRC" : "",
-				  ((ctrl0 >> 4) & 1) ? "linkfail" :
-				  "");
-		if (pci_read_config_word(dd->pcidev,
-					 dd->ipath_ht_slave_off + 0x8,
-					 &ctrl1))
-			dev_info(&dd->pcidev->dev, "Couldn't read "
-				 "linkctrl1 of slave/primary "
-				 "config block\n");
-		else if (!(ctrl1 & 1 << 6))
-			/* not if EOC bit set */
-			ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1,
-				  ((ctrl1 >> 8) & 7) ? " CRC" : "",
-				  ((ctrl1 >> 4) & 1) ? "linkfail" :
-				  "");
-
-		/* disable until driver reloaded */
-		dd->ipath_hwerrmask &= ~crcbits;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-		ipath_dbg("HT crc errs: %s\n", msg);
-	} else
-		ipath_dbg("ignoring HT crc errors 0x%llx, "
-			  "not in use\n", (unsigned long long)
-			  (hwerrs & (_IPATH_HTLINK0_CRCBITS |
-				     _IPATH_HTLINK1_CRCBITS)));
-}
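-
-/*
- * Illustrative note, not from the original source: the nested
- * ternaries above name the HT link (A/B) first and the byte lane
- * second.  For example, with only the link A byte 1 CRC bit set,
- * the message reads "[HT0 (A) lane 1 CRC (...); ignore till reload]".
- */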
-
-/**
- * ipath_ht_handle_hwerrors - display hardware errors
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll
- * print them and continue.  We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
-				     size_t msgl)
-{
-	ipath_err_t hwerrs;
-	u32 bits, ctrl;
-	int isfatal = 0;
-	char bitsmsg[64];
-
-	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-
-	if (!hwerrs) {
-		ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
-		/*
-		 * Bail rather than printing confusing messages; this seems
-		 * to be related to clearing the crc error, or the pll
-		 * error, during init.
-		 */
-		goto bail;
-	} else if (hwerrs == -1LL) {
-		ipath_dev_err(dd, "Read of hardware error status failed "
-			      "(all bits set); ignoring\n");
-		goto bail;
-	}
-	ipath_stats.sps_hwerrs++;
-
-	/* Always clear the error status register, except MEMBISTFAIL,
-	 * regardless of whether we continue or stop using the chip.
-	 * We want that set so we know it failed, even across driver reload.
-	 * We'll still ignore it in the hwerrmask.  We do this partly for
-	 * diagnostics, but also for support */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-			 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
-	hwerrs &= dd->ipath_hwerrmask;
-
-	/*
-	 * make sure we get this much out, unless told to be quiet,
-	 * or it's occurred within the last 5 seconds
-	 */
-	if ((hwerrs & ~dd->ipath_lasthwerror) ||
-	    (ipath_debug & __IPATH_VERBDBG))
-		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
-			 "(cleared)\n", (unsigned long long) hwerrs);
-	dd->ipath_lasthwerror |= hwerrs;
-
-	if (hwerrs & ~infinipath_hwe_bitsextant)
-		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
-			      "%llx set\n", (unsigned long long)
-			      (hwerrs & ~infinipath_hwe_bitsextant));
-
-	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-	if (ctrl & INFINIPATH_C_FREEZEMODE) {
-		if (hwerrs) {
-			/*
-			 * if any set that we aren't ignoring; only
-			 * make the complaint once, in case it's stuck
-			 * or recurring, and we get here multiple
-			 * times.
-			 */
-			if (dd->ipath_flags & IPATH_INITTED) {
-				ipath_dev_err(dd, "Fatal Error (freeze "
-					      "mode), no longer usable\n");
-				isfatal = 1;
-			}
-			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-			/* mark as having had error */
-			*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-			/*
-			 * mark as not usable, at a minimum until driver
-			 * is reloaded, probably until reboot, since no
-			 * other reset is possible.
-			 */
-			dd->ipath_flags &= ~IPATH_INITTED;
-		} else {
-			ipath_dbg("Clearing freezemode on ignored hardware "
-				  "error\n");
-			ctrl &= ~INFINIPATH_C_FREEZEMODE;
-			ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-					 ctrl);
-		}
-	}
-
-	*msg = '\0';
-
-	/*
-	 * may someday want to decode into which bits are which
-	 * functional area for parity errors, etc.
-	 */
-	if (hwerrs & (infinipath_hwe_htcmemparityerr_mask
-		      << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_HTCMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ",
-			 bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-	if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
-		      << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_RXEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ",
-			 bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-	if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
-		      << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_TXEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ",
-			 bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-	if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR)
-		strlcat(msg, "[IB2IPATH Parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR)
-		strlcat(msg, "[IPATH2IB Parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_HTCBUSIREQPARITYERR)
-		strlcat(msg, "[HTC Ireq Parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_HTCBUSTREQPARITYERR)
-		strlcat(msg, "[HTC Treq Parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_HTCBUSTRESPPARITYERR)
-		strlcat(msg, "[HTC Tresp Parity]", msgl);
-
-	if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
-		hwerr_crcbits(dd, hwerrs, msg, msgl);
-
-	if (hwerrs & INFINIPATH_HWE_HTCMISCERR5)
-		strlcat(msg, "[HT core Misc5]", msgl);
-	if (hwerrs & INFINIPATH_HWE_HTCMISCERR6)
-		strlcat(msg, "[HT core Misc6]", msgl);
-	if (hwerrs & INFINIPATH_HWE_HTCMISCERR7)
-		strlcat(msg, "[HT core Misc7]", msgl);
-	if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
-		strlcat(msg, "[Memory BIST test failed, HT-400 unusable]",
-			msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |	\
-			 INFINIPATH_HWE_COREPLL_RFSLIP |	\
-			 INFINIPATH_HWE_HTBPLL_FBSLIP |		\
-			 INFINIPATH_HWE_HTBPLL_RFSLIP |		\
-			 INFINIPATH_HWE_HTAPLL_FBSLIP |		\
-			 INFINIPATH_HWE_HTAPLL_RFSLIP)
-
-	if (hwerrs & _IPATH_PLL_FAIL) {
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[PLL failed (%llx), HT-400 unusable]",
-			 (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
-		strlcat(msg, bitsmsg, msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
-		/*
-		 * If it occurs, it is left masked since the eternal
-		 * interface is unused
-		 */
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR)
-		strlcat(msg, "[Rx Dsync]", msgl);
-	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED)
-		strlcat(msg, "[SerDes PLL]", msgl);
-
-	ipath_dev_err(dd, "%s hardware error\n", msg);
-	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
-		/*
-		 * for status file; if no trailing brace is copied,
-		 * we'll know it was truncated.
-		 */
-		snprintf(dd->ipath_freezemsg,
-			 dd->ipath_freezelen, "{%s}", msg);
-
-bail:;
-}
-
-/**
- * ipath_ht_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * fill in the board name, based on the board revision register
- */
-static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
-			      size_t namelen)
-{
-	char *n = NULL;
-	u8 boardrev = dd->ipath_boardrev;
-	int ret;
-
-	switch (boardrev) {
-	case 4:		/* Ponderosa is one of the bringup boards */
-		n = "Ponderosa";
-		break;
-	case 5:
-		/*
-		 * HT-460 original production board; two production levels, with
-		 * different serial number ranges.   See ipath_ht_early_init() for
-		 * case where we enable IPATH_GPIO_INTR for later serial # range.
-		 */
-		n = "InfiniPath_HT-460";
-		break;
-	case 6:
-		n = "OEM_Board_3";
-		break;
-	case 7:
-		/* HT-460 small form factor production board */
-		n = "InfiniPath_HT-465";
-		break;
-	case 8:
-		n = "LS/X-1";
-		break;
-	case 9:		/* Comstock bringup test board */
-		n = "Comstock";
-		break;
-	case 10:
-		n = "OEM_Board_2";
-		break;
-	case 11:
-		n = "InfiniPath_HT-470";
-		break;
-	case 12:
-		n = "OEM_Board_4";
-		break;
-	default:		/* don't know, just print the number */
-		ipath_dev_err(dd, "Don't yet know about board "
-			      "with ID %u\n", boardrev);
-		snprintf(name, namelen, "Unknown_InfiniPath_HT-4xx_%u",
-			 boardrev);
-		break;
-	}
-	if (n)
-		snprintf(name, namelen, "%s", n);
-
-	if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) {
-		/*
-		 * This version of the driver only supports HT-400
-		 * Rev 3.2 and 3.3
-		 */
-		ipath_dev_err(dd,
-			      "Unsupported HT-400 revision %u.%u!\n",
-			      dd->ipath_majrev, dd->ipath_minrev);
-		ret = 1;
-		goto bail;
-	}
-	/*
-	 * pkt/word counters are 32 bit, and therefore wrap fast enough
-	 * that we snapshot them from a timer, and maintain 64 bit shadow
-	 * copies
-	 */
-	dd->ipath_flags |= IPATH_32BITCOUNTERS;
-	if (dd->ipath_htspeed != 800)
-		ipath_dev_err(dd,
-			      "Incorrectly configured for HT @ %uMHz\n",
-			      dd->ipath_htspeed);
-	if (dd->ipath_boardrev == 7 || dd->ipath_boardrev == 11 ||
-	    dd->ipath_boardrev == 6)
-		dd->ipath_flags |= IPATH_GPIO_INTR;
-	else
-		dd->ipath_flags |= IPATH_POLL_RX_INTR;
-	if (dd->ipath_boardrev == 8) {	/* LS/X-1 */
-		u64 val;
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-		if (val & INFINIPATH_EXTS_SERDESSEL) {
-			/*
-			 * The chip is hardware disabled, and so will
-			 * not be able to bring up the link,
-			 * in any case.  We special case this and abort
-			 * early, to avoid later messages.  We also set
-			 * the DISABLED status bit
-			 */
-			ipath_dbg("Unit %u is hardware-disabled\n",
-				  dd->ipath_unit);
-			*dd->ipath_statusp |= IPATH_STATUS_DISABLED;
-			/* this value is handled differently */
-			ret = 2;
-			goto bail;
-		}
-	}
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-static void ipath_check_htlink(struct ipath_devdata *dd)
-{
-	u8 linkerr, link_off, i;
-
-	for (i = 0; i < 2; i++) {
-		link_off = dd->ipath_ht_slave_off + i * 4 + 0xd;
-		if (pci_read_config_byte(dd->pcidev, link_off, &linkerr))
-			dev_info(&dd->pcidev->dev, "Couldn't read "
-				 "linkerror%d of HT slave/primary block\n",
-				 i);
-		else if (linkerr & 0xf0) {
-			ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
-				   "clearing\n", linkerr >> 4, i);
-			/*
-			 * writing the linkerr bits that are set should
-			 * clear them
-			 */
-			if (pci_write_config_byte(dd->pcidev, link_off,
-						  linkerr))
-				ipath_dbg("Failed write to clear HT "
-					  "linkerror%d\n", i);
-			if (pci_read_config_byte(dd->pcidev, link_off,
-						 &linkerr))
-				dev_info(&dd->pcidev->dev,
-					 "Couldn't reread linkerror%d of "
-					 "HT slave/primary block\n", i);
-			else if (linkerr & 0xf0)
-				dev_info(&dd->pcidev->dev,
-					 "HT linkerror%d bits 0x%x "
-					 "couldn't be cleared\n",
-					 i, linkerr >> 4);
-		}
-	}
-}
-
-static int ipath_setup_ht_reset(struct ipath_devdata *dd)
-{
-	ipath_dbg("No reset possible for HT-400\n");
-	return 0;
-}
-
-#define HT_CAPABILITY_ID   0x08	/* HT capabilities not defined in kernel */
-#define HT_INTR_DISC_CONFIG  0x80	/* HT interrupt and discovery cap */
-#define HT_INTR_REG_INDEX    2	/* intconfig requires indirect accesses */
-
-/*
- * Bits 13-15 of command==0 identify the slave/primary block.  Clear any
- * HT CRC errors.  We only bother to do this at load time, because it's
- * OK if it happened before we were loaded (first time after boot/reset),
- * but any time after that, it's fatal anyway.  We also need to skip the
- * check for upper byte errors if we are in 8 bit mode, so figure out
- * our width.  For now, at least, also complain if it's 8 bit.
- */
-static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
-			     int pos, u8 cap_type)
-{
-	u8 linkwidth = 0, linkerr, link_a_b_off, link_off;
-	u16 linkctrl = 0;
-	int i;
-
-	dd->ipath_ht_slave_off = pos;
-	/* master host or slave: master_host bit of the command word */
-	if ((cap_type >> 2) & 1)
-		link_a_b_off = 4;
-	else
-		link_a_b_off = 0;
-	ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n",
-		   link_a_b_off ? 1 : 0,
-		   link_a_b_off ? 'B' : 'A');
-
-	link_a_b_off += pos;
-
-	/*
-	 * check both link control registers; clear both HT CRC sets if
-	 * necessary.
-	 */
-	for (i = 0; i < 2; i++) {
-		link_off = pos + i * 4 + 0x4;
-		if (pci_read_config_word(pdev, link_off, &linkctrl))
-			ipath_dev_err(dd, "Couldn't read HT link control%d "
-				      "register\n", i);
-		else if (linkctrl & (0xf << 8)) {
-			ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error "
-				   "bits %x\n", i, linkctrl & (0xf << 8));
-			/*
-			 * now write them back to clear the error.
-			 */
-			pci_write_config_word(pdev, link_off,
-					      linkctrl & (0xf << 8));
-		}
-	}
-
-	/*
-	 * As with HT CRC bits, same for protocol errors that might occur
-	 * during boot.
-	 */
-	for (i = 0; i < 2; i++) {
-		link_off = pos + i * 4 + 0xd;
-		if (pci_read_config_byte(pdev, link_off, &linkerr))
-			dev_info(&pdev->dev, "Couldn't read linkerror%d "
-				 "of HT slave/primary block\n", i);
-		else if (linkerr & 0xf0) {
-			ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
-				   "clearing\n", linkerr >> 4, i);
-			/*
-			 * writing the linkerr bits that are set will clear
-			 * them
-			 */
-			if (pci_write_config_byte
-			    (pdev, link_off, linkerr))
-				ipath_dbg("Failed write to clear HT "
-					  "linkerror%d\n", i);
-			if (pci_read_config_byte(pdev, link_off, &linkerr))
-				dev_info(&pdev->dev, "Couldn't reread "
-					 "linkerror%d of HT slave/primary "
-					 "block\n", i);
-			else if (linkerr & 0xf0)
-				dev_info(&pdev->dev, "HT linkerror%d bits "
-					 "0x%x couldn't be cleared\n",
-					 i, linkerr >> 4);
-		}
-	}
-
-	/*
-	 * this is just for our link to the host, not devices connected
-	 * through tunnel.
-	 */
-
-	if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth))
-		ipath_dev_err(dd, "Couldn't read HT link width "
-			      "config register\n");
-	else {
-		u32 width;
-		switch (linkwidth & 7) {
-		case 5:
-			width = 4;
-			break;
-		case 4:
-			width = 2;
-			break;
-		case 3:
-			width = 32;
-			break;
-		case 1:
-			width = 16;
-			break;
-		case 0:
-		default:	/* if wrong, assume 8 bit */
-			width = 8;
-			break;
-		}
-
-		dd->ipath_htwidth = width;
-
-		if (linkwidth != 0x11) {
-			ipath_dev_err(dd, "Not configured for 16 bit HT "
-				      "(%x)\n", linkwidth);
-			if (!(linkwidth & 0xf)) {
-				ipath_dbg("Will ignore HT lane1 errors\n");
-				dd->ipath_flags |= IPATH_8BIT_IN_HT0;
-			}
-		}
-	}
-
-	/*
-	 * this is just for our link to the host, not devices connected
-	 * through tunnel.
-	 */
-	if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth))
-		ipath_dev_err(dd, "Couldn't read HT link frequency "
-			      "config register\n");
-	else {
-		u32 speed;
-		switch (linkwidth & 0xf) {
-		case 6:
-			speed = 1000;
-			break;
-		case 5:
-			speed = 800;
-			break;
-		case 4:
-			speed = 600;
-			break;
-		case 3:
-			speed = 500;
-			break;
-		case 2:
-			speed = 400;
-			break;
-		case 1:
-			speed = 300;
-			break;
-		default:
-			/*
-			 * assume reserved and vendor-specific are 200...
-			 */
-		case 0:
-			speed = 200;
-			break;
-		}
-		dd->ipath_htspeed = speed;
-	}
-}
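-
-/*
- * Illustrative example, not from the original source: a link width
- * register value of 0x11 has 1 in its low (active width) nibble,
- * which the switch above decodes as the expected 16 bit link; any
- * other value triggers the "Not configured for 16 bit HT" warning.
- * Likewise a frequency field of 5 decodes to the expected 800MHz.
- */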
-
-static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev,
-			    int pos)
-{
-	u32 int_handler_addr_lower;
-	u32 int_handler_addr_upper;
-	u64 ihandler;
-	u32 intvec;
-
-	/* use indirection register to get the intr handler */
-	pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x10);
-	pci_read_config_dword(pdev, pos + 4, &int_handler_addr_lower);
-	pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x11);
-	pci_read_config_dword(pdev, pos + 4, &int_handler_addr_upper);
-
-	ihandler = (u64) int_handler_addr_lower |
-		((u64) int_handler_addr_upper << 32);
-
-	/*
-	 * kernels with CONFIG_PCI_MSI set the vector in the irq field of
-	 * struct pci_dev, so we use that to program the HT-400 internal
-	 * interrupt register (not config space) with that value. The BIOS
-	 * must still have done the basic MSI setup.
-	 */
-	intvec = pdev->irq;
-	/*
-	 * clear any vector bits there; normally not set but we'll overload
-	 * this for some debug purposes (setting the HTC debug register
-	 * value from software, rather than GPIOs), so it might be set on a
-	 * driver reload.
-	 */
-	ihandler &= ~0xff0000;
-	/* x86 vector goes in intrinfo[23:16] */
-	ihandler |= intvec << 16;
-	ipath_cdbg(VERBOSE, "ihandler lower %x, upper %x, intvec %x, "
-		   "interruptconfig %llx\n", int_handler_addr_lower,
-		   int_handler_addr_upper, intvec,
-		   (unsigned long long) ihandler);
-
-	/* can't program yet, so save for interrupt setup */
-	dd->ipath_intconfig = ihandler;
-	/* keep going, so we find link control stuff also */
-
-	return ihandler != 0;
-}
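-
-/*
- * Illustrative example with made-up values, not from the original
- * source: if the BIOS left an MSI address of 0xfee00000 in the two
- * halves read above and pdev->irq is 0x31, clearing bits 23:16 and
- * inserting the vector yields an interruptconfig value of 0xfe310000.
- */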
-
-/**
- * ipath_setup_ht_config - setup the interruptconfig register
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * Set up the interruptconfig register from the HT config info, and
- * clear CRC errors in HT linkcontrol, if necessary.
- * This is done only for the real hardware.  It is done before the
- * chip address space is initialized, so we can't touch infinipath
- * registers.
- */
-static int ipath_setup_ht_config(struct ipath_devdata *dd,
-				 struct pci_dev *pdev)
-{
-	int pos, ret = 0;
-	int ihandler = 0;
-
-	/*
-	 * Read the capability info to find the interrupt info, and also
-	 * handle clearing CRC errors in linkctrl register if necessary.  We
-	 * do this early, before we ever enable errors or hardware errors,
-	 * mostly to avoid causing the chip to enter freeze mode.
-	 */
-	pos = pci_find_capability(pdev, HT_CAPABILITY_ID);
-	if (!pos) {
-		ipath_dev_err(dd, "Couldn't find HyperTransport "
-			      "capability; no interrupts\n");
-		ret = -ENODEV;
-		goto bail;
-	}
-	do {
-		u8 cap_type;
-
-		/* the HT capability type byte is 3 bytes after the
-		 * capability byte.
-		 */
-		if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
-			dev_info(&pdev->dev, "Couldn't read config "
-				 "command @ %d\n", pos);
-			continue;
-		}
-		if (!(cap_type & 0xE0))
-			slave_or_pri_blk(dd, pdev, pos, cap_type);
-		else if (cap_type == HT_INTR_DISC_CONFIG)
-			ihandler = set_int_handler(dd, pdev, pos);
-	} while ((pos = pci_find_next_capability(pdev, pos,
-						 HT_CAPABILITY_ID)));
-
-	if (!ihandler) {
-		ipath_dev_err(dd, "Couldn't find interrupt handler in "
-			      "config space\n");
-		ret = -ENODEV;
-	}
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_setup_ht_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the infinipath device
- *
- * Called during driver unload.
- * This is currently a nop for the HT-400, though not for all chips.
- */
-static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
-{
-}
-
-/**
- * ipath_setup_ht_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * Set the state of the two external LEDs, to indicate physical and
- * logical state of IB link.   For this chip (at least with recommended
- * board pinouts), LED1 is Green (physical state), and LED2 is Yellow
- * (logical state)
- *
- * Note:  We try to match the Mellanox HCA LED behavior as best
- * we can.  Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate.  That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
-				     u64 lst, u64 ltst)
-{
-	u64 extctl;
-
-	/* the diags use the LED to indicate diag info, so we leave
-	 * the external LED alone when the diags are running */
-	if (ipath_diag_inuse)
-		return;
-
-	/*
-	 * start by setting both LED control bits to off, then turn
-	 * on the appropriate bit(s).
-	 */
-	if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */
-		/*
-		 * major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF
-		 * is inverted,  because it is normally used to indicate
-		 * a hardware fault at reset, if there were errors
-		 */
-		extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON)
-			| INFINIPATH_EXTC_LEDGBLERR_OFF;
-		if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
-			extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF;
-		if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-			extctl |= INFINIPATH_EXTC_LEDGBLOK_ON;
-	}
-	else {
-		extctl = dd->ipath_extctrl &
-			~(INFINIPATH_EXTC_LED1PRIPORT_ON |
-			  INFINIPATH_EXTC_LED2PRIPORT_ON);
-		if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
-			extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
-		if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-			extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
-	}
-	dd->ipath_extctrl = extctl;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
-}
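-
-/*
- * Illustrative example, not from the original source: on a standard
- * board, a trained link (LT state LINKUP) that is not yet ACTIVE
- * lights only LED1 (green, physical state); once the L state reaches
- * ACTIVE, LED2 (yellow, logical state) comes on as well.
- */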
-
-static void ipath_init_ht_variables(void)
-{
-	ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
-	ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
-	ipath_gpio_sda = IPATH_GPIO_SDA;
-	ipath_gpio_scl = IPATH_GPIO_SCL;
-
-	infinipath_i_bitsextant =
-		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
-		(INFINIPATH_I_RCVAVAIL_MASK <<
-		 INFINIPATH_I_RCVAVAIL_SHIFT) |
-		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
-		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
-
-	infinipath_e_bitsextant =
-		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
-		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
-		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
-		INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
-		INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
-		INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
-		INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-		INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
-		INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
-		INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
-		INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
-		INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
-		INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
-		INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
-		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
-		INFINIPATH_E_HARDWARE;
-
-	infinipath_hwe_bitsextant =
-		(INFINIPATH_HWE_HTCMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) |
-		(INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
-		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
-		INFINIPATH_HWE_HTCLNKABYTE0CRCERR |
-		INFINIPATH_HWE_HTCLNKABYTE1CRCERR |
-		INFINIPATH_HWE_HTCLNKBBYTE0CRCERR |
-		INFINIPATH_HWE_HTCLNKBBYTE1CRCERR |
-		INFINIPATH_HWE_HTCMISCERR4 |
-		INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 |
-		INFINIPATH_HWE_HTCMISCERR7 |
-		INFINIPATH_HWE_HTCBUSTREQPARITYERR |
-		INFINIPATH_HWE_HTCBUSTRESPPARITYERR |
-		INFINIPATH_HWE_HTCBUSIREQPARITYERR |
-		INFINIPATH_HWE_RXDSYNCMEMPARITYERR |
-		INFINIPATH_HWE_MEMBISTFAILED |
-		INFINIPATH_HWE_COREPLL_FBSLIP |
-		INFINIPATH_HWE_COREPLL_RFSLIP |
-		INFINIPATH_HWE_HTBPLL_FBSLIP |
-		INFINIPATH_HWE_HTBPLL_RFSLIP |
-		INFINIPATH_HWE_HTAPLL_FBSLIP |
-		INFINIPATH_HWE_HTAPLL_RFSLIP |
-		INFINIPATH_HWE_SERDESPLLFAILED |
-		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
-		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
-
-	infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
-	infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
-}
-
-/**
- * ipath_ht_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occurred,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
-{
-	ipath_err_t val;
-	u64 extsval;
-
-	extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
-	if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
-		ipath_dev_err(dd, "MemBIST did not complete!\n");
-
-	ipath_check_htlink(dd);
-
-	/* barring bugs, all hwerrors become interrupts */
-	val = -1LL;
-	/* don't look at crc lane1 if 8 bit */
-	if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
-		val &= ~infinipath_hwe_htclnkabyte1crcerr;
-	/* don't look at crc lane1 if 8 bit */
-	if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
-		val &= ~infinipath_hwe_htclnkbbyte1crcerr;
-
-	/*
-	 * disable RXDSYNCMEMPARITY because external serdes is unused,
-	 * and therefore the logic will never be used or initialized,
-	 * and uninitialized state will normally result in this error
-	 * being asserted.  Similarly for the external serdes pll
-	 * lock signal.
-	 */
-	val &= ~(INFINIPATH_HWE_SERDESPLLFAILED |
-		 INFINIPATH_HWE_RXDSYNCMEMPARITYERR);
-
-	/*
-	 * Disable MISCERR4 because of an inversion in the HT core
-	 * logic checking for errors that cause this bit to be set.
-	 * The errata can also cause the protocol error bit to be set
-	 * in the HT config space linkerror register(s).
-	 */
-	val &= ~INFINIPATH_HWE_HTCMISCERR4;
-
-	/*
-	 * PLL failure ignored because the MDIO interface has a logic
-	 * problem for reads, on Comstock and Ponderosa.  BRINGUP
-	 */
-	if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
-		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-	dd->ipath_hwerrmask = val;
-}
-
-/**
- * ipath_ht_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
-{
-	u64 val, config1;
-	int ret = 0, change = 0;
-
-	ipath_dbg("Trying to bringup serdes\n");
-
-	if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
-	    INFINIPATH_HWE_SERDESPLLFAILED)
-	{
-		ipath_dbg("At start, serdes PLL failed bit set in "
-			  "hwerrstatus, clearing and continuing\n");
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-				 INFINIPATH_HWE_SERDESPLLFAILED);
-	}
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-	config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
-	ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx "
-		   "config1=%llx, sstatus=%llx xgxs %llx\n",
-		   (unsigned long long) val, (unsigned long long) config1,
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-	/* force reset on */
-	val |= INFINIPATH_SERDC0_RESET_PLL
-		/* | INFINIPATH_SERDC0_RESET_MASK */
-		;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-	udelay(15);		/* need pll reset set at least for a bit */
-
-	if (val & INFINIPATH_SERDC0_RESET_PLL) {
-		u64 val2 = val &= ~INFINIPATH_SERDC0_RESET_PLL;
-		/* set lane resets, and tx idle, during pll reset */
-		val2 |= INFINIPATH_SERDC0_RESET_MASK |
-			INFINIPATH_SERDC0_TXIDLE;
-		ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing "
-			   "%llx)\n", (unsigned long long) val2);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
-				 val2);
-		/*
-		 * be sure chip saw it
-		 */
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-		/*
-		 * need pll reset clear at least 11 usec before lane
-		 * resets cleared; give it a few more
-		 */
-		udelay(15);
-		val = val2;	/* for check below */
-	}
-
-	if (val & (INFINIPATH_SERDC0_RESET_PLL |
-		   INFINIPATH_SERDC0_RESET_MASK |
-		   INFINIPATH_SERDC0_TXIDLE)) {
-		val &= ~(INFINIPATH_SERDC0_RESET_PLL |
-			 INFINIPATH_SERDC0_RESET_MASK |
-			 INFINIPATH_SERDC0_TXIDLE);
-		/* clear them */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
-				 val);
-	}
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-	if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
-	     INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
-		val &= ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
-			 INFINIPATH_XGXS_MDIOADDR_SHIFT);
-		/*
-		 * we use address 3
-		 */
-		val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
-		change = 1;
-	}
-	if (val & INFINIPATH_XGXS_RESET) {
-		/* normally true after boot */
-		val &= ~INFINIPATH_XGXS_RESET;
-		change = 1;
-	}
-	if (change)
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-	/* clear current and de-emphasis bits */
-	config1 &= ~0x0ffffffff00ULL;
-	/* set current to 20ma */
-	config1 |= 0x00000000000ULL;
-	/* set de-emphasis to -5.68dB */
-	config1 |= 0x0cccc000000ULL;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
-	ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx "
-		   "config1=%llx, sstatus=%llx xgxs %llx\n",
-		   (unsigned long long) val, (unsigned long long) config1,
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-	if (!ipath_waitfor_mdio_cmdready(dd)) {
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_mdio,
-				 ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
-						IPATH_MDIO_CTRL_XGXS_REG_8,
-						0));
-		if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
-					   IPATH_MDIO_DATAVALID, &val))
-			ipath_dbg("Never got MDIO data for XGXS status "
-				  "read\n");
-		else
-			ipath_cdbg(VERBOSE, "MDIO Read reg8, "
-				   "'bank' 31 %x\n", (u32) val);
-	} else
-		ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
-
-	return ret;		/* for now, say we always succeeded */
-}
-
-/**
- * ipath_ht_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * driver is being unloaded
- */
-static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
-{
-	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-	val |= INFINIPATH_SERDC0_TXIDLE;
-	ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
-		  (unsigned long long) val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-static int ipath_ht_intconfig(struct ipath_devdata *dd)
-{
-	int ret;
-
-	if (!dd->ipath_intconfig) {
-		ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
-			      "interrupt address\n");
-		ret = 1;
-		goto bail;
-	}
-
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
-			 dd->ipath_intconfig);	/* interrupt address */
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_ht_put_tid - write a TID in chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: 0 for eager, 1 for expected
- * @pa: physical address of in-memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used for both the full cleanup on exit, as well as the normal
- * setup and teardown.
- */
-static void ipath_ht_put_tid(struct ipath_devdata *dd,
-			     u64 __iomem *tidptr, u32 type,
-			     unsigned long pa)
-{
-	if (pa != dd->ipath_tidinvalid) {
-		if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
-			dev_info(&dd->pcidev->dev,
-				 "physaddr %lx has more than "
-				 "40 bits, using only 40!!!\n", pa);
-			pa &= INFINIPATH_RT_ADDR_MASK;
-		}
-		if (type == 0)
-			pa |= dd->ipath_tidtemplate;
-		else {
-			/* in words (fixed, full page).  */
-			u64 lenvalid = PAGE_SIZE >> 2;
-			lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT;
-			pa |= lenvalid | INFINIPATH_RT_VALID;
-		}
-	}
-	if (dd->ipath_kregbase)
-		writeq(pa, tidptr);
-}
-
-/**
- * ipath_ht_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * Used from ipath_close(), and at chip initialization.
- */
-static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
-	u64 __iomem *tidbase;
-	int i;
-
-	if (!dd->ipath_kregbase)
-		return;
-
-	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
-	/*
-	 * need to invalidate all of the expected TID entries for this
-	 * port, so we don't have valid entries that might somehow get
-	 * used (early in next use of this port, or through some bug)
-	 */
-	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-				   dd->ipath_rcvtidbase +
-				   port * dd->ipath_rcvtidcnt *
-				   sizeof(*tidbase));
-	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
-		ipath_ht_put_tid(dd, &tidbase[i], 1, dd->ipath_tidinvalid);
-
-	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-				   dd->ipath_rcvegrbase +
-				   port * dd->ipath_rcvegrcnt *
-				   sizeof(*tidbase));
-
-	for (i = 0; i < dd->ipath_rcvegrcnt; i++)
-		ipath_ht_put_tid(dd, &tidbase[i], 0, dd->ipath_tidinvalid);
-}
-
-/**
- * ipath_ht_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values that we use a lot, to avoid recalculating them each time
- */
-static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
-{
-	dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2;
-	dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT;
-	dd->ipath_tidtemplate |= INFINIPATH_RT_VALID;
-
-	/*
-	 * work around chip errata bug 7358, by marking invalid tids
-	 * as having max length
-	 */
-	dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) <<
-		INFINIPATH_RT_BUFSIZE_SHIFT;
-}
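-
-/*
- * Illustrative sketch, not from the original source: an expected-TID
- * entry written via ipath_ht_put_tid() above is composed as
- *
- *	pa | ((PAGE_SIZE >> 2) << INFINIPATH_RT_BUFSIZE_SHIFT)
- *	   | INFINIPATH_RT_VALID
- *
- * i.e. a 40-bit physical address plus a buffer length in 32-bit
- * words and a valid bit; eager entries instead OR in
- * ipath_tidtemplate, which encodes ipath_ibmaxlen the same way.
- */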
-
-static int ipath_ht_early_init(struct ipath_devdata *dd)
-{
-	u32 __iomem *piobuf;
-	u32 pioincr, val32, egrsize;
-	int i;
-
-	/*
-	 * one cache line; long IB headers will spill over into received
-	 * buffer
-	 */
-	dd->ipath_rcvhdrentsize = 16;
-	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
-
-	/*
-	 * For HT-400, we allocate a somewhat overly large eager buffer,
-	 * such that we can guarantee that we can receive the largest
-	 * packet that we can send out.  To truly support a 4KB MTU,
-	 * we need to bump this to a large value.  To date, other than
-	 * testing, we have never encountered an HCA that can really
-	 * send 4KB MTU packets, so we do not handle that (we'll get
-	 * error interrupts if we ever see one).
-	 */
-	dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
-	egrsize = dd->ipath_rcvegrbufsize;
-
-	/*
-	 * the min() check here is currently a nop, but it may not
-	 * always be, depending on just how we do ipath_rcvegrbufsize
-	 */
-	dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
-				 dd->ipath_rcvegrbufsize);
-	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-	ipath_ht_tidtemplate(dd);
-
-	/*
-	 * zero all the TID entries at startup.  We do this for sanity,
-	 * in case of a previous driver crash of some kind, and also
-	 * because the chip powers up with these memories in an unknown
-	 * state.  Use portcnt, not cfgports, since this is for the
-	 * full chip, not for current (possibly different) configuration
-	 * value.
-	 * Chip Errata bug 6447
-	 */
-	for (val32 = 0; val32 < dd->ipath_portcnt; val32++)
-		ipath_ht_clear_tids(dd, val32);
-
-	/*
-	 * write the pbc of each buffer, to be sure it's initialized, then
-	 * cancel all the buffers, and also abort any packets that might
-	 * have been in flight for some reason (the latter is for driver
-	 * unload/reload, but isn't a bad idea at first init).	PIO send
-	 * isn't enabled at this point, so there is no danger of sending
-	 * these out on the wire.
-	 * Chip Errata bug 6610
-	 */
-	piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) +
-				  dd->ipath_piobufbase);
-	pioincr = dd->ipath_palign / sizeof(*piobuf);
-	for (i = 0; i < dd->ipath_piobcnt2k; i++) {
-		/*
-		 * reasonable word count, just to init pbc
-		 */
-		writel(16, piobuf);
-		piobuf += pioincr;
-	}
-	/*
-	 * self-clearing
-	 */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 INFINIPATH_S_ABORT);
-
-	ipath_get_eeprom_info(dd);
-	if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
-		dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
-		/*
-		 * Later production HT-460 has same changes as HT-465, so
-		 * can use GPIO interrupts.  They have serial #'s starting
-		 * with 128, rather than 112.
-		 */
-		dd->ipath_flags |= IPATH_GPIO_INTR;
-		dd->ipath_flags &= ~IPATH_POLL_RX_INTR;
-	}
-	return 0;
-}
-
-/**
- * ipath_ht_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port data
- * @kbase: ipath_base_info pointer
- *
- * We set the HT flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
-	struct ipath_base_info *kinfo = kbase;
-
-	kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT |
-		IPATH_RUNTIME_RCVHDR_COPY;
-
-	return 0;
-}
-
-/**
- * ipath_init_ht400_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_ht400_funcs(struct ipath_devdata *dd)
-{
-	dd->ipath_f_intrsetup = ipath_ht_intconfig;
-	dd->ipath_f_bus = ipath_setup_ht_config;
-	dd->ipath_f_reset = ipath_setup_ht_reset;
-	dd->ipath_f_get_boardname = ipath_ht_boardname;
-	dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
-	dd->ipath_f_early_init = ipath_ht_early_init;
-	dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
-	dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
-	dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes;
-	dd->ipath_f_clear_tids = ipath_ht_clear_tids;
-	dd->ipath_f_put_tid = ipath_ht_put_tid;
-	dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
-	dd->ipath_f_setextled = ipath_setup_ht_setextled;
-	dd->ipath_f_get_base_info = ipath_ht_get_base_info;
-
-	/*
-	 * initialize chip-specific variables
-	 */
-	dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
-
-	/*
-	 * setup the register offsets, since they are different for each
-	 * chip
-	 */
-	dd->ipath_kregs = &ipath_ht_kregs;
-	dd->ipath_cregs = &ipath_ht_cregs;
-
-	/*
-	 * do very early init that is needed before ipath_f_bus is
-	 * called
-	 */
-	ipath_init_ht_variables();
-}
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_iba6110.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_iba6110.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_iba6110.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_iba6110.c	2006-10-19 12:41:10.000000000 -0400
@@ -0,0 +1,1612 @@
+/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains all of the code that is specific to the InfiniPath
+ * HT chip.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "ipath_kernel.h"
+#include "ipath_registers.h"
+
+/*
+ * This lists the InfiniPath registers, in the actual chip layout.
+ * This structure should never be directly accessed.
+ *
+ * The names are in InterCap form because they're taken straight from
+ * the chip specification.  Since they're only used in this file, they
+ * don't pollute the rest of the source.
+ */
+
+struct _infinipath_do_not_use_kernel_regs {
+	unsigned long long Revision;
+	unsigned long long Control;
+	unsigned long long PageAlign;
+	unsigned long long PortCnt;
+	unsigned long long DebugPortSelect;
+	unsigned long long DebugPort;
+	unsigned long long SendRegBase;
+	unsigned long long UserRegBase;
+	unsigned long long CounterRegBase;
+	unsigned long long Scratch;
+	unsigned long long ReservedMisc1;
+	unsigned long long InterruptConfig;
+	unsigned long long IntBlocked;
+	unsigned long long IntMask;
+	unsigned long long IntStatus;
+	unsigned long long IntClear;
+	unsigned long long ErrorMask;
+	unsigned long long ErrorStatus;
+	unsigned long long ErrorClear;
+	unsigned long long HwErrMask;
+	unsigned long long HwErrStatus;
+	unsigned long long HwErrClear;
+	unsigned long long HwDiagCtrl;
+	unsigned long long MDIO;
+	unsigned long long IBCStatus;
+	unsigned long long IBCCtrl;
+	unsigned long long ExtStatus;
+	unsigned long long ExtCtrl;
+	unsigned long long GPIOOut;
+	unsigned long long GPIOMask;
+	unsigned long long GPIOStatus;
+	unsigned long long GPIOClear;
+	unsigned long long RcvCtrl;
+	unsigned long long RcvBTHQP;
+	unsigned long long RcvHdrSize;
+	unsigned long long RcvHdrCnt;
+	unsigned long long RcvHdrEntSize;
+	unsigned long long RcvTIDBase;
+	unsigned long long RcvTIDCnt;
+	unsigned long long RcvEgrBase;
+	unsigned long long RcvEgrCnt;
+	unsigned long long RcvBufBase;
+	unsigned long long RcvBufSize;
+	unsigned long long RxIntMemBase;
+	unsigned long long RxIntMemSize;
+	unsigned long long RcvPartitionKey;
+	unsigned long long ReservedRcv[10];
+	unsigned long long SendCtrl;
+	unsigned long long SendPIOBufBase;
+	unsigned long long SendPIOSize;
+	unsigned long long SendPIOBufCnt;
+	unsigned long long SendPIOAvailAddr;
+	unsigned long long TxIntMemBase;
+	unsigned long long TxIntMemSize;
+	unsigned long long ReservedSend[9];
+	unsigned long long SendBufferError;
+	unsigned long long SendBufferErrorCONT1;
+	unsigned long long SendBufferErrorCONT2;
+	unsigned long long SendBufferErrorCONT3;
+	unsigned long long ReservedSBE[4];
+	unsigned long long RcvHdrAddr0;
+	unsigned long long RcvHdrAddr1;
+	unsigned long long RcvHdrAddr2;
+	unsigned long long RcvHdrAddr3;
+	unsigned long long RcvHdrAddr4;
+	unsigned long long RcvHdrAddr5;
+	unsigned long long RcvHdrAddr6;
+	unsigned long long RcvHdrAddr7;
+	unsigned long long RcvHdrAddr8;
+	unsigned long long ReservedRHA[7];
+	unsigned long long RcvHdrTailAddr0;
+	unsigned long long RcvHdrTailAddr1;
+	unsigned long long RcvHdrTailAddr2;
+	unsigned long long RcvHdrTailAddr3;
+	unsigned long long RcvHdrTailAddr4;
+	unsigned long long RcvHdrTailAddr5;
+	unsigned long long RcvHdrTailAddr6;
+	unsigned long long RcvHdrTailAddr7;
+	unsigned long long RcvHdrTailAddr8;
+	unsigned long long ReservedRHTA[7];
+	unsigned long long Sync;	/* Software only */
+	unsigned long long Dump;	/* Software only */
+	unsigned long long SimVer;	/* Software only */
+	unsigned long long ReservedSW[5];
+	unsigned long long SerdesConfig0;
+	unsigned long long SerdesConfig1;
+	unsigned long long SerdesStatus;
+	unsigned long long XGXSConfig;
+	unsigned long long ReservedSW2[4];
+};
+
+#define IPATH_KREG_OFFSET(field) (offsetof(struct \
+    _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
+#define IPATH_CREG_OFFSET(field) (offsetof( \
+    struct infinipath_counters, field) / sizeof(u64))
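+
+/*
+ * Illustrative note, not from the original source: these macros yield
+ * u64 indices into the chip's register space, e.g.
+ * IPATH_KREG_OFFSET(Control) is 1, so a read of kr_control indexes
+ * the second u64 of the memory-mapped register block.
+ */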
+
+static const struct ipath_kregs ipath_ht_kregs = {
+	.kr_control = IPATH_KREG_OFFSET(Control),
+	.kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
+	.kr_debugport = IPATH_KREG_OFFSET(DebugPort),
+	.kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
+	.kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
+	.kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
+	.kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
+	.kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
+	.kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
+	.kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
+	.kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
+	.kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
+	.kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
+	.kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
+	.kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
+	.kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
+	.kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
+	.kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
+	.kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
+	.kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
+	.kr_intclear = IPATH_KREG_OFFSET(IntClear),
+	.kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig),
+	.kr_intmask = IPATH_KREG_OFFSET(IntMask),
+	.kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
+	.kr_mdio = IPATH_KREG_OFFSET(MDIO),
+	.kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
+	.kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
+	.kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
+	.kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
+	.kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
+	.kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
+	.kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
+	.kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
+	.kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
+	.kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
+	.kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
+	.kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
+	.kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
+	.kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
+	.kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
+	.kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
+	.kr_revision = IPATH_KREG_OFFSET(Revision),
+	.kr_scratch = IPATH_KREG_OFFSET(Scratch),
+	.kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
+	.kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
+	.kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
+	.kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
+	.kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
+	.kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
+	.kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
+	.kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
+	.kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
+	.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
+	.kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
+	.kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
+	.kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
+	.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
+	/*
+	 * These should not be used directly via ipath_read_kreg64();
+	 * use them with ipath_read_kreg64_port().
+	 */
+	.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
+	.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
+};
+
+static const struct ipath_cregs ipath_ht_cregs = {
+	.cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
+	.cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
+	.cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
+	.cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
+	.cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
+	.cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
+	.cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
+	.cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
+	.cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
+	.cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
+	.cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
+	.cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
+	/* calc from Reg_CounterRegBase + offset */
+	.cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
+	.cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
+	.cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
+	.cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
+	.cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
+	.cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
+	.cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
+	.cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
+	.cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
+	.cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
+	.cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
+	.cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
+	.cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
+	.cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
+	.cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
+	.cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
+	.cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
+	.cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
+	.cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
+	.cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
+	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
+};
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define INFINIPATH_I_RCVURG_MASK 0x1FF
+#define INFINIPATH_I_RCVAVAIL_MASK 0x1FF
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
+#define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL
+#define INFINIPATH_HWE_HTCLNKABYTE0CRCERR   0x0000000000800000ULL
+#define INFINIPATH_HWE_HTCLNKABYTE1CRCERR   0x0000000001000000ULL
+#define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR   0x0000000002000000ULL
+#define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR   0x0000000004000000ULL
+#define INFINIPATH_HWE_HTCMISCERR4          0x0000000008000000ULL
+#define INFINIPATH_HWE_HTCMISCERR5          0x0000000010000000ULL
+#define INFINIPATH_HWE_HTCMISCERR6          0x0000000020000000ULL
+#define INFINIPATH_HWE_HTCMISCERR7          0x0000000040000000ULL
+#define INFINIPATH_HWE_HTCBUSTREQPARITYERR  0x0000000080000000ULL
+#define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL
+#define INFINIPATH_HWE_HTCBUSIREQPARITYERR  0x0000000200000000ULL
+#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
+#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
+#define INFINIPATH_HWE_HTBPLL_FBSLIP        0x0200000000000000ULL
+#define INFINIPATH_HWE_HTBPLL_RFSLIP        0x0400000000000000ULL
+#define INFINIPATH_HWE_HTAPLL_FBSLIP        0x0800000000000000ULL
+#define INFINIPATH_HWE_HTAPLL_RFSLIP        0x1000000000000000ULL
+#define INFINIPATH_HWE_SERDESPLLFAILED      0x2000000000000000ULL
+
+/* kr_extstatus bits */
+#define INFINIPATH_EXTS_FREQSEL 0x2
+#define INFINIPATH_EXTS_SERDESSEL 0x4
+#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
+#define INFINIPATH_EXTS_MEMBIST_CORRECT     0x0000000000008000
+
+/*
+ * masks and bits that are different in different chips, or present only
+ * in one
+ */
+static const ipath_err_t infinipath_hwe_htcmemparityerr_mask =
+    INFINIPATH_HWE_HTCMEMPARITYERR_MASK;
+static const ipath_err_t infinipath_hwe_htcmemparityerr_shift =
+    INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT;
+
+static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr =
+    INFINIPATH_HWE_HTCLNKABYTE0CRCERR;
+static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr =
+    INFINIPATH_HWE_HTCLNKABYTE1CRCERR;
+static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr =
+    INFINIPATH_HWE_HTCLNKBBYTE0CRCERR;
+static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr =
+    INFINIPATH_HWE_HTCLNKBBYTE1CRCERR;
+
+#define _IPATH_GPIO_SDA_NUM 1
+#define _IPATH_GPIO_SCL_NUM 0
+
+#define IPATH_GPIO_SDA \
+	(1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+#define IPATH_GPIO_SCL \
+	(1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+
+/* keep the code below somewhat more readable; not used elsewhere */
+#define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |	\
+				infinipath_hwe_htclnkabyte1crcerr)
+#define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr |	\
+				infinipath_hwe_htclnkbbyte1crcerr)
+#define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |	\
+				infinipath_hwe_htclnkbbyte0crcerr)
+#define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr |	\
+				infinipath_hwe_htclnkbbyte1crcerr)
+
+static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs,
+			  char *msg, size_t msgl)
+{
+	char bitsmsg[64];
+	ipath_err_t crcbits = hwerrs &
+		(_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS);
+	/* don't check if 8bit HT */
+	if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
+		crcbits &= ~infinipath_hwe_htclnkabyte1crcerr;
+	/* don't check if 8bit HT */
+	if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
+		crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr;
+	/*
+	 * we'll want to ignore link errors on a link that is
+	 * not in use, if any.  For now, complain about both
+	 */
+	if (crcbits) {
+		u16 ctrl0, ctrl1;
+		snprintf(bitsmsg, sizeof bitsmsg,
+			 "[HT%s lane %s CRC (%llx); ignore till reload]",
+			 !(crcbits & _IPATH_HTLINK1_CRCBITS) ?
+			 "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS)
+				    ? "1 (B)" : "0+1 (A+B)"),
+			 !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0"
+			 : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" :
+			    "0+1"), (unsigned long long) crcbits);
+		strlcat(msg, bitsmsg, msgl);
+
+		/*
+		 * print extra info for debugging.  slave/primary
+		 * config word 4, 8 (link control 0, 1)
+		 */
+
+		if (pci_read_config_word(dd->pcidev,
+					 dd->ipath_ht_slave_off + 0x4,
+					 &ctrl0))
+			dev_info(&dd->pcidev->dev, "Couldn't read "
+				 "linkctrl0 of slave/primary "
+				 "config block\n");
+		else if (!(ctrl0 & 1 << 6))
+			/* not if EOC bit set */
+			ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0,
+				  ((ctrl0 >> 8) & 7) ? " CRC" : "",
+				  ((ctrl0 >> 4) & 1) ? "linkfail" :
+				  "");
+		if (pci_read_config_word(dd->pcidev,
+					 dd->ipath_ht_slave_off + 0x8,
+					 &ctrl1))
+			dev_info(&dd->pcidev->dev, "Couldn't read "
+				 "linkctrl1 of slave/primary "
+				 "config block\n");
+		else if (!(ctrl1 & 1 << 6))
+			/* not if EOC bit set */
+			ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1,
+				  ((ctrl1 >> 8) & 7) ? " CRC" : "",
+				  ((ctrl1 >> 4) & 1) ? "linkfail" :
+				  "");
+
+		/* disable until driver reloaded */
+		dd->ipath_hwerrmask &= ~crcbits;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+		ipath_dbg("HT crc errs: %s\n", msg);
+	} else
+		ipath_dbg("ignoring HT crc errors 0x%llx, "
+			  "not in use\n", (unsigned long long)
+			  (hwerrs & (_IPATH_HTLINK0_CRCBITS |
+				     _IPATH_HTLINK1_CRCBITS)));
+}
+
+/**
+ * ipath_ht_handle_hwerrors - display hardware errors
+ * @dd: the infinipath device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now we'll
+ * print them and continue.  We reuse the same message buffer as
+ * ipath_handle_errors() to avoid excessive stack usage.
+ */
+static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+				     size_t msgl)
+{
+	ipath_err_t hwerrs;
+	u32 bits, ctrl;
+	int isfatal = 0;
+	char bitsmsg[64];
+
+	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
+
+	if (!hwerrs) {
+		ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
+		/*
+		 * Bail rather than printing confusing messages; this seems
+		 * to be related to clearing the crc error, or the pll
+		 * error, during init.
+		 */
+		goto bail;
+	} else if (hwerrs == -1LL) {
+		ipath_dev_err(dd, "Read of hardware error status failed "
+			      "(all bits set); ignoring\n");
+		goto bail;
+	}
+	ipath_stats.sps_hwerrs++;
+
+	/* Always clear the error status register, except MEMBISTFAIL,
+	 * regardless of whether we continue or stop using the chip.
+	 * We want that set so we know it failed, even across driver reload.
+	 * We'll still ignore it in the hwerrmask.  We do this partly for
+	 * diagnostics, but also for support */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+			 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
+
+	hwerrs &= dd->ipath_hwerrmask;
+
+	/*
+	 * make sure we get this much out, unless told to be quiet,
+	 * or it's occurred within the last 5 seconds
+	 */
+	if ((hwerrs & ~dd->ipath_lasthwerror) ||
+	    (ipath_debug & __IPATH_VERBDBG))
+		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
+			 "(cleared)\n", (unsigned long long) hwerrs);
+	dd->ipath_lasthwerror |= hwerrs;
+
+	if (hwerrs & ~infinipath_hwe_bitsextant)
+		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
+			      "%llx set\n", (unsigned long long)
+			      (hwerrs & ~infinipath_hwe_bitsextant));
+
+	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
+	if (ctrl & INFINIPATH_C_FREEZEMODE) {
+		if (hwerrs) {
+			/*
+			 * if any set that we aren't ignoring; only
+			 * make the complaint once, in case it's stuck
+			 * or recurring, and we get here multiple
+			 * times.
+			 */
+			if (dd->ipath_flags & IPATH_INITTED) {
+				ipath_dev_err(dd, "Fatal Hardware Error (freeze "
+					      "mode), no longer usable, SN %.16s\n",
+						  dd->ipath_serial);
+				isfatal = 1;
+			}
+			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+			/* mark as having had error */
+			*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+			/*
+			 * mark as not usable, at a minimum until driver
+			 * is reloaded, probably until reboot, since no
+			 * other reset is possible.
+			 */
+			dd->ipath_flags &= ~IPATH_INITTED;
+		} else {
+			ipath_dbg("Clearing freezemode on ignored hardware "
+				  "error\n");
+			ctrl &= ~INFINIPATH_C_FREEZEMODE;
+			ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+					 ctrl);
+		}
+	}
+
+	*msg = '\0';
+
+	/*
+	 * may someday want to decode which bits belong to which
+	 * functional area for parity errors, etc.
+	 */
+	if (hwerrs & (infinipath_hwe_htcmemparityerr_mask
+		      << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) &
+			      INFINIPATH_HWE_HTCMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ",
+			 bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+	if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
+		      << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) &
+			      INFINIPATH_HWE_RXEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ",
+			 bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+	if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
+		      << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) &
+			      INFINIPATH_HWE_TXEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ",
+			 bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+	if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR)
+		strlcat(msg, "[IB2IPATH Parity]", msgl);
+	if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR)
+		strlcat(msg, "[IPATH2IB Parity]", msgl);
+	if (hwerrs & INFINIPATH_HWE_HTCBUSIREQPARITYERR)
+		strlcat(msg, "[HTC Ireq Parity]", msgl);
+	if (hwerrs & INFINIPATH_HWE_HTCBUSTREQPARITYERR)
+		strlcat(msg, "[HTC Treq Parity]", msgl);
+	if (hwerrs & INFINIPATH_HWE_HTCBUSTRESPPARITYERR)
+		strlcat(msg, "[HTC Tresp Parity]", msgl);
+
+	if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
+		hwerr_crcbits(dd, hwerrs, msg, msgl);
+
+	if (hwerrs & INFINIPATH_HWE_HTCMISCERR5)
+		strlcat(msg, "[HT core Misc5]", msgl);
+	if (hwerrs & INFINIPATH_HWE_HTCMISCERR6)
+		strlcat(msg, "[HT core Misc6]", msgl);
+	if (hwerrs & INFINIPATH_HWE_HTCMISCERR7)
+		strlcat(msg, "[HT core Misc7]", msgl);
+	if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
+		strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
+			msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+	}
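+/* any PLL slip, core or HT link, makes the hardware unusable */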
+#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |	\
+			 INFINIPATH_HWE_COREPLL_RFSLIP |	\
+			 INFINIPATH_HWE_HTBPLL_FBSLIP |		\
+			 INFINIPATH_HWE_HTBPLL_RFSLIP |		\
+			 INFINIPATH_HWE_HTAPLL_FBSLIP |		\
+			 INFINIPATH_HWE_HTAPLL_RFSLIP)
+
+	if (hwerrs & _IPATH_PLL_FAIL) {
+		snprintf(bitsmsg, sizeof bitsmsg,
+			 "[PLL failed (%llx), InfiniPath hardware unusable]",
+			 (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
+		strlcat(msg, bitsmsg, msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+	}
+
+	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
+		/*
+		 * If it occurs, it is left masked since the external
+		 * interface is unused
+		 */
+		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+	}
+
+	if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR)
+		strlcat(msg, "[Rx Dsync]", msgl);
+	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED)
+		strlcat(msg, "[SerDes PLL]", msgl);
+
+	ipath_dev_err(dd, "%s hardware error\n", msg);
+	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
+		/*
+		 * for status file; if no trailing brace is copied,
+		 * we'll know it was truncated.
+		 */
+		snprintf(dd->ipath_freezemsg,
+			 dd->ipath_freezelen, "{%s}", msg);
+
+bail:;
+}
+
+/**
+ * ipath_ht_boardname - fill in the board name
+ * @dd: the infinipath device
+ * @name: the output buffer
+ * @namelen: the size of the output buffer
+ *
+ * fill in the board name, based on the board revision register
+ */
+static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
+			      size_t namelen)
+{
+	char *n = NULL;
+	u8 boardrev = dd->ipath_boardrev;
+	int ret;
+
+	switch (boardrev) {
+	case 4:		/* Ponderosa is one of the bringup boards */
+		n = "Ponderosa";
+		break;
+	case 5:
+		/*
+		 * original production board; two production levels, with
+		 * different serial number ranges.  See ipath_ht_early_init()
+		 * for the case where we enable IPATH_GPIO_INTR for the later
+		 * serial # range.
+		 */
+		n = "InfiniPath_QHT7040";
+		break;
+	case 6:
+		n = "OEM_Board_3";
+		break;
+	case 7:
+		/* small form factor production board */
+		n = "InfiniPath_QHT7140";
+		break;
+	case 8:
+		n = "LS/X-1";
+		break;
+	case 9:		/* Comstock bringup test board */
+		n = "Comstock";
+		break;
+	case 10:
+		n = "OEM_Board_2";
+		break;
+	case 11:
+		n = "InfiniPath_HT-470"; /* obsoleted */
+		break;
+	case 12:
+		n = "OEM_Board_4";
+		break;
+	default:		/* don't know, just print the number */
+		ipath_dev_err(dd, "Don't yet know about board "
+			      "with ID %u\n", boardrev);
+		snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
+			 boardrev);
+		break;
+	}
+	if (n)
+		snprintf(name, namelen, "%s", n);
+
+	if (dd->ipath_majrev != 3 ||
+	    dd->ipath_minrev < 2 || dd->ipath_minrev > 3) {
+		/*
+		 * This version of the driver only supports Rev 3.2 and 3.3
+		 */
+		ipath_dev_err(dd,
+			      "Unsupported InfiniPath hardware revision %u.%u!\n",
+			      dd->ipath_majrev, dd->ipath_minrev);
+		ret = 1;
+		goto bail;
+	}
+	/*
+	 * pkt/word counters are 32 bit, and therefore wrap fast enough
+	 * that we snapshot them from a timer, and maintain 64 bit shadow
+	 * copies
+	 */
+	dd->ipath_flags |= IPATH_32BITCOUNTERS;
+	if (dd->ipath_htspeed != 800)
+		ipath_dev_err(dd,
+			      "Incorrectly configured for HT @ %uMHz\n",
+			      dd->ipath_htspeed);
+	if (dd->ipath_boardrev == 7 || dd->ipath_boardrev == 11 ||
+	    dd->ipath_boardrev == 6)
+		dd->ipath_flags |= IPATH_GPIO_INTR;
+	else
+		dd->ipath_flags |= IPATH_POLL_RX_INTR;
+	if (dd->ipath_boardrev == 8) {	/* LS/X-1 */
+		u64 val;
+		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
+		if (val & INFINIPATH_EXTS_SERDESSEL) {
+			/*
+			 * The chip is hardware-disabled and will not
+			 * be able to bring up the link in any case.
+			 * We special-case this and abort early, to
+			 * avoid later messages.  We also set the
+			 * DISABLED status bit.
+			 */
+			ipath_dbg("Unit %u is hardware-disabled\n",
+				  dd->ipath_unit);
+			*dd->ipath_statusp |= IPATH_STATUS_DISABLED;
+			/* this value is handled differently */
+			ret = 2;
+			goto bail;
+		}
+	}
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+static void ipath_check_htlink(struct ipath_devdata *dd)
+{
+	u8 linkerr, link_off, i;
+
+	for (i = 0; i < 2; i++) {
+		link_off = dd->ipath_ht_slave_off + i * 4 + 0xd;
+		if (pci_read_config_byte(dd->pcidev, link_off, &linkerr))
+			dev_info(&dd->pcidev->dev, "Couldn't read "
+				 "linkerror%d of HT slave/primary block\n",
+				 i);
+		else if (linkerr & 0xf0) {
+			ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
+				   "clearing\n", linkerr >> 4, i);
+			/*
+			 * writing the linkerr bits that are set should
+			 * clear them
+			 */
+			if (pci_write_config_byte(dd->pcidev, link_off,
+						  linkerr))
+				ipath_dbg("Failed write to clear HT "
+					  "linkerror%d\n", i);
+			if (pci_read_config_byte(dd->pcidev, link_off,
+						 &linkerr))
+				dev_info(&dd->pcidev->dev,
+					 "Couldn't reread linkerror%d of "
+					 "HT slave/primary block\n", i);
+			else if (linkerr & 0xf0)
+				dev_info(&dd->pcidev->dev,
+					 "HT linkerror%d bits 0x%x "
+					 "couldn't be cleared\n",
+					 i, linkerr >> 4);
+		}
+	}
+}
+
+static int ipath_setup_ht_reset(struct ipath_devdata *dd)
+{
+	ipath_dbg("No reset possible for this InfiniPath hardware\n");
+	return 0;
+}
+
+#define HT_CAPABILITY_ID   0x08	/* HT capabilities not defined in kernel */
+#define HT_INTR_DISC_CONFIG  0x80	/* HT interrupt and discovery cap */
+#define HT_INTR_REG_INDEX    2	/* intconfig requires indirect accesses */
+
+/*
+ * Bits 13-15 of the capability command word being 0 identifies the
+ * slave/primary interface block.  Clear any HT CRC errors.  We only
+ * bother to do this at load time, because it's OK if it happened
+ * before we were loaded (first time after boot/reset), but any time
+ * after that, it's fatal anyway.  We also need to skip the check for
+ * upper byte errors if we are in 8 bit mode, so figure out our width.
+ * For now, at least, also complain if it's 8 bit.
+ */
+static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
+			     int pos, u8 cap_type)
+{
+	u8 linkwidth = 0, linkerr, link_a_b_off, link_off;
+	u16 linkctrl = 0;
+	int i;
+
+	dd->ipath_ht_slave_off = pos;
+	/*
+	 * the Master Host bit (bit 2 of the capability type) tells us
+	 * whether link A (offset 0) or link B (offset 4) faces the host
+	 */
+	if ((cap_type >> 2) & 1)
+		link_a_b_off = 4;
+	else
+		link_a_b_off = 0;
+	ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n",
+		   link_a_b_off ? 1 : 0,
+		   link_a_b_off ? 'B' : 'A');
+
+	link_a_b_off += pos;
+
+	/*
+	 * check both link control registers; clear both HT CRC sets if
+	 * necessary.
+	 */
+	for (i = 0; i < 2; i++) {
+		link_off = pos + i * 4 + 0x4;
+		if (pci_read_config_word(pdev, link_off, &linkctrl))
+			ipath_dev_err(dd, "Couldn't read HT link control%d "
+				      "register\n", i);
+		else if (linkctrl & (0xf << 8)) {
+			ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error "
+				   "bits %x\n", i, linkctrl & (0xf << 8));
+			/*
+			 * write the whole word back to clear the error:
+			 * the CRC error bits are write-1-to-clear, and
+			 * the read-write bits keep their read values.
+			 * (a byte write of linkctrl & (0xf << 8) would
+			 * truncate away the bits we're trying to clear)
+			 */
+			pci_write_config_word(pdev, link_off, linkctrl);
+		}
+	}
+
+	/*
+	 * As with HT CRC bits, same for protocol errors that might occur
+	 * during boot.
+	 */
+	for (i = 0; i < 2; i++) {
+		link_off = pos + i * 4 + 0xd;
+		if (pci_read_config_byte(pdev, link_off, &linkerr))
+			dev_info(&pdev->dev, "Couldn't read linkerror%d "
+				 "of HT slave/primary block\n", i);
+		else if (linkerr & 0xf0) {
+			ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
+				   "clearing\n", linkerr >> 4, i);
+			/*
+			 * writing the linkerr bits that are set will clear
+			 * them
+			 */
+			if (pci_write_config_byte
+			    (pdev, link_off, linkerr))
+				ipath_dbg("Failed write to clear HT "
+					  "linkerror%d\n", i);
+			if (pci_read_config_byte(pdev, link_off, &linkerr))
+				dev_info(&pdev->dev, "Couldn't reread "
+					 "linkerror%d of HT slave/primary "
+					 "block\n", i);
+			else if (linkerr & 0xf0)
+				dev_info(&pdev->dev, "HT linkerror%d bits "
+					 "0x%x couldn't be cleared\n",
+					 i, linkerr >> 4);
+		}
+	}
+
+	/*
+	 * this is just for our link to the host, not devices connected
+	 * through tunnel.
+	 */
+
+	if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth))
+		ipath_dev_err(dd, "Couldn't read HT link width "
+			      "config register\n");
+	else {
+		u32 width;
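+		/*
+		 * 3-bit HT link width encoding: 0 = 8 bits, 1 = 16,
+		 * 3 = 32, 4 = 2, 5 = 4 (see the cases below)
+		 */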
+		switch (linkwidth & 7) {
+		case 5:
+			width = 4;
+			break;
+		case 4:
+			width = 2;
+			break;
+		case 3:
+			width = 32;
+			break;
+		case 1:
+			width = 16;
+			break;
+		case 0:
+		default:	/* if wrong, assume 8 bit */
+			width = 8;
+			break;
+		}
+
+		dd->ipath_htwidth = width;
+
+		if (linkwidth != 0x11) {
+			ipath_dev_err(dd, "Not configured for 16 bit HT "
+				      "(%x)\n", linkwidth);
+			if (!(linkwidth & 0xf)) {
+				ipath_dbg("Will ignore HT lane1 errors\n");
+				dd->ipath_flags |= IPATH_8BIT_IN_HT0;
+			}
+		}
+	}
+
+	/*
+	 * this is just for our link to the host, not devices connected
+	 * through tunnel.
+	 */
+	if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth))
+		ipath_dev_err(dd, "Couldn't read HT link frequency "
+			      "config register\n");
+	else {
+		u32 speed;
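+		/*
+		 * decode the HT link frequency field into MHz;
+		 * reserved and vendor-specific values fall through
+		 * to the 200MHz default below
+		 */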
+		switch (linkwidth & 0xf) {
+		case 6:
+			speed = 1000;
+			break;
+		case 5:
+			speed = 800;
+			break;
+		case 4:
+			speed = 600;
+			break;
+		case 3:
+			speed = 500;
+			break;
+		case 2:
+			speed = 400;
+			break;
+		case 1:
+			speed = 300;
+			break;
+		default:
+			/*
+			 * assume reserved and vendor-specific are 200...
+			 */
+		case 0:
+			speed = 200;
+			break;
+		}
+		dd->ipath_htspeed = speed;
+	}
+}
+
+static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev,
+			    int pos)
+{
+	u32 int_handler_addr_lower;
+	u32 int_handler_addr_upper;
+	u64 ihandler;
+	u32 intvec;
+
+	/* use indirection register to get the intr handler */
+	pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x10);
+	pci_read_config_dword(pdev, pos + 4, &int_handler_addr_lower);
+	pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x11);
+	pci_read_config_dword(pdev, pos + 4, &int_handler_addr_upper);
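+	/*
+	 * each index written at pos + HT_INTR_REG_INDEX selects which
+	 * dword of the capability shows up in the dataport at pos + 4;
+	 * 0x10 and 0x11 are the low and high halves of the interrupt
+	 * handler address
+	 */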
+
+	ihandler = (u64) int_handler_addr_lower |
+		((u64) int_handler_addr_upper << 32);
+
+	/*
+	 * kernels with CONFIG_PCI_MSI set the vector in the irq field of
+	 * struct pci_dev, so we use that to program the internal
+	 * interrupt register (not config space) with that value. The BIOS
+	 * must still have done the basic MSI setup.
+	 */
+	intvec = pdev->irq;
+	/*
+	 * clear any vector bits there; normally not set but we'll overload
+	 * this for some debug purposes (setting the HTC debug register
+	 * value from software, rather than GPIOs), so it might be set on a
+	 * driver reload.
+	 */
+	ihandler &= ~0xff0000;
+	/* x86 vector goes in intrinfo[23:16] */
+	ihandler |= intvec << 16;
+	ipath_cdbg(VERBOSE, "ihandler lower %x, upper %x, intvec %x, "
+		   "interruptconfig %llx\n", int_handler_addr_lower,
+		   int_handler_addr_upper, intvec,
+		   (unsigned long long) ihandler);
+
+	/* can't program yet, so save for interrupt setup */
+	dd->ipath_intconfig = ihandler;
+	/* keep going, so we find link control stuff also */
+
+	return ihandler != 0;
+}
+
+/**
+ * ipath_setup_ht_config - setup the interruptconfig register
+ * @dd: the infinipath device
+ * @pdev: the PCI device
+ *
+ * setup the interruptconfig register from the HT config info.
+ * Also clear CRC errors in HT linkcontrol, if necessary.
+ * This is done only for the real hardware.  It is done before
+ * chip address space is initted, so can't touch infinipath registers
+ */
+static int ipath_setup_ht_config(struct ipath_devdata *dd,
+				 struct pci_dev *pdev)
+{
+	int pos, ret = 0;
+	int ihandler = 0;
+
+	/*
+	 * Read the capability info to find the interrupt info, and also
+	 * handle clearing CRC errors in linkctrl register if necessary.  We
+	 * do this early, before we ever enable errors or hardware errors,
+	 * mostly to avoid causing the chip to enter freeze mode.
+	 */
+	pos = pci_find_capability(pdev, HT_CAPABILITY_ID);
+	if (!pos) {
+		ipath_dev_err(dd, "Couldn't find HyperTransport "
+			      "capability; no interrupts\n");
+		ret = -ENODEV;
+		goto bail;
+	}
+	do {
+		u8 cap_type;
+
+		/* the HT capability type byte is 3 bytes after the
+		 * capability byte.
+		 */
+		if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
+			dev_info(&pdev->dev, "Couldn't read config "
+				 "command @ %d\n", pos);
+			continue;
+		}
+		if (!(cap_type & 0xE0))
+			slave_or_pri_blk(dd, pdev, pos, cap_type);
+		else if (cap_type == HT_INTR_DISC_CONFIG)
+			ihandler = set_int_handler(dd, pdev, pos);
+	} while ((pos = pci_find_next_capability(pdev, pos,
+						 HT_CAPABILITY_ID)));
+
+	if (!ihandler) {
+		ipath_dev_err(dd, "Couldn't find interrupt handler in "
+			      "config space\n");
+		ret = -ENODEV;
+	}
+
+bail:
+	return ret;
+}
+
+/**
+ * ipath_setup_ht_cleanup - clean up any per-chip chip-specific stuff
+ * @dd: the infinipath device
+ *
+ * Called during driver unload.
+ * This is currently a nop for the HT chip, not for all chips
+ */
+static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
+{
+}
+
+/**
+ * ipath_setup_ht_setextled - set the state of the two external LEDs
+ * @dd: the infinipath device
+ * @lst: the L state
+ * @ltst: the LT state
+ *
+ * Set the state of the two external LEDs, to indicate physical and
+ * logical state of IB link.   For this chip (at least with recommended
+ * board pinouts), LED1 is Green (physical state), and LED2 is Yellow
+ * (logical state)
+ *
+ * Note:  We try to match the Mellanox HCA LED behavior as best
+ * we can.  Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate.  That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
+				     u64 lst, u64 ltst)
+{
+	u64 extctl;
+
+	/* the diags use the LED to indicate diag info, so we leave
+	 * the external LED alone when the diags are running */
+	if (ipath_diag_inuse)
+		return;
+
+	/*
+	 * start by setting both LED control bits to off, then turn
+	 * on the appropriate bit(s).
+	 */
+	if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */
+		/*
+		 * major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF
+		 * is inverted,  because it is normally used to indicate
+		 * a hardware fault at reset, if there were errors
+		 */
+		extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON)
+			| INFINIPATH_EXTC_LEDGBLERR_OFF;
+		if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
+			extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF;
+		if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
+			extctl |= INFINIPATH_EXTC_LEDGBLOK_ON;
+	}
+	else {
+		extctl = dd->ipath_extctrl &
+			~(INFINIPATH_EXTC_LED1PRIPORT_ON |
+			  INFINIPATH_EXTC_LED2PRIPORT_ON);
+		if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
+			extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
+		if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
+			extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
+	}
+	dd->ipath_extctrl = extctl;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
+}
+
+static void ipath_init_ht_variables(void)
+{
+	ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
+	ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
+	ipath_gpio_sda = IPATH_GPIO_SDA;
+	ipath_gpio_scl = IPATH_GPIO_SCL;
+
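+	/*
+	 * the *_bitsextant masks enumerate every bit this chip can
+	 * actually set; anything outside them is reported as an
+	 * unknown error
+	 */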
+	infinipath_i_bitsextant =
+		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
+		(INFINIPATH_I_RCVAVAIL_MASK <<
+		 INFINIPATH_I_RCVAVAIL_SHIFT) |
+		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
+		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
+
+	infinipath_e_bitsextant =
+		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
+		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
+		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
+		INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
+		INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
+		INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
+		INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+		INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
+		INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
+		INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
+		INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
+		INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
+		INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
+		INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
+		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
+		INFINIPATH_E_HARDWARE;
+
+	infinipath_hwe_bitsextant =
+		(INFINIPATH_HWE_HTCMEMPARITYERR_MASK <<
+		 INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) |
+		(INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
+		 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
+		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
+		INFINIPATH_HWE_HTCLNKABYTE0CRCERR |
+		INFINIPATH_HWE_HTCLNKABYTE1CRCERR |
+		INFINIPATH_HWE_HTCLNKBBYTE0CRCERR |
+		INFINIPATH_HWE_HTCLNKBBYTE1CRCERR |
+		INFINIPATH_HWE_HTCMISCERR4 |
+		INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 |
+		INFINIPATH_HWE_HTCMISCERR7 |
+		INFINIPATH_HWE_HTCBUSTREQPARITYERR |
+		INFINIPATH_HWE_HTCBUSTRESPPARITYERR |
+		INFINIPATH_HWE_HTCBUSIREQPARITYERR |
+		INFINIPATH_HWE_RXDSYNCMEMPARITYERR |
+		INFINIPATH_HWE_MEMBISTFAILED |
+		INFINIPATH_HWE_COREPLL_FBSLIP |
+		INFINIPATH_HWE_COREPLL_RFSLIP |
+		INFINIPATH_HWE_HTBPLL_FBSLIP |
+		INFINIPATH_HWE_HTBPLL_RFSLIP |
+		INFINIPATH_HWE_HTAPLL_FBSLIP |
+		INFINIPATH_HWE_HTAPLL_RFSLIP |
+		INFINIPATH_HWE_SERDESPLLFAILED |
+		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
+		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
+
+	infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
+	infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
+}
+
+/**
+ * ipath_ht_init_hwerrors - enable hardware errors
+ * @dd: the infinipath device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
+{
+	ipath_err_t val;
+	u64 extsval;
+
+	extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
+
+	if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
+		ipath_dev_err(dd, "MemBIST did not complete!\n");
+
+	ipath_check_htlink(dd);
+
+	/* barring bugs, all hwerrors become interrupts */
+	val = -1LL;
+	/* don't look at crc lane1 if 8 bit */
+	if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
+		val &= ~infinipath_hwe_htclnkabyte1crcerr;
+	/* don't look at crc lane1 if 8 bit */
+	if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
+		val &= ~infinipath_hwe_htclnkbbyte1crcerr;
+
+	/*
+	 * disable RXDSYNCMEMPARITY because external serdes is unused,
+	 * and therefore the logic will never be used or initialized,
+	 * and uninitialized state will normally result in this error
+	 * being asserted.  Similarly for the external serdes pll
+	 * lock signal.
+	 */
+	val &= ~(INFINIPATH_HWE_SERDESPLLFAILED |
+		 INFINIPATH_HWE_RXDSYNCMEMPARITYERR);
+
+	/*
+	 * Disable MISCERR4 because of an inversion in the HT core
+	 * logic checking for errors that cause this bit to be set.
+	 * The errata can also cause the protocol error bit to be set
+	 * in the HT config space linkerror register(s).
+	 */
+	val &= ~INFINIPATH_HWE_HTCMISCERR4;
+
+	/*
+	 * PLL ignored because MDIO interface has a logic problem
+	 * for reads, on Comstock and Ponderosa.  BRINGUP
+	 */
+	if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
+		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+	dd->ipath_hwerrmask = val;
+}
+
+/**
+ * ipath_ht_bringup_serdes - bring up the serdes
+ * @dd: the infinipath device
+ */
+static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
+{
+	u64 val, config1;
+	int ret = 0, change = 0;
+
+	ipath_dbg("Trying to bringup serdes\n");
+
+	if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
+	    INFINIPATH_HWE_SERDESPLLFAILED) {
+		ipath_dbg("At start, serdes PLL failed bit set in "
+			  "hwerrstatus, clearing and continuing\n");
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+				 INFINIPATH_HWE_SERDESPLLFAILED);
+	}
+
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+	config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
+
+	ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx "
+		   "config1=%llx, sstatus=%llx xgxs %llx\n",
+		   (unsigned long long) val, (unsigned long long) config1,
+		   (unsigned long long)
+		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
+		   (unsigned long long)
+		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+
+	/* force reset on */
+	val |= INFINIPATH_SERDC0_RESET_PLL
+		/* | INFINIPATH_SERDC0_RESET_MASK */
+		;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+	udelay(15);		/* need pll reset set at least for a bit */
+
+	if (val & INFINIPATH_SERDC0_RESET_PLL) {
+		u64 val2 = val & ~INFINIPATH_SERDC0_RESET_PLL;
+		/* set lane resets, and tx idle, during pll reset */
+		val2 |= INFINIPATH_SERDC0_RESET_MASK |
+			INFINIPATH_SERDC0_TXIDLE;
+		ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing "
+			   "%llx)\n", (unsigned long long) val2);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
+				 val2);
+		/*
+		 * be sure chip saw it
+		 */
+		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+		/*
+		 * need pll reset clear at least 11 usec before lane
+		 * resets cleared; give it a few more
+		 */
+		udelay(15);
+		val = val2;	/* for check below */
+	}
+
+	if (val & (INFINIPATH_SERDC0_RESET_PLL |
+		   INFINIPATH_SERDC0_RESET_MASK |
+		   INFINIPATH_SERDC0_TXIDLE)) {
+		val &= ~(INFINIPATH_SERDC0_RESET_PLL |
+			 INFINIPATH_SERDC0_RESET_MASK |
+			 INFINIPATH_SERDC0_TXIDLE);
+		/* clear them */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
+				 val);
+	}
+
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+	if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
+	     INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
+		val &= ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
+			 INFINIPATH_XGXS_MDIOADDR_SHIFT);
+		/*
+		 * we use address 3
+		 */
+		val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
+		change = 1;
+	}
+	if (val & INFINIPATH_XGXS_RESET) {
+		/* normally true after boot */
+		val &= ~INFINIPATH_XGXS_RESET;
+		change = 1;
+	}
+	if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
+	     INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv) {
+		/* need to compensate for Tx inversion in partner */
+		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
+		         INFINIPATH_XGXS_RX_POL_SHIFT);
+		val |= dd->ipath_rx_pol_inv <<
+			INFINIPATH_XGXS_RX_POL_SHIFT;
+		change = 1;
+	}
+	if (change)
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+
+	/* clear current and de-emphasis bits */
+	config1 &= ~0x0ffffffff00ULL;
+	/* set current to 20ma */
+	config1 |= 0x00000000000ULL;
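+	/* (the 20mA encoding is evidently 0, so the OR above is a no-op
+	 * kept only to document the field) */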
+	/* set de-emphasis to -5.68dB */
+	config1 |= 0x0cccc000000ULL;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
+
+	ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx "
+		   "config1=%llx, sstatus=%llx xgxs %llx\n",
+		   (unsigned long long) val, (unsigned long long) config1,
+		   (unsigned long long)
+		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
+		   (unsigned long long)
+		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+
+	if (!ipath_waitfor_mdio_cmdready(dd)) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_mdio,
+				 ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
+						IPATH_MDIO_CTRL_XGXS_REG_8,
+						0));
+		if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
+					   IPATH_MDIO_DATAVALID, &val))
+			ipath_dbg("Never got MDIO data for XGXS status "
+				  "read\n");
+		else
+			ipath_cdbg(VERBOSE, "MDIO Read reg8, "
+				   "'bank' 31 %x\n", (u32) val);
+	} else
+		ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
+
+	return ret;		/* for now, say we always succeeded */
+}
+
+/**
+ * ipath_ht_quiet_serdes - set serdes to txidle
+ * @dd: the infinipath device
+ *
+ * Called when the driver is being unloaded.
+ */
+static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
+{
+	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+
+	val |= INFINIPATH_SERDC0_TXIDLE;
+	ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
+		  (unsigned long long) val);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+}
+
+static int ipath_ht_intconfig(struct ipath_devdata *dd)
+{
+	int ret;
+
+	if (!dd->ipath_intconfig) {
+		ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
+			      "interrupt address\n");
+		ret = 1;
+		goto bail;
+	}
+
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
+			 dd->ipath_intconfig);	/* interrupt address */
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * ipath_ht_put_tid - write a TID to the chip
+ * @dd: the infinipath device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: 0 for eager, 1 for expected
+ * @pa: physical address of in-memory buffer; ipath_tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for special locking etc.
+ * It's used for both the full cleanup on exit, as well as the normal
+ * setup and teardown.
+ */
+static void ipath_ht_put_tid(struct ipath_devdata *dd,
+			     u64 __iomem *tidptr, u32 type,
+			     unsigned long pa)
+{
+	if (pa != dd->ipath_tidinvalid) {
+		if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
+			dev_info(&dd->pcidev->dev,
+				 "physaddr %lx has more than "
+				 "40 bits, using only 40!!!\n", pa);
+			pa &= INFINIPATH_RT_ADDR_MASK;
+		}
+		if (type == 0)
+			pa |= dd->ipath_tidtemplate;
+		else {
+			/* in words (fixed, full page).  */
+			u64 lenvalid = PAGE_SIZE >> 2;
+			lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT;
+			pa |= lenvalid | INFINIPATH_RT_VALID;
+		}
+	}
+	if (dd->ipath_kregbase)
+		writeq(pa, tidptr);
+}
+
+/**
+ * ipath_ht_clear_tids - clear all TID entries for a port, expected and eager
+ * @dd: the infinipath device
+ * @port: the port
+ *
+ * Used from ipath_close(), and at chip initialization.
+ */
+static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
+{
+	u64 __iomem *tidbase;
+	int i;
+
+	if (!dd->ipath_kregbase)
+		return;
+
+	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
+
+	/*
+	 * need to invalidate all of the expected TID entries for this
+	 * port, so we don't have valid entries that might somehow get
+	 * used (early in next use of this port, or through some bug)
+	 */
+	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
+				   dd->ipath_rcvtidbase +
+				   port * dd->ipath_rcvtidcnt *
+				   sizeof(*tidbase));
+	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
+		ipath_ht_put_tid(dd, &tidbase[i], 1, dd->ipath_tidinvalid);
+
+	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
+				   dd->ipath_rcvegrbase +
+				   port * dd->ipath_rcvegrcnt *
+				   sizeof(*tidbase));
+
+	for (i = 0; i < dd->ipath_rcvegrcnt; i++)
+		ipath_ht_put_tid(dd, &tidbase[i], 0, dd->ipath_tidinvalid);
+}
+
+/**
+ * ipath_ht_tidtemplate - setup constants for TID updates
+ * @dd: the infinipath device
+ *
+ * We set up values that we use a lot, to avoid recalculating them each time
+ */
+static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
+{
+	dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2;
+	dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT;
+	dd->ipath_tidtemplate |= INFINIPATH_RT_VALID;
+
+	/*
+	 * work around chip errata bug 7358, by marking invalid tids
+	 * as having max length
+	 */
+	dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) <<
+		INFINIPATH_RT_BUFSIZE_SHIFT;
+}
+
+static int ipath_ht_early_init(struct ipath_devdata *dd)
+{
+	u32 __iomem *piobuf;
+	u32 pioincr, val32, egrsize;
+	int i;
+
+	/*
+	 * one cache line; long IB headers will spill over into received
+	 * buffer
+	 */
+	dd->ipath_rcvhdrentsize = 16;
+	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
+
+	/*
+	 * For HT, we allocate a somewhat overly large eager buffer,
+	 * such that we can guarantee that we can receive the largest
+	 * packet that we can send out.  To truly support a 4KB MTU,
+	 * we would need to bump this to a larger value.  To date, other
+	 * than testing, we have never encountered an HCA that can
+	 * really send 4KB MTU packets, so we do not handle that (we'll
+	 * get error interrupts if we ever see one).
+	 */
+	dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
+	egrsize = dd->ipath_rcvegrbufsize;
+
+	/*
+	 * the min() check here is currently a nop, but it may not
+	 * always be, depending on just how we do ipath_rcvegrbufsize
+	 */
+	dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
+				 dd->ipath_rcvegrbufsize);
+	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
+	ipath_ht_tidtemplate(dd);
+
+	/*
+	 * zero all the TID entries at startup.  We do this for sanity,
+	 * in case of a previous driver crash of some kind, and also
+	 * because the chip powers up with these memories in an unknown
+	 * state.  Use portcnt, not cfgports, since this is for the
+	 * full chip, not for current (possibly different) configuration
+	 * value.
+	 * Chip Errata bug 6447
+	 */
+	for (val32 = 0; val32 < dd->ipath_portcnt; val32++)
+		ipath_ht_clear_tids(dd, val32);
+
+	/*
+	 * write the pbc of each buffer, to be sure it's initialized, then
+	 * cancel all the buffers, and also abort any packets that might
+	 * have been in flight for some reason (the latter is for driver
+	 * unload/reload, but isn't a bad idea at first init).	PIO send
+	 * isn't enabled at this point, so there is no danger of sending
+	 * these out on the wire.
+	 * Chip Errata bug 6610
+	 */
+	piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) +
+				  dd->ipath_piobufbase);
+	pioincr = dd->ipath_palign / sizeof(*piobuf);
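+	/* PIO buffers are ipath_palign bytes apart; step in u32 units */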
+	for (i = 0; i < dd->ipath_piobcnt2k; i++) {
+		/*
+		 * reasonable word count, just to init pbc
+		 */
+		writel(16, piobuf);
+		piobuf += pioincr;
+	}
+	/*
+	 * self-clearing
+	 */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+			 INFINIPATH_S_ABORT);
+
+	ipath_get_eeprom_info(dd);
+	if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
+	    dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
+		/*
+		 * Later production QHT7040 has same changes as QHT7140, so
+		 * can use GPIO interrupts.  They have serial #'s starting
+		 * with 128, rather than 112.
+		 */
+		dd->ipath_flags |= IPATH_GPIO_INTR;
+		dd->ipath_flags &= ~IPATH_POLL_RX_INTR;
+	}
+	return 0;
+}
+
+/**
+ * ipath_ht_get_base_info - set chip-specific flags for user code
+ * @pd: the infinipath port data
+ * @kbase: ipath_base_info pointer
+ *
+ * We set the HT flag so user code can tell the transports apart,
+ * since the lower bandwidth of PCIe vs HyperTransport can affect
+ * some user packet algorithms.
+ */
+static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
+{
+	struct ipath_base_info *kinfo = kbase;
+
+	kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT |
+		IPATH_RUNTIME_RCVHDR_COPY;
+
+	return 0;
+}
+
+/**
+ * ipath_init_iba6110_funcs - set up the chip-specific function pointers
+ * @dd: the infinipath device
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
+{
+	dd->ipath_f_intrsetup = ipath_ht_intconfig;
+	dd->ipath_f_bus = ipath_setup_ht_config;
+	dd->ipath_f_reset = ipath_setup_ht_reset;
+	dd->ipath_f_get_boardname = ipath_ht_boardname;
+	dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
+	dd->ipath_f_early_init = ipath_ht_early_init;
+	dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
+	dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
+	dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes;
+	dd->ipath_f_clear_tids = ipath_ht_clear_tids;
+	dd->ipath_f_put_tid = ipath_ht_put_tid;
+	dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
+	dd->ipath_f_setextled = ipath_setup_ht_setextled;
+	dd->ipath_f_get_base_info = ipath_ht_get_base_info;
+
+	/*
+	 * initialize chip-specific variables
+	 */
+	dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
+
+	/*
+	 * setup the register offsets, since they are different for each
+	 * chip
+	 */
+	dd->ipath_kregs = &ipath_ht_kregs;
+	dd->ipath_cregs = &ipath_ht_cregs;
+
+	/*
+	 * do very early init that is needed before ipath_f_bus is
+	 * called
+	 */
+	ipath_init_ht_variables();
+}
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_iba6120.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_iba6120.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_iba6120.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_iba6120.c	2006-10-19 12:41:10.000000000 -0400
@@ -0,0 +1,1264 @@
+/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * InfiniPath PCIe chip.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+
+#include "ipath_kernel.h"
+#include "ipath_registers.h"
+
+/*
+ * Chip-specific register information and access functions for the
+ * QLogic InfiniPath PCI-Express chip.
+ *
+ * This lists the InfiniPath registers, in the actual chip layout.
+ * This structure should never be directly accessed.
+ */
+struct _infinipath_do_not_use_kernel_regs {
+	unsigned long long Revision;
+	unsigned long long Control;
+	unsigned long long PageAlign;
+	unsigned long long PortCnt;
+	unsigned long long DebugPortSelect;
+	unsigned long long Reserved0;
+	unsigned long long SendRegBase;
+	unsigned long long UserRegBase;
+	unsigned long long CounterRegBase;
+	unsigned long long Scratch;
+	unsigned long long Reserved1;
+	unsigned long long Reserved2;
+	unsigned long long IntBlocked;
+	unsigned long long IntMask;
+	unsigned long long IntStatus;
+	unsigned long long IntClear;
+	unsigned long long ErrorMask;
+	unsigned long long ErrorStatus;
+	unsigned long long ErrorClear;
+	unsigned long long HwErrMask;
+	unsigned long long HwErrStatus;
+	unsigned long long HwErrClear;
+	unsigned long long HwDiagCtrl;
+	unsigned long long MDIO;
+	unsigned long long IBCStatus;
+	unsigned long long IBCCtrl;
+	unsigned long long ExtStatus;
+	unsigned long long ExtCtrl;
+	unsigned long long GPIOOut;
+	unsigned long long GPIOMask;
+	unsigned long long GPIOStatus;
+	unsigned long long GPIOClear;
+	unsigned long long RcvCtrl;
+	unsigned long long RcvBTHQP;
+	unsigned long long RcvHdrSize;
+	unsigned long long RcvHdrCnt;
+	unsigned long long RcvHdrEntSize;
+	unsigned long long RcvTIDBase;
+	unsigned long long RcvTIDCnt;
+	unsigned long long RcvEgrBase;
+	unsigned long long RcvEgrCnt;
+	unsigned long long RcvBufBase;
+	unsigned long long RcvBufSize;
+	unsigned long long RxIntMemBase;
+	unsigned long long RxIntMemSize;
+	unsigned long long RcvPartitionKey;
+	unsigned long long Reserved3;
+	unsigned long long RcvPktLEDCnt;
+	unsigned long long Reserved4[8];
+	unsigned long long SendCtrl;
+	unsigned long long SendPIOBufBase;
+	unsigned long long SendPIOSize;
+	unsigned long long SendPIOBufCnt;
+	unsigned long long SendPIOAvailAddr;
+	unsigned long long TxIntMemBase;
+	unsigned long long TxIntMemSize;
+	unsigned long long Reserved5;
+	unsigned long long PCIeRBufTestReg0;
+	unsigned long long PCIeRBufTestReg1;
+	unsigned long long Reserved51[6];
+	unsigned long long SendBufferError;
+	unsigned long long SendBufferErrorCONT1;
+	unsigned long long Reserved6SBE[6];
+	unsigned long long RcvHdrAddr0;
+	unsigned long long RcvHdrAddr1;
+	unsigned long long RcvHdrAddr2;
+	unsigned long long RcvHdrAddr3;
+	unsigned long long RcvHdrAddr4;
+	unsigned long long Reserved7RHA[11];
+	unsigned long long RcvHdrTailAddr0;
+	unsigned long long RcvHdrTailAddr1;
+	unsigned long long RcvHdrTailAddr2;
+	unsigned long long RcvHdrTailAddr3;
+	unsigned long long RcvHdrTailAddr4;
+	unsigned long long Reserved8RHTA[11];
+	unsigned long long Reserved9SW[8];
+	unsigned long long SerdesConfig0;
+	unsigned long long SerdesConfig1;
+	unsigned long long SerdesStatus;
+	unsigned long long XGXSConfig;
+	unsigned long long IBPLLCfg;
+	unsigned long long Reserved10SW2[3];
+	unsigned long long PCIEQ0SerdesConfig0;
+	unsigned long long PCIEQ0SerdesConfig1;
+	unsigned long long PCIEQ0SerdesStatus;
+	unsigned long long Reserved11;
+	unsigned long long PCIEQ1SerdesConfig0;
+	unsigned long long PCIEQ1SerdesConfig1;
+	unsigned long long PCIEQ1SerdesStatus;
+	unsigned long long Reserved12;
+};
+
+#define IPATH_KREG_OFFSET(field) (offsetof(struct \
+    _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
+#define IPATH_CREG_OFFSET(field) (offsetof( \
+    struct infinipath_counters, field) / sizeof(u64))
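+/* offsets are in units of 64-bit registers, hence the sizeof(u64) divide */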
+
+static const struct ipath_kregs ipath_pe_kregs = {
+	.kr_control = IPATH_KREG_OFFSET(Control),
+	.kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
+	.kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
+	.kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
+	.kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
+	.kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
+	.kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
+	.kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
+	.kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
+	.kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
+	.kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
+	.kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
+	.kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
+	.kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
+	.kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
+	.kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
+	.kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
+	.kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
+	.kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
+	.kr_intclear = IPATH_KREG_OFFSET(IntClear),
+	.kr_intmask = IPATH_KREG_OFFSET(IntMask),
+	.kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
+	.kr_mdio = IPATH_KREG_OFFSET(MDIO),
+	.kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
+	.kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
+	.kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
+	.kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
+	.kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
+	.kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
+	.kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
+	.kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
+	.kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
+	.kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
+	.kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
+	.kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
+	.kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
+	.kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
+	.kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
+	.kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
+	.kr_revision = IPATH_KREG_OFFSET(Revision),
+	.kr_scratch = IPATH_KREG_OFFSET(Scratch),
+	.kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
+	.kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
+	.kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
+	.kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
+	.kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
+	.kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
+	.kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
+	.kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
+	.kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
+	.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
+	.kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
+	.kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
+	.kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
+	.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
+	.kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
+
+	/*
+	 * These should not be used directly via ipath_read_kreg64(),
+	 * use them with ipath_read_kreg64_port()
+	 */
+	.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
+	.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
+
+	/* The rcvpktled register controls one of the debug port signals, so
+	 * a packet activity LED can be connected to it. */
+	.kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
+	.kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
+	.kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
+	.kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
+	.kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
+	.kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
+	.kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
+	.kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
+	.kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
+};
+
+static const struct ipath_cregs ipath_pe_cregs = {
+	.cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
+	.cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
+	.cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
+	.cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
+	.cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
+	.cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
+	.cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
+	.cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
+	.cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
+	.cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
+	.cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
+	.cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
+	.cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
+	.cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
+	.cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
+	.cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
+	.cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
+	.cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
+	.cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
+	.cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
+	.cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
+	.cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
+	.cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
+	.cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
+	.cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
+	.cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
+	.cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
+	.cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
+	.cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
+	.cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
+	.cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
+	.cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
+	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
+};
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define INFINIPATH_I_RCVURG_MASK 0x1F
+#define INFINIPATH_I_RCVAVAIL_MASK 0x1F
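+/* one bit per receive port; this chip has 5 (see RcvHdrAddr0-4 above) */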
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK  0x000000000000003fULL
+#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define INFINIPATH_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
+#define INFINIPATH_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
+#define INFINIPATH_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
+#define INFINIPATH_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
+#define INFINIPATH_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
+#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
+#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
+#define INFINIPATH_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
+#define INFINIPATH_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
+#define INFINIPATH_HWE_SERDESPLLFAILED      0x1000000000000000ULL
+
+/* kr_extstatus bits */
+#define INFINIPATH_EXTS_FREQSEL 0x2
+#define INFINIPATH_EXTS_SERDESSEL 0x4
+#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
+#define INFINIPATH_EXTS_MEMBIST_FOUND       0x0000000000008000
+
+#define _IPATH_GPIO_SDA_NUM 1
+#define _IPATH_GPIO_SCL_NUM 0
+
+#define IPATH_GPIO_SDA (1ULL << \
+	(_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+#define IPATH_GPIO_SCL (1ULL << \
+	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
+
+/**
+ * ipath_pe_handle_hwerrors - display hardware errors
+ * @dd: the infinipath device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now,
+ * we'll print them and continue.  We reuse the same message buffer as
+ * ipath_handle_errors() to avoid excessive stack usage.
+ */
+static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+				     size_t msgl)
+{
+	ipath_err_t hwerrs;
+	u32 bits, ctrl;
+	int isfatal = 0;
+	char bitsmsg[64];
+
+	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
+	if (!hwerrs) {
+		/*
+		 * better than printing confusing messages; this seems
+		 * to be related to clearing the CRC or PLL error
+		 * during init.
+		 */
+		ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
+		return;
+	} else if (hwerrs == ~0ULL) {
+		ipath_dev_err(dd, "Read of hardware error status failed "
+			      "(all bits set); ignoring\n");
+		return;
+	}
+	ipath_stats.sps_hwerrs++;
+
+	/* Always clear the error status register, except MEMBISTFAIL,
+	 * regardless of whether we continue or stop using the chip.
+	 * We want that set so we know it failed, even across driver reload.
+	 * We'll still ignore it in the hwerrmask.  We do this partly for
+	 * diagnostics, but also for support */
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+			 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
+
+	hwerrs &= dd->ipath_hwerrmask;
+
+	/*
+	 * make sure we get this much out, unless told to be quiet,
+	 * or it's occurred within the last 5 seconds
+	 */
+	if ((hwerrs & ~dd->ipath_lasthwerror) ||
+	    (ipath_debug & __IPATH_VERBDBG))
+		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
+			 "(cleared)\n", (unsigned long long) hwerrs);
+	dd->ipath_lasthwerror |= hwerrs;
+
+	if (hwerrs & ~infinipath_hwe_bitsextant)
+		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
+			      "%llx set\n", (unsigned long long)
+			      (hwerrs & ~infinipath_hwe_bitsextant));
+
+	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
+	if (ctrl & INFINIPATH_C_FREEZEMODE) {
+		if (hwerrs) {
+			/*
+			 * if any set that we aren't ignoring; only make the
+			 * complaint once, in case it's stuck or recurring,
+			 * and we get here multiple times
+			 */
+			if (dd->ipath_flags & IPATH_INITTED) {
+				ipath_dev_err(dd, "Fatal Hardware Error (freeze "
+					      "mode), no longer usable, SN %.16s\n",
+						  dd->ipath_serial);
+				isfatal = 1;
+			}
+			/*
+			 * Mark as having had an error for driver, and also
+			 * for /sys and status word mapped to user programs.
+			 * This marks unit as not usable, until reset
+			 */
+			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+			*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+			dd->ipath_flags &= ~IPATH_INITTED;
+		} else {
+			ipath_dbg("Clearing freezemode on ignored hardware "
+				  "error\n");
+			ctrl &= ~INFINIPATH_C_FREEZEMODE;
+			ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+					 ctrl);
+		}
+	}
+
+	*msg = '\0';
+
+	if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
+		strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
+			msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
+		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+	}
+	if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
+		      << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) &
+			      INFINIPATH_HWE_RXEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ",
+			 bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+	if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
+		      << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) &
+			      INFINIPATH_HWE_TXEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ",
+			 bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+	if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
+		      << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
+		bits = (u32) ((hwerrs >>
+			       INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
+			      INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
+		snprintf(bitsmsg, sizeof bitsmsg,
+			 "[PCIe Mem Parity Errs %x] ", bits);
+		strlcat(msg, bitsmsg, msgl);
+	}
+	if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR)
+		strlcat(msg, "[IB2IPATH Parity]", msgl);
+	if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR)
+		strlcat(msg, "[IPATH2IB Parity]", msgl);
+
+#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |	\
+			 INFINIPATH_HWE_COREPLL_RFSLIP )
+
+	if (hwerrs & _IPATH_PLL_FAIL) {
+		snprintf(bitsmsg, sizeof bitsmsg,
+			 "[PLL failed (%llx), InfiniPath hardware unusable]",
+			 (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
+		strlcat(msg, bitsmsg, msgl);
+		/* ignore from now on, so disable until driver reloaded */
+		dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+	}
+
+	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
+		/*
+		 * If it occurs, it is left masked since the external
+		 * interface is unused
+		 */
+		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
+				 dd->ipath_hwerrmask);
+	}
+
+	if (hwerrs & INFINIPATH_HWE_PCIEPOISONEDTLP)
+		strlcat(msg, "[PCIe Poisoned TLP]", msgl);
+	if (hwerrs & INFINIPATH_HWE_PCIECPLTIMEOUT)
+		strlcat(msg, "[PCIe completion timeout]", msgl);
+
+	/*
+	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
+	 * parity or memory parity error failures, because most likely we
+	 * won't be able to talk to the core of the chip.  Nonetheless, we
+	 * might see them, if they are in parts of the PCIe core that aren't
+	 * essential.
+	 */
+	if (hwerrs & INFINIPATH_HWE_PCIE1PLLFAILED)
+		strlcat(msg, "[PCIePLL1]", msgl);
+	if (hwerrs & INFINIPATH_HWE_PCIE0PLLFAILED)
+		strlcat(msg, "[PCIePLL0]", msgl);
+	if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXTLH)
+		strlcat(msg, "[PCIe XTLH core parity]", msgl);
+	if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXADM)
+		strlcat(msg, "[PCIe ADM TX core parity]", msgl);
+	if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYRADM)
+		strlcat(msg, "[PCIe ADM RX core parity]", msgl);
+
+	if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR)
+		strlcat(msg, "[Rx Dsync]", msgl);
+	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED)
+		strlcat(msg, "[SerDes PLL]", msgl);
+
+	ipath_dev_err(dd, "%s hardware error\n", msg);
+	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
+		/*
+		 * for /sys status file; if no trailing } is copied, we'll
+		 * know it was truncated.
+		 */
+		snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
+			 "{%s}", msg);
+	}
+}
+
+/**
+ * ipath_pe_boardname - fill in the board name
+ * @dd: the infinipath device
+ * @name: the output buffer
+ * @namelen: the size of the output buffer
+ *
+ * info is based on the board revision register
+ */
+static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
+			      size_t namelen)
+{
+	char *n = NULL;
+	u8 boardrev = dd->ipath_boardrev;
+	int ret;
+
+	switch (boardrev) {
+	case 0:
+		n = "InfiniPath_Emulation";
+		break;
+	case 1:
+		n = "InfiniPath_QLE7140-Bringup";
+		break;
+	case 2:
+		n = "InfiniPath_QLE7140";
+		break;
+	case 3:
+		n = "InfiniPath_QMI7140";
+		break;
+	case 4:
+		n = "InfiniPath_QEM7140";
+		break;
+	case 5:
+		n = "InfiniPath_QMH7140";
+		break;
+	default:
+		ipath_dev_err(dd,
+			      "Don't yet know about board with ID %u\n",
+			      boardrev);
+		snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
+			 boardrev);
+		break;
+	}
+	if (n)
+		snprintf(name, namelen, "%s", n);
+
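+	/* the only supported silicon revisions are 4.1 and 4.2 */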
+	if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev > 2) {
+		ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
+			      dd->ipath_majrev, dd->ipath_minrev);
+		ret = 1;
+	} else
+		ret = 0;
+
+	return ret;
+}
+
+/**
+ * ipath_pe_init_hwerrors - enable hardware errors
+ * @dd: the infinipath device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask.
+ */
+static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
+{
+	ipath_err_t val;
+	u64 extsval;
+
+	extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
+
+	if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
+		ipath_dev_err(dd, "MemBIST did not complete!\n");
+
+	val = ~0ULL;	/* barring bugs, all hwerrors become interrupts */
+
+	if (!dd->ipath_boardrev)	/* no PLL for Emulator */
+		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
+
+	/* workaround bug 9460 in internal interface bus parity checking */
+	val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
+
+	dd->ipath_hwerrmask = val;
+}
+
+/**
+ * ipath_pe_bringup_serdes - bring up the serdes
+ * @dd: the infinipath device
+ */
+static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
+{
+	u64 val, tmp, config1;
+	int ret = 0, change = 0;
+
+	ipath_dbg("Trying to bring up serdes\n");
+
+	if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
+	    INFINIPATH_HWE_SERDESPLLFAILED) {
+		ipath_dbg("At start, serdes PLL failed bit set "
+			  "in hwerrstatus, clearing and continuing\n");
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
+				 INFINIPATH_HWE_SERDESPLLFAILED);
+	}
+
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+	config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
+
+	ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
+		   "xgxsconfig %llx\n", (unsigned long long) val,
+		   (unsigned long long) config1, (unsigned long long)
+		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+
+	/*
+	 * Force reset on, also set rxdetect enable.  Must do before reading
+	 * serdesstatus at least for simulation, or some of the bits in
+	 * serdes status will come back as undefined and cause simulation
+	 * failures
+	 */
+	val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
+		| INFINIPATH_SERDC0_L1PWR_DN;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+	/* be sure chip saw it */
+	tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	udelay(5);		/* need pll reset set at least for a bit */
+	/*
+	 * after PLL is reset, set the per-lane Resets and TxIdle and
+	 * clear the PLL reset and rxdetect (to get falling edge).
+	 * Leave L1PWR bits set (permanently)
+	 */
+	val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
+		 | INFINIPATH_SERDC0_L1PWR_DN);
+	val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
+	ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
+		   "and txidle (%llx)\n", (unsigned long long) val);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+	/* be sure chip saw it */
+	tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	/* need PLL reset clear for at least 11 usec before lane
+	 * resets cleared; give it a few more to be sure */
+	udelay(15);
+	val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);
+
+	ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
+		   "(writing %llx)\n", (unsigned long long) val);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+	/* be sure chip saw it */
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+	if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
+	     INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
+		val &=
+			~(INFINIPATH_XGXS_MDIOADDR_MASK <<
+			  INFINIPATH_XGXS_MDIOADDR_SHIFT);
+		/* MDIO address 3 */
+		val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
+		change = 1;
+	}
+	if (val & INFINIPATH_XGXS_RESET) {
+		val &= ~INFINIPATH_XGXS_RESET;
+		change = 1;
+	}
+	if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
+	     INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv) {
+		/* need to compensate for Tx inversion in partner */
+		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
+		         INFINIPATH_XGXS_RX_POL_SHIFT);
+		val |= dd->ipath_rx_pol_inv <<
+			INFINIPATH_XGXS_RX_POL_SHIFT;
+		change = 1;
+	}
+	if (change)
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+
+	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+
+	/* clear current and de-emphasis bits */
+	config1 &= ~0x0ffffffff00ULL;
+	/* set current to 20ma */
+	config1 |= 0x00000000000ULL;
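+	/* (the field value for 20mA happens to be 0, so this OR is a no-op) */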
+	/* set de-emphasis to -5.68dB */
+	config1 |= 0x0cccc000000ULL;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
+
+	ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
+		   "config1=%llx, sstatus=%llx xgxs=%llx\n",
+		   (unsigned long long) val, (unsigned long long) config1,
+		   (unsigned long long)
+		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
+		   (unsigned long long)
+		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
+
+	if (!ipath_waitfor_mdio_cmdready(dd)) {
+		ipath_write_kreg(
+			dd, dd->ipath_kregs->kr_mdio,
+			ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
+				       IPATH_MDIO_CTRL_XGXS_REG_8, 0));
+		if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
+					   IPATH_MDIO_DATAVALID, &val))
+			ipath_dbg("Never got MDIO data for XGXS "
+				  "status read\n");
+		else
+			ipath_cdbg(VERBOSE, "MDIO Read reg8, "
+				   "'bank' 31 %x\n", (u32) val);
+	} else
+		ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
+
+	return ret;
+}
+
+/**
+ * ipath_pe_quiet_serdes - set serdes to txidle
+ * @dd: the infinipath device
+ * Called when driver is being unloaded
+ */
+static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
+{
+	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
+
+	val |= INFINIPATH_SERDC0_TXIDLE;
+	ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
+		  (unsigned long long) val);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
+}
+
+/* this is not yet needed on this chip, so just return 0. */
+static int ipath_pe_intconfig(struct ipath_devdata *dd)
+{
+	return 0;
+}
+
+/**
+ * ipath_setup_pe_setextled - set the state of the two external LEDs
+ * @dd: the infinipath device
+ * @lst: the L state
+ * @ltst: the LT state
+ *
+ * These LEDs indicate the physical and logical state of the IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ * Note:  We try to match the Mellanox HCA LED behavior as best
+ * we can.  Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate.  That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
+				     u64 ltst)
+{
+	u64 extctl;
+
+	/* the diags use the LED to indicate diag info, so we leave
+	 * the external LED alone when the diags are running */
+	if (ipath_diag_inuse)
+		return;
+
+	extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
+				       INFINIPATH_EXTC_LED2PRIPORT_ON);
+
+	if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP)
+		extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
+	if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
+		extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
+	dd->ipath_extctrl = extctl;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
+}
+
+/**
+ * ipath_setup_pe_cleanup - clean up any chip-specific stuff
+ * @dd: the infinipath device
+ *
+ * This is called during driver unload.
+ * We do the pci_disable_msi here, not in generic code, because it
+ * isn't used for the HT chips. If we do end up needing pci_enable_msi
+ * at some point in the future for HT, we'll move the call back
+ * into the main init_one code.
+ */
+static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
+{
+	dd->ipath_msi_lo = 0;	/* just in case unload fails */
+	pci_disable_msi(dd->pcidev);
+}
+
+/**
+ * ipath_setup_pe_config - setup PCIe config related stuff
+ * @dd: the infinipath device
+ * @pdev: the PCI device
+ *
+ * The pci_enable_msi() call will fail on systems with MSI quirks
+ * such as those with AMD8131, even if the device of interest is not
+ * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least;
+ * fixed late in 2.6.16).  Until then, all that can be done is to edit
+ * the kernel source to remove the quirk check.
+ * We do not need to call enable_msi() for our HyperTransport chip,
+ * even though it uses MSI, and we want to avoid the quirk warning,
+ * so we call enable_msi() only for PCIe.  If we do end up needing
+ * pci_enable_msi() at some point in the future for HT, we'll move the
+ * call back into the main init_one code.
+ * We save the msi lo and hi values, so we can restore them after
+ * chip reset (the kernel PCI infrastructure doesn't yet handle that
+ * correctly).
+ */
+static int ipath_setup_pe_config(struct ipath_devdata *dd,
+				 struct pci_dev *pdev)
+{
+	int pos, ret;
+
+	dd->ipath_msi_lo = 0;	/* used as a flag during reset processing */
+	ret = pci_enable_msi(dd->pcidev);
+	if (ret)
+		ipath_dev_err(dd, "pci_enable_msi failed: %d, "
+			      "interrupts may not work\n", ret);
+	/* continue even if it fails, we may still be OK... */
+
+	if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
+		u16 control;
+		pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+				      &dd->ipath_msi_lo);
+		pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+				      &dd->ipath_msi_hi);
+		pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+				     &control);
+		/* now save the data (vector) info */
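+		/* (MSI data lives at offset 8, or 12 when 64-bit
+		 * addressing is enabled) */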
+		pci_read_config_word(dd->pcidev,
+				     pos + ((control & PCI_MSI_FLAGS_64BIT)
+					    ? 12 : 8),
+				     &dd->ipath_msi_data);
+		ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
+			   "0x%x, control=0x%x\n", dd->ipath_msi_data,
+			   pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
+			   control);
+		/* we save the cachelinesize also, although it doesn't
+		 * really matter */
+		pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
+				     &dd->ipath_pci_cacheline);
+	} else
+		ipath_dev_err(dd, "Can't find MSI capability, "
+			      "can't save MSI settings for reset\n");
+	if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) {
+		u16 linkstat;
+		pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
+				     &linkstat);
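+		/* negotiated link width starts at bit 4 of the link
+		 * status register; 8 means x8 lanes */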
+		linkstat >>= 4;
+		linkstat &= 0x1f;
+		if (linkstat != 8)
+			ipath_dev_err(dd, "PCIe width %u, "
+				      "performance reduced\n", linkstat);
+	} else
+		ipath_dev_err(dd, "Can't find PCI Express "
+			      "capability!\n");
+	return 0;
+}
+
+static void ipath_init_pe_variables(void)
+{
+	/*
+	 * bits for selecting i2c direction and values,
+	 * used for I2C serial flash
+	 */
+	ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
+	ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
+	ipath_gpio_sda = IPATH_GPIO_SDA;
+	ipath_gpio_scl = IPATH_GPIO_SCL;
+
+	/* variables for sanity checking interrupt and errors */
+	infinipath_hwe_bitsextant =
+		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
+		(INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
+		 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
+		INFINIPATH_HWE_PCIE1PLLFAILED |
+		INFINIPATH_HWE_PCIE0PLLFAILED |
+		INFINIPATH_HWE_PCIEPOISONEDTLP |
+		INFINIPATH_HWE_PCIECPLTIMEOUT |
+		INFINIPATH_HWE_PCIEBUSPARITYXTLH |
+		INFINIPATH_HWE_PCIEBUSPARITYXADM |
+		INFINIPATH_HWE_PCIEBUSPARITYRADM |
+		INFINIPATH_HWE_MEMBISTFAILED |
+		INFINIPATH_HWE_COREPLL_FBSLIP |
+		INFINIPATH_HWE_COREPLL_RFSLIP |
+		INFINIPATH_HWE_SERDESPLLFAILED |
+		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
+		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
+	infinipath_i_bitsextant =
+		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
+		(INFINIPATH_I_RCVAVAIL_MASK <<
+		 INFINIPATH_I_RCVAVAIL_SHIFT) |
+		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
+		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
+	infinipath_e_bitsextant =
+		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
+		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
+		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
+		INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
+		INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
+		INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
+		INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+		INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
+		INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
+		INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
+		INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
+		INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
+		INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
+		INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
+		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
+		INFINIPATH_E_HARDWARE;
+
+	infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
+	infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
+}
+
+/* setup the MSI stuff again after a reset.  I'd like to just call
+ * pci_enable_msi() and request_irq() again, but when I do that,
+ * the MSI enable bit doesn't get set in the command word, and
+ * we switch to a different interrupt vector, which is confusing,
+ * so I instead just do it all inline.  Perhaps we can somehow tie this
+ * into the PCIe hotplug support at some point.
+ * Note, because I'm doing it all here, I don't call pci_disable_msi()
+ * or free_irq() at the start of ipath_setup_pe_reset().
+ */
+static int ipath_reinit_msi(struct ipath_devdata *dd)
+{
+	int pos;
+	u16 control;
+	int ret;
+
+	if (!dd->ipath_msi_lo) {
+		dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
+			 "initial setup failed?\n");
+		ret = 0;
+		goto bail;
+	}
+
+	if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
+		ipath_dev_err(dd, "Can't find MSI capability, "
+			      "can't restore MSI settings\n");
+		ret = 0;
+		goto bail;
+	}
+	ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
+		   dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
+	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+			       dd->ipath_msi_lo);
+	ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n",
+		   dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
+	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+			       dd->ipath_msi_hi);
+	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
+	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
+		ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
+			   "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
+			   control, control | PCI_MSI_FLAGS_ENABLE);
+		control |= PCI_MSI_FLAGS_ENABLE;
+		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+				      control);
+	}
+	/* now rewrite the data (vector) info */
+	pci_write_config_word(dd->pcidev, pos +
+			      ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
+			      dd->ipath_msi_data);
+	/* we restore the cachelinesize also, although it doesn't really
+	 * matter */
+	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
+			      dd->ipath_pci_cacheline);
+	/* and now set the pci master bit again */
+	pci_set_master(dd->pcidev);
+	ret = 1;
+
+bail:
+	return ret;
+}
+
+/* This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.  If we need interrupt context, we can split
+ * it into two routines.
+ */
+static int ipath_setup_pe_reset(struct ipath_devdata *dd)
+{
+	u64 val;
+	int i;
+	int ret;
+
+	/* Use ERROR so it shows up in logs, etc. */
+	ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
+	/* keep chip from being accessed in a few places */
+	dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
+	val = dd->ipath_control | INFINIPATH_C_RESET;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
+	mb();
+
+	for (i = 1; i <= 5; i++) {
+		int r;
+		/* allow MBIST, etc. to complete; longer on each retry.
+		 * We sometimes get machine checks from bus timeout if no
+		 * response, so for now, make it *really* long.
+		 */
+		msleep(1000 + (1 + i) * 2000);
+		if ((r =
+		     pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+					    dd->ipath_pcibar0)))
+			ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
+				      r);
+		if ((r =
+		     pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+					    dd->ipath_pcibar1)))
+			ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
+				      r);
+		/* now re-enable memory access */
+		if ((r = pci_enable_device(dd->pcidev)))
+			ipath_dev_err(dd, "pci_enable_device failed after "
+				      "reset: %d\n", r);
+		/* whether it worked or not, mark as present again */
+		dd->ipath_flags |= IPATH_PRESENT;
+		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
+		if (val == dd->ipath_revision) {
+			ipath_cdbg(VERBOSE, "Got matching revision "
+				   "register %llx on try %d\n",
+				   (unsigned long long) val, i);
+			ret = ipath_reinit_msi(dd);
+			goto bail;
+		}
+		/* Probably getting -1 back */
+		ipath_dbg("Didn't get expected revision register, "
+			  "got %llx, try %d\n", (unsigned long long) val,
+			  i + 1);
+	}
+	ret = 0; /* failed */
+
+bail:
+	return ret;
+}
+
+/**
+ * ipath_pe_put_tid - write a TID in chip
+ * @dd: the infinipath device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: 0 for eager, 1 for expected
+ * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for special locking etc.
+ * It's used for both the full cleanup on exit, as well as the normal
+ * setup and teardown.
+ */
+static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
+			     u32 type, unsigned long pa)
+{
+	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+	unsigned long flags = 0; /* keep gcc quiet */
+
+	if (pa != dd->ipath_tidinvalid) {
+		if (pa & ((1U << 11) - 1)) {
+			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
+				 "not 2KB aligned!\n", pa);
+			return;
+		}
+		pa >>= 11;
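+		/* the chip stores the physical address in units of 2KB */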
+		/* paranoia check */
+		if (pa & (7<<29))
+			ipath_dev_err(dd,
+				      "BUG: Physical page address 0x%lx "
+				      "has bits set in 31-29\n", pa);
+
+		if (type == 0)
+			pa |= dd->ipath_tidtemplate;
+		else /* for now, always full 4KB page */
+			pa |= 2 << 29;
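+			/* bits 30:29 hold the buffer size code
+			 * (1 = 2KB, 2 = 4KB, as in ipath_pe_tidtemplate) */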
+	}
+
+	/* workaround chip bug 9437 by writing the scratch register
+	 * before and after the TID write, and holding a spinlock around
+	 * the writes, so they don't intermix with other TID (eager or
+	 * expected) writes.  Unfortunately, this call can be done from
+	 * interrupt level for the port 0 eager TIDs, so we have to use
+	 * irqsave locks.
+	 */
+	spin_lock_irqsave(&dd->ipath_tid_lock, flags);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
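+	/* (presumably arbitrary marker values; the scratch writes are
+	 * what matter) */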
+	if (dd->ipath_kregbase)
+		writel(pa, tidp32);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
+	mmiowb();
+	spin_unlock_irqrestore(&dd->ipath_tid_lock, flags);
+}
+
+/**
+ * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager
+ * @dd: the infinipath device
+ * @port: the port
+ *
+ * clear all TID entries for a port, expected and eager.
+ * Used from ipath_close().  On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
+{
+	u64 __iomem *tidbase;
+	unsigned long tidinv;
+	int i;
+
+	if (!dd->ipath_kregbase)
+		return;
+
+	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
+
+	tidinv = dd->ipath_tidinvalid;
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->ipath_kregbase) +
+		 dd->ipath_rcvtidbase +
+		 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
+
+	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
+		ipath_pe_put_tid(dd, &tidbase[i], 0, tidinv);
+
+	tidbase = (u64 __iomem *)
+		((char __iomem *)(dd->ipath_kregbase) +
+		 dd->ipath_rcvegrbase +
+		 port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
+
+	for (i = 0; i < dd->ipath_rcvegrcnt; i++)
+		ipath_pe_put_tid(dd, &tidbase[i], 1, tidinv);
+}
+
+/**
+ * ipath_pe_tidtemplate - setup constants for TID updates
+ * @dd: the infinipath device
+ *
+ * We set up values that we use a lot, to avoid recalculating them each time.
+ */
+static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
+{
+	u32 egrsize = dd->ipath_rcvegrbufsize;
+
+	/* For now, we always allocate 4KB buffers (at init) so we can
+	 * receive max size packets.  We may want a module parameter to
+	 * specify 2KB or 4KB and/or make it per-port instead of per-device
+	 * for those who want to reduce memory footprint.  Note that the
+	 * ipath_rcvhdrentsize size must be large enough to hold the largest
+	 * IB header (currently 96 bytes) that we expect to handle (plus of
+	 * course the 2 dwords of RHF).
+	 */
+	if (egrsize == 2048)
+		dd->ipath_tidtemplate = 1U << 29;
+	else if (egrsize == 4096)
+		dd->ipath_tidtemplate = 2U << 29;
+	else {
+		egrsize = 4096;
+		dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
+			 "%u, using %u\n", dd->ipath_rcvegrbufsize,
+			 egrsize);
+		dd->ipath_tidtemplate = 2U << 29;
+	}
+	dd->ipath_tidinvalid = 0;
+}
+
+static int ipath_pe_early_init(struct ipath_devdata *dd)
+{
+	dd->ipath_flags |= IPATH_4BYTE_TID;
+
+	/*
+	 * For openfabrics, we need to be able to handle an IB header of
+	 * 24 dwords.  The HT chip has arbitrarily sized receive buffers, so
+	 * we made them the same size as the PIO buffers.  This chip does not
+	 * handle arbitrarily sized buffers, so we need a header large enough
+	 * to handle the largest IB header, but still have room for a 2KB MTU
+	 * standard IB packet.
+	 */
+	dd->ipath_rcvhdrentsize = 24;
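+	/* 24 dwords = 96 bytes, the largest IB header we expect to handle */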
+	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
+
+	/*
+	 * To truly support a 4KB MTU (for usermode), we need to
+	 * bump this to a larger value.  For now, we use them for
+	 * the kernel only.
+	 */
+	dd->ipath_rcvegrbufsize = 2048;
+	/*
+	 * the min() check here is currently a nop, but it may not always
+	 * be, depending on just how we do ipath_rcvegrbufsize
+	 */
+	dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
+				 dd->ipath_rcvegrbufsize +
+				 (dd->ipath_rcvhdrentsize << 2));
+	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
+
+	/*
+	 * We can request a receive interrupt for 1 or
+	 * more packets from current offset.  For now, we set this
+	 * up for a single packet.
+	 */
+	dd->ipath_rhdrhead_intr_off = 1ULL<<32;
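+	/* (this value is OR'd into rcvhdrhead updates to request the
+	 * interrupt) */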
+
+	ipath_get_eeprom_info(dd);
+
+	return 0;
+}
+
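+/*
+ * Weak default: assume write-combining stores stay ordered; an
+ * arch-specific build may override this where WC can be reordered.
+ */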
+int __attribute__((weak)) ipath_unordered_wc(void)
+{
+	return 0;
+}
+
+/**
+ * ipath_pe_get_base_info - set chip-specific flags for user code
+ * @pd: the infinipath port data
+ * @kbase: ipath_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
+{
+	struct ipath_base_info *kinfo = kbase;
+
+	if (ipath_unordered_wc()) {
+		kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
+		ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
+	} else
+		ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
+
+	kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE;
+
+	return 0;
+}
+
+/**
+ * ipath_init_iba6120_funcs - set up the chip-specific function pointers
+ * @dd: the infinipath device
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
+{
+	dd->ipath_f_intrsetup = ipath_pe_intconfig;
+	dd->ipath_f_bus = ipath_setup_pe_config;
+	dd->ipath_f_reset = ipath_setup_pe_reset;
+	dd->ipath_f_get_boardname = ipath_pe_boardname;
+	dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
+	dd->ipath_f_early_init = ipath_pe_early_init;
+	dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
+	dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
+	dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
+	dd->ipath_f_clear_tids = ipath_pe_clear_tids;
+	dd->ipath_f_put_tid = ipath_pe_put_tid;
+	dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
+	dd->ipath_f_setextled = ipath_setup_pe_setextled;
+	dd->ipath_f_get_base_info = ipath_pe_get_base_info;
+
+	/* initialize chip-specific variables */
+	dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
+
+	/*
+	 * setup the register offsets, since they are different for each
+	 * chip
+	 */
+	dd->ipath_kregs = &ipath_pe_kregs;
+	dd->ipath_cregs = &ipath_pe_cregs;
+
+	ipath_init_pe_variables();
+}
+
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_init_chip.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_init_chip.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_init_chip.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_init_chip.c	2006-10-19 12:41:10.000000000 -0400
@@ -53,8 +53,8 @@ module_param_named(cfgports, ipath_cfgpo
 MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
 
 /*
- * Number of buffers reserved for driver (layered drivers and SMA
- * send).  Reserved at end of buffer list.   Initialized based on
+ * Number of buffers reserved for driver (verbs and layered drivers.)
+ * Reserved at end of buffer list.   Initialized based on
  * number of PIO buffers if not set via module interface.
  * The problem with this is that it's global, but we'll use different
  * numbers for different chip types.  So the default value is not
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(kpiobufs, "Set number o
  *
  * Allocate the eager TID buffers and program them into infinipath.
  * We use the network layer alloc_skb() allocator to allocate the
- * memory, and either use the buffers as is for things like SMA
+ * memory, and either use the buffers as is for things like verbs
  * packets, or pass the buffers up to the ipath layered driver and
  * thence the network layer, replacing them as we do so (see
  * ipath_rcv_layer()).
@@ -240,7 +240,11 @@ static int init_chip_first(struct ipath_
 			  "only supports %u\n", ipath_cfgports,
 			  dd->ipath_portcnt);
 	}
-	dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_cfgports,
+	/*
+	 * Allocate full portcnt array, rather than just cfgports, because
+	 * cleanup iterates across all possible ports.
+	 */
+	dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_portcnt,
 			       GFP_KERNEL);
 
 	if (!dd->ipath_pd) {
@@ -446,9 +450,9 @@ static void enable_chip(struct ipath_dev
 	u32 val;
 	int i;
 
-	if (!reinit) {
-		init_waitqueue_head(&ipath_sma_state_wait);
-	}
+	if (!reinit)
+		init_waitqueue_head(&ipath_state_wait);
+
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
 
@@ -687,7 +691,7 @@ int ipath_init_chip(struct ipath_devdata
 	dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
 		/ (sizeof(u64) * BITS_PER_BYTE / 2);
 	if (ipath_kpiobufs == 0) {
-		/* not set by user, or set explictly to default  */
+		/* not set by user (this is default) */
 		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128)
 			kpiobufs = 32;
 		else
@@ -946,6 +950,7 @@ static int ipath_set_kpiobufs(const char
 			dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
 	}
 
+	ipath_kpiobufs = val;
 	ret = 0;
 bail:
 	spin_unlock_irqrestore(&ipath_devs_lock, flags);
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_intr.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_intr.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_intr.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_intr.c	2006-10-19 12:41:10.000000000 -0400
@@ -34,7 +34,7 @@
 #include <linux/pci.h>
 
 #include "ipath_kernel.h"
-#include "ipath_layer.h"
+#include "ipath_verbs.h"
 #include "ipath_common.h"
 
 /* These are all rcv-related errors which we want to count for stats */
@@ -201,7 +201,7 @@ static void handle_e_ibstatuschanged(str
 				  ib_linkstate(lstate));
 		}
 		else
-			ipath_cdbg(SMA, "Unit %u link state %s, last "
+			ipath_cdbg(VERBOSE, "Unit %u link state %s, last "
 				   "was %s\n", dd->ipath_unit,
 				   ib_linkstate(lstate),
 				   ib_linkstate((unsigned)
@@ -213,7 +213,7 @@ static void handle_e_ibstatuschanged(str
 		if (lstate == IPATH_IBSTATE_INIT ||
 		    lstate == IPATH_IBSTATE_ARM ||
 		    lstate == IPATH_IBSTATE_ACTIVE)
-			ipath_cdbg(SMA, "Unit %u link state down"
+			ipath_cdbg(VERBOSE, "Unit %u link state down"
 				   " (state 0x%x), from %s\n",
 				   dd->ipath_unit,
 				   (u32)val & IPATH_IBSTATE_MASK,
@@ -269,7 +269,7 @@ static void handle_e_ibstatuschanged(str
 			     INFINIPATH_IBCS_LINKSTATE_MASK)
 			    == INFINIPATH_IBCS_L_STATE_ACTIVE)
 				/* if from up to down be more vocal */
-				ipath_cdbg(SMA,
+				ipath_cdbg(VERBOSE,
 					   "Unit %u link now down (%s)\n",
 					   dd->ipath_unit,
 					   ipath_ibcstatus_str[ltstate]);
@@ -289,8 +289,6 @@ static void handle_e_ibstatuschanged(str
 		*dd->ipath_statusp |=
 			IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
 		dd->ipath_f_setextled(dd, lstate, ltstate);
-
-		__ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP);
 	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
 		/*
 		 * set INIT and DOWN.  Down is checked by most of the other
@@ -598,11 +596,11 @@ static int handle_errors(struct ipath_de
 
 	if (!noprint && *msg)
 		ipath_dev_err(dd, "%s error\n", msg);
-	if (dd->ipath_sma_state_wanted & dd->ipath_flags) {
-		ipath_cdbg(VERBOSE, "sma wanted state %x, iflags now %x, "
-			   "waking\n", dd->ipath_sma_state_wanted,
+	if (dd->ipath_state_wanted & dd->ipath_flags) {
+		ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
+			   "waking\n", dd->ipath_state_wanted,
 			   dd->ipath_flags);
-		wake_up_interruptible(&ipath_sma_state_wait);
+		wake_up_interruptible(&ipath_state_wait);
 	}
 
 	return chkerrpkts;
@@ -708,11 +706,7 @@ static void handle_layer_pioavail(struct
 {
 	int ret;
 
-	ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
-	if (ret > 0)
-		goto set;
-
-	ret = __ipath_verbs_piobufavail(dd);
+	ret = ipath_ib_piobufavail(dd->verbs_dev);
 	if (ret > 0)
 		goto set;
 
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_kernel.h linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_kernel.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_kernel.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_kernel.h	2006-10-19 12:41:11.000000000 -0400
@@ -132,12 +132,6 @@ struct _ipath_layer {
 	void *l_arg;
 };
 
-/* Verbs layer interface */
-struct _verbs_layer {
-	void *l_arg;
-	struct timer_list l_timer;
-};
-
 struct ipath_devdata {
 	struct list_head ipath_list;
 
@@ -198,7 +192,8 @@ struct ipath_devdata {
 	void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
 	/* fill out chip-specific fields */
 	int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
-	struct _verbs_layer verbs_layer;
+	struct ipath_ibdev *verbs_dev;
+	struct timer_list verbs_timer;
 	/* total dwords sent (summed from counter) */
 	u64 ipath_sword;
 	/* total dwords rcvd (summed from counter) */
@@ -241,7 +236,7 @@ struct ipath_devdata {
 	u64 ipath_tidtemplate;
 	/* value to write to free TIDs */
 	u64 ipath_tidinvalid;
-	/* PE-800 rcv interrupt setup */
+	/* IBA6120 rcv interrupt setup */
 	u64 ipath_rhdrhead_intr_off;
 
 	/* size of memory at ipath_kregbase */
@@ -250,8 +245,8 @@ struct ipath_devdata {
 	u32 ipath_pioavregs;
 	/* IPATH_POLL, etc. */
 	u32 ipath_flags;
-	/* ipath_flags sma is waiting for */
-	u32 ipath_sma_state_wanted;
+	/* ipath_flags driver is waiting for */
+	u32 ipath_state_wanted;
 	/* last buffer for user use, first buf for kernel use is this
 	 * index. */
 	u32 ipath_lastport_piobuf;
@@ -311,10 +306,6 @@ struct ipath_devdata {
 	u32 ipath_pcibar0;
 	/* so we can rewrite it after a chip reset */
 	u32 ipath_pcibar1;
-	/* sequential tries for SMA send and no bufs */
-	u32 ipath_nosma_bufs;
-	/* duration (seconds) ipath_nosma_bufs set */
-	u32 ipath_nosma_secs;
 
 	/* HT/PCI Vendor ID (here for NodeInfo) */
 	u16 ipath_vendorid;
@@ -512,29 +503,22 @@ struct ipath_devdata {
 	u8 ipath_pci_cacheline;
 	/* LID mask control */
 	u8 ipath_lmc;
+	/* Rx Polarity inversion (compensate for ~tx on partner) */
+	u8 ipath_rx_pol_inv;
 
 	/* local link integrity counter */
 	u32 ipath_lli_counter;
 	/* local link integrity errors */
 	u32 ipath_lli_errors;
+
+	/* Link status check work */
+	struct work_struct link_task;
 };
 
 extern struct list_head ipath_dev_list;
 extern spinlock_t ipath_devs_lock;
 extern struct ipath_devdata *ipath_lookup(int unit);
 
-extern u16 ipath_layer_rcv_opcode;
-extern int __ipath_layer_intr(struct ipath_devdata *, u32);
-extern int ipath_layer_intr(struct ipath_devdata *, u32);
-extern int __ipath_layer_rcv(struct ipath_devdata *, void *,
-			     struct sk_buff *);
-extern int __ipath_layer_rcv_lid(struct ipath_devdata *, void *);
-extern int __ipath_verbs_piobufavail(struct ipath_devdata *);
-extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32);
-
-void ipath_layer_add(struct ipath_devdata *);
-void ipath_layer_remove(struct ipath_devdata *);
-
 int ipath_init_chip(struct ipath_devdata *, int);
 int ipath_enable_wc(struct ipath_devdata *dd);
 void ipath_disable_wc(struct ipath_devdata *dd);
@@ -549,9 +533,8 @@ void ipath_cdev_cleanup(struct cdev **cd
 
 int ipath_diag_add(struct ipath_devdata *);
 void ipath_diag_remove(struct ipath_devdata *);
-void ipath_diag_bringup_link(struct ipath_devdata *);
 
-extern wait_queue_head_t ipath_sma_state_wait;
+extern wait_queue_head_t ipath_state_wait;
 
 int ipath_user_add(struct ipath_devdata *dd);
 void ipath_user_remove(struct ipath_devdata *dd);
@@ -582,12 +565,14 @@ void ipath_free_pddata(struct ipath_devd
 
 int ipath_parse_ushort(const char *str, unsigned short *valp);
 
-int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
-void ipath_set_ib_lstate(struct ipath_devdata *, int);
 void ipath_kreceive(struct ipath_devdata *);
 int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
 int ipath_reset_device(int);
 void ipath_get_faststats(unsigned long);
+int ipath_set_linkstate(struct ipath_devdata *, u8);
+int ipath_set_mtu(struct ipath_devdata *, u16);
+int ipath_set_lid(struct ipath_devdata *, u32, u8);
+int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
 
 /* for use in system calls, where we want to know device type, etc. */
 #define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data)
@@ -642,10 +627,8 @@ void ipath_free_data(struct ipath_portda
 int ipath_waitfor_mdio_cmdready(struct ipath_devdata *);
 int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *);
 u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
-/* init PE-800-specific func */
-void ipath_init_pe800_funcs(struct ipath_devdata *);
-/* init HT-400-specific func */
-void ipath_init_ht400_funcs(struct ipath_devdata *);
+void ipath_init_iba6120_funcs(struct ipath_devdata *);
+void ipath_init_iba6110_funcs(struct ipath_devdata *);
 void ipath_get_eeprom_info(struct ipath_devdata *);
 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
 
@@ -801,7 +784,7 @@ static inline u32 ipath_read_creg32(cons
 
 struct device_driver;
 
-extern const char ipath_core_version[];
+extern const char ib_ipath_version[];
 
 int ipath_driver_create_group(struct device_driver *);
 void ipath_driver_remove_group(struct device_driver *);
@@ -810,6 +793,9 @@ int ipath_device_create_group(struct dev
 void ipath_device_remove_group(struct device *, struct ipath_devdata *);
 int ipath_expose_reset(struct device *);
 
+void ipath_diagpkt_add(void);
+void ipath_diagpkt_remove(void);
+
 int ipath_init_ipathfs(void);
 void ipath_exit_ipathfs(void);
 int ipathfs_add_device(struct ipath_devdata *);
@@ -831,10 +817,10 @@ const char *ipath_get_unit_name(int unit
 
 extern struct mutex ipath_mutex;
 
-#define IPATH_DRV_NAME		"ipath_core"
+#define IPATH_DRV_NAME		"ib_ipath"
 #define IPATH_MAJOR		233
 #define IPATH_USER_MINOR_BASE	0
-#define IPATH_SMA_MINOR		128
+#define IPATH_DIAGPKT_MINOR	127
 #define IPATH_DIAG_MINOR_BASE	129
 #define IPATH_NMINORS		255
 
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_keys.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_keys.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_keys.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_keys.c	2006-10-19 12:41:10.000000000 -0400
@@ -34,6 +34,7 @@
 #include <asm/io.h>
 
 #include "ipath_verbs.h"
+#include "ipath_kernel.h"
 
 /**
  * ipath_alloc_lkey - allocate an lkey
@@ -60,7 +61,7 @@ int ipath_alloc_lkey(struct ipath_lkey_t
 		r = (r + 1) & (rkt->max - 1);
 		if (r == n) {
 			spin_unlock_irqrestore(&rkt->lock, flags);
-			_VERBS_INFO("LKEY table full\n");
+			ipath_dbg(KERN_INFO "LKEY table full\n");
 			ret = 0;
 			goto bail;
 		}
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_layer.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_layer.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_layer.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_layer.c	2006-10-19 12:41:10.000000000 -0400
@@ -42,26 +42,20 @@
 
 #include "ipath_kernel.h"
 #include "ipath_layer.h"
+#include "ipath_verbs.h"
 #include "ipath_common.h"
 
 /* Acquire before ipath_devs_lock. */
 static DEFINE_MUTEX(ipath_layer_mutex);
 
-static int ipath_verbs_registered;
-
 u16 ipath_layer_rcv_opcode;
 
 static int (*layer_intr)(void *, u32);
 static int (*layer_rcv)(void *, void *, struct sk_buff *);
 static int (*layer_rcv_lid)(void *, void *);
-static int (*verbs_piobufavail)(void *);
-static void (*verbs_rcv)(void *, void *, void *, u32);
 
 static void *(*layer_add_one)(int, struct ipath_devdata *);
 static void (*layer_remove_one)(void *);
-static void *(*verbs_add_one)(int, struct ipath_devdata *);
-static void (*verbs_remove_one)(void *);
-static void (*verbs_timer_cb)(void *);
 
 int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
 {
@@ -107,302 +101,16 @@ int __ipath_layer_rcv_lid(struct ipath_d
 	return ret;
 }
 
-int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
-{
-	int ret = -ENODEV;
-
-	if (dd->verbs_layer.l_arg && verbs_piobufavail)
-		ret = verbs_piobufavail(dd->verbs_layer.l_arg);
-
-	return ret;
-}
-
-int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
-		      u32 tlen)
-{
-	int ret = -ENODEV;
-
-	if (dd->verbs_layer.l_arg && verbs_rcv) {
-		verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
-		ret = 0;
-	}
-
-	return ret;
-}
-
-int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
-{
-	u32 lstate;
-	int ret;
-
-	switch (newstate) {
-	case IPATH_IB_LINKDOWN:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKDOWN_SLEEP:
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKDOWN_DISABLE:
-		ipath_set_ib_lstate(dd,
-				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
-				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-		/* don't wait */
-		ret = 0;
-		goto bail;
-
-	case IPATH_IB_LINKINIT:
-		if (dd->ipath_flags & IPATH_LINKINIT) {
-			ret = 0;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		lstate = IPATH_LINKINIT;
-		break;
-
-	case IPATH_IB_LINKARM:
-		if (dd->ipath_flags & IPATH_LINKARMED) {
-			ret = 0;
-			goto bail;
-		}
-		if (!(dd->ipath_flags &
-		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
-			ret = -EINVAL;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		/*
-		 * Since the port can transition to ACTIVE by receiving
-		 * a non VL 15 packet, wait for either state.
-		 */
-		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
-		break;
-
-	case IPATH_IB_LINKACTIVE:
-		if (dd->ipath_flags & IPATH_LINKACTIVE) {
-			ret = 0;
-			goto bail;
-		}
-		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
-			ret = -EINVAL;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		lstate = IPATH_LINKACTIVE;
-		break;
-
-	default:
-		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
-		ret = -EINVAL;
-		goto bail;
-	}
-	ret = ipath_wait_linkstate(dd, lstate, 2000);
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
-
-/**
- * ipath_layer_set_mtu - set the MTU
- * @dd: the infinipath device
- * @arg: the new MTU
- *
- * we can handle "any" incoming size, the issue here is whether we
- * need to restrict our outgoing size.   For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link initialize (IPATH_IBSTATE_INIT) state...
- */
-int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
-{
-	u32 piosize;
-	int changed = 0;
-	int ret;
-
-	/*
-	 * mtu is IB data payload max.  It's the largest power of 2 less
-	 * than piosize (or even larger, since it only really controls the
-	 * largest we can receive; we can send the max of the mtu and
-	 * piosize).  We check that it's one of the valid IB sizes.
-	 */
-	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
-	    arg != 4096) {
-		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
-		ret = -EINVAL;
-		goto bail;
-	}
-	if (dd->ipath_ibmtu == arg) {
-		ret = 0;	/* same as current */
-		goto bail;
-	}
-
-	piosize = dd->ipath_ibmaxlen;
-	dd->ipath_ibmtu = arg;
-
-	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
-		/* Only if it's not the initial value (or reset to it) */
-		if (piosize != dd->ipath_init_ibmaxlen) {
-			dd->ipath_ibmaxlen = piosize;
-			changed = 1;
-		}
-	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
-		piosize = arg + IPATH_PIO_MAXIBHDR;
-		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
-			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
-			   arg);
-		dd->ipath_ibmaxlen = piosize;
-		changed = 1;
-	}
-
-	if (changed) {
-		/*
-		 * set the IBC maxpktlength to the size of our pio
-		 * buffers in words
-		 */
-		u64 ibc = dd->ipath_ibcctrl;
-		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
-			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
-
-		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
-		dd->ipath_ibmaxlen = piosize;
-		piosize /= sizeof(u32);	/* in words */
-		/*
-		 * for ICRC, which we only send in diag test pkt mode, and
-		 * we don't need to worry about that for mtu
-		 */
-		piosize += 1;
-
-		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-		dd->ipath_ibcctrl = ibc;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-		dd->ipath_f_tidtemplate(dd);
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
-
-int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
+void ipath_layer_lid_changed(struct ipath_devdata *dd)
 {
-	dd->ipath_lid = arg;
-	dd->ipath_lmc = lmc;
-
 	mutex_lock(&ipath_layer_mutex);
 
 	if (dd->ipath_layer.l_arg && layer_intr)
 		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
 
 	mutex_unlock(&ipath_layer_mutex);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_set_lid);
-
-int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
-{
-	/* XXX - need to inform anyone who cares this just happened. */
-	dd->ipath_guid = guid;
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
-
-__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
-{
-	return dd->ipath_guid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
-
-u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
-{
-	return dd->ipath_nguid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
-
-u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
-{
-	return dd->ipath_majrev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);
-
-u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
-{
-	return dd->ipath_minrev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);
-
-u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
-{
-	return dd->ipath_pcirev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);
-
-u32 ipath_layer_get_flags(struct ipath_devdata *dd)
-{
-	return dd->ipath_flags;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
-
-struct device *ipath_layer_get_device(struct ipath_devdata *dd)
-{
-	return &dd->pcidev->dev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_device);
-
-u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
-{
-	return dd->ipath_deviceid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
-
-u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
-{
-	return dd->ipath_vendorid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);
-
-u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
-{
-	return dd->ipath_lastibcstat;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
-
-u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
-{
-	return dd->ipath_ibmtu;
 }
 
-EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
-
 void ipath_layer_add(struct ipath_devdata *dd)
 {
 	mutex_lock(&ipath_layer_mutex);
@@ -411,10 +119,6 @@ void ipath_layer_add(struct ipath_devdat
 		dd->ipath_layer.l_arg =
 			layer_add_one(dd->ipath_unit, dd);
 
-	if (verbs_add_one)
-		dd->verbs_layer.l_arg =
-			verbs_add_one(dd->ipath_unit, dd);
-
 	mutex_unlock(&ipath_layer_mutex);
 }
 
@@ -427,11 +131,6 @@ void ipath_layer_remove(struct ipath_dev
 		dd->ipath_layer.l_arg = NULL;
 	}
 
-	if (dd->verbs_layer.l_arg && verbs_remove_one) {
-		verbs_remove_one(dd->verbs_layer.l_arg);
-		dd->verbs_layer.l_arg = NULL;
-	}
-
 	mutex_unlock(&ipath_layer_mutex);
 }
 
@@ -463,9 +162,6 @@ int ipath_layer_register(void *(*l_add)(
 		if (dd->ipath_layer.l_arg)
 			continue;
 
-		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
-			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
-
 		spin_unlock_irqrestore(&ipath_devs_lock, flags);
 		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
 		spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -509,107 +205,6 @@ void ipath_layer_unregister(void)
 
 EXPORT_SYMBOL_GPL(ipath_layer_unregister);
 
-static void __ipath_verbs_timer(unsigned long arg)
-{
-	struct ipath_devdata *dd = (struct ipath_devdata *) arg;
-
-	/*
-	 * If port 0 receive packet interrupts are not available, or
-	 * can be missed, poll the receive queue
-	 */
-	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
-		ipath_kreceive(dd);
-
-	/* Handle verbs layer timeouts. */
-	if (dd->verbs_layer.l_arg && verbs_timer_cb)
-		verbs_timer_cb(dd->verbs_layer.l_arg);
-
-	mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
-}
-
-/**
- * ipath_verbs_register - verbs layer registration
- * @l_piobufavail: callback for when PIO buffers become available
- * @l_rcv: callback for receiving a packet
- * @l_timer_cb: timer callback
- * @ipath_devdata: device data structure is put here
- */
-int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
-			 void (*l_remove)(void *arg),
-			 int (*l_piobufavail) (void *arg),
-			 void (*l_rcv) (void *arg, void *rhdr,
-					void *data, u32 tlen),
-			 void (*l_timer_cb) (void *arg))
-{
-	struct ipath_devdata *dd, *tmp;
-	unsigned long flags;
-
-	mutex_lock(&ipath_layer_mutex);
-
-	verbs_add_one = l_add;
-	verbs_remove_one = l_remove;
-	verbs_piobufavail = l_piobufavail;
-	verbs_rcv = l_rcv;
-	verbs_timer_cb = l_timer_cb;
-
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-
-	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-		if (!(dd->ipath_flags & IPATH_INITTED))
-			continue;
-
-		if (dd->verbs_layer.l_arg)
-			continue;
-
-		spin_unlock_irqrestore(&ipath_devs_lock, flags);
-		dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
-		spin_lock_irqsave(&ipath_devs_lock, flags);
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-	mutex_unlock(&ipath_layer_mutex);
-
-	ipath_verbs_registered = 1;
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_verbs_register);
-
-void ipath_verbs_unregister(void)
-{
-	struct ipath_devdata *dd, *tmp;
-	unsigned long flags;
-
-	mutex_lock(&ipath_layer_mutex);
-	spin_lock_irqsave(&ipath_devs_lock, flags);
-
-	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-		*dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
-
-		if (dd->verbs_layer.l_arg && verbs_remove_one) {
-			spin_unlock_irqrestore(&ipath_devs_lock, flags);
-			verbs_remove_one(dd->verbs_layer.l_arg);
-			spin_lock_irqsave(&ipath_devs_lock, flags);
-			dd->verbs_layer.l_arg = NULL;
-		}
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-	verbs_add_one = NULL;
-	verbs_remove_one = NULL;
-	verbs_piobufavail = NULL;
-	verbs_rcv = NULL;
-	verbs_timer_cb = NULL;
-
-	ipath_verbs_registered = 0;
-
-	mutex_unlock(&ipath_layer_mutex);
-}
-
-EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
-
 int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
 {
 	int ret;
@@ -698,390 +293,6 @@ u16 ipath_layer_get_bcast(struct ipath_d
 
 EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
 
-u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
-{
-	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
-
-static void update_sge(struct ipath_sge_state *ss, u32 length)
-{
-	struct ipath_sge *sge = &ss->sge;
-
-	sge->vaddr += length;
-	sge->length -= length;
-	sge->sge_length -= length;
-	if (sge->sge_length == 0) {
-		if (--ss->num_sge)
-			*sge = *ss->sg_list++;
-	} else if (sge->length == 0 && sge->mr != NULL) {
-		if (++sge->n >= IPATH_SEGSZ) {
-			if (++sge->m >= sge->mr->mapsz)
-				return;
-			sge->n = 0;
-		}
-		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
-		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
-	}
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-	return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-	return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
-	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-	return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-	return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-	return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
-	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-	return data;
-}
-#endif
-
-static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
-		    u32 length)
-{
-	u32 extra = 0;
-	u32 data = 0;
-	u32 last;
-
-	while (1) {
-		u32 len = ss->sge.length;
-		u32 off;
-
-		BUG_ON(len == 0);
-		if (len > length)
-			len = length;
-		if (len > ss->sge.sge_length)
-			len = ss->sge.sge_length;
-		/* If the source address is not aligned, try to align it. */
-		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
-		if (off) {
-			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
-					    ~(sizeof(u32) - 1));
-			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
-			u32 y;
-
-			y = sizeof(u32) - off;
-			if (len > y)
-				len = y;
-			if (len + extra >= sizeof(u32)) {
-				data |= set_upper_bits(v, extra *
-						       BITS_PER_BYTE);
-				len = sizeof(u32) - extra;
-				if (len == length) {
-					last = data;
-					break;
-				}
-				__raw_writel(data, piobuf);
-				piobuf++;
-				extra = 0;
-				data = 0;
-			} else {
-				/* Clear unused upper bytes */
-				data |= clear_upper_bytes(v, len, extra);
-				if (len == length) {
-					last = data;
-					break;
-				}
-				extra += len;
-			}
-		} else if (extra) {
-			/* Source address is aligned. */
-			u32 *addr = (u32 *) ss->sge.vaddr;
-			int shift = extra * BITS_PER_BYTE;
-			int ushift = 32 - shift;
-			u32 l = len;
-
-			while (l >= sizeof(u32)) {
-				u32 v = *addr;
-
-				data |= set_upper_bits(v, shift);
-				__raw_writel(data, piobuf);
-				data = get_upper_bits(v, ushift);
-				piobuf++;
-				addr++;
-				l -= sizeof(u32);
-			}
-			/*
-			 * We still have 'extra' number of bytes leftover.
-			 */
-			if (l) {
-				u32 v = *addr;
-
-				if (l + extra >= sizeof(u32)) {
-					data |= set_upper_bits(v, shift);
-					len -= l + extra - sizeof(u32);
-					if (len == length) {
-						last = data;
-						break;
-					}
-					__raw_writel(data, piobuf);
-					piobuf++;
-					extra = 0;
-					data = 0;
-				} else {
-					/* Clear unused upper bytes */
-					data |= clear_upper_bytes(v, l,
-								  extra);
-					if (len == length) {
-						last = data;
-						break;
-					}
-					extra += l;
-				}
-			} else if (len == length) {
-				last = data;
-				break;
-			}
-		} else if (len == length) {
-			u32 w;
-
-			/*
-			 * Need to round up for the last dword in the
-			 * packet.
-			 */
-			w = (len + 3) >> 2;
-			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
-			piobuf += w - 1;
-			last = ((u32 *) ss->sge.vaddr)[w - 1];
-			break;
-		} else {
-			u32 w = len >> 2;
-
-			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
-			piobuf += w;
-
-			extra = len & (sizeof(u32) - 1);
-			if (extra) {
-				u32 v = ((u32 *) ss->sge.vaddr)[w];
-
-				/* Clear unused upper bytes */
-				data = clear_upper_bytes(v, extra, 0);
-			}
-		}
-		update_sge(ss, len);
-		length -= len;
-	}
-	/* Update address before sending packet. */
-	update_sge(ss, length);
-	/* must flush early everything before trigger word */
-	ipath_flush_wc();
-	__raw_writel(last, piobuf);
-	/* be sure trigger word is written */
-	ipath_flush_wc();
-}
-
-/**
- * ipath_verbs_send - send a packet from the verbs layer
- * @dd: the infinipath device
- * @hdrwords: the number of words in the header
- * @hdr: the packet header
- * @len: the length of the packet in bytes
- * @ss: the SGE to send
- *
- * This is like ipath_sma_send_pkt() in that we need to be able to send
- * packets after the chip is initialized (MADs) but also like
- * ipath_layer_send_hdr() since its used by the verbs layer.
- */
-int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
-		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
-{
-	u32 __iomem *piobuf;
-	u32 plen;
-	int ret;
-
-	/* +1 is for the qword padding of pbc */
-	plen = hdrwords + ((len + 3) >> 2) + 1;
-	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
-		ipath_dbg("packet len 0x%x too long, failing\n", plen);
-		ret = -EINVAL;
-		goto bail;
-	}
-
-	/* Get a PIO buffer to use. */
-	piobuf = ipath_getpiobuf(dd, NULL);
-	if (unlikely(piobuf == NULL)) {
-		ret = -EBUSY;
-		goto bail;
-	}
-
-	/*
-	 * Write len to control qword, no flags.
-	 * We have to flush after the PBC for correctness on some cpus
-	 * or WC buffer can be written out of order.
-	 */
-	writeq(plen, piobuf);
-	ipath_flush_wc();
-	piobuf += 2;
-	if (len == 0) {
-		/*
-		 * If there is just the header portion, must flush before
-		 * writing last word of header for correctness, and after
-		 * the last header word (trigger word).
-		 */
-		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
-		ipath_flush_wc();
-		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
-		ipath_flush_wc();
-		ret = 0;
-		goto bail;
-	}
-
-	__iowrite32_copy(piobuf, hdr, hdrwords);
-	piobuf += hdrwords;
-
-	/* The common case is aligned and contained in one segment. */
-	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
-		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
-		u32 w;
-		u32 *addr = (u32 *) ss->sge.vaddr;
-
-		/* Update address before sending packet. */
-		update_sge(ss, len);
-		/* Need to round up for the last dword in the packet. */
-		w = (len + 3) >> 2;
-		__iowrite32_copy(piobuf, addr, w - 1);
-		/* flush everything written so far, before the trigger word */
-		ipath_flush_wc();
-		__raw_writel(addr[w - 1], piobuf + w - 1);
-		/* be sure trigger word is written */
-		ipath_flush_wc();
-		ret = 0;
-		goto bail;
-	}
-	copy_io(piobuf, ss, len);
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_verbs_send);
-
-int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-				  u64 *rwords, u64 *spkts, u64 *rpkts,
-				  u64 *xmit_wait)
-{
-	int ret;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
-		ret = -EINVAL;
-		goto bail;
-	}
-	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
-
-/**
- * ipath_layer_get_counters - get various chip counters
- * @dd: the infinipath device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int ipath_layer_get_counters(struct ipath_devdata *dd,
-			      struct ipath_layer_counters *cntrs)
-{
-	int ret;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* no hardware, freeze, etc. */
-		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
-		ret = -EINVAL;
-		goto bail;
-	}
-	cntrs->symbol_error_counter =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
-	cntrs->link_error_recovery_counter =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-	/*
-	 * The link downed counter counts when the other side downs the
-	 * connection.  We add in the number of times we downed the link
-	 * due to local link integrity errors to compensate.
-	 */
-	cntrs->link_downed_counter =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
-	cntrs->port_rcv_errors =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
-	cntrs->port_rcv_remphys_errors =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
-	cntrs->port_xmit_discards =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
-	cntrs->port_xmit_data =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-	cntrs->port_rcv_data =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-	cntrs->port_xmit_packets =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-	cntrs->port_rcv_packets =
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
-	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
-
-int ipath_layer_want_buffer(struct ipath_devdata *dd)
-{
-	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
-
 int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
 {
 	int ret = 0;
@@ -1153,389 +364,3 @@ int ipath_layer_set_piointbufavail_int(s
 }
 
 EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
-
-int ipath_layer_enable_timer(struct ipath_devdata *dd)
-{
-	/*
-	 * HT-400 has a design flaw where the chip and kernel idea
-	 * of the tail register don't always agree, and therefore we won't
-	 * get an interrupt on the next packet received.
-	 * If the board supports per packet receive interrupts, use it.
-	 * Otherwise, the timer function periodically checks for packets
-	 * to cover this case.
-	 * Either way, the timer is needed for verbs layer related
-	 * processing.
-	 */
-	if (dd->ipath_flags & IPATH_GPIO_INTR) {
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
-				 0x2074076542310ULL);
-		/* Enable GPIO bit 2 interrupt */
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-				 (u64) (1 << 2));
-	}
-
-	init_timer(&dd->verbs_layer.l_timer);
-	dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
-	dd->verbs_layer.l_timer.data = (unsigned long)dd;
-	dd->verbs_layer.l_timer.expires = jiffies + 1;
-	add_timer(&dd->verbs_layer.l_timer);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
-
-int ipath_layer_disable_timer(struct ipath_devdata *dd)
-{
-	/* Disable GPIO bit 2 interrupt */
-	if (dd->ipath_flags & IPATH_GPIO_INTR)
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
-
-	del_timer_sync(&dd->verbs_layer.l_timer);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
-
-/**
- * ipath_layer_set_verbs_flags - set the verbs layer flags
- * @dd: the infinipath device
- * @flags: the flags to set
- */
-int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
-{
-	struct ipath_devdata *ss;
-	unsigned long lflags;
-
-	spin_lock_irqsave(&ipath_devs_lock, lflags);
-
-	list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
-		if (!(ss->ipath_flags & IPATH_INITTED))
-			continue;
-		if ((flags & IPATH_VERBS_KERNEL_SMA) &&
-		    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
-			*ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
-		else
-			*ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
-	}
-
-	spin_unlock_irqrestore(&ipath_devs_lock, lflags);
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
-
-/**
- * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
- * @dd: the infinipath device
- */
-unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
-{
-	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
-
-/**
- * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
- * @dd: the infinipath device
- * @index: the PKEY index
- */
-unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
-{
-	unsigned ret;
-
-	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
-		ret = 0;
-	else
-		ret = dd->ipath_pd[0]->port_pkeys[index];
-
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
-
-/**
- * ipath_layer_get_pkeys - return the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the pkey table is placed here
- */
-int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
-	struct ipath_portdata *pd = dd->ipath_pd[0];
-
-	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
-
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @dd: the infinipath device
- * @key: the PKEY index
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct ipath_devdata *dd, u16 key)
-{
-	int i;
-	int ret;
-
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (dd->ipath_pkeys[i] != key)
-			continue;
-		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
-			dd->ipath_pkeys[i] = 0;
-			ret = 1;
-			goto bail;
-		}
-		break;
-	}
-
-	ret = 0;
-
-bail:
-	return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @dd: the infinipath device
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct ipath_devdata *dd, u16 key)
-{
-	int i;
-	u16 lkey = key & 0x7FFF;
-	int any = 0;
-	int ret;
-
-	if (lkey == 0x7FFF) {
-		ret = 0;
-		goto bail;
-	}
-
-	/* Look for an empty slot or a matching PKEY. */
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i]) {
-			any++;
-			continue;
-		}
-		/* If it matches exactly, try to increment the ref count */
-		if (dd->ipath_pkeys[i] == key) {
-			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
-				ret = 0;
-				goto bail;
-			}
-			/* Lost the race. Look for an empty slot below. */
-			atomic_dec(&dd->ipath_pkeyrefs[i]);
-			any++;
-		}
-		/*
-		 * It makes no sense to have both the limited and unlimited
-		 * PKEY set at the same time since the unlimited one will
-		 * disable the limited one.
-		 */
-		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
-			ret = -EEXIST;
-			goto bail;
-		}
-	}
-	if (!any) {
-		ret = -EBUSY;
-		goto bail;
-	}
-	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-		if (!dd->ipath_pkeys[i] &&
-		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
-			/* for ipathstats, etc. */
-			ipath_stats.sps_pkeys[i] = lkey;
-			dd->ipath_pkeys[i] = key;
-			ret = 1;
-			goto bail;
-		}
-	}
-	ret = -EBUSY;
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_layer_set_pkeys - set the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the PKEY table
- */
-int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
-	struct ipath_portdata *pd;
-	int i;
-	int changed = 0;
-
-	pd = dd->ipath_pd[0];
-
-	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-		u16 key = pkeys[i];
-		u16 okey = pd->port_pkeys[i];
-
-		if (key == okey)
-			continue;
-		/*
-		 * The value of this PKEY table entry is changing.
-		 * Remove the old entry in the hardware's array of PKEYs.
-		 */
-		if (okey & 0x7FFF)
-			changed |= rm_pkey(dd, okey);
-		if (key & 0x7FFF) {
-			int ret = add_pkey(dd, key);
-
-			if (ret < 0)
-				key = 0;
-			else
-				changed |= ret;
-		}
-		pd->port_pkeys[i] = key;
-	}
-	if (changed) {
-		u64 pkey;
-
-		pkey = (u64) dd->ipath_pkeys[0] |
-			((u64) dd->ipath_pkeys[1] << 16) |
-			((u64) dd->ipath_pkeys[2] << 32) |
-			((u64) dd->ipath_pkeys[3] << 48);
-		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
-			   (unsigned long long) pkey);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
-				 pkey);
-	}
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
-
-/**
- * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
- * @dd: the infinipath device
- *
- * Returns 0 if the default is POLL, 1 if the default is SLEEP.
- */
-int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
-{
-	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
-
-/**
- * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
- * @dd: the infinipath device
- * @sleep: the new state
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
-					 int sleep)
-{
-	if (sleep)
-		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-	else
-		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-			 dd->ipath_ibcctrl);
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
-
-int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
-{
-	return (dd->ipath_ibcctrl >>
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
-
-/**
- * ipath_layer_set_phyerrthreshold - set the physical error threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
-{
-	unsigned v;
-
-	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-	if (v != n) {
-		dd->ipath_ibcctrl &=
-			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
-			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
-		dd->ipath_ibcctrl |=
-			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-	}
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
-
-int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
-{
-	return (dd->ipath_ibcctrl >>
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
-
-/**
- * ipath_layer_set_overrunthreshold - set the overrun threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
-{
-	unsigned v;
-
-	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-	if (v != n) {
-		dd->ipath_ibcctrl &=
-			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
-			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
-		dd->ipath_ibcctrl |=
-			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-				 dd->ipath_ibcctrl);
-	}
-	return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
-
-int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
-			      size_t namelen)
-{
-	return dd->ipath_f_get_boardname(dd, name, namelen);
-}
-EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
-
-u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
-{
-	return dd->ipath_rcvhdrentsize;
-}
-EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);
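
For reference, the send path deleted above (ipath_verbs_send() and
copy_io()) depends on a strict store-ordering discipline for the
write-combining PIO buffers: copy everything except the last dword,
flush the WC buffer, then write the final "trigger" dword and flush
again, because the chip starts transmitting as soon as it sees the
trigger word.  A minimal sketch of that discipline, reusing the helper
names from the code above (illustrative only, in kernel context; this
is not the relocated implementation):

static void pio_trigger_copy(u32 __iomem *piobuf, const u32 *src,
			     u32 dwords)
{
	/* copy all but the last dword into the WC-mapped PIO buffer */
	__iowrite32_copy(piobuf, src, dwords - 1);
	/* flush so no earlier store can pass the trigger word */
	ipath_flush_wc();
	/* the last dword is the trigger; the chip sends when it lands */
	__raw_writel(src[dwords - 1], piobuf + dwords - 1);
	/* flush again so the trigger word itself is posted */
	ipath_flush_wc();
}

The unaligned and multi-SGE cases in copy_io() are this same pattern
with byte-shifting bolted on.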
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_layer.h linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_layer.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_layer.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_layer.h	2006-10-19 12:41:10.000000000 -0400
@@ -40,73 +40,9 @@
  */
 
 struct sk_buff;
-struct ipath_sge_state;
 struct ipath_devdata;
 struct ether_header;
 
-struct ipath_layer_counters {
-	u64 symbol_error_counter;
-	u64 link_error_recovery_counter;
-	u64 link_downed_counter;
-	u64 port_rcv_errors;
-	u64 port_rcv_remphys_errors;
-	u64 port_xmit_discards;
-	u64 port_xmit_data;
-	u64 port_rcv_data;
-	u64 port_xmit_packets;
-	u64 port_rcv_packets;
-	u32 local_link_integrity_errors;
-	u32 excessive_buffer_overrun_errors;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct ipath_seg {
-	void *vaddr;
-	size_t length;
-};
-
-/* The number of ipath_segs that fit in a page. */
-#define IPATH_SEGSZ     (PAGE_SIZE / sizeof (struct ipath_seg))
-
-struct ipath_segarray {
-	struct ipath_seg segs[IPATH_SEGSZ];
-};
-
-struct ipath_mregion {
-	u64 user_base;		/* User's address for this region */
-	u64 iova;		/* IB start address of this region */
-	size_t length;
-	u32 lkey;
-	u32 offset;		/* offset (bytes) to start of region */
-	int access_flags;
-	u32 max_segs;		/* number of ipath_segs in all the arrays */
-	u32 mapsz;		/* size of the map array */
-	struct ipath_segarray *map[0];	/* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct ipath_sge {
-	struct ipath_mregion *mr;
-	void *vaddr;		/* current pointer into the segment */
-	u32 sge_length;		/* length of the SGE */
-	u32 length;		/* remaining length of the segment */
-	u16 m;			/* current index: mr->map[m] */
-	u16 n;			/* current index: mr->map[m]->segs[n] */
-};
-
-struct ipath_sge_state {
-	struct ipath_sge *sg_list;	/* next SGE to be used if any */
-	struct ipath_sge sge;	/* progress state for the current SGE */
-	u8 num_sge;
-};
-
 int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
 			 void (*l_remove)(void *),
 			 int (*l_intr)(void *, u32),
@@ -114,62 +50,14 @@ int ipath_layer_register(void *(*l_add)(
 				      struct sk_buff *),
 			 u16 rcv_opcode,
 			 int (*l_rcv_lid)(void *, void *));
-int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
-			 void (*l_remove)(void *arg),
-			 int (*l_piobufavail)(void *arg),
-			 void (*l_rcv)(void *arg, void *rhdr,
-				       void *data, u32 tlen),
-			 void (*l_timer_cb)(void *arg));
 void ipath_layer_unregister(void);
-void ipath_verbs_unregister(void);
 int ipath_layer_open(struct ipath_devdata *, u32 * pktmax);
 u16 ipath_layer_get_lid(struct ipath_devdata *dd);
 int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *);
 u16 ipath_layer_get_bcast(struct ipath_devdata *dd);
-u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd);
-int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state);
-int ipath_layer_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_lid(struct ipath_devdata *, u32, u8);
 int ipath_layer_send_hdr(struct ipath_devdata *dd,
 			 struct ether_header *hdr);
-int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
-		     u32 * hdr, u32 len, struct ipath_sge_state *ss);
 int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd);
-int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
-			      size_t namelen);
-int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-				  u64 *rwords, u64 *spkts, u64 *rpkts,
-				  u64 *xmit_wait);
-int ipath_layer_get_counters(struct ipath_devdata *dd,
-			     struct ipath_layer_counters *cntrs);
-int ipath_layer_want_buffer(struct ipath_devdata *dd);
-int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid);
-__be64 ipath_layer_get_guid(struct ipath_devdata *);
-u32 ipath_layer_get_nguid(struct ipath_devdata *);
-u32 ipath_layer_get_majrev(struct ipath_devdata *);
-u32 ipath_layer_get_minrev(struct ipath_devdata *);
-u32 ipath_layer_get_pcirev(struct ipath_devdata *);
-u32 ipath_layer_get_flags(struct ipath_devdata *dd);
-struct device *ipath_layer_get_device(struct ipath_devdata *dd);
-u16 ipath_layer_get_deviceid(struct ipath_devdata *dd);
-u32 ipath_layer_get_vendorid(struct ipath_devdata *);
-u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd);
-u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd);
-int ipath_layer_enable_timer(struct ipath_devdata *dd);
-int ipath_layer_disable_timer(struct ipath_devdata *dd);
-int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags);
-unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd);
-unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index);
-int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys);
-int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys);
-int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd);
-int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
-					 int sleep);
-int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd);
-int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n);
-int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd);
-int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n);
-u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd);
 
 /* ipath_ether interrupt values */
 #define IPATH_LAYER_INT_IF_UP 0x2
@@ -178,9 +66,6 @@ u32 ipath_layer_get_rcvhdrentsize(struct
 #define IPATH_LAYER_INT_SEND_CONTINUE 0x10
 #define IPATH_LAYER_INT_BCAST 0x40
 
-/* _verbs_layer.l_flags */
-#define IPATH_VERBS_KERNEL_SMA 0x1
-
 extern unsigned ipath_debug; /* debugging bit mask */
 
 #endif				/* _IPATH_LAYER_H */
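
The header now carries only the ethernet-emulation (ipath_ether) hooks.
For anyone auditing the ABI change, here is a sketch of how a client
sits on the slimmed-down interface, matching the prototypes kept above.
The callback bodies and MY_RCV_OPCODE are placeholders, not driver
code, and this assumes ipath_layer.h plus the usual kernel headers:

#define MY_RCV_OPCODE 0x2	/* opcode steered to our l_rcv callback */

static void *my_add(int unit, struct ipath_devdata *dd)
{
	u32 pktmax;

	/* open the layer interface; pktmax returns the packet ceiling */
	if (ipath_layer_open(dd, &pktmax))
		return NULL;
	return dd;	/* opaque cookie handed to the other callbacks */
}

static void my_remove(void *arg) { }
static int my_intr(void *arg, u32 events) { return 0; }
static int my_rcv(void *arg, void *rhdr, struct sk_buff *skb) { return 0; }
static int my_rcv_lid(void *arg, void *rhdr) { return 0; }

static int __init my_init(void)
{
	return ipath_layer_register(my_add, my_remove, my_intr, my_rcv,
				    MY_RCV_OPCODE, my_rcv_lid);
}

Everything the verbs driver used to pull through this header is now
reached directly, as the ipath_mad.c diff below shows.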
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_mad.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_mad.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_mad.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_mad.c	2006-10-19 12:41:10.000000000 -0400
@@ -101,15 +101,15 @@ static int recv_subn_get_nodeinfo(struct
 	nip->num_ports = ibdev->phys_port_cnt;
 	/* This is already in network order */
 	nip->sys_guid = to_idev(ibdev)->sys_image_guid;
-	nip->node_guid = ipath_layer_get_guid(dd);
+	nip->node_guid = dd->ipath_guid;
 	nip->port_guid = nip->sys_guid;
-	nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd));
-	nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd));
-	majrev = ipath_layer_get_majrev(dd);
-	minrev = ipath_layer_get_minrev(dd);
+	nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
+	nip->device_id = cpu_to_be16(dd->ipath_deviceid);
+	majrev = dd->ipath_majrev;
+	minrev = dd->ipath_minrev;
 	nip->revision = cpu_to_be32((majrev << 16) | minrev);
 	nip->local_port_num = port;
-	vendor = ipath_layer_get_vendorid(dd);
+	vendor = dd->ipath_vendorid;
 	nip->vendor_id[0] = 0;
 	nip->vendor_id[1] = vendor >> 8;
 	nip->vendor_id[2] = vendor;
@@ -133,13 +133,89 @@ static int recv_subn_get_guidinfo(struct
 	 */
 	if (startgx == 0)
 		/* The first is a copy of the read-only HW GUID. */
-		*p = ipath_layer_get_guid(to_idev(ibdev)->dd);
+		*p = to_idev(ibdev)->dd->ipath_guid;
 	else
 		smp->status |= IB_SMP_INVALID_FIELD;
 
 	return reply(smp);
 }
 
+
+static int get_overrunthreshold(struct ipath_devdata *dd)
+{
+	return (dd->ipath_ibcctrl >>
+		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
+		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
+}
+
+/**
+ * set_overrunthreshold - set the overrun threshold
+ * @dd: the infinipath device
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
+{
+	unsigned v;
+
+	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
+		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
+	if (v != n) {
+		dd->ipath_ibcctrl &=
+			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
+			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
+		dd->ipath_ibcctrl |=
+			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+				 dd->ipath_ibcctrl);
+	}
+	return 0;
+}
+
+static int get_phyerrthreshold(struct ipath_devdata *dd)
+{
+	return (dd->ipath_ibcctrl >>
+		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+}
+
+/**
+ * set_phyerrthreshold - set the physical error threshold
+ * @dd: the infinipath device
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
+{
+	unsigned v;
+
+	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+	if (v != n) {
+		dd->ipath_ibcctrl &=
+			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
+			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
+		dd->ipath_ibcctrl |=
+			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+				 dd->ipath_ibcctrl);
+	}
+	return 0;
+}
+
+/**
+ * get_linkdowndefaultstate - get the default linkdown state
+ * @dd: the infinipath device
+ *
+ * Returns 0 if the default is POLL, 1 if the default is SLEEP.
+ */
+static int get_linkdowndefaultstate(struct ipath_devdata *dd)
+{
+	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
+}
+
 static int recv_subn_get_portinfo(struct ib_smp *smp,
 				  struct ib_device *ibdev, u8 port)
 {
@@ -166,7 +242,7 @@ static int recv_subn_get_portinfo(struct
 	    (dev->mkeyprot_resv_lmc >> 6) == 0)
 		pip->mkey = dev->mkey;
 	pip->gid_prefix = dev->gid_prefix;
-	lid = ipath_layer_get_lid(dev->dd);
+	lid = dev->dd->ipath_lid;
 	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
 	pip->sm_lid = cpu_to_be16(dev->sm_lid);
 	pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
@@ -177,14 +253,14 @@ static int recv_subn_get_portinfo(struct
 	pip->link_width_supported = 3;	/* 1x or 4x */
 	pip->link_width_active = 2;	/* 4x */
 	pip->linkspeed_portstate = 0x10;	/* 2.5Gbps */
-	ibcstat = ipath_layer_get_lastibcstat(dev->dd);
+	ibcstat = dev->dd->ipath_lastibcstat;
 	pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
 	pip->portphysstate_linkdown =
 		(ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
-		(ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2);
+		(get_linkdowndefaultstate(dev->dd) ? 1 : 2);
 	pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc;
 	pip->linkspeedactive_enabled = 0x11;	/* 2.5Gbps, 2.5Gbps */
-	switch (ipath_layer_get_ibmtu(dev->dd)) {
+	switch (dev->dd->ipath_ibmtu) {
 	case 4096:
 		mtu = IB_MTU_4096;
 		break;
@@ -217,7 +293,7 @@ static int recv_subn_get_portinfo(struct
 	pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
 	/* P_KeyViolations are counted by hardware. */
 	pip->pkey_violations =
-		cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) -
+		cpu_to_be16((ipath_get_cr_errpkey(dev->dd) -
 			     dev->z_pkey_violations) & 0xFFFF);
 	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
 	/* Only the hardware GUID is supported for now */
@@ -226,8 +302,8 @@ static int recv_subn_get_portinfo(struct
 	/* 32.768 usec. response time (guessing) */
 	pip->resv_resptimevalue = 3;
 	pip->localphyerrors_overrunerrors =
-		(ipath_layer_get_phyerrthreshold(dev->dd) << 4) |
-		ipath_layer_get_overrunthreshold(dev->dd);
+		(get_phyerrthreshold(dev->dd) << 4) |
+		get_overrunthreshold(dev->dd);
 	/* pip->max_credit_hint; */
 	/* pip->link_roundtrip_latency[3]; */
 
@@ -237,6 +313,20 @@ bail:
 	return ret;
 }
 
+/**
+ * get_pkeys - return the PKEY table for port 0
+ * @dd: the infinipath device
+ * @pkeys: the pkey table is placed here
+ */
+static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
+{
+	struct ipath_portdata *pd = dd->ipath_pd[0];
+
+	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
+
+	return 0;
+}
+
 static int recv_subn_get_pkeytable(struct ib_smp *smp,
 				   struct ib_device *ibdev)
 {
@@ -249,9 +339,9 @@ static int recv_subn_get_pkeytable(struc
 	memset(smp->data, 0, sizeof(smp->data));
 	if (startpx == 0) {
 		struct ipath_ibdev *dev = to_idev(ibdev);
-		unsigned i, n = ipath_layer_get_npkeys(dev->dd);
+		unsigned i, n = ipath_get_npkeys(dev->dd);
 
-		ipath_layer_get_pkeys(dev->dd, p);
+		get_pkeys(dev->dd, p);
 
 		for (i = 0; i < n; i++)
 			q[i] = cpu_to_be16(p[i]);
@@ -269,6 +359,24 @@ static int recv_subn_set_guidinfo(struct
 }
 
 /**
+ * set_linkdowndefaultstate - set the default linkdown state
+ * @dd: the infinipath device
+ * @sleep: the new state
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
+{
+	if (sleep)
+		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
+	else
+		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+			 dd->ipath_ibcctrl);
+	return 0;
+}
+
+/**
  * recv_subn_set_portinfo - set port information
  * @smp: the incoming SM packet
  * @ibdev: the infiniband device
@@ -290,7 +398,7 @@ static int recv_subn_set_portinfo(struct
 	u8 state;
 	u16 lstate;
 	u32 mtu;
-	int ret;
+	int ret, ore;
 
 	if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
 		goto err;
@@ -304,7 +412,7 @@ static int recv_subn_set_portinfo(struct
 	dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
 
 	lid = be16_to_cpu(pip->lid);
-	if (lid != ipath_layer_get_lid(dev->dd)) {
+	if (lid != dev->dd->ipath_lid) {
 		/* Must be a valid unicast LID address. */
 		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
 			goto err;
@@ -342,11 +450,11 @@ static int recv_subn_set_portinfo(struct
 	case 0: /* NOP */
 		break;
 	case 1: /* SLEEP */
-		if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1))
+		if (set_linkdowndefaultstate(dev->dd, 1))
 			goto err;
 		break;
 	case 2: /* POLL */
-		if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0))
+		if (set_linkdowndefaultstate(dev->dd, 0))
 			goto err;
 		break;
 	default:
@@ -376,7 +484,7 @@ static int recv_subn_set_portinfo(struct
 		/* XXX We have already partially updated our state! */
 		goto err;
 	}
-	ipath_layer_set_mtu(dev->dd, mtu);
+	ipath_set_mtu(dev->dd, mtu);
 
 	dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
 
@@ -392,20 +500,16 @@ static int recv_subn_set_portinfo(struct
 	 * later.
 	 */
 	if (pip->pkey_violations == 0)
-		dev->z_pkey_violations =
-			ipath_layer_get_cr_errpkey(dev->dd);
+		dev->z_pkey_violations = ipath_get_cr_errpkey(dev->dd);
 
 	if (pip->qkey_violations == 0)
 		dev->qkey_violations = 0;
 
-	if (ipath_layer_set_phyerrthreshold(
-		    dev->dd,
-		    (pip->localphyerrors_overrunerrors >> 4) & 0xF))
+	ore = pip->localphyerrors_overrunerrors;
+	if (set_phyerrthreshold(dev->dd, (ore >> 4) & 0xF))
 		goto err;
 
-	if (ipath_layer_set_overrunthreshold(
-		    dev->dd,
-		    (pip->localphyerrors_overrunerrors & 0xF)))
+	if (set_overrunthreshold(dev->dd, (ore & 0xF)))
 		goto err;
 
 	dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
@@ -423,7 +527,7 @@ static int recv_subn_set_portinfo(struct
 	 * is down or is being set to down.
 	 */
 	state = pip->linkspeed_portstate & 0xF;
-	flags = ipath_layer_get_flags(dev->dd);
+	flags = dev->dd->ipath_flags;
 	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
 	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
 		goto err;
@@ -439,7 +543,7 @@ static int recv_subn_set_portinfo(struct
 		/* FALLTHROUGH */
 	case IB_PORT_DOWN:
 		if (lstate == 0)
-			if (ipath_layer_get_linkdowndefaultstate(dev->dd))
+			if (get_linkdowndefaultstate(dev->dd))
 				lstate = IPATH_IB_LINKDOWN_SLEEP;
 			else
 				lstate = IPATH_IB_LINKDOWN;
@@ -451,7 +555,7 @@ static int recv_subn_set_portinfo(struct
 			lstate = IPATH_IB_LINKDOWN_DISABLE;
 		else
 			goto err;
-		ipath_layer_set_linkstate(dev->dd, lstate);
+		ipath_set_linkstate(dev->dd, lstate);
 		if (flags & IPATH_LINKACTIVE) {
 			event.event = IB_EVENT_PORT_ERR;
 			ib_dispatch_event(&event);
@@ -460,7 +564,7 @@ static int recv_subn_set_portinfo(struct
 	case IB_PORT_ARMED:
 		if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE)))
 			break;
-		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM);
+		ipath_set_linkstate(dev->dd, IPATH_IB_LINKARM);
 		if (flags & IPATH_LINKACTIVE) {
 			event.event = IB_EVENT_PORT_ERR;
 			ib_dispatch_event(&event);
@@ -469,7 +573,7 @@ static int recv_subn_set_portinfo(struct
 	case IB_PORT_ACTIVE:
 		if (!(flags & IPATH_LINKARMED))
 			break;
-		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
+		ipath_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
 		event.event = IB_EVENT_PORT_ACTIVE;
 		ib_dispatch_event(&event);
 		break;
@@ -493,6 +597,152 @@ done:
 	return ret;
 }
 
+/**
+ * rm_pkey - decrement the reference count for the given PKEY
+ * @dd: the infinipath device
+ * @key: the PKEY index
+ *
+ * Return true if this was the last reference and the hardware table entry
+ * needs to be changed.
+ */
+static int rm_pkey(struct ipath_devdata *dd, u16 key)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+		if (dd->ipath_pkeys[i] != key)
+			continue;
+		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
+			dd->ipath_pkeys[i] = 0;
+			ret = 1;
+			goto bail;
+		}
+		break;
+	}
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * add_pkey - add the given PKEY to the hardware table
+ * @dd: the infinipath device
+ * @key: the PKEY
+ *
+ * Return an error code if unable to add the entry, zero if no change,
+ * or 1 if the hardware PKEY register needs to be updated.
+ */
+static int add_pkey(struct ipath_devdata *dd, u16 key)
+{
+	int i;
+	u16 lkey = key & 0x7FFF;
+	int any = 0;
+	int ret;
+
+	if (lkey == 0x7FFF) {
+		ret = 0;
+		goto bail;
+	}
+
+	/* Look for an empty slot or a matching PKEY. */
+	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+		if (!dd->ipath_pkeys[i]) {
+			any++;
+			continue;
+		}
+		/* If it matches exactly, try to increment the ref count */
+		if (dd->ipath_pkeys[i] == key) {
+			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
+				ret = 0;
+				goto bail;
+			}
+			/* Lost the race. Look for an empty slot below. */
+			atomic_dec(&dd->ipath_pkeyrefs[i]);
+			any++;
+		}
+		/*
+		 * It makes no sense to have both the limited and unlimited
+		 * PKEY set at the same time since the unlimited one will
+		 * disable the limited one.
+		 */
+		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
+			ret = -EEXIST;
+			goto bail;
+		}
+	}
+	if (!any) {
+		ret = -EBUSY;
+		goto bail;
+	}
+	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+		if (!dd->ipath_pkeys[i] &&
+		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
+			/* for ipathstats, etc. */
+			ipath_stats.sps_pkeys[i] = lkey;
+			dd->ipath_pkeys[i] = key;
+			ret = 1;
+			goto bail;
+		}
+	}
+	ret = -EBUSY;
+
+bail:
+	return ret;
+}
+
+/**
+ * set_pkeys - set the PKEY table for port 0
+ * @dd: the infinipath device
+ * @pkeys: the PKEY table
+ */
+static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
+{
+	struct ipath_portdata *pd;
+	int i;
+	int changed = 0;
+
+	pd = dd->ipath_pd[0];
+
+	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
+		u16 key = pkeys[i];
+		u16 okey = pd->port_pkeys[i];
+
+		if (key == okey)
+			continue;
+		/*
+		 * The value of this PKEY table entry is changing.
+		 * Remove the old entry in the hardware's array of PKEYs.
+		 */
+		if (okey & 0x7FFF)
+			changed |= rm_pkey(dd, okey);
+		if (key & 0x7FFF) {
+			int ret = add_pkey(dd, key);
+
+			if (ret < 0)
+				key = 0;
+			else
+				changed |= ret;
+		}
+		pd->port_pkeys[i] = key;
+	}
+	if (changed) {
+		u64 pkey;
+
+		pkey = (u64) dd->ipath_pkeys[0] |
+			((u64) dd->ipath_pkeys[1] << 16) |
+			((u64) dd->ipath_pkeys[2] << 32) |
+			((u64) dd->ipath_pkeys[3] << 48);
+		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
+			   (unsigned long long) pkey);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
+				 pkey);
+	}
+	return 0;
+}
+
 static int recv_subn_set_pkeytable(struct ib_smp *smp,
 				   struct ib_device *ibdev)
 {
@@ -500,13 +750,12 @@ static int recv_subn_set_pkeytable(struc
 	__be16 *p = (__be16 *) smp->data;
 	u16 *q = (u16 *) smp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
-	unsigned i, n = ipath_layer_get_npkeys(dev->dd);
+	unsigned i, n = ipath_get_npkeys(dev->dd);
 
 	for (i = 0; i < n; i++)
 		q[i] = be16_to_cpu(p[i]);
 
-	if (startpx != 0 ||
-	    ipath_layer_set_pkeys(dev->dd, q) != 0)
+	if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
 		smp->status |= IB_SMP_INVALID_FIELD;
 
 	return recv_subn_get_pkeytable(smp, ibdev);
@@ -844,10 +1093,10 @@ static int recv_pma_get_portcounters(str
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
 		pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_layer_counters cntrs;
+	struct ipath_verbs_counters cntrs;
 	u8 port_select = p->port_select;
 
-	ipath_layer_get_counters(dev->dd, &cntrs);
+	ipath_get_counters(dev->dd, &cntrs);
 
 	/* Adjust counters for any resets done. */
 	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
@@ -944,8 +1193,8 @@ static int recv_pma_get_portcounters_ext
 	u64 swords, rwords, spkts, rpkts, xwait;
 	u8 port_select = p->port_select;
 
-	ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
-				      &rpkts, &xwait);
+	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
+				&rpkts, &xwait);
 
 	/* Adjust counters for any resets done. */
 	swords -= dev->z_port_xmit_data;
@@ -978,13 +1227,13 @@ static int recv_pma_set_portcounters(str
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
 		pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
-	struct ipath_layer_counters cntrs;
+	struct ipath_verbs_counters cntrs;
 
 	/*
 	 * Since the HW doesn't support clearing counters, we save the
 	 * current count and subtract it from future responses.
 	 */
-	ipath_layer_get_counters(dev->dd, &cntrs);
+	ipath_get_counters(dev->dd, &cntrs);
 
 	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
 		dev->z_symbol_error_counter = cntrs.symbol_error_counter;
@@ -1041,8 +1290,8 @@ static int recv_pma_set_portcounters_ext
 	struct ipath_ibdev *dev = to_idev(ibdev);
 	u64 swords, rwords, spkts, rpkts, xwait;
 
-	ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
-				      &rpkts, &xwait);
+	ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
+				&rpkts, &xwait);
 
 	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
 		dev->z_port_xmit_data = swords;
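
A note on the PKEY helpers that became static above: they rely on the
standard InfiniBand partition key encoding, where the low 15 bits name
the partition and bit 15 marks full (vs. limited) membership, so 0xFFFF
and 0x7FFF are the default full and limited keys.  That is why
add_pkey() treats lkey == 0x7FFF as a no-op and returns -EEXIST when a
new key collides with an existing entry in the low 15 bits.  A
standalone illustration of the encoding (not driver code):

static inline unsigned pkey_base(unsigned key)
{
	return key & 0x7FFF;		/* the partition number proper */
}

static inline int pkey_full_member(unsigned key)
{
	return (key & 0x8000) != 0;	/* bit 15: full vs. limited */
}

static inline int pkey_same_partition(unsigned a, unsigned b)
{
	/* add_pkey() refuses to install both memberships at once,
	 * since the full-member entry would disable the limited one */
	return pkey_base(a) == pkey_base(b);
}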
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_mr.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_mr.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_mr.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_mr.c	2006-10-19 12:41:10.000000000 -0400
@@ -36,6 +36,18 @@
 
 #include "ipath_verbs.h"
 
+/* Fast memory region */
+struct ipath_fmr {
+	struct ib_fmr ibfmr;
+	u8 page_shift;
+	struct ipath_mregion mr;        /* must be last */
+};
+
+static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+	return container_of(ibfmr, struct ipath_fmr, ibfmr);
+}
+
 /**
  * ipath_get_dma_mr - get a DMA memory region
  * @pd: protection domain for this memory region
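
The to_ifmr() helper added above is the usual container_of() idiom:
given the struct ib_fmr pointer the core stack hands back, recover the
enclosing ipath_fmr.  The "must be last" comment matters because
ipath_mregion ends in a variable-length map[] array, so it can only
live at the tail of the enclosing object.  A self-contained sketch of
the idiom in plain C (simplified; the kernel macro also type-checks
the member via typeof):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
	int tag;
	struct inner in;	/* embedded, like ibfmr in ipath_fmr */
};

int main(void)
{
	struct outer o = { .tag = 42 };
	struct inner *p = &o.in;	/* what a callback would receive */

	/* recover the enclosing object, as to_ifmr() does */
	printf("tag = %d\n", container_of(p, struct outer, in)->tag);
	return 0;
}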
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_pe800.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_pe800.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_pe800.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_pe800.c	1969-12-31 19:00:00.000000000 -0500
@@ -1,1254 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath PE-800 chip.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-/*
- * This file contains all the chip-specific register information and
- * access functions for the QLogic InfiniPath PE800, the PCI-Express chip.
- *
- * This lists the InfiniPath PE800 registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
-	unsigned long long Revision;
-	unsigned long long Control;
-	unsigned long long PageAlign;
-	unsigned long long PortCnt;
-	unsigned long long DebugPortSelect;
-	unsigned long long Reserved0;
-	unsigned long long SendRegBase;
-	unsigned long long UserRegBase;
-	unsigned long long CounterRegBase;
-	unsigned long long Scratch;
-	unsigned long long Reserved1;
-	unsigned long long Reserved2;
-	unsigned long long IntBlocked;
-	unsigned long long IntMask;
-	unsigned long long IntStatus;
-	unsigned long long IntClear;
-	unsigned long long ErrorMask;
-	unsigned long long ErrorStatus;
-	unsigned long long ErrorClear;
-	unsigned long long HwErrMask;
-	unsigned long long HwErrStatus;
-	unsigned long long HwErrClear;
-	unsigned long long HwDiagCtrl;
-	unsigned long long MDIO;
-	unsigned long long IBCStatus;
-	unsigned long long IBCCtrl;
-	unsigned long long ExtStatus;
-	unsigned long long ExtCtrl;
-	unsigned long long GPIOOut;
-	unsigned long long GPIOMask;
-	unsigned long long GPIOStatus;
-	unsigned long long GPIOClear;
-	unsigned long long RcvCtrl;
-	unsigned long long RcvBTHQP;
-	unsigned long long RcvHdrSize;
-	unsigned long long RcvHdrCnt;
-	unsigned long long RcvHdrEntSize;
-	unsigned long long RcvTIDBase;
-	unsigned long long RcvTIDCnt;
-	unsigned long long RcvEgrBase;
-	unsigned long long RcvEgrCnt;
-	unsigned long long RcvBufBase;
-	unsigned long long RcvBufSize;
-	unsigned long long RxIntMemBase;
-	unsigned long long RxIntMemSize;
-	unsigned long long RcvPartitionKey;
-	unsigned long long Reserved3;
-	unsigned long long RcvPktLEDCnt;
-	unsigned long long Reserved4[8];
-	unsigned long long SendCtrl;
-	unsigned long long SendPIOBufBase;
-	unsigned long long SendPIOSize;
-	unsigned long long SendPIOBufCnt;
-	unsigned long long SendPIOAvailAddr;
-	unsigned long long TxIntMemBase;
-	unsigned long long TxIntMemSize;
-	unsigned long long Reserved5;
-	unsigned long long PCIeRBufTestReg0;
-	unsigned long long PCIeRBufTestReg1;
-	unsigned long long Reserved51[6];
-	unsigned long long SendBufferError;
-	unsigned long long SendBufferErrorCONT1;
-	unsigned long long Reserved6SBE[6];
-	unsigned long long RcvHdrAddr0;
-	unsigned long long RcvHdrAddr1;
-	unsigned long long RcvHdrAddr2;
-	unsigned long long RcvHdrAddr3;
-	unsigned long long RcvHdrAddr4;
-	unsigned long long Reserved7RHA[11];
-	unsigned long long RcvHdrTailAddr0;
-	unsigned long long RcvHdrTailAddr1;
-	unsigned long long RcvHdrTailAddr2;
-	unsigned long long RcvHdrTailAddr3;
-	unsigned long long RcvHdrTailAddr4;
-	unsigned long long Reserved8RHTA[11];
-	unsigned long long Reserved9SW[8];
-	unsigned long long SerdesConfig0;
-	unsigned long long SerdesConfig1;
-	unsigned long long SerdesStatus;
-	unsigned long long XGXSConfig;
-	unsigned long long IBPLLCfg;
-	unsigned long long Reserved10SW2[3];
-	unsigned long long PCIEQ0SerdesConfig0;
-	unsigned long long PCIEQ0SerdesConfig1;
-	unsigned long long PCIEQ0SerdesStatus;
-	unsigned long long Reserved11;
-	unsigned long long PCIEQ1SerdesConfig0;
-	unsigned long long PCIEQ1SerdesConfig1;
-	unsigned long long PCIEQ1SerdesStatus;
-	unsigned long long Reserved12;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof(struct \
-    _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
-    struct infinipath_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_pe_kregs = {
-	.kr_control = IPATH_KREG_OFFSET(Control),
-	.kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
-	.kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
-	.kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
-	.kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
-	.kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
-	.kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
-	.kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
-	.kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
-	.kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
-	.kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
-	.kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
-	.kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
-	.kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
-	.kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
-	.kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
-	.kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
-	.kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
-	.kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
-	.kr_intclear = IPATH_KREG_OFFSET(IntClear),
-	.kr_intmask = IPATH_KREG_OFFSET(IntMask),
-	.kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
-	.kr_mdio = IPATH_KREG_OFFSET(MDIO),
-	.kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
-	.kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
-	.kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
-	.kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
-	.kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
-	.kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
-	.kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
-	.kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
-	.kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
-	.kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
-	.kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
-	.kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
-	.kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
-	.kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
-	.kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
-	.kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
-	.kr_revision = IPATH_KREG_OFFSET(Revision),
-	.kr_scratch = IPATH_KREG_OFFSET(Scratch),
-	.kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
-	.kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
-	.kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
-	.kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
-	.kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
-	.kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
-	.kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
-	.kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
-	.kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
-	.kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-	.kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
-	.kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
-	.kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
-	.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-	.kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
-
-	/*
-	 * These should not be used directly via ipath_read_kreg64(),
-	 * use them with ipath_read_kreg64_port()
-	 */
-	.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
-	.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
-	/* This group is PE-800-specific and used only in this file */
-	/* The rcvpktled register controls one of the debug port signals, so
-	 * a packet activity LED can be connected to it. */
-	.kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
-	.kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
-	.kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
-	.kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
-	.kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
-	.kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
-	.kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
-	.kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
-	.kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
-};
-
-static const struct ipath_cregs ipath_pe_cregs = {
-	.cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
-	.cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
-	.cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
-	.cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
-	.cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
-	.cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
-	.cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
-	.cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
-	.cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
-	.cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
-	.cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
-	.cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
-	.cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
-	.cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
-	.cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
-	.cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
-	.cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
-	.cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
-	.cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
-	.cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
-	.cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
-	.cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
-	.cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
-	.cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
-	.cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
-	.cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
-	.cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
-	.cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
-	.cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
-	.cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
-	.cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
-	.cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
-	.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK 0x1F
-#define INFINIPATH_I_RCVAVAIL_MASK 0x1F
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK  0x000000000000003fULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED      0x1000000000000000ULL
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_FOUND       0x0000000000008000
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
-	(_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
-	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-/**
- * ipath_pe_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
 * Most hardware errors are catastrophic, but for right now we print
 * them and continue.  We reuse the same message buffer as
 * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
-				     size_t msgl)
-{
-	ipath_err_t hwerrs;
-	u32 bits, ctrl;
-	int isfatal = 0;
-	char bitsmsg[64];
-
-	hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-	if (!hwerrs) {
-		/*
-		 * Better than printing confusing messages: this seems
-		 * to be related to clearing the CRC error, or the PLL
-		 * error, during init.
-		 */
-		ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
-		return;
-	} else if (hwerrs == ~0ULL) {
-		ipath_dev_err(dd, "Read of hardware error status failed "
-			      "(all bits set); ignoring\n");
-		return;
-	}
-	ipath_stats.sps_hwerrs++;
-
-	/* Always clear the error status register, except MEMBISTFAIL,
-	 * regardless of whether we continue or stop using the chip.
-	 * We want that set so we know it failed, even across driver reload.
-	 * We'll still ignore it in the hwerrmask.  We do this partly for
-	 * diagnostics, but also for support */
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-			 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
-	hwerrs &= dd->ipath_hwerrmask;
-
-	/*
-	 * make sure we get this much out, unless told to be quiet,
-	 * or it's occurred within the last 5 seconds
-	 */
-	if ((hwerrs & ~dd->ipath_lasthwerror) ||
-	    (ipath_debug & __IPATH_VERBDBG))
-		dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
-			 "(cleared)\n", (unsigned long long) hwerrs);
-	dd->ipath_lasthwerror |= hwerrs;
-
-	if (hwerrs & ~infinipath_hwe_bitsextant)
-		ipath_dev_err(dd, "hwerror interrupt with unknown errors "
-			      "%llx set\n", (unsigned long long)
-			      (hwerrs & ~infinipath_hwe_bitsextant));
-
-	ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-	if (ctrl & INFINIPATH_C_FREEZEMODE) {
-		if (hwerrs) {
-			/*
-			 * if any set that we aren't ignoring only make the
-			 * If any bits we aren't ignoring are set, only make
-			 * the complaint once, in case the error is stuck or
-			 * recurring and we get here multiple times.
-			if (dd->ipath_flags & IPATH_INITTED) {
-				ipath_dev_err(dd, "Fatal Error (freeze "
-					      "mode), no longer usable\n");
-				isfatal = 1;
-			}
-			/*
-			 * Mark as having had an error for driver, and also
-			 * for /sys and status word mapped to user programs.
-			 * This marks unit as not usable, until reset
-			 */
-			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-			*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-			dd->ipath_flags &= ~IPATH_INITTED;
-		} else {
-			ipath_dbg("Clearing freezemode on ignored hardware "
-				  "error\n");
-			ctrl &= ~INFINIPATH_C_FREEZEMODE;
-			ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-					 ctrl);
-		}
-	}
-
-	*msg = '\0';
-
-	if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
-		strlcat(msg, "[Memory BIST test failed, PE-800 unusable]",
-			msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-	if (hwerrs & (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
-		      << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_RXEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg, "[RXE Parity Errs %x] ",
-			 bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-	if (hwerrs & (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
-		      << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_TXEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg, "[TXE Parity Errs %x] ",
-			 bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-	if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
-		      << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
-		bits = (u32) ((hwerrs >>
-			       INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
-			      INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[PCIe Mem Parity Errs %x] ", bits);
-		strlcat(msg, bitsmsg, msgl);
-	}
-	if (hwerrs & INFINIPATH_HWE_IBCBUSTOSPCPARITYERR)
-		strlcat(msg, "[IB2IPATH Parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_IBCBUSFRSPCPARITYERR)
-		strlcat(msg, "[IPATH2IB Parity]", msgl);
-
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |	\
-			 INFINIPATH_HWE_COREPLL_RFSLIP )
-
-	if (hwerrs & _IPATH_PLL_FAIL) {
-		snprintf(bitsmsg, sizeof bitsmsg,
-			 "[PLL failed (%llx), PE-800 unusable]",
-			 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
-		strlcat(msg, bitsmsg, msgl);
-		/* ignore from now on, so disable until driver reloaded */
-		dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
-		/*
-		 * If it occurs, it is left masked since the external
-		 * interface is unused
-		 */
-		dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-				 dd->ipath_hwerrmask);
-	}
-
-	if (hwerrs & INFINIPATH_HWE_PCIEPOISONEDTLP)
-		strlcat(msg, "[PCIe Poisoned TLP]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIECPLTIMEOUT)
-		strlcat(msg, "[PCIe completion timeout]", msgl);
-
-	/*
-	 * In practice, it's unlikely that we'll see PCIe PLL, bus
-	 * parity, or memory parity failures, because most likely we
-	 * won't be able to talk to the core of the chip.  Nonetheless, we
-	 * might see them, if they are in parts of the PCIe core that aren't
-	 * essential.
-	 */
-	if (hwerrs & INFINIPATH_HWE_PCIE1PLLFAILED)
-		strlcat(msg, "[PCIePLL1]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIE0PLLFAILED)
-		strlcat(msg, "[PCIePLL0]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXTLH)
-		strlcat(msg, "[PCIe XTLH core parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYXADM)
-		strlcat(msg, "[PCIe ADM TX core parity]", msgl);
-	if (hwerrs & INFINIPATH_HWE_PCIEBUSPARITYRADM)
-		strlcat(msg, "[PCIe ADM RX core parity]", msgl);
-
-	if (hwerrs & INFINIPATH_HWE_RXDSYNCMEMPARITYERR)
-		strlcat(msg, "[Rx Dsync]", msgl);
-	if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED)
-		strlcat(msg, "[SerDes PLL]", msgl);
-
-	ipath_dev_err(dd, "%s hardware error\n", msg);
-	if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
-		/*
-		 * for /sys status file; if no trailing } is copied, we'll
-		 * know it was truncated.
-		 */
-		snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
-			 "{%s}", msg);
-	}
-}
-
-/**
- * ipath_pe_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * info is based on the board revision register
- */
-static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
-			      size_t namelen)
-{
-	char *n = NULL;
-	u8 boardrev = dd->ipath_boardrev;
-	int ret;
-
-	switch (boardrev) {
-	case 0:
-		n = "InfiniPath_Emulation";
-		break;
-	case 1:
-		n = "InfiniPath_PE-800-Bringup";
-		break;
-	case 2:
-		n = "InfiniPath_PE-880";
-		break;
-	case 3:
-		n = "InfiniPath_PE-850";
-		break;
-	case 4:
-		n = "InfiniPath_PE-860";
-		break;
-	default:
-		ipath_dev_err(dd,
-			      "Don't yet know about board with ID %u\n",
-			      boardrev);
-		snprintf(name, namelen, "Unknown_InfiniPath_PE-8xx_%u",
-			 boardrev);
-		break;
-	}
-	if (n)
-		snprintf(name, namelen, "%s", n);
-
-	if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev > 2) {
-		ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n",
-			      dd->ipath_majrev, dd->ipath_minrev);
-		ret = 1;
-	} else
-		ret = 0;
-
-	return ret;
-}
-
-/**
- * ipath_pe_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
-{
-	ipath_err_t val;
-	u64 extsval;
-
-	extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
-	if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
-		ipath_dev_err(dd, "MemBIST did not complete!\n");
-
-	val = ~0ULL;	/* barring bugs, all hwerrors become interrupts */
-
-	if (!dd->ipath_boardrev)	/* no PLL for Emulator */
-		val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-
-	/* workaround bug 9460 in internal interface bus parity checking */
-	val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
-
-	dd->ipath_hwerrmask = val;
-}
-
-/**
- * ipath_pe_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
-{
-	u64 val, tmp, config1;
-	int ret = 0, change = 0;
-
-	ipath_dbg("Trying to bringup serdes\n");
-
-	if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
-	    INFINIPATH_HWE_SERDESPLLFAILED) {
-		ipath_dbg("At start, serdes PLL failed bit set "
-			  "in hwerrstatus, clearing and continuing\n");
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-				 INFINIPATH_HWE_SERDESPLLFAILED);
-	}
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-	config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
-	ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
-		   "xgxsconfig %llx\n", (unsigned long long) val,
-		   (unsigned long long) config1, (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-	/*
-	 * Force reset on, also set rxdetect enable.  Must do before reading
-	 * serdesstatus at least for simulation, or some of the bits in
-	 * serdes status will come back as undefined and cause simulation
-	 * failures
-	 */
-	val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
-		| INFINIPATH_SERDC0_L1PWR_DN;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-	/* be sure chip saw it */
-	tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	udelay(5);		/* need pll reset set at least for a bit */
-	/*
-	 * after PLL is reset, set the per-lane Resets and TxIdle and
-	 * clear the PLL reset and rxdetect (to get falling edge).
-	 * Leave L1PWR bits set (permanently)
-	 */
-	val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
-		 | INFINIPATH_SERDC0_L1PWR_DN);
-	val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
-	ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
-		   "and txidle (%llx)\n", (unsigned long long) val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-	/* be sure chip saw it */
-	tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	/* need PLL reset clear for at least 11 usec before lane
-	 * resets cleared; give it a few more to be sure */
-	udelay(15);
-	val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);
-
-	ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
-		   "(writing %llx)\n", (unsigned long long) val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-	/* be sure chip saw it */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-	if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
-	     INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
-		val &=
-			~(INFINIPATH_XGXS_MDIOADDR_MASK <<
-			  INFINIPATH_XGXS_MDIOADDR_SHIFT);
-		/* MDIO address 3 */
-		val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
-		change = 1;
-	}
-	if (val & INFINIPATH_XGXS_RESET) {
-		val &= ~INFINIPATH_XGXS_RESET;
-		change = 1;
-	}
-	if (change)
-		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-	/* clear current and de-emphasis bits */
-	config1 &= ~0x0ffffffff00ULL;
-	/* set current to 20ma */
-	config1 |= 0x00000000000ULL;
-	/* set de-emphasis to -5.68dB */
-	config1 |= 0x0cccc000000ULL;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
-	ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
-		   "config1=%llx, sstatus=%llx xgxs=%llx\n",
-		   (unsigned long long) val, (unsigned long long) config1,
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-		   (unsigned long long)
-		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-	if (!ipath_waitfor_mdio_cmdready(dd)) {
-		ipath_write_kreg(
-			dd, dd->ipath_kregs->kr_mdio,
-			ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
-				       IPATH_MDIO_CTRL_XGXS_REG_8, 0));
-		if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
-					   IPATH_MDIO_DATAVALID, &val))
-			ipath_dbg("Never got MDIO data for XGXS "
-				  "status read\n");
-		else
-			ipath_cdbg(VERBOSE, "MDIO Read reg8, "
-				   "'bank' 31 %x\n", (u32) val);
-	} else
-		ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
-
-	return ret;
-}
-
-/**
- * ipath_pe_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when driver is being unloaded
- */
-static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
-{
-	u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-	val |= INFINIPATH_SERDC0_TXIDLE;
-	ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
-		  (unsigned long long) val);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-/* this is not yet needed on the PE-800, so just return 0. */
-static int ipath_pe_intconfig(struct ipath_devdata *dd)
-{
-	return 0;
-}
-
-/**
- * ipath_setup_pe_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state).
- *
- * Note:  We try to match the Mellanox HCA LED behavior as best
- * we can.  Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate.  That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
-				     u64 ltst)
-{
-	u64 extctl;
-
-	/* the diags use the LED to indicate diag info, so we leave
-	 * the external LED alone when the diags are running */
-	if (ipath_diag_inuse)
-		return;
-
-	extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
-				       INFINIPATH_EXTC_LED2PRIPORT_ON);
-
-	if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP)
-		extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
-	if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-		extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
-	dd->ipath_extctrl = extctl;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
-}
-
-/**
- * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the infinipath device
- *
- * This is called during driver unload.
- * We do the pci_disable_msi here, not in generic code, because it
- * isn't used for the HT-400. If we do end up needing pci_enable_msi
- * at some point in the future for HT-400, we'll move the call back
- * into the main init_one code.
- */
-static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
-{
-	dd->ipath_msi_lo = 0;	/* just in case unload fails */
-	pci_disable_msi(dd->pcidev);
-}
-
-/**
- * ipath_setup_pe_config - setup PCIe config related stuff
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * The pci_enable_msi() call will fail on systems with MSI quirks
- * such as those with AMD8131, even if the device of interest is not
- * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed
- * late in 2.6.16).
- * All that can be done is to edit the kernel source to remove the quirk
- * check until that is fixed.
- * We do not need to call enable_msi() for our HyperTransport chip (HT-400),
- * even though it uses MSI, and we want to avoid the quirk warning, so
- * we call enable_msi() only for the PE-800.  If we do end up needing
- * pci_enable_msi at some point in the future for HT-400, we'll move the
- * call back into the main init_one code.
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int ipath_setup_pe_config(struct ipath_devdata *dd,
-				 struct pci_dev *pdev)
-{
-	int pos, ret;
-
-	dd->ipath_msi_lo = 0;	/* used as a flag during reset processing */
-	ret = pci_enable_msi(dd->pcidev);
-	if (ret)
-		ipath_dev_err(dd, "pci_enable_msi failed: %d, "
-			      "interrupts may not work\n", ret);
-	/* continue even if it fails, we may still be OK... */
-
-	if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
-		u16 control;
-		pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
-				      &dd->ipath_msi_lo);
-		pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
-				      &dd->ipath_msi_hi);
-		pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
-				     &control);
-		/* now save the data (vector) info */
-		pci_read_config_word(dd->pcidev,
-				     pos + ((control & PCI_MSI_FLAGS_64BIT)
-					    ? 12 : 8),
-				     &dd->ipath_msi_data);
-		ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
-			   "0x%x, control=0x%x\n", dd->ipath_msi_data,
-			   pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
-			   control);
-		/* we save the cachelinesize also, although it doesn't
-		 * really matter */
-		pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
-				     &dd->ipath_pci_cacheline);
-	} else
-		ipath_dev_err(dd, "Can't find MSI capability, "
-			      "can't save MSI settings for reset\n");
-	if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) {
-		u16 linkstat;
-		pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
-				     &linkstat);
-		linkstat >>= 4;
-		linkstat &= 0x1f;
-		if (linkstat != 8)
-			ipath_dev_err(dd, "PCIe width %u, "
-				      "performance reduced\n", linkstat);
-	}
-	else
-		ipath_dev_err(dd, "Can't find PCI Express "
-			      "capability!\n");
-	return 0;
-}
-
-static void ipath_init_pe_variables(void)
-{
-	/*
-	 * bits for selecting i2c direction and values,
-	 * used for I2C serial flash
-	 */
-	ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
-	ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
-	ipath_gpio_sda = IPATH_GPIO_SDA;
-	ipath_gpio_scl = IPATH_GPIO_SCL;
-
-	/* variables for sanity checking interrupt and errors */
-	infinipath_hwe_bitsextant =
-		(INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
-		(INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
-		 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
-		INFINIPATH_HWE_PCIE1PLLFAILED |
-		INFINIPATH_HWE_PCIE0PLLFAILED |
-		INFINIPATH_HWE_PCIEPOISONEDTLP |
-		INFINIPATH_HWE_PCIECPLTIMEOUT |
-		INFINIPATH_HWE_PCIEBUSPARITYXTLH |
-		INFINIPATH_HWE_PCIEBUSPARITYXADM |
-		INFINIPATH_HWE_PCIEBUSPARITYRADM |
-		INFINIPATH_HWE_MEMBISTFAILED |
-		INFINIPATH_HWE_COREPLL_FBSLIP |
-		INFINIPATH_HWE_COREPLL_RFSLIP |
-		INFINIPATH_HWE_SERDESPLLFAILED |
-		INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
-		INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
-	infinipath_i_bitsextant =
-		(INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
-		(INFINIPATH_I_RCVAVAIL_MASK <<
-		 INFINIPATH_I_RCVAVAIL_SHIFT) |
-		INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
-		INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
-	infinipath_e_bitsextant =
-		INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
-		INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
-		INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
-		INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
-		INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
-		INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
-		INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-		INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
-		INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
-		INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
-		INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
-		INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
-		INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
-		INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
-		INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
-		INFINIPATH_E_HARDWARE;
-
-	infinipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
-	infinipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
-}
-
-/* setup the MSI stuff again after a reset.  I'd like to just call
- * pci_enable_msi() and request_irq() again, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline.  Perhaps we can somehow tie this
- * into the PCIe hotplug support at some point.
- * Note, because I'm doing it all here, I don't call pci_disable_msi()
- * or free_irq() at the start of ipath_setup_pe_reset().
- */
-static int ipath_reinit_msi(struct ipath_devdata *dd)
-{
-	int pos;
-	u16 control;
-	int ret;
-
-	if (!dd->ipath_msi_lo) {
-		dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
-			 "initial setup failed?\n");
-		ret = 0;
-		goto bail;
-	}
-
-	if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
-		ipath_dev_err(dd, "Can't find MSI capability, "
-			      "can't restore MSI settings\n");
-		ret = 0;
-		goto bail;
-	}
-	ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
-		   dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
-	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
-			       dd->ipath_msi_lo);
-	ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
-		   dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
-	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
-			       dd->ipath_msi_hi);
-	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
-	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
-		ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
-			   "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
-			   control, control | PCI_MSI_FLAGS_ENABLE);
-		control |= PCI_MSI_FLAGS_ENABLE;
-		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
-				      control);
-	}
-	/* now rewrite the data (vector) info */
-	pci_write_config_word(dd->pcidev, pos +
-			      ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
-			      dd->ipath_msi_data);
-	/* we restore the cachelinesize also, although it doesn't really
-	 * matter */
-	pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
-			      dd->ipath_pci_cacheline);
-	/* and now set the pci master bit again */
-	pci_set_master(dd->pcidev);
-	ret = 1;
-
-bail:
-	return ret;
-}
-
-/* This routine sleeps, so it can only be called from user context, not
- * from interrupt context.  If we need interrupt context, we can split
- * it into two routines.
- */
-static int ipath_setup_pe_reset(struct ipath_devdata *dd)
-{
-	u64 val;
-	int i;
-	int ret;
-
-	/* Use ERROR so it shows up in logs, etc. */
-	ipath_dev_err(dd, "Resetting PE-800 unit %u\n",
-		      dd->ipath_unit);
-	/* keep chip from being accessed in a few places */
-	dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
-	val = dd->ipath_control | INFINIPATH_C_RESET;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
-	mb();
-
-	for (i = 1; i <= 5; i++) {
-		int r;
-		/* allow MBIST, etc. to complete; longer on each retry.
-		 * We sometimes get machine checks from bus timeout if no
-		 * response, so for now, make it *really* long.
-		 */
-		msleep(1000 + (1 + i) * 2000);
-		if ((r =
-		     pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
-					    dd->ipath_pcibar0)))
-			ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
-				      r);
-		if ((r =
-		     pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
-					    dd->ipath_pcibar1)))
-			ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
-				      r);
-		/* now re-enable memory access */
-		if ((r = pci_enable_device(dd->pcidev)))
-			ipath_dev_err(dd, "pci_enable_device failed after "
-				      "reset: %d\n", r);
-		/* whether it worked or not, mark as present, again */
-		dd->ipath_flags |= IPATH_PRESENT;
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
-		if (val == dd->ipath_revision) {
-			ipath_cdbg(VERBOSE, "Got matching revision "
-				   "register %llx on try %d\n",
-				   (unsigned long long) val, i);
-			ret = ipath_reinit_msi(dd);
-			goto bail;
-		}
-		/* Probably getting -1 back */
-		ipath_dbg("Didn't get expected revision register, "
-			  "got %llx, try %d\n", (unsigned long long) val,
-			  i + 1);
-	}
-	ret = 0; /* failed */
-
-bail:
-	return ret;
-}
-
-/**
- * ipath_pe_put_tid - write a TID in chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: 0 for eager, 1 for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used for both the full cleanup on exit, as well as the normal
- * setup and teardown.
- */
-static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
-			     u32 type, unsigned long pa)
-{
-	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
-	unsigned long flags = 0; /* keep gcc quiet */
-
-	if (pa != dd->ipath_tidinvalid) {
-		if (pa & ((1U << 11) - 1)) {
-			dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
-				 "not 4KB aligned!\n", pa);
-			return;
-		}
-		pa >>= 11;
-		/* paranoia check */
-		if (pa & (7<<29))
-			ipath_dev_err(dd,
-				      "BUG: Physical page address 0x%lx "
-				      "has bits set in 31-29\n", pa);
-
-		if (type == 0)
-			pa |= dd->ipath_tidtemplate;
-		else /* for now, always full 4KB page */
-			pa |= 2 << 29;
-	}
-
-	/* workaround chip bug 9437 by writing each TID twice
-	 * and holding a spinlock around the writes, so they don't
-	 * intermix with other TID (eager or expected) writes
-	 * Unfortunately, this call can be done from interrupt level
-	 * for the port 0 eager TIDs, so we have to use irqsave
-	 */
-	spin_lock_irqsave(&dd->ipath_tid_lock, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
-	if (dd->ipath_kregbase)
-		writel(pa, tidp32);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
-	mmiowb();
-	spin_unlock_irqrestore(&dd->ipath_tid_lock, flags);
-}
-
-/**
- * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * clear all TID entries for a port, expected and eager.
- * Used from ipath_close().  On PE-800, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
-	u64 __iomem *tidbase;
-	unsigned long tidinv;
-	int i;
-
-	if (!dd->ipath_kregbase)
-		return;
-
-	ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
-	tidinv = dd->ipath_tidinvalid;
-	tidbase = (u64 __iomem *)
-		((char __iomem *)(dd->ipath_kregbase) +
-		 dd->ipath_rcvtidbase +
-		 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
-
-	for (i = 0; i < dd->ipath_rcvtidcnt; i++)
-		ipath_pe_put_tid(dd, &tidbase[i], 0, tidinv);
-
-	tidbase = (u64 __iomem *)
-		((char __iomem *)(dd->ipath_kregbase) +
-		 dd->ipath_rcvegrbase +
-		 port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
-
-	for (i = 0; i < dd->ipath_rcvegrcnt; i++)
-		ipath_pe_put_tid(dd, &tidbase[i], 1, tidinv);
-}
-
-/**
- * ipath_pe_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values that we use a lot, to avoid recalculating them each time
- */
-static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
-{
-	u32 egrsize = dd->ipath_rcvegrbufsize;
-
-	/* For now, we always allocate 4KB buffers (at init) so we can
-	 * receive max size packets.  We may want a module parameter to
- * specify 2KB or 4KB and/or make it per port instead of per device
-	 * for those who want to reduce memory footprint.  Note that the
-	 * ipath_rcvhdrentsize size must be large enough to hold the largest
-	 * IB header (currently 96 bytes) that we expect to handle (plus of
-	 * course the 2 dwords of RHF).
-	 */
-	if (egrsize == 2048)
-		dd->ipath_tidtemplate = 1U << 29;
-	else if (egrsize == 4096)
-		dd->ipath_tidtemplate = 2U << 29;
-	else {
-		egrsize = 4096;
-		dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
-			 "%u, using %u\n", dd->ipath_rcvegrbufsize,
-			 egrsize);
-		dd->ipath_tidtemplate = 2U << 29;
-	}
-	dd->ipath_tidinvalid = 0;
-}
-
-static int ipath_pe_early_init(struct ipath_devdata *dd)
-{
-	dd->ipath_flags |= IPATH_4BYTE_TID;
-
-	/*
-	 * For openib, we need to be able to handle an IB header of 96 bytes
-	 * or 24 dwords.  HT-400 has arbitrary sized receive buffers, so we
-	 * made them the same size as the PIO buffers.  The PE-800 does not
-	 * handle arbitrary size buffers, so we need the header large enough
-	 * to handle largest IB header, but still have room for a 2KB MTU
-	 * standard IB packet.
-	 */
-	dd->ipath_rcvhdrentsize = 24;
-	dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
-
-	/* For HT-400, we allocate a somewhat overly large eager buffer,
-	 * such that we can guarantee that we can receive the largest packet
-	 * that we can send out.  To truly support a 4KB MTU, we need to
- * bump this to a larger value.  We'll do this when we get around to
- * testing 4KB sends on the PE-800, which we have not yet done.
-	 */
-	dd->ipath_rcvegrbufsize = 2048;
-	/*
-	 * the min() check here is currently a nop, but it may not always
-	 * be, depending on just how we do ipath_rcvegrbufsize
-	 */
-	dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
-				 dd->ipath_rcvegrbufsize +
-				 (dd->ipath_rcvhdrentsize << 2));
-	dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-
-	/*
-	 * For PE-800, we can request a receive interrupt for 1 or
-	 * more packets from current offset.  For now, we set this
-	 * up for a single packet, to match the HT-400 behavior.
-	 */
-	dd->ipath_rhdrhead_intr_off = 1ULL<<32;
-
-	ipath_get_eeprom_info(dd);
-
-	return 0;
-}
-
-int __attribute__((weak)) ipath_unordered_wc(void)
-{
-	return 0;
-}
-
-/**
- * ipath_pe_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port data
- * @kbase: ipath_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
-	struct ipath_base_info *kinfo = kbase;
-
-	if (ipath_unordered_wc()) {
-		kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
-		ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
-	}
-	else
-		ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
-
-	kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE;
-
-	return 0;
-}
-
-/**
- * ipath_init_pe800_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_pe800_funcs(struct ipath_devdata *dd)
-{
-	dd->ipath_f_intrsetup = ipath_pe_intconfig;
-	dd->ipath_f_bus = ipath_setup_pe_config;
-	dd->ipath_f_reset = ipath_setup_pe_reset;
-	dd->ipath_f_get_boardname = ipath_pe_boardname;
-	dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
-	dd->ipath_f_early_init = ipath_pe_early_init;
-	dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
-	dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
-	dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
-	dd->ipath_f_clear_tids = ipath_pe_clear_tids;
-	dd->ipath_f_put_tid = ipath_pe_put_tid;
-	dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
-	dd->ipath_f_setextled = ipath_setup_pe_setextled;
-	dd->ipath_f_get_base_info = ipath_pe_get_base_info;
-
-	/* initialize chip-specific variables */
-	dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
-
-	/*
-	 * setup the register offsets, since they are different for each
-	 * chip
-	 */
-	dd->ipath_kregs = &ipath_pe_kregs;
-	dd->ipath_cregs = &ipath_pe_cregs;
-
-	ipath_init_pe_variables();
-}
-
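
A couple of the config-space idioms in the PE-800 code above are worth
spelling out: the negotiated PCIe link width is bits 9:4 of the Link
Status register (the driver warns when it isn't the expected x8), and
the MSI message data word sits at offset 8 or 12 from the MSI capability
depending on the 64-bit address flag, which is why the save and restore
paths both compute the offset from the flags word.  A minimal user-space
sketch of just that decoding; the register values are made up, and only
the bit layouts follow the PCI spec:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t linkstat = 0x1041;	/* hypothetical Link Status value */

	/* Negotiated link width is bits 9:4, as in ipath_setup_pe_config(). */
	unsigned width = (linkstat >> 4) & 0x1f;
	if (width != 8)
		printf("PCIe width %u, performance reduced\n", width);

	/* The MSI data word offset depends on PCI_MSI_FLAGS_64BIT (bit 7
	 * of the flags word), exactly as the save and restore code
	 * computes it. */
	uint16_t control = 0x0081;	/* hypothetical: enabled, 64-bit */
	unsigned data_off = (control & 0x80) ? 12 : 8;
	printf("MSI data word at capability offset %u\n", data_off);
	return 0;
}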
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_qp.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_qp.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_qp.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_qp.c	2006-10-19 12:41:11.000000000 -0400
@@ -35,6 +35,7 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_verbs.h"
+#include "ipath_kernel.h"
 #include "ipath_common.h"
 
 #define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
@@ -287,7 +288,7 @@ void ipath_free_all_qps(struct ipath_qp_
 				free_qpn(qpt, qp->ibqp.qp_num);
 			if (!atomic_dec_and_test(&qp->refcount) ||
 			    !ipath_destroy_qp(&qp->ibqp))
-				_VERBS_INFO("QP memory leak!\n");
+				ipath_dbg(KERN_INFO "QP memory leak!\n");
 			qp = nqp;
 		}
 	}
@@ -373,8 +374,8 @@ void ipath_error_qp(struct ipath_qp *qp)
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
 
-	_VERBS_INFO("QP%d/%d in error state\n",
-		    qp->ibqp.qp_num, qp->remote_qpn);
+	ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
+		  qp->ibqp.qp_num, qp->remote_qpn);
 
 	spin_lock(&dev->pending_lock);
 	/* XXX What if it's already removed by the timeout code? */
@@ -448,19 +449,38 @@ int ipath_modify_qp(struct ib_qp *ibqp, 
 				attr_mask))
 		goto inval;
 
-	if (attr_mask & IB_QP_AV)
+	if (attr_mask & IB_QP_AV) {
 		if (attr->ah_attr.dlid == 0 ||
 		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
 			goto inval;
 
+		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
+		    (attr->ah_attr.grh.sgid_index > 1))
+			goto inval;
+	}
+
 	if (attr_mask & IB_QP_PKEY_INDEX)
-		if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
+		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
 			goto inval;
 
 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
 		if (attr->min_rnr_timer > 31)
 			goto inval;
 
+	if (attr_mask & IB_QP_PORT)
+		if (attr->port_num == 0 ||
+		    attr->port_num > ibqp->device->phys_port_cnt)
+			goto inval;
+
+	if (attr_mask & IB_QP_PATH_MTU)
+		if (attr->path_mtu > IB_MTU_4096)
+			goto inval;
+
+	if (attr_mask & IB_QP_PATH_MIG_STATE)
+		if (attr->path_mig_state != IB_MIG_MIGRATED &&
+		    attr->path_mig_state != IB_MIG_REARM)
+			goto inval;
+
 	switch (new_state) {
 	case IB_QPS_RESET:
 		ipath_reset_qp(qp);
@@ -482,7 +502,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, 
 		qp->remote_qpn = attr->dest_qp_num;
 
 	if (attr_mask & IB_QP_SQ_PSN) {
-		qp->s_next_psn = attr->sq_psn;
+		qp->s_psn = qp->s_next_psn = attr->sq_psn;
 		qp->s_last_psn = qp->s_next_psn - 1;
 	}
 
@@ -511,11 +531,17 @@ int ipath_modify_qp(struct ib_qp *ibqp, 
 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
 		qp->r_min_rnr_timer = attr->min_rnr_timer;
 
+	if (attr_mask & IB_QP_TIMEOUT)
+		qp->timeout = attr->timeout;
+
 	if (attr_mask & IB_QP_QKEY)
 		qp->qkey = attr->qkey;
 
 	qp->state = new_state;
 	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_lock(&dev->n_qps_lock);
+	dev->n_qps_allocated--;
+	spin_unlock(&dev->n_qps_lock);
 
 	ret = 0;
 	goto bail;
@@ -557,7 +583,7 @@ int ipath_query_qp(struct ib_qp *ibqp, s
 	attr->max_dest_rd_atomic = 1;
 	attr->min_rnr_timer = qp->r_min_rnr_timer;
 	attr->port_num = 1;
-	attr->timeout = 0;
+	attr->timeout = qp->timeout;
 	attr->retry_cnt = qp->s_retry_cnt;
 	attr->rnr_retry = qp->s_rnr_retry;
 	attr->alt_port_num = 0;
@@ -569,9 +595,10 @@ int ipath_query_qp(struct ib_qp *ibqp, s
 	init_attr->recv_cq = qp->ibqp.recv_cq;
 	init_attr->srq = qp->ibqp.srq;
 	init_attr->cap = attr->cap;
-	init_attr->sq_sig_type =
-		(qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
-		? IB_SIGNAL_REQ_WR : 0;
+	if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
+		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+	else
+		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
 	init_attr->qp_type = qp->ibqp.qp_type;
 	init_attr->port_num = 1;
 	return 0;
@@ -719,8 +746,10 @@ struct ib_qp *ipath_create_qp(struct ib_
 		qp->s_wq = swq;
 		qp->s_size = init_attr->cap.max_send_wr + 1;
 		qp->s_max_sge = init_attr->cap.max_send_sge;
-		qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
-			1 << IPATH_S_SIGNAL_REQ_WR : 0;
+		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+			qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR;
+		else
+			qp->s_flags = 0;
 		dev = to_idev(ibpd->device);
 		err = ipath_alloc_qpn(&dev->qp_table, qp,
 				      init_attr->qp_type);
@@ -732,11 +761,6 @@ struct ib_qp *ipath_create_qp(struct ib_
 			goto bail;
 		}
 		ipath_reset_qp(qp);
-
-		/* Tell the core driver that the kernel SMA is present. */
-		if (init_attr->qp_type == IB_QPT_SMI)
-			ipath_layer_set_verbs_flags(dev->dd,
-						    IPATH_VERBS_KERNEL_SMA);
 		break;
 
 	default:
@@ -747,7 +771,24 @@ struct ib_qp *ipath_create_qp(struct ib_
 
 	init_attr->cap.max_inline_data = 0;
 
+	spin_lock(&dev->n_qps_lock);
+	if (dev->n_qps_allocated == ib_ipath_max_qps) {
+		spin_unlock(&dev->n_qps_lock);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail_ip;
+	}
+
+	dev->n_qps_allocated++;
+	spin_unlock(&dev->n_qps_lock);
+
 	ret = &qp->ibqp;
+	goto bail;
+
+bail_ip:
+	ipath_free_qp(&dev->qp_table, qp);
+	vfree(swq);
+	vfree(qp->r_rq.wq);
+	kfree(qp);
 
 bail:
 	return ret;
@@ -768,10 +809,6 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
 	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	unsigned long flags;
 
-	/* Tell the core driver that the kernel SMA is gone. */
-	if (qp->ibqp.qp_type == IB_QPT_SMI)
-		ipath_layer_set_verbs_flags(dev->dd, 0);
-
 	spin_lock_irqsave(&qp->r_rq.lock, flags);
 	spin_lock(&qp->s_lock);
 	qp->state = IB_QPS_ERR;
@@ -850,8 +887,8 @@ void ipath_sqerror_qp(struct ipath_qp *q
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 
-	_VERBS_INFO("Send queue error on QP%d/%d: err: %d\n",
-		    qp->ibqp.qp_num, qp->remote_qpn, wc->status);
+	ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
+		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);
 
 	spin_lock(&dev->pending_lock);
 	/* XXX What if it's already removed by the timeout code? */
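
The new attr_mask checks in ipath_modify_qp() above follow the usual
validate-then-commit shape: every attribute the caller flagged is
range-checked up front, bailing to a single inval label, and QP state is
only written once all checks pass.  A stand-alone sketch of that shape
(the mask bits and limits below are illustrative, not the IB verbs
constants):

#include <stdio.h>

enum { ATTR_PORT = 1 << 0, ATTR_MTU = 1 << 1, ATTR_TIMEOUT = 1 << 2 };

struct qp { unsigned port, mtu, timeout; };

static int modify_qp(struct qp *qp, unsigned mask, unsigned port,
		     unsigned mtu, unsigned timeout, unsigned nports)
{
	/* Validate everything first; commit nothing on failure. */
	if ((mask & ATTR_PORT) && (port == 0 || port > nports))
		goto inval;
	if ((mask & ATTR_MTU) && mtu > 4096)
		goto inval;
	if ((mask & ATTR_TIMEOUT) && timeout > 31)
		goto inval;

	if (mask & ATTR_PORT)
		qp->port = port;
	if (mask & ATTR_MTU)
		qp->mtu = mtu;
	if (mask & ATTR_TIMEOUT)
		qp->timeout = timeout;
	return 0;
inval:
	return -1;
}

int main(void)
{
	struct qp qp = { 1, 2048, 14 };

	printf("%d\n", modify_qp(&qp, ATTR_MTU, 0, 8192, 0, 1));	/* -1 */
	printf("%d\n", modify_qp(&qp, ATTR_MTU, 0, 4096, 0, 1));	/*  0 */
	return 0;
}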
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_rc.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_rc.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_rc.c	2006-10-19 12:41:11.000000000 -0400
@@ -32,7 +32,7 @@
  */
 
 #include "ipath_verbs.h"
-#include "ipath_common.h"
+#include "ipath_kernel.h"
 
 /* cut down ridiculously long IB macro names */
 #define OP(x) IB_OPCODE_RC_##x
@@ -201,6 +201,18 @@ int ipath_make_rc_req(struct ipath_qp *q
 	    qp->s_rnr_timeout)
 		goto done;
 
+	/* Limit the number of packets sent without an ACK. */
+	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
+		qp->s_wait_credit = 1;
+		dev->n_rc_stalls++;
+		spin_lock(&dev->pending_lock);
+		if (list_empty(&qp->timerwait))
+			list_add_tail(&qp->timerwait,
+				      &dev->pending[dev->pending_index]);
+		spin_unlock(&dev->pending_lock);
+		goto done;
+	}
+
 	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
 	hwords = 5;
 	bth0 = 0;
@@ -221,7 +233,7 @@ int ipath_make_rc_req(struct ipath_qp *q
 			/* Check if send work queue is empty. */
 			if (qp->s_tail == qp->s_head)
 				goto done;
-			qp->s_psn = wqe->psn = qp->s_next_psn;
+			wqe->psn = qp->s_next_psn;
 			newreq = 1;
 		}
 		/*
@@ -393,12 +405,6 @@ int ipath_make_rc_req(struct ipath_qp *q
 		ss = &qp->s_sge;
 		len = qp->s_len;
 		if (len > pmtu) {
-			/*
-			 * Request an ACK every 1/2 MB to avoid retransmit
-			 * timeouts.
-			 */
-			if (((wqe->length - len) % (512 * 1024)) == 0)
-				bth2 |= 1 << 31;
 			len = pmtu;
 			break;
 		}
@@ -435,12 +441,6 @@ int ipath_make_rc_req(struct ipath_qp *q
 		ss = &qp->s_sge;
 		len = qp->s_len;
 		if (len > pmtu) {
-			/*
-			 * Request an ACK every 1/2 MB to avoid retransmit
-			 * timeouts.
-			 */
-			if (((wqe->length - len) % (512 * 1024)) == 0)
-				bth2 |= 1 << 31;
 			len = pmtu;
 			break;
 		}
@@ -498,6 +498,8 @@ int ipath_make_rc_req(struct ipath_qp *q
 		 */
 		goto done;
 	}
+	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
+		bth2 |= 1 << 31;	/* Request ACK. */
 	qp->s_len -= len;
 	qp->s_hdrwords = hwords;
 	qp->s_cur_sge = ss;
@@ -540,7 +542,7 @@ static void send_rc_ack(struct ipath_qp 
 		lrh0 = IPATH_LRH_GRH;
 	}
 	/* read pkey_index w/o lock (it's atomic) */
-	bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+	bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index);
 	if (qp->r_nak_state)
 		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
 					    (qp->r_nak_state <<
@@ -557,7 +559,7 @@ static void send_rc_ack(struct ipath_qp 
 	hdr.lrh[0] = cpu_to_be16(lrh0);
 	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
 	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
-	hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
+	hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
@@ -737,6 +739,15 @@ bail:
 	return;
 }
 
+static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
+{
+	if (qp->s_wait_credit) {
+		qp->s_wait_credit = 0;
+		tasklet_hi_schedule(&qp->s_task);
+	}
+	qp->s_last_psn = psn;
+}
+
 /**
  * do_rc_ack - process an incoming RC ACK
  * @qp: the QP the ACK came in on
@@ -805,7 +816,7 @@ static int do_rc_ack(struct ipath_qp *qp
 			 * The last valid PSN seen is the previous
 			 * request's.
 			 */
-			qp->s_last_psn = wqe->psn - 1;
+			update_last_psn(qp, wqe->psn - 1);
 			/* Retry this request. */
 			ipath_restart_rc(qp, wqe->psn, &wc);
 			/*
@@ -864,7 +875,7 @@ static int do_rc_ack(struct ipath_qp *qp
 		ipath_get_credit(qp, aeth);
 		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
 		qp->s_retry = qp->s_retry_cnt;
-		qp->s_last_psn = psn;
+		update_last_psn(qp, psn);
 		ret = 1;
 		goto bail;
 
@@ -883,7 +894,7 @@ static int do_rc_ack(struct ipath_qp *qp
 			goto bail;
 
 		/* The last valid PSN is the previous PSN. */
-		qp->s_last_psn = psn - 1;
+		update_last_psn(qp, psn - 1);
 
 		dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
@@ -898,7 +909,7 @@ static int do_rc_ack(struct ipath_qp *qp
 	case 3:		/* NAK */
 		/* The last valid PSN seen is the previous request's. */
 		if (qp->s_last != qp->s_tail)
-			qp->s_last_psn = wqe->psn - 1;
+			update_last_psn(qp, wqe->psn - 1);
 		switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
 			IPATH_AETH_CREDIT_MASK) {
 		case 0:	/* PSN sequence error */
@@ -1071,7 +1082,7 @@ static inline void ipath_rc_rcv_resp(str
 		 * since we don't want s_sge modified.
 		 */
 		qp->s_len -= pmtu;
-		qp->s_last_psn = psn;
+		update_last_psn(qp, psn);
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 		ipath_copy_sge(&qp->s_sge, data, pmtu);
 		goto bail;
@@ -1323,8 +1334,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
 		 * the eager header buffer size to 56 bytes so the last 4
 		 * bytes of the BTH header (PSN) is in the data buffer.
 		 */
-		header_in_data =
-			ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
 		if (header_in_data) {
 			psn = be32_to_cpu(((__be32 *) data)[0]);
 			data += sizeof(__be32);
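
The IPATH_PSN_CREDIT logic added above stalls the send tasklet once
s_psn runs a full window ahead of the last ACKed PSN, requests an ACK
near the window edge, and lets update_last_psn() reschedule the tasklet
when credit comes back.  PSNs are 24-bit with wraparound, so the compare
has to sign-extend the 24-bit difference.  A minimal user-space sketch
of that windowing (the compare is written from scratch rather than being
the driver's ipath_cmp24(), and the window size is illustrative):

#include <stdio.h>
#include <stdint.h>

#define PSN_CREDIT 2048	/* illustrative window, not the driver's value */

/* Compare 24-bit sequence numbers with wraparound: shift the difference
 * into the top 24 bits of an int and arithmetic-shift it back down. */
static int cmp24(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8) >> 8;
}

int main(void)
{
	uint32_t s_psn = 100, s_last_psn = 0xfffff0;	/* just wrapped */

	/* Window check, as in ipath_make_rc_req(). */
	if (cmp24(s_psn, s_last_psn + PSN_CREDIT) > 0)
		printf("stall: wait for ACK credit\n");
	else
		printf("send: %d packets into the window\n",
		       cmp24(s_psn, s_last_psn));
	return 0;
}

With these example values the PSN has just wrapped past 2^24, and the
compare still reports the sender 116 packets into the window.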
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_registers.h linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_registers.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_registers.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_registers.h	2006-10-19 12:41:10.000000000 -0400
@@ -36,8 +36,7 @@
 
 /*
  * This file should only be included by kernel source, and by the diags.  It
- * defines the registers, and their contents, for the InfiniPath HT-400
- * chip.
+ * defines the registers, and their contents, for InfiniPath chips.
  */
 
 /*
@@ -283,10 +282,12 @@
 #define INFINIPATH_XGXS_RESET          0x7ULL
 #define INFINIPATH_XGXS_MDIOADDR_MASK  0xfULL
 #define INFINIPATH_XGXS_MDIOADDR_SHIFT 4
+#define INFINIPATH_XGXS_RX_POL_SHIFT 19
+#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
 
 #define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL	/* 40 bits valid */
 
-/* TID entries (memory), HT400-only */
+/* TID entries (memory), HT-only */
 #define INFINIPATH_RT_VALID 0x8000000000000000ULL
 #define INFINIPATH_RT_ADDR_SHIFT 0
 #define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF
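
The new RX_POL definitions follow this header's convention of
describing every multi-bit field as a SHIFT/MASK pair; here it is one
polarity-invert bit per lane, starting at bit 19.  The read-modify-write
idiom those pairs support looks like this (a small stand-alone sketch;
the register value is made up, only the field layout comes from the
header):

#include <stdio.h>
#include <stdint.h>

#define XGXS_RX_POL_SHIFT 19
#define XGXS_RX_POL_MASK  0xfULL	/* one invert bit per lane */

int main(void)
{
	uint64_t xgxsconfig = 0;	/* hypothetical register value */
	uint64_t lanes = 0x5;		/* invert lanes 0 and 2 */

	/* Set: clear the field, then OR in the new lane-invert bits. */
	xgxsconfig &= ~(XGXS_RX_POL_MASK << XGXS_RX_POL_SHIFT);
	xgxsconfig |= (lanes & XGXS_RX_POL_MASK) << XGXS_RX_POL_SHIFT;

	/* Get: shift down and mask off. */
	printf("rx_pol = 0x%llx\n",
	       (unsigned long long)((xgxsconfig >> XGXS_RX_POL_SHIFT) &
				    XGXS_RX_POL_MASK));
	return 0;
}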
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_ruc.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_ruc.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_ruc.c	2006-10-19 12:41:10.000000000 -0400
@@ -32,6 +32,7 @@
  */
 
 #include "ipath_verbs.h"
+#include "ipath_kernel.h"
 #include "ipath_common.h"
 
 /*
@@ -422,6 +423,15 @@ done:
 		wake_up(&qp->wait);
 }
 
+static int want_buffer(struct ipath_devdata *dd)
+{
+	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+			 dd->ipath_sendctrl);
+
+	return 0;
+}
+
 /**
  * ipath_no_bufs_available - tell the layer driver we need buffers
  * @qp: the QP that caused the problem
@@ -438,7 +448,7 @@ void ipath_no_bufs_available(struct ipat
 		list_add_tail(&qp->piowait, &dev->piowait);
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
 	/*
-	 * Note that as soon as ipath_layer_want_buffer() is called and
+	 * Note that as soon as want_buffer() is called and
 	 * possibly before it returns, ipath_ib_piobufavail()
 	 * could be called.  If we are still in the tasklet function,
 	 * tasklet_hi_schedule() will not call us until the next time
@@ -448,7 +458,7 @@ void ipath_no_bufs_available(struct ipat
 	 */
 	clear_bit(IPATH_S_BUSY, &qp->s_flags);
 	tasklet_unlock(&qp->s_task);
-	ipath_layer_want_buffer(dev->dd);
+	want_buffer(dev->dd);
 	dev->n_piowait++;
 }
 
@@ -563,7 +573,7 @@ u32 ipath_make_grh(struct ipath_ibdev *d
 	hdr->hop_limit = grh->hop_limit;
 	/* The SGID is 32-bit aligned. */
 	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
-	hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd);
+	hdr->sgid.global.interface_id = dev->dd->ipath_guid;
 	hdr->dgid = grh->dgid;
 
 	/* GRH header size in 32-bit words. */
@@ -595,8 +605,7 @@ void ipath_do_ruc_send(unsigned long dat
 	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
 		goto bail;
 
-	if (unlikely(qp->remote_ah_attr.dlid ==
-		     ipath_layer_get_lid(dev->dd))) {
+	if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
 		ipath_ruc_loopback(qp);
 		goto clear;
 	}
@@ -663,8 +672,8 @@ again:
 	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
 	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
 				       SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
-	bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
+	bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
 	bth0 |= extra_bytes << 20;
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_srq.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_srq.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_srq.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_srq.c	2006-10-19 12:41:10.000000000 -0400
@@ -131,26 +131,21 @@ struct ib_srq *ipath_create_srq(struct i
 	u32 sz;
 	struct ib_srq *ret;
 
-	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
 	if (srq_init_attr->attr.max_wr == 0) {
 		ret = ERR_PTR(-EINVAL);
-		goto bail;
+		goto done;
 	}
 
 	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
 	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
 		ret = ERR_PTR(-EINVAL);
-		goto bail;
+		goto done;
 	}
 
 	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
 	if (!srq) {
 		ret = ERR_PTR(-ENOMEM);
-		goto bail;
+		goto done;
 	}
 
 	/*
@@ -161,9 +156,8 @@ struct ib_srq *ipath_create_srq(struct i
 		sizeof(struct ipath_rwqe);
 	srq->rq.wq = vmalloc(srq->rq.size * sz);
 	if (!srq->rq.wq) {
-		kfree(srq);
 		ret = ERR_PTR(-ENOMEM);
-		goto bail;
+		goto bail_srq;
 	}
 
 	/*
@@ -177,9 +171,25 @@ struct ib_srq *ipath_create_srq(struct i
 
 	ret = &srq->ibsrq;
 
+	spin_lock(&dev->n_srqs_lock);
+	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
+		spin_unlock(&dev->n_srqs_lock);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail_wq;
+	}
+
 	dev->n_srqs_allocated++;
+	spin_unlock(&dev->n_srqs_lock);
 
-bail:
+	goto done;
+
+bail_wq:
+	vfree(srq->rq.wq);
+
+bail_srq:
+	kfree(srq);
+
+done:
 	return ret;
 }
 
@@ -289,7 +299,9 @@ int ipath_destroy_srq(struct ib_srq *ibs
 	struct ipath_srq *srq = to_isrq(ibsrq);
 	struct ipath_ibdev *dev = to_idev(ibsrq->device);
 
+	spin_lock(&dev->n_srqs_lock);
 	dev->n_srqs_allocated--;
+	spin_unlock(&dev->n_srqs_lock);
 	vfree(srq->rq.wq);
 	kfree(srq);
 
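
The reordering in ipath_create_srq() matters: the n_srqs_allocated check
now happens under n_srqs_lock and only after every allocation has
succeeded, so two racing creators can no longer both slip past the
limit, and failures unwind in reverse order through the bail_wq/bail_srq
labels.  A user-space sketch of that allocate-count-unwind shape, with a
mutex standing in for the spinlock (names and the limit are
illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_SRQS 2

static pthread_mutex_t n_srqs_lock = PTHREAD_MUTEX_INITIALIZER;
static int n_srqs_allocated;

struct srq { void *wq; };

static struct srq *create_srq(size_t wq_size)
{
	struct srq *srq = malloc(sizeof(*srq));

	if (!srq)
		goto done;
	srq->wq = malloc(wq_size);
	if (!srq->wq)
		goto bail_srq;

	/* Take the count only once everything else has succeeded. */
	pthread_mutex_lock(&n_srqs_lock);
	if (n_srqs_allocated == MAX_SRQS) {
		pthread_mutex_unlock(&n_srqs_lock);
		goto bail_wq;
	}
	n_srqs_allocated++;
	pthread_mutex_unlock(&n_srqs_lock);
	return srq;

bail_wq:	/* unwind in reverse allocation order */
	free(srq->wq);
bail_srq:
	free(srq);
	srq = NULL;
done:
	return srq;
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("srq %d: %s\n", i, create_srq(64) ? "ok" : "limit");
	return 0;
}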
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_stats.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_stats.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_stats.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_stats.c	2006-10-19 12:41:10.000000000 -0400
@@ -271,33 +271,6 @@ void ipath_get_faststats(unsigned long o
 		}
 	}
 
-	if (dd->ipath_nosma_bufs) {
-		dd->ipath_nosma_secs += 5;
-		if (dd->ipath_nosma_secs >= 30) {
-			ipath_cdbg(SMA, "No SMA bufs avail %u seconds; "
-				   "cancelling pending sends\n",
-				   dd->ipath_nosma_secs);
-			/*
-			 * issue an abort as well, in case we have a packet
-			 * stuck in launch fifo.  This could corrupt an
-			 * outgoing user packet in the worst case,
-			 * but this is pretty catastrophic anyway.
-			 */
-			ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-					 INFINIPATH_S_ABORT);
-			ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
-					     dd->ipath_piobcnt2k +
-					     dd->ipath_piobcnt4k -
-					     dd->ipath_lastport_piobuf);
-			/* start again, if necessary */
-			dd->ipath_nosma_secs = 0;
-		} else
-			ipath_cdbg(SMA, "No SMA bufs avail %u tries, "
-				   "after %u seconds\n",
-				   dd->ipath_nosma_bufs,
-				   dd->ipath_nosma_secs);
-	}
-
 done:
 	mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
 }
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_sysfs.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_sysfs.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_sysfs.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_sysfs.c	2006-10-19 12:41:10.000000000 -0400
@@ -35,7 +35,6 @@
 #include <linux/pci.h>
 
 #include "ipath_kernel.h"
-#include "ipath_layer.h"
 #include "ipath_common.h"
 
 /**
@@ -76,7 +75,7 @@ bail:
 static ssize_t show_version(struct device_driver *dev, char *buf)
 {
 	/* The string printed here is already newline-terminated. */
-	return scnprintf(buf, PAGE_SIZE, "%s", ipath_core_version);
+	return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version);
 }
 
 static ssize_t show_num_units(struct device_driver *dev, char *buf)
@@ -108,8 +107,8 @@ static const char *ipath_status_str[] = 
 	"Initted",
 	"Disabled",
 	"Admin_Disabled",
-	"OIB_SMA",
-	"SMA",
+	"", /* This used to be the old "OIB_SMA" status. */
+	"", /* This used to be the old "SMA" status. */
 	"Present",
 	"IB_link_up",
 	"IB_configured",
@@ -227,7 +226,6 @@ static ssize_t store_mlid(struct device 
 	unit = dd->ipath_unit;
 
 	dd->ipath_mlid = mlid;
-	ipath_layer_intr(dd, IPATH_LAYER_INT_BCAST);
 
 	goto bail;
 invalid:
@@ -467,7 +465,7 @@ static ssize_t store_link_state(struct d
 	if (ret < 0)
 		goto invalid;
 
-	r = ipath_layer_set_linkstate(dd, state);
+	r = ipath_set_linkstate(dd, state);
 	if (r < 0) {
 		ret = r;
 		goto bail;
@@ -502,7 +500,7 @@ static ssize_t store_mtu(struct device *
 	if (ret < 0)
 		goto invalid;
 
-	r = ipath_layer_set_mtu(dd, mtu);
+	r = ipath_set_mtu(dd, mtu);
 	if (r < 0)
 		ret = r;
 
@@ -563,6 +561,33 @@ bail:
 	return ret;
 }
 
+static ssize_t store_rx_pol_inv(struct device *dev,
+			  struct device_attribute *attr,
+			  const char *buf,
+			  size_t count)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret, r;
+	u16 val;
+
+	ret = ipath_parse_ushort(buf, &val);
+	if (ret < 0)
+		goto invalid;
+
+	r = ipath_set_rx_pol_inv(dd, val);
+	if (r < 0) {
+		ret = r;
+		goto bail;
+	}
+
+	goto bail;
+invalid:
+	ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n");
+bail:
+	return ret;
+}
+
+
 static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
 static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
 
@@ -589,6 +614,7 @@ static DEVICE_ATTR(status, S_IRUGO, show
 static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
 static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
+static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
 
 static struct attribute *dev_attributes[] = {
 	&dev_attr_guid.attr,
@@ -603,6 +629,7 @@ static struct attribute *dev_attributes[
 	&dev_attr_boardversion.attr,
 	&dev_attr_unit.attr,
 	&dev_attr_enabled.attr,
+	&dev_attr_rx_pol_inv.attr,
 	NULL
 };
 
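
store_rx_pol_inv() above follows the same store-method shape as the rest
of this file: parse the buffer into a bounded integer, hand it to a chip
helper, and return either the error or the byte count so sysfs knows the
write was consumed.  ipath_parse_ushort() itself is defined elsewhere in
the driver; the sketch below is a from-scratch stand-in showing the
intended contract (characters consumed on success, -EINVAL on garbage or
overflow):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_ushort(const char *str, unsigned short *valp)
{
	char *end;
	unsigned long val = strtoul(str, &end, 0);

	if (end == str || val > 0xffff)
		return -EINVAL;
	*valp = (unsigned short)val;
	return end - str;
}

int main(void)
{
	unsigned short v;

	printf("%d\n", parse_ushort("5\n", &v));	/* 1, v == 5  */
	printf("%d\n", parse_ushort("0x1f", &v));	/* 4, v == 31 */
	printf("%d\n", parse_ushort("junk", &v));	/* -EINVAL    */
	return 0;
}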
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_uc.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_uc.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_uc.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_uc.c	2006-10-19 12:41:10.000000000 -0400
@@ -32,7 +32,7 @@
  */
 
 #include "ipath_verbs.h"
-#include "ipath_common.h"
+#include "ipath_kernel.h"
 
 /* cut down ridiculously long IB macro names */
 #define OP(x) IB_OPCODE_UC_##x
@@ -261,8 +261,7 @@ void ipath_uc_rcv(struct ipath_ibdev *de
 		 * size to 56 bytes so the last 4 bytes of
 		 * the BTH header (PSN) is in the data buffer.
 		 */
-		header_in_data =
-			ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
 		if (header_in_data) {
 			psn = be32_to_cpu(((__be32 *) data)[0]);
 			data += sizeof(__be32);
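
The header_in_data test that keeps recurring (here and in the RC and UD
receive paths) covers the case where the eager header buffer was sized
to 56 bytes, so the tail of the IB header -- for RC/UC the BTH PSN word
-- lands at the front of the data buffer and has to be peeled off before
the payload.  A small sketch of that peel-off under the stated
assumption (the buffer contents are made up):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() stands in for be32_to_cpu() */

int main(void)
{
	/* Hypothetical data buffer when the header spilled: the last
	 * header dword (the BTH PSN) comes first, then the payload. */
	uint32_t buf[2];
	const uint8_t *data = (const uint8_t *)buf;
	int rcvhdrentsize = 16;		/* dwords per header entry */
	int header_in_data = (rcvhdrentsize == 16);
	uint32_t psn = 0;		/* else taken from ohdr->bth[2] */

	buf[0] = htonl(0x00abcdef);
	buf[1] = htonl(0xdeadbeef);

	if (header_in_data) {
		psn = ntohl(*(const uint32_t *)data);
		data += sizeof(uint32_t);	/* payload starts here */
	}
	printf("psn = 0x%06x, first payload word = 0x%08x\n",
	       psn & 0xffffff, ntohl(*(const uint32_t *)data));
	return 0;
}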
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_ud.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_ud.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_ud.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_ud.c	2006-10-19 12:41:10.000000000 -0400
@@ -34,6 +34,7 @@
 #include <rdma/ib_smi.h>
 
 #include "ipath_verbs.h"
+#include "ipath_kernel.h"
 #include "ipath_common.h"
 
 /**
@@ -276,7 +277,7 @@ int ipath_post_ud_send(struct ipath_qp *
 		ss.num_sge++;
 	}
 	/* Check for invalid packet size. */
-	if (len > ipath_layer_get_ibmtu(dev->dd)) {
+	if (len > dev->dd->ipath_ibmtu) {
 		ret = -EINVAL;
 		goto bail;
 	}
@@ -298,7 +299,7 @@ int ipath_post_ud_send(struct ipath_qp *
 		dev->n_unicast_xmit++;
 		lid = ah_attr->dlid &
 			~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
-		if (unlikely(lid == ipath_layer_get_lid(dev->dd))) {
+		if (unlikely(lid == dev->dd->ipath_lid)) {
 			/*
 			 * Pass in an uninitialized ib_wc to save stack
 			 * space.
@@ -327,7 +328,7 @@ int ipath_post_ud_send(struct ipath_qp *
 		qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
 			dev->gid_prefix;
 		qp->s_hdr.u.l.grh.sgid.global.interface_id =
-			ipath_layer_get_guid(dev->dd);
+			dev->dd->ipath_guid;
 		qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
 		/*
 		 * Don't worry about sending to locally attached multicast
@@ -357,7 +358,7 @@ int ipath_post_ud_send(struct ipath_qp *
 	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
 	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);	/* DEST LID */
 	qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
-	lid = ipath_layer_get_lid(dev->dd);
+	lid = dev->dd->ipath_lid;
 	if (lid) {
 		lid |= ah_attr->src_path_bits &
 			((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
@@ -368,7 +369,7 @@ int ipath_post_ud_send(struct ipath_qp *
 		bth0 |= 1 << 23;
 	bth0 |= extra_bytes << 20;
 	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
-		ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+		ipath_get_pkey(dev->dd, qp->s_pkey_index);
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	/*
 	 * Use the multicast QP if the destination LID is a multicast LID.
@@ -458,8 +459,7 @@ void ipath_ud_rcv(struct ipath_ibdev *de
 		 * the eager header buffer size to 56 bytes so the last 12
 		 * bytes of the IB header is in the data buffer.
 		 */
-		header_in_data =
-			ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
 		if (header_in_data) {
 			qkey = be32_to_cpu(((__be32 *) data)[1]);
 			src_qp = be32_to_cpu(((__be32 *) data)[2]);
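
The LID masking in the UD send path encodes the LMC convention: a port
with LMC n answers to 2^n consecutive LIDs, whose low n bits select the
path, so masking them off yields the base LID that is actually compared
against our own for the loopback check.  A quick sketch of that
computation (the values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned lmc = 2;		/* port spans 4 LIDs */
	uint16_t local_lid = 0x40;	/* base LID of the local port */
	uint16_t dlid = 0x42;		/* destination, path bits = 2 */

	/* Mask off the low LMC bits, as ipath_post_ud_send() does. */
	uint16_t base = dlid & ~((1 << (lmc & 7)) - 1);

	printf("dlid 0x%x -> base 0x%x: %s\n", dlid, base,
	       base == local_lid ? "loopback" : "send on the wire");
	return 0;
}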
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_verbs.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_verbs.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_verbs.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_verbs.c	2006-10-19 12:41:11.000000000 -0400
@@ -33,14 +33,18 @@
 
 #include <rdma/ib_mad.h>
 #include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
 #include <linux/utsname.h>
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
-/* Not static, because we don't want the compiler removing it */
-const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
+#ifdef __x86_64__
+void *memcpy_cachebypass(void *, const void *, __kernel_size_t);
+#else
+#define memcpy_cachebypass(a,b,c) memcpy((a),(b),(c))
+#endif
 
 static unsigned int ib_ipath_qp_table_size = 251;
 module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
@@ -52,10 +56,6 @@ module_param_named(lkey_table_size, ib_i
 MODULE_PARM_DESC(lkey_table_size,
 		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
 
-unsigned int ib_ipath_debug;	/* debug mask */
-module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "Verbs debug mask");
-
 static unsigned int ib_ipath_max_pds = 0xFFFF;
 module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(max_pds,
@@ -79,6 +79,10 @@ module_param_named(max_qp_wrs, ib_ipath_
 		   S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
 
+unsigned int ib_ipath_max_qps = 16384;
+module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
+
 unsigned int ib_ipath_max_sges = 0x60;
 module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
@@ -109,9 +113,9 @@ module_param_named(max_srq_wrs, ib_ipath
 		   uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("QLogic <support@pathscale.com>");
-MODULE_DESCRIPTION("QLogic InfiniPath driver");
+static unsigned int ib_ipath_disable_sma;
+module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(disable_sma, "Disable the SMA");
 
 const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = 0,
@@ -125,6 +129,16 @@ const int ib_ipath_state_ops[IB_QPS_ERR 
 	[IB_QPS_ERR] = 0,
 };
 
+struct ipath_ucontext {
+	struct ib_ucontext ibucontext;
+};
+
+static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
+						  *ibucontext)
+{
+	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
+}
+
 /*
  * Translate ib_wr_opcode into ib_wc_opcode.
  */
@@ -159,7 +173,7 @@ void ipath_copy_sge(struct ipath_sge_sta
 		BUG_ON(len == 0);
 		if (len > length)
 			len = length;
-		memcpy(sge->vaddr, data, len);
+		memcpy_cachebypass(sge->vaddr, data, len);
 		sge->vaddr += len;
 		sge->length -= len;
 		sge->sge_length -= len;
@@ -377,6 +391,9 @@ static void ipath_qp_rcv(struct ipath_ib
 	switch (qp->ibqp.qp_type) {
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
+		if (ib_ipath_disable_sma)
+			break;
+		/* FALLTHROUGH */
 	case IB_QPT_UD:
 		ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
 		break;
@@ -395,7 +412,7 @@ static void ipath_qp_rcv(struct ipath_ib
 }
 
 /**
- * ipath_ib_rcv - process and incoming packet
+ * ipath_ib_rcv - process an incoming packet
  * @arg: the device pointer
  * @rhdr: the header of the packet
  * @data: the packet data
@@ -404,9 +421,9 @@ static void ipath_qp_rcv(struct ipath_ib
  * This is called from ipath_kreceive() to process an incoming packet at
  * interrupt level. Tlen is the length of the header + data + CRC in bytes.
  */
-static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen)
+void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
+		  u32 tlen)
 {
-	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ipath_ib_header *hdr = rhdr;
 	struct ipath_other_headers *ohdr;
 	struct ipath_qp *qp;
@@ -427,7 +444,7 @@ static void ipath_ib_rcv(void *arg, void
 	lid = be16_to_cpu(hdr->lrh[1]);
 	if (lid < IPATH_MULTICAST_LID_BASE) {
 		lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
-		if (unlikely(lid != ipath_layer_get_lid(dev->dd))) {
+		if (unlikely(lid != dev->dd->ipath_lid)) {
 			dev->rcv_errors++;
 			goto bail;
 		}
@@ -495,9 +512,8 @@ bail:;
  * This is called from ipath_do_rcv_timer() at interrupt level to check for
  * QPs which need retransmits and to collect performance numbers.
  */
-static void ipath_ib_timer(void *arg)
+void ipath_ib_timer(struct ipath_ibdev *dev)
 {
-	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ipath_qp *resend = NULL;
 	struct list_head *last;
 	struct ipath_qp *qp;
@@ -539,19 +555,19 @@ static void ipath_ib_timer(void *arg)
 	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
 	    --dev->pma_sample_start == 0) {
 		dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
-		ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword,
-					      &dev->ipath_rword,
-					      &dev->ipath_spkts,
-					      &dev->ipath_rpkts,
-					      &dev->ipath_xmit_wait);
+		ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
+					&dev->ipath_rword,
+					&dev->ipath_spkts,
+					&dev->ipath_rpkts,
+					&dev->ipath_xmit_wait);
 	}
 	if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
 		if (dev->pma_sample_interval == 0) {
 			u64 ta, tb, tc, td, te;
 
 			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
-			ipath_layer_snapshot_counters(dev->dd, &ta, &tb,
-						      &tc, &td, &te);
+			ipath_snapshot_counters(dev->dd, &ta, &tb,
+						&tc, &td, &te);
 
 			dev->ipath_sword = ta - dev->ipath_sword;
 			dev->ipath_rword = tb - dev->ipath_rword;
@@ -581,6 +597,362 @@ static void ipath_ib_timer(void *arg)
 	}
 }
 
+static void update_sge(struct ipath_sge_state *ss, u32 length)
+{
+	struct ipath_sge *sge = &ss->sge;
+
+	sge->vaddr += length;
+	sge->length -= length;
+	sge->sge_length -= length;
+	if (sge->sge_length == 0) {
+		if (--ss->num_sge)
+			*sge = *ss->sg_list++;
+	} else if (sge->length == 0 && sge->mr != NULL) {
+		if (++sge->n >= IPATH_SEGSZ) {
+			if (++sge->m >= sge->mr->mapsz)
+				return;
+			sge->n = 0;
+		}
+		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+	}
+}
+
+#ifdef __LITTLE_ENDIAN
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+	return data >> shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+	return data << shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
+	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+	return data;
+}
+#else
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+	return data << shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+	return data >> shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
+	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+	return data;
+}
+#endif
+
+static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
+		    u32 length)
+{
+	u32 extra = 0;
+	u32 data = 0;
+	u32 last;
+
+	while (1) {
+		u32 len = ss->sge.length;
+		u32 off;
+
+		BUG_ON(len == 0);
+		if (len > length)
+			len = length;
+		if (len > ss->sge.sge_length)
+			len = ss->sge.sge_length;
+		/* If the source address is not aligned, try to align it. */
+		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
+		if (off) {
+			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
+					    ~(sizeof(u32) - 1));
+			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
+			u32 y;
+
+			y = sizeof(u32) - off;
+			if (len > y)
+				len = y;
+			if (len + extra >= sizeof(u32)) {
+				data |= set_upper_bits(v, extra *
+						       BITS_PER_BYTE);
+				len = sizeof(u32) - extra;
+				if (len == length) {
+					last = data;
+					break;
+				}
+				__raw_writel(data, piobuf);
+				piobuf++;
+				extra = 0;
+				data = 0;
+			} else {
+				/* Clear unused upper bytes */
+				data |= clear_upper_bytes(v, len, extra);
+				if (len == length) {
+					last = data;
+					break;
+				}
+				extra += len;
+			}
+		} else if (extra) {
+			/* Source address is aligned. */
+			u32 *addr = (u32 *) ss->sge.vaddr;
+			int shift = extra * BITS_PER_BYTE;
+			int ushift = 32 - shift;
+			u32 l = len;
+
+			while (l >= sizeof(u32)) {
+				u32 v = *addr;
+
+				data |= set_upper_bits(v, shift);
+				__raw_writel(data, piobuf);
+				data = get_upper_bits(v, ushift);
+				piobuf++;
+				addr++;
+				l -= sizeof(u32);
+			}
+			/*
+			 * We still have 'extra' number of bytes leftover.
+			 */
+			if (l) {
+				u32 v = *addr;
+
+				if (l + extra >= sizeof(u32)) {
+					data |= set_upper_bits(v, shift);
+					len -= l + extra - sizeof(u32);
+					if (len == length) {
+						last = data;
+						break;
+					}
+					__raw_writel(data, piobuf);
+					piobuf++;
+					extra = 0;
+					data = 0;
+				} else {
+					/* Clear unused upper bytes */
+					data |= clear_upper_bytes(v, l,
+								  extra);
+					if (len == length) {
+						last = data;
+						break;
+					}
+					extra += l;
+				}
+			} else if (len == length) {
+				last = data;
+				break;
+			}
+		} else if (len == length) {
+			u32 w;
+
+			/*
+			 * Need to round up for the last dword in the
+			 * packet.
+			 */
+			w = (len + 3) >> 2;
+			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+			piobuf += w - 1;
+			last = ((u32 *) ss->sge.vaddr)[w - 1];
+			break;
+		} else {
+			u32 w = len >> 2;
+
+			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
+			piobuf += w;
+
+			extra = len & (sizeof(u32) - 1);
+			if (extra) {
+				u32 v = ((u32 *) ss->sge.vaddr)[w];
+
+				/* Clear unused upper bytes */
+				data = clear_upper_bytes(v, extra, 0);
+			}
+		}
+		update_sge(ss, len);
+		length -= len;
+	}
+	/* Update address before sending packet. */
+	update_sge(ss, length);
+	/* must flush early everything before trigger word */
+	ipath_flush_wc();
+	__raw_writel(last, piobuf);
+	/* be sure trigger word is written */
+	ipath_flush_wc();
+}
+
+/**
+ * ipath_verbs_send - send a packet
+ * @dd: the infinipath device
+ * @hdrwords: the number of words in the header
+ * @hdr: the packet header
+ * @len: the length of the packet in bytes
+ * @ss: the SGE to send
+ */
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
+{
+	u32 __iomem *piobuf;
+	u32 plen;
+	int ret;
+
+	/* +1 is for the qword padding of pbc */
+	plen = hdrwords + ((len + 3) >> 2) + 1;
+	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
+		ipath_dbg("packet len 0x%x too long, failing\n", plen);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/* Get a PIO buffer to use. */
+	piobuf = ipath_getpiobuf(dd, NULL);
+	if (unlikely(piobuf == NULL)) {
+		ret = -EBUSY;
+		goto bail;
+	}
+
+	/*
+	 * Write len to control qword, no flags.
+	 * We have to flush after the PBC for correctness on some cpus
+	 * or WC buffer can be written out of order.
+	 */
+	writeq(plen, piobuf);
+	ipath_flush_wc();
+	piobuf += 2;
+	if (len == 0) {
+		/*
+		 * If there is just the header portion, must flush before
+		 * writing last word of header for correctness, and after
+		 * the last header word (trigger word).
+		 */
+		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
+		ipath_flush_wc();
+		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
+		ipath_flush_wc();
+		ret = 0;
+		goto bail;
+	}
+
+	__iowrite32_copy(piobuf, hdr, hdrwords);
+	piobuf += hdrwords;
+
+	/* The common case is aligned and contained in one segment. */
+	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
+		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
+		u32 w;
+		u32 *addr = (u32 *) ss->sge.vaddr;
+
+		/* Update address before sending packet. */
+		update_sge(ss, len);
+		/* Need to round up for the last dword in the packet. */
+		w = (len + 3) >> 2;
+		__iowrite32_copy(piobuf, addr, w - 1);
+		/* must flush early everything before trigger word */
+		ipath_flush_wc();
+		__raw_writel(addr[w - 1], piobuf + w - 1);
+		/* be sure trigger word is written */
+		ipath_flush_wc();
+		ret = 0;
+		goto bail;
+	}
+	copy_io(piobuf, ss, len);
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+			    u64 *rwords, u64 *spkts, u64 *rpkts,
+			    u64 *xmit_wait)
+{
+	int ret;
+
+	if (!(dd->ipath_flags & IPATH_INITTED)) {
+		/* no hardware, freeze, etc. */
+		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+		ret = -EINVAL;
+		goto bail;
+	}
+	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
+/**
+ * ipath_get_counters - get various chip counters
+ * @dd: the infinipath device
+ * @cntrs: counters are placed here
+ *
+ * Return the counters needed by recv_pma_get_portcounters().
+ */
+int ipath_get_counters(struct ipath_devdata *dd,
+		       struct ipath_verbs_counters *cntrs)
+{
+	int ret;
+
+	if (!(dd->ipath_flags & IPATH_INITTED)) {
+		/* no hardware, freeze, etc. */
+		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+		ret = -EINVAL;
+		goto bail;
+	}
+	cntrs->symbol_error_counter =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
+	cntrs->link_error_recovery_counter =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
+	/*
+	 * The link downed counter counts when the other side downs the
+	 * connection.  We add in the number of times we downed the link
+	 * due to local link integrity errors to compensate.
+	 */
+	cntrs->link_downed_counter =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
+	cntrs->port_rcv_errors =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
+	cntrs->port_rcv_remphys_errors =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
+	cntrs->port_xmit_discards =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
+	cntrs->port_xmit_data =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+	cntrs->port_rcv_data =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+	cntrs->port_xmit_packets =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+	cntrs->port_rcv_packets =
+		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
+	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
+
+	ret = 0;
+
+bail:
+	return ret;
+}
+
 /**
  * ipath_ib_piobufavail - callback when a PIO buffer is available
  * @arg: the device pointer
@@ -591,9 +963,8 @@ static void ipath_ib_timer(void *arg)
  * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
  * return zero).
  */
-static int ipath_ib_piobufavail(void *arg)
+int ipath_ib_piobufavail(struct ipath_ibdev *dev)
 {
-	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ipath_qp *qp;
 	unsigned long flags;
 
@@ -624,14 +995,14 @@ static int ipath_query_device(struct ib_
 		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
 		IB_DEVICE_SYS_IMAGE_GUID;
 	props->page_size_cap = PAGE_SIZE;
-	props->vendor_id = ipath_layer_get_vendorid(dev->dd);
-	props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
-	props->hw_ver = ipath_layer_get_pcirev(dev->dd);
+	props->vendor_id = dev->dd->ipath_vendorid;
+	props->vendor_part_id = dev->dd->ipath_deviceid;
+	props->hw_ver = dev->dd->ipath_pcirev;
 
 	props->sys_image_guid = dev->sys_image_guid;
 
 	props->max_mr_size = ~0ull;
-	props->max_qp = dev->qp_table.max;
+	props->max_qp = ib_ipath_max_qps;
 	props->max_qp_wr = ib_ipath_max_qp_wrs;
 	props->max_sge = ib_ipath_max_sges;
 	props->max_cq = ib_ipath_max_cqs;
@@ -647,7 +1018,7 @@ static int ipath_query_device(struct ib_
 	props->max_srq_sge = ib_ipath_max_srq_sges;
 	/* props->local_ca_ack_delay */
 	props->atomic_cap = IB_ATOMIC_HCA;
-	props->max_pkeys = ipath_layer_get_npkeys(dev->dd);
+	props->max_pkeys = ipath_get_npkeys(dev->dd);
 	props->max_mcast_grp = ib_ipath_max_mcast_grps;
 	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@@ -672,12 +1043,17 @@ const u8 ipath_cvt_physportstate[16] = {
 	[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
 };
 
+u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
+{
+	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
+}
+
 static int ipath_query_port(struct ib_device *ibdev,
 			    u8 port, struct ib_port_attr *props)
 {
 	struct ipath_ibdev *dev = to_idev(ibdev);
 	enum ib_mtu mtu;
-	u16 lid = ipath_layer_get_lid(dev->dd);
+	u16 lid = dev->dd->ipath_lid;
 	u64 ibcstat;
 
 	memset(props, 0, sizeof(*props));
@@ -685,16 +1061,16 @@ static int ipath_query_port(struct ib_de
 	props->lmc = dev->mkeyprot_resv_lmc & 7;
 	props->sm_lid = dev->sm_lid;
 	props->sm_sl = dev->sm_sl;
-	ibcstat = ipath_layer_get_lastibcstat(dev->dd);
+	ibcstat = dev->dd->ipath_lastibcstat;
 	props->state = ((ibcstat >> 4) & 0x3) + 1;
 	/* See phys_state_show() */
 	props->phys_state = ipath_cvt_physportstate[
-		ipath_layer_get_lastibcstat(dev->dd) & 0xf];
+		dev->dd->ipath_lastibcstat & 0xf];
 	props->port_cap_flags = dev->port_cap_flags;
 	props->gid_tbl_len = 1;
 	props->max_msg_sz = 0x80000000;
-	props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd);
-	props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) -
+	props->pkey_tbl_len = ipath_get_npkeys(dev->dd);
+	props->bad_pkey_cntr = ipath_get_cr_errpkey(dev->dd) -
 		dev->z_pkey_violations;
 	props->qkey_viol_cntr = dev->qkey_violations;
 	props->active_width = IB_WIDTH_4X;
@@ -704,7 +1080,7 @@ static int ipath_query_port(struct ib_de
 	props->init_type_reply = 0;
 
 	props->max_mtu = IB_MTU_4096;
-	switch (ipath_layer_get_ibmtu(dev->dd)) {
+	switch (dev->dd->ipath_ibmtu) {
 	case 4096:
 		mtu = IB_MTU_4096;
 		break;
@@ -763,7 +1139,7 @@ static int ipath_modify_port(struct ib_d
 	dev->port_cap_flags |= props->set_port_cap_mask;
 	dev->port_cap_flags &= ~props->clr_port_cap_mask;
 	if (port_modify_mask & IB_PORT_SHUTDOWN)
-		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
+		ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
 	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
 		dev->qkey_violations = 0;
 	return 0;
@@ -780,7 +1156,7 @@ static int ipath_query_gid(struct ib_dev
 		goto bail;
 	}
 	gid->global.subnet_prefix = dev->gid_prefix;
-	gid->global.interface_id = ipath_layer_get_guid(dev->dd);
+	gid->global.interface_id = dev->dd->ipath_guid;
 
 	ret = 0;
 
@@ -803,18 +1179,22 @@ static struct ib_pd *ipath_alloc_pd(stru
 	 * we allow allocations of more than we report for this value.
 	 */
 
-	if (dev->n_pds_allocated == ib_ipath_max_pds) {
+	pd = kmalloc(sizeof *pd, GFP_KERNEL);
+	if (!pd) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
-	pd = kmalloc(sizeof *pd, GFP_KERNEL);
-	if (!pd) {
+	spin_lock(&dev->n_pds_lock);
+	if (dev->n_pds_allocated == ib_ipath_max_pds) {
+		spin_unlock(&dev->n_pds_lock);
+		kfree(pd);
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
 	dev->n_pds_allocated++;
+	spin_unlock(&dev->n_pds_lock);
 
 	/* ib_alloc_pd() will initialize pd->ibpd. */
 	pd->user = udata != NULL;
@@ -830,7 +1210,9 @@ static int ipath_dealloc_pd(struct ib_pd
 	struct ipath_pd *pd = to_ipd(ibpd);
 	struct ipath_ibdev *dev = to_idev(ibpd->device);
 
+	spin_lock(&dev->n_pds_lock);
 	dev->n_pds_allocated--;
+	spin_unlock(&dev->n_pds_lock);
 
 	kfree(pd);
 
@@ -851,11 +1233,6 @@ static struct ib_ah *ipath_create_ah(str
 	struct ib_ah *ret;
 	struct ipath_ibdev *dev = to_idev(pd->device);
 
-	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
 	/* A multicast address requires a GRH (see ch. 8.4.1). */
 	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
 	    ah_attr->dlid != IPATH_PERMISSIVE_LID &&
@@ -881,7 +1258,16 @@ static struct ib_ah *ipath_create_ah(str
 		goto bail;
 	}
 
+	spin_lock(&dev->n_ahs_lock);
+	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
+		spin_unlock(&dev->n_ahs_lock);
+		kfree(ah);
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
 	dev->n_ahs_allocated++;
+	spin_unlock(&dev->n_ahs_lock);
 
 	/* ib_create_ah() will initialize ah->ibah. */
 	ah->attr = *ah_attr;
@@ -903,7 +1289,9 @@ static int ipath_destroy_ah(struct ib_ah
 	struct ipath_ibdev *dev = to_idev(ibah->device);
 	struct ipath_ah *ah = to_iah(ibah);
 
+	spin_lock(&dev->n_ahs_lock);
 	dev->n_ahs_allocated--;
+	spin_unlock(&dev->n_ahs_lock);
 
 	kfree(ah);
 
@@ -919,25 +1307,50 @@ static int ipath_query_ah(struct ib_ah *
 	return 0;
 }
 
+/**
+ * ipath_get_npkeys - return the size of the PKEY table for port 0
+ * @dd: the infinipath device
+ */
+unsigned ipath_get_npkeys(struct ipath_devdata *dd)
+{
+	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
+}
+
+/**
+ * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
+ * @dd: the infinipath device
+ * @index: the PKEY index
+ */
+unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
+{
+	unsigned ret;
+
+	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
+		ret = 0;
+	else
+		ret = dd->ipath_pd[0]->port_pkeys[index];
+
+	return ret;
+}
+
 static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			    u16 *pkey)
 {
 	struct ipath_ibdev *dev = to_idev(ibdev);
 	int ret;
 
-	if (index >= ipath_layer_get_npkeys(dev->dd)) {
+	if (index >= ipath_get_npkeys(dev->dd)) {
 		ret = -EINVAL;
 		goto bail;
 	}
 
-	*pkey = ipath_layer_get_pkey(dev->dd, index);
+	*pkey = ipath_get_pkey(dev->dd, index);
 	ret = 0;
 
 bail:
 	return ret;
 }
 
-
 /**
  * ipath_alloc_ucontext - allocate a ucontext
  * @ibdev: the infiniband device
@@ -970,26 +1383,91 @@ static int ipath_dealloc_ucontext(struct
 
 static int ipath_verbs_register_sysfs(struct ib_device *dev);
 
+static void __verbs_timer(unsigned long arg)
+{
+	struct ipath_devdata *dd = (struct ipath_devdata *) arg;
+
+	/*
+	 * If port 0 receive packet interrupts are not available, or
+	 * can be missed, poll the receive queue
+	 */
+	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
+		ipath_kreceive(dd);
+
+	/* Handle verbs layer timeouts. */
+	ipath_ib_timer(dd->verbs_dev);
+
+	mod_timer(&dd->verbs_timer, jiffies + 1);
+}
+
+static int enable_timer(struct ipath_devdata *dd)
+{
+	/*
+	 * Early chips had a design flaw where the chip and kernel idea
+	 * of the tail register don't always agree, and therefore we won't
+	 * get an interrupt on the next packet received.
+	 * If the board supports per packet receive interrupts, use it.
+	 * Otherwise, the timer function periodically checks for packets
+	 * to cover this case.
+	 * Either way, the timer is needed for verbs layer related
+	 * processing.
+	 */
+	if (dd->ipath_flags & IPATH_GPIO_INTR) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
+				 0x2074076542310ULL);
+		/* Enable GPIO bit 2 interrupt */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 (u64) (1 << 2));
+	}
+
+	init_timer(&dd->verbs_timer);
+	dd->verbs_timer.function = __verbs_timer;
+	dd->verbs_timer.data = (unsigned long)dd;
+	dd->verbs_timer.expires = jiffies + 1;
+	add_timer(&dd->verbs_timer);
+
+	return 0;
+}
+
+static int disable_timer(struct ipath_devdata *dd)
+{
+	/* Disable GPIO bit 2 interrupt */
+	if (dd->ipath_flags & IPATH_GPIO_INTR)
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
+
+	del_timer_sync(&dd->verbs_timer);
+
+	return 0;
+}
+
 /**
  * ipath_register_ib_device - register our device with the infiniband core
- * @unit: the device number to register
  * @dd: the device data structure
  * Return the allocated ipath_ibdev pointer or NULL on error.
  */
-static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
+int ipath_register_ib_device(struct ipath_devdata *dd)
 {
-	struct ipath_layer_counters cntrs;
+	struct ipath_verbs_counters cntrs;
 	struct ipath_ibdev *idev;
 	struct ib_device *dev;
 	int ret;
 
 	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
-	if (idev == NULL)
+	if (idev == NULL) {
+		ret = -ENOMEM;
 		goto bail;
+	}
 
 	dev = &idev->ibdev;
 
 	/* Only need to initialize non-zero fields. */
+	spin_lock_init(&idev->n_pds_lock);
+	spin_lock_init(&idev->n_ahs_lock);
+	spin_lock_init(&idev->n_cqs_lock);
+	spin_lock_init(&idev->n_qps_lock);
+	spin_lock_init(&idev->n_srqs_lock);
+	spin_lock_init(&idev->n_mcast_grps_lock);
+
 	spin_lock_init(&idev->qp_table.lock);
 	spin_lock_init(&idev->lk_table.lock);
 	idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
@@ -1030,7 +1508,7 @@ static void *ipath_register_ib_device(in
 	idev->link_width_enabled = 3;	/* 1x or 4x */
 
 	/* Snapshot current HW counters to "clear" them. */
-	ipath_layer_get_counters(dd, &cntrs);
+	ipath_get_counters(dd, &cntrs);
 	idev->z_symbol_error_counter = cntrs.symbol_error_counter;
 	idev->z_link_error_recovery_counter =
 		cntrs.link_error_recovery_counter;
@@ -1054,14 +1532,14 @@ static void *ipath_register_ib_device(in
 	 * device types in the system, we can't be sure this is unique.
 	 */
 	if (!sys_image_guid)
-		sys_image_guid = ipath_layer_get_guid(dd);
+		sys_image_guid = dd->ipath_guid;
 	idev->sys_image_guid = sys_image_guid;
-	idev->ib_unit = unit;
+	idev->ib_unit = dd->ipath_unit;
 	idev->dd = dd;
 
 	strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
 	dev->owner = THIS_MODULE;
-	dev->node_guid = ipath_layer_get_guid(dd);
+	dev->node_guid = dd->ipath_guid;
 	dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
 	dev->uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
@@ -1095,7 +1573,7 @@ static void *ipath_register_ib_device(in
 		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
 	dev->node_type = IB_NODE_CA;
 	dev->phys_port_cnt = 1;
-	dev->dma_device = ipath_layer_get_device(dd);
+	dev->dma_device = &dd->pcidev->dev;
 	dev->class_dev.dev = dev->dma_device;
 	dev->query_device = ipath_query_device;
 	dev->modify_device = ipath_modify_device;
@@ -1139,7 +1617,7 @@ static void *ipath_register_ib_device(in
 	dev->process_mad = ipath_process_mad;
 
 	snprintf(dev->node_desc, sizeof(dev->node_desc),
-		 IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename);
+		 IPATH_IDSTR " %s", system_utsname.nodename);
 
 	ret = ib_register_device(dev);
 	if (ret)
@@ -1148,7 +1626,7 @@ static void *ipath_register_ib_device(in
 	if (ipath_verbs_register_sysfs(dev))
 		goto err_class;
 
-	ipath_layer_enable_timer(dd);
+	enable_timer(dd);
 
 	goto bail;
 
@@ -1160,37 +1638,32 @@ err_lk:
 	kfree(idev->qp_table.table);
 err_qp:
 	ib_dealloc_device(dev);
-	_VERBS_ERROR("ib_ipath%d cannot register verbs (%d)!\n",
-		     unit, -ret);
+	ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
 	idev = NULL;
 
 bail:
-	return idev;
+	dd->verbs_dev = idev;
+	return ret;
 }
 
-static void ipath_unregister_ib_device(void *arg)
+void ipath_unregister_ib_device(struct ipath_ibdev *dev)
 {
-	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ib_device *ibdev = &dev->ibdev;
 
-	ipath_layer_disable_timer(dev->dd);
+	disable_timer(dev->dd);
 
 	ib_unregister_device(ibdev);
 
 	if (!list_empty(&dev->pending[0]) ||
 	    !list_empty(&dev->pending[1]) ||
 	    !list_empty(&dev->pending[2]))
-		_VERBS_ERROR("ipath%d pending list not empty!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "pending list not empty!\n");
 	if (!list_empty(&dev->piowait))
-		_VERBS_ERROR("ipath%d piowait list not empty!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "piowait list not empty!\n");
 	if (!list_empty(&dev->rnrwait))
-		_VERBS_ERROR("ipath%d rnrwait list not empty!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
 	if (!ipath_mcast_tree_empty())
-		_VERBS_ERROR("ipath%d multicast table memory leak!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "multicast table memory leak!\n");
 	/*
 	 * Note that ipath_unregister_ib_device() can be called before all
 	 * the QPs are destroyed!
@@ -1201,25 +1674,12 @@ static void ipath_unregister_ib_device(v
 	ib_dealloc_device(ibdev);
 }
 
-static int __init ipath_verbs_init(void)
-{
-	return ipath_verbs_register(ipath_register_ib_device,
-				    ipath_unregister_ib_device,
-				    ipath_ib_piobufavail, ipath_ib_rcv,
-				    ipath_ib_timer);
-}
-
-static void __exit ipath_verbs_cleanup(void)
-{
-	ipath_verbs_unregister();
-}
-
 static ssize_t show_rev(struct class_device *cdev, char *buf)
 {
 	struct ipath_ibdev *dev =
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 
-	return sprintf(buf, "%x\n", ipath_layer_get_pcirev(dev->dd));
+	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
 }
 
 static ssize_t show_hca(struct class_device *cdev, char *buf)
@@ -1228,7 +1688,7 @@ static ssize_t show_hca(struct class_dev
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 	int ret;
 
-	ret = ipath_layer_get_boardname(dev->dd, buf, 128);
+	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
 	if (ret < 0)
 		goto bail;
 	strcat(buf, "\n");
@@ -1255,6 +1715,7 @@ static ssize_t show_stats(struct class_d
 		      "RC OTH NAKs %d\n"
 		      "RC timeouts %d\n"
 		      "RC RDMA dup %d\n"
+		      "RC stalls   %d\n"
 		      "piobuf wait %d\n"
 		      "no piobuf   %d\n"
 		      "PKT drops   %d\n"
@@ -1262,7 +1723,7 @@ static ssize_t show_stats(struct class_d
 		      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
 		      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
 		      dev->n_other_naks, dev->n_timeouts,
-		      dev->n_rdma_dup_busy, dev->n_piowait,
+		      dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
 		      dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
 	for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
 		const struct ipath_opcode_stats *si = &dev->opstats[i];
@@ -1305,6 +1766,3 @@ static int ipath_verbs_register_sysfs(st
 bail:
 	return ret;
 }
-
-module_init(ipath_verbs_init);
-module_exit(ipath_verbs_cleanup);
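
A note for reviewers on the n_pds_lock/n_ahs_lock changes in the hunks
above: the old code tested the limit and bumped the counter with no
locking at all, so two racing allocations could blow past
ib_ipath_max_pds.  The OFED 1.1 code allocates first (so kmalloc() can
sleep outside the spinlock), then checks the limit and bumps the
counter under the lock, freeing the object if the limit was hit.
Schematically (an illustrative sketch, not patch code; the
example_alloc_pd name is made up):

	struct ib_pd *example_alloc_pd(struct ipath_ibdev *dev)
	{
		struct ipath_pd *pd = kmalloc(sizeof *pd, GFP_KERNEL);

		if (!pd)
			return ERR_PTR(-ENOMEM);

		spin_lock(&dev->n_pds_lock);
		if (dev->n_pds_allocated == ib_ipath_max_pds) {
			spin_unlock(&dev->n_pds_lock);
			kfree(pd);	/* lost the race to the limit */
			return ERR_PTR(-ENOMEM);
		}
		dev->n_pds_allocated++;	/* counter update is now atomic */
		spin_unlock(&dev->n_pds_lock);

		return &pd->ibpd;
	}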
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_verbs.h linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_verbs.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_verbs.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_verbs.h	2006-10-19 12:41:11.000000000 -0400
@@ -41,7 +41,6 @@
 #include <rdma/ib_pack.h>
 
 #include "ipath_layer.h"
-#include "verbs_debug.h"
 
 #define QPN_MAX                 (1 << 24)
 #define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
@@ -152,19 +151,6 @@ struct ipath_mcast {
 	int n_attached;
 };
 
-/* Memory region */
-struct ipath_mr {
-	struct ib_mr ibmr;
-	struct ipath_mregion mr;	/* must be last */
-};
-
-/* Fast memory region */
-struct ipath_fmr {
-	struct ib_fmr ibfmr;
-	u8 page_shift;
-	struct ipath_mregion mr;	/* must be last */
-};
-
 /* Protection domain */
 struct ipath_pd {
 	struct ib_pd ibpd;
@@ -233,6 +219,54 @@ struct ipath_cq {
 };
 
 /*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct ipath_seg {
+	void *vaddr;
+	size_t length;
+};
+
+/* The number of ipath_segs that fit in a page. */
+#define IPATH_SEGSZ     (PAGE_SIZE / sizeof (struct ipath_seg))
+
+struct ipath_segarray {
+	struct ipath_seg segs[IPATH_SEGSZ];
+};
+
+struct ipath_mregion {
+	u64 user_base;		/* User's address for this region */
+	u64 iova;		/* IB start address of this region */
+	size_t length;
+	u32 lkey;
+	u32 offset;		/* offset (bytes) to start of region */
+	int access_flags;
+	u32 max_segs;		/* number of ipath_segs in all the arrays */
+	u32 mapsz;		/* size of the map array */
+	struct ipath_segarray *map[0];	/* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct ipath_sge {
+	struct ipath_mregion *mr;
+	void *vaddr;		/* current pointer into the segment */
+	u32 sge_length;		/* length of the SGE */
+	u32 length;		/* remaining length of the segment */
+	u16 m;			/* current index: mr->map[m] */
+	u16 n;			/* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct ipath_mr {
+	struct ib_mr ibmr;
+	struct ipath_mregion mr;	/* must be last */
+};
+
+/*
  * Send work request queue entry.
  * The size of the sg_list is determined when the QP is created and stored
  * in qp->s_max_sge.
@@ -274,6 +308,12 @@ struct ipath_srq {
 	u32 limit;
 };
 
+struct ipath_sge_state {
+	struct ipath_sge *sg_list;      /* next SGE to be used if any */
+	struct ipath_sge sge;   /* progress state for the current SGE */
+	u8 num_sge;
+};
+
 /*
  * Variables prefixed with s_ are for the requester (sender).
  * Variables prefixed with r_ are for the responder (receiver).
@@ -333,7 +373,9 @@ struct ipath_qp {
 	u8 s_rnr_retry_cnt;
 	u8 s_retry;		/* requester retry counter */
 	u8 s_rnr_retry;		/* requester RNR retry counter */
+	u8 s_wait_credit;	/* limit number of unacked packets sent */
 	u8 s_pkey_index;	/* PKEY index to use */
+	u8 timeout;		/* Timeout for this QP */
 	enum ib_mtu path_mtu;
 	u32 remote_qpn;
 	u32 qkey;		/* QKEY for this QP (for UD or RD) */
@@ -354,6 +396,8 @@ struct ipath_qp {
 #define IPATH_S_BUSY		0
 #define IPATH_S_SIGNAL_REQ_WR	1
 
+#define IPATH_PSN_CREDIT	2048
+
 /*
  * Since struct ipath_swqe is not a fixed size, we can't simply index into
  * struct ipath_qp.s_wq.  This function does the array index computation.
@@ -435,11 +479,20 @@ struct ipath_ibdev {
 	__be64 sys_image_guid;	/* in network order */
 	__be64 gid_prefix;	/* in network order */
 	__be64 mkey;
+
 	u32 n_pds_allocated;	/* number of PDs allocated for device */
+	spinlock_t n_pds_lock;
 	u32 n_ahs_allocated;	/* number of AHs allocated for device */
+	spinlock_t n_ahs_lock;
 	u32 n_cqs_allocated;	/* number of CQs allocated for device */
+	spinlock_t n_cqs_lock;
+	u32 n_qps_allocated;	/* number of QPs allocated for device */
+	spinlock_t n_qps_lock;
 	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
+	spinlock_t n_srqs_lock;
 	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
+	spinlock_t n_mcast_grps_lock;
+
 	u64 ipath_sword;	/* total dwords sent (sample result) */
 	u64 ipath_rword;	/* total dwords received (sample result) */
 	u64 ipath_spkts;	/* total packets sent (sample result) */
@@ -472,6 +525,7 @@ struct ipath_ibdev {
 	u32 n_rnr_naks;
 	u32 n_other_naks;
 	u32 n_timeouts;
+	u32 n_rc_stalls;
 	u32 n_pkt_drops;
 	u32 n_vl15_dropped;
 	u32 n_wqe_errs;
@@ -494,8 +548,19 @@ struct ipath_ibdev {
 	struct ipath_opcode_stats opstats[128];
 };
 
-struct ipath_ucontext {
-	struct ib_ucontext ibucontext;
+struct ipath_verbs_counters {
+	u64 symbol_error_counter;
+	u64 link_error_recovery_counter;
+	u64 link_downed_counter;
+	u64 port_rcv_errors;
+	u64 port_rcv_remphys_errors;
+	u64 port_xmit_discards;
+	u64 port_xmit_data;
+	u64 port_rcv_data;
+	u64 port_xmit_packets;
+	u64 port_rcv_packets;
+	u32 local_link_integrity_errors;
+	u32 excessive_buffer_overrun_errors;
 };
 
 static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
@@ -503,11 +568,6 @@ static inline struct ipath_mr *to_imr(st
 	return container_of(ibmr, struct ipath_mr, ibmr);
 }
 
-static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
-	return container_of(ibfmr, struct ipath_fmr, ibfmr);
-}
-
 static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
 {
 	return container_of(ibpd, struct ipath_pd, ibpd);
@@ -545,12 +605,6 @@ int ipath_process_mad(struct ib_device *
 		      struct ib_grh *in_grh,
 		      struct ib_mad *in_mad, struct ib_mad *out_mad);
 
-static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
-						  *ibucontext)
-{
-	return container_of(ibucontext, struct ipath_ucontext, ibucontext);
-}
-
 /*
  * Compare the lower 24 bits of the two values.
  * Returns an integer <, ==, or > than zero.
@@ -562,6 +616,13 @@ static inline int ipath_cmp24(u32 a, u32
 
 struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
 
+int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+			    u64 *rwords, u64 *spkts, u64 *rpkts,
+			    u64 *xmit_wait);
+
+int ipath_get_counters(struct ipath_devdata *dd,
+		       struct ipath_verbs_counters *cntrs);
+
 int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
 
 int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
@@ -592,6 +653,9 @@ void ipath_sqerror_qp(struct ipath_qp *q
 
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+		     u32 *hdr, u32 len, struct ipath_sge_state *ss);
+
 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
 
 int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
@@ -700,6 +764,22 @@ int ipath_make_rc_req(struct ipath_qp *q
 int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
 		      u32 pmtu, u32 *bth0p, u32 *bth2p);
 
+int ipath_register_ib_device(struct ipath_devdata *);
+
+void ipath_unregister_ib_device(struct ipath_ibdev *);
+
+void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
+
+int ipath_ib_piobufavail(struct ipath_ibdev *);
+
+void ipath_ib_timer(struct ipath_ibdev *);
+
+unsigned ipath_get_npkeys(struct ipath_devdata *);
+
+u32 ipath_get_cr_errpkey(struct ipath_devdata *);
+
+unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
+
 extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
 
 extern const u8 ipath_cvt_physportstate[];
@@ -714,6 +794,8 @@ extern unsigned int ib_ipath_max_cqs;
 
 extern unsigned int ib_ipath_max_qp_wrs;
 
+extern unsigned int ib_ipath_max_qps;
+
 extern unsigned int ib_ipath_max_sges;
 
 extern unsigned int ib_ipath_max_mcast_grps;
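
On the two-level segment map added above: each ipath_segarray fills
exactly one page, so with 4K pages and a 16-byte ipath_seg (an 8-byte
pointer plus an 8-byte size_t on x86_64) IPATH_SEGSZ works out to 256
segments per map entry.  An SGE walks mr->map[m]->segs[n] by bumping n
and rolling over into the next map page, which is what update_sge() in
ipath_verbs.c does.  Condensed to a sketch (illustrative only;
next_seg is a made-up name):

	/* Advance (m, n) one segment through the two-level map. */
	static int next_seg(struct ipath_sge *sge)
	{
		if (++sge->n >= IPATH_SEGSZ) {	/* off the end of this page */
			if (++sge->m >= sge->mr->mapsz)
				return 0;	/* no segments left */
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
		return 1;
	}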
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c	2006-10-19 12:41:10.000000000 -0400
@@ -207,12 +207,17 @@ static int ipath_mcast_add(struct ipath_
 		goto bail;
 	}
 
+	spin_lock(&dev->n_mcast_grps_lock);
 	if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
+		spin_unlock(&dev->n_mcast_grps_lock);
 		ret = ENOMEM;
 		goto bail;
 	}
 
 	dev->n_mcast_grps_allocated++;
+	spin_unlock(&dev->n_mcast_grps_lock);
+
+	mcast->n_attached++;
 
 	list_add_tail_rcu(&mqp->list, &mcast->qp_list);
 
@@ -343,7 +348,9 @@ int ipath_multicast_detach(struct ib_qp 
 		atomic_dec(&mcast->refcount);
 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
 		ipath_mcast_free(mcast);
+		spin_lock(&dev->n_mcast_grps_lock);
 		dev->n_mcast_grps_allocated--;
+		spin_unlock(&dev->n_mcast_grps_lock);
 	}
 
 	ret = 0;
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c	2006-10-19 12:41:10.000000000 -0400
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file is conditionally built on PowerPC only.  Otherwise weak symbol
+ * versions of the functions exported from here are used.
+ */
+
+#include "ipath_kernel.h"
+
+/**
+ * ipath_unordered_wc - indicate whether write combining is ordered
+ *
+ * PowerPC systems (at least those in the 970 processor family)
+ * write partially filled store buffers in address order, but will write
+ * completely filled store buffers in "random" order, and therefore must
+ * have serialization for correctness with current InfiniPath chips.
+ *
+ */
+int ipath_unordered_wc(void)
+{
+	return 1;
+}
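
The "weak symbol versions" the comment above refers to are the usual
GCC trick: the driver provides a weak generic definition in the common
code, and arch-specific files like this one override it with a strong
definition.  A minimal sketch of what such a default could look like
(hypothetical; the actual weak definition lives elsewhere in the
driver sources):

	/*
	 * Weak default: architectures whose write combining preserves
	 * ordering get this version; ipath_wc_ppc64.c above overrides
	 * it with a strong definition that returns 1.
	 */
	int __attribute__((weak)) ipath_unordered_wc(void)
	{
		return 0;
	}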
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/Kconfig linux-2.6.18.noarch/drivers/infiniband/hw/ipath/Kconfig
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/Kconfig	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/Kconfig	2006-10-19 12:41:11.000000000 -0400
@@ -1,16 +1,9 @@
-config IPATH_CORE
-	tristate "QLogic InfiniPath Driver"
-	depends on 64BIT && PCI_MSI && NET
-	---help---
-	This is a low-level driver for QLogic InfiniPath host channel
-	adapters (HCAs) based on the HT-400 and PE-800 chips.
-
 config INFINIBAND_IPATH
-	tristate "QLogic InfiniPath Verbs Driver"
-	depends on IPATH_CORE && INFINIBAND
+	tristate "QLogic InfiniPath Driver"
+	depends on PCI_MSI && X86_64 && INFINIBAND
 	---help---
-	This is a driver that provides InfiniBand verbs support for
-	QLogic InfiniPath host channel adapters (HCAs).  This
-	allows these devices to be used with both kernel upper level
-	protocols such as IP-over-InfiniBand as well as with userspace
-	applications (in conjunction with InfiniBand userspace access).
+	This is a driver for QLogic InfiniPath host channel adapters,
+	including InfiniBand verbs support.  This driver allows these
+	devices to be used with both kernel upper level protocols such
+	as IP-over-InfiniBand as well as with userspace applications
+	(in conjunction with InfiniBand userspace access).
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/Makefile linux-2.6.18.noarch/drivers/infiniband/hw/ipath/Makefile
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/Makefile	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/Makefile	2006-10-19 12:41:11.000000000 -0400
@@ -1,36 +1,35 @@
 EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \
 	-DIPATH_KERN_TYPE=0
 
-obj-$(CONFIG_IPATH_CORE) += ipath_core.o
 obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
 
-ipath_core-y := \
+ib_ipath-y := \
+	ipath_cq.o \
 	ipath_diag.o \
 	ipath_driver.o \
 	ipath_eeprom.o \
 	ipath_file_ops.o \
 	ipath_fs.o \
-	ipath_ht400.o \
+	ipath_iba6110.o \
+	ipath_iba6120.o \
 	ipath_init_chip.o \
 	ipath_intr.o \
-	ipath_layer.o \
-	ipath_pe800.o \
-	ipath_stats.o \
-	ipath_sysfs.o \
-	ipath_user_pages.o
-
-ipath_core-$(CONFIG_X86_64) += ipath_wc_x86_64.o
-
-ib_ipath-y := \
-	ipath_cq.o \
 	ipath_keys.o \
+	ipath_layer.o \
 	ipath_mad.o \
 	ipath_mr.o \
 	ipath_qp.o \
 	ipath_rc.o \
 	ipath_ruc.o \
 	ipath_srq.o \
+	ipath_stats.o \
+	ipath_sysfs.o \
 	ipath_uc.o \
 	ipath_ud.o \
-	ipath_verbs.o \
-	ipath_verbs_mcast.o
+	ipath_user_pages.o \
+	ipath_verbs_mcast.o \
+	ipath_verbs.o
+
+ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
+ib_ipath-$(CONFIG_X86_64) += memcpy_cachebypass_x86_64.o
+ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/memcpy_cachebypass_x86_64.S linux-2.6.18.noarch/drivers/infiniband/hw/ipath/memcpy_cachebypass_x86_64.S
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/memcpy_cachebypass_x86_64.S	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/memcpy_cachebypass_x86_64.S	2006-10-19 12:41:11.000000000 -0400
@@ -0,0 +1,115 @@
+	.text
+	.p2align 4,,15
+	/* rdi  destination, rsi source, rdx count */
+	.globl	memcpy_cachebypass
+	.type	memcpy_cachebypass, @function
+memcpy_cachebypass:
+	movq	%rdi, %rax
+.L5:
+	cmpq	$15, %rdx
+	ja	.L34
+.L3:
+	cmpl	$8, %edx	/* rdx is 0..15 */
+	jbe	.L9
+.L6:
+	testb	$8, %dl		/* rdx is 3,5,6,7,9..15 */
+	je	.L13
+	movq	(%rsi), %rcx
+	addq	$8, %rsi
+	movq	%rcx, (%rdi)
+	addq	$8, %rdi
+.L13:
+	testb	$4, %dl
+	je	.L15
+	movl	(%rsi), %ecx
+	addq	$4, %rsi
+	movl	%ecx, (%rdi)
+	addq	$4, %rdi
+.L15:
+	testb	$2, %dl
+	je	.L17
+	movzwl	(%rsi), %ecx
+	addq	$2, %rsi
+	movw	%cx, (%rdi)
+	addq	$2, %rdi
+.L17:
+	testb	$1, %dl
+	je	.L33
+.L1:
+	movzbl	(%rsi), %ecx
+	movb	%cl, (%rdi)
+.L33:
+	ret
+.L34:
+	cmpq	$63, %rdx	/* rdx is > 15 */
+	ja	.L64
+	movl	$16, %ecx	/* rdx is 16..63 */
+.L25:
+	movq	8(%rsi), %r8
+	movq	(%rsi), %r9
+	addq	%rcx, %rsi
+	movq	%r8, 8(%rdi)
+	movq	%r9, (%rdi)
+	addq	%rcx, %rdi
+	subq	%rcx, %rdx
+	cmpl	%edx, %ecx	/* is rdx >= 16? */
+	jbe	.L25
+	jmp	.L3		/* rdx is 0..15 */
+	.p2align 4,,7
+.L64:
+	movl	$64, %ecx
+.L42:
+	prefetchnta	128(%rsi)
+	movq	(%rsi), %r8
+	movq	8(%rsi), %r9
+	movq	16(%rsi), %r10
+	movq	24(%rsi), %r11
+	subq	%rcx, %rdx
+	movq	%r8, (%rdi)
+	movq	32(%rsi), %r8
+	movq	%r9, 8(%rdi)
+	movq	40(%rsi), %r9
+	movq	%r10, 16(%rdi)
+	movq	48(%rsi), %r10
+	movq	%r11, 24(%rdi)
+	movq	56(%rsi), %r11
+	addq	%rcx, %rsi
+	movq	%r8, 32(%rdi)
+	movq	%r9, 40(%rdi)
+	movq	%r10, 48(%rdi)
+	movq	%r11, 56(%rdi)
+	addq	%rcx, %rdi
+	cmpq	%rdx, %rcx	/* is rdx >= 64? */
+	jbe	.L42
+	sfence
+	orl	%edx, %edx
+	je	.L33
+	jmp	.L5
+.L9:
+	jmp	*.L12(,%rdx,8)	/* rdx is 0..8 */
+	.section	.rodata
+	.align 8
+	.align 4
+.L12:
+	.quad	.L33
+	.quad	.L1
+	.quad	.L2
+	.quad	.L6
+	.quad	.L4
+	.quad	.L6
+	.quad	.L6
+	.quad	.L6
+	.quad	.L8
+	.text
+.L2:
+	movzwl	(%rsi), %ecx
+	movw	%cx, (%rdi)
+	ret
+.L4:
+	movl	(%rsi), %ecx
+	movl	%ecx, (%rdi)
+	ret
+.L8:
+	movq	(%rsi), %rcx
+	movq	%rcx, (%rdi)
+	ret
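
For anyone who would rather not trace the assembly: memcpy_cachebypass
is an ordinary forward memcpy whose 64-byte main loop prefetches the
source with prefetchnta (a non-temporal hint, so the streamed source
data does not evict useful cache lines) and issues an sfence when the
loop exits; small sizes go through the jump table.  A rough C
equivalent of the structure (a sketch only, ignoring the jump table):

	#include <stddef.h>
	#include <string.h>

	void *memcpy_cachebypass_sketch(void *dst, const void *src, size_t n)
	{
		char *d = dst;
		const char *s = src;

		while (n >= 64) {
			/* asm: prefetchnta 128(%rsi) */
			__builtin_prefetch(s + 128, 0, 0);
			/* asm: eight 64-bit loads/stores per iteration */
			memcpy(d, s, 64);
			d += 64; s += 64; n -= 64;
		}
		/* asm: sfence here, then 16-byte chunks and the table */
		memcpy(d, s, n);
		return dst;
	}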
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/verbs_debug.h linux-2.6.18.noarch/drivers/infiniband/hw/ipath/verbs_debug.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/ipath/verbs_debug.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/ipath/verbs_debug.h	1969-12-31 19:00:00.000000000 -0500
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _VERBS_DEBUG_H
-#define _VERBS_DEBUG_H
-
-/*
- * This file contains tracing code for the ib_ipath kernel module.
- */
-#ifndef _VERBS_DEBUGGING	/* tracing enabled or not */
-#define _VERBS_DEBUGGING 1
-#endif
-
-extern unsigned ib_ipath_debug;
-
-#define _VERBS_ERROR(fmt,...) \
-	do { \
-		printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
-	} while(0)
-
-#define _VERBS_UNIT_ERROR(unit,fmt,...) \
-	do { \
-		printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
-	} while(0)
-
-#if _VERBS_DEBUGGING
-
-/*
- * Mask values for debugging.  The scheme allows us to compile out any
- * of the debug tracing stuff, and if compiled in, to enable or
- * disable dynamically.
- * This can be set at modprobe time also:
- *      modprobe ib_path ib_ipath_debug=3
- */
-
-#define __VERBS_INFO        0x1	/* generic low verbosity stuff */
-#define __VERBS_DBG         0x2	/* generic debug */
-#define __VERBS_VDBG        0x4	/* verbose debug */
-#define __VERBS_SMADBG      0x8000	/* sma packet debug */
-
-#define _VERBS_INFO(fmt,...) \
-	do { \
-		if (unlikely(ib_ipath_debug&__VERBS_INFO)) \
-			printk(KERN_INFO "%s: " fmt,"ib_ipath", \
-			       ##__VA_ARGS__); \
-	} while(0)
-
-#define _VERBS_DBG(fmt,...) \
-	do { \
-		if (unlikely(ib_ipath_debug&__VERBS_DBG)) \
-			printk(KERN_DEBUG "%s: " fmt, __func__, \
-			       ##__VA_ARGS__); \
-	} while(0)
-
-#define _VERBS_VDBG(fmt,...) \
-	do { \
-		if (unlikely(ib_ipath_debug&__VERBS_VDBG)) \
-			printk(KERN_DEBUG "%s: " fmt, __func__, \
-			       ##__VA_ARGS__); \
-	} while(0)
-
-#define _VERBS_SMADBG(fmt,...) \
-	do { \
-		if (unlikely(ib_ipath_debug&__VERBS_SMADBG)) \
-			printk(KERN_DEBUG "%s: " fmt, __func__, \
-			       ##__VA_ARGS__); \
-	} while(0)
-
-#else /* ! _VERBS_DEBUGGING */
-
-#define _VERBS_INFO(fmt,...)
-#define _VERBS_DBG(fmt,...)
-#define _VERBS_VDBG(fmt,...)
-#define _VERBS_SMADBG(fmt,...)
-
-#endif /* _VERBS_DEBUGGING */
-
-#endif /* _VERBS_DEBUG_H */
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_av.c linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_av.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_av.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_av.c	2006-10-19 12:41:11.000000000 -0400
@@ -90,7 +90,7 @@ static enum ib_rate tavor_rate_to_ib(u8 
 	case MTHCA_RATE_TAVOR_1X:     return IB_RATE_2_5_GBPS;
 	case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
 	case MTHCA_RATE_TAVOR_4X:     return IB_RATE_10_GBPS;
-	default:		      return port_rate;
+	default:		      return mult_to_ib_rate(port_rate);
 	}
 }
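
The mthca_av.c change above fixes a type confusion: in the default
case the function used to return the raw Tavor rate multiplier where
an enum ib_rate was expected.  mult_to_ib_rate() is the core verbs
helper that maps a multiple of the 2.5 Gb/s base rate onto the enum;
roughly (a sketch of the mapping, see ib_verbs.h for the real one):

	static enum ib_rate example_mult_to_ib_rate(int mult)
	{
		switch (mult) {
		case 1:  return IB_RATE_2_5_GBPS;
		case 2:  return IB_RATE_5_GBPS;
		case 4:  return IB_RATE_10_GBPS;
		case 8:  return IB_RATE_20_GBPS;
		case 12: return IB_RATE_30_GBPS;
		default: return IB_RATE_PORT_CURRENT;
		}
	}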
 
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_catas.c linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_catas.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_catas.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_catas.c	2006-10-19 12:41:11.000000000 -0400
@@ -34,6 +34,7 @@
 
 #include <linux/jiffies.h>
 #include <linux/timer.h>
+#include <linux/workqueue.h>
 
 #include "mthca_dev.h"
 
@@ -48,9 +49,42 @@ enum {
 
 static DEFINE_SPINLOCK(catas_lock);
 
+static struct workqueue_struct *catas_wq;
+static struct list_head catas_list;
+static struct work_struct catas_work;
+
+static int catas_reset_disable = 0;
+module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
+MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if > 0");
+
+static void catas_reset(void *work_ptr)
+{
+	struct mthca_dev *dev, *tmpdev;
+	LIST_HEAD(local_catas);
+	unsigned long flags;
+	int rc;
+
+	mutex_lock(&mthca_device_mutex);
+
+	spin_lock_irqsave(&catas_lock, flags);
+	list_for_each_entry_safe(dev, tmpdev, &catas_list, catas_err.list)
+		list_move_tail(&dev->catas_err.list, &local_catas);
+	spin_unlock_irqrestore(&catas_lock, flags);
+
+	list_for_each_entry_safe(dev, tmpdev, &local_catas, catas_err.list) {
+		rc = mthca_restart_one(dev->pdev);
+		if (rc)
+			mthca_err(dev, "Reset failed (%d)\n", rc);
+		else
+			mthca_dbg(dev, "Reset succeeded\n");
+	}
+	mutex_unlock(&mthca_device_mutex);
+}
+
 static void handle_catas(struct mthca_dev *dev)
 {
 	struct ib_event event;
+	unsigned long flags;
 	const char *type;
 	int i;
 
@@ -82,6 +116,14 @@ static void handle_catas(struct mthca_de
 	for (i = 0; i < dev->catas_err.size; ++i)
 		mthca_err(dev, "  buf[%02x]: %08x\n",
 			  i, swab32(readl(dev->catas_err.map + i)));
+
+	if (catas_reset_disable)
+		return;
+
+	spin_lock_irqsave(&catas_lock, flags);
+	list_add(&dev->catas_err.list, &catas_list);
+	queue_work(catas_wq, &catas_work);
+	spin_unlock_irqrestore(&catas_lock, flags);
 }
 
 static void poll_catas(unsigned long dev_ptr)
@@ -135,11 +177,14 @@ void mthca_start_catas_poll(struct mthca
 	dev->catas_err.timer.data     = (unsigned long) dev;
 	dev->catas_err.timer.function = poll_catas;
 	dev->catas_err.timer.expires  = jiffies + MTHCA_CATAS_POLL_INTERVAL;
+	INIT_LIST_HEAD(&dev->catas_err.list);
 	add_timer(&dev->catas_err.timer);
 }
 
 void mthca_stop_catas_poll(struct mthca_dev *dev)
 {
+	unsigned long flags;
+
 	spin_lock_irq(&catas_lock);
 	dev->catas_err.stop = 1;
 	spin_unlock_irq(&catas_lock);
@@ -153,4 +198,23 @@ void mthca_stop_catas_poll(struct mthca_
 				    dev->catas_err.addr),
 				   dev->catas_err.size * 4);
 	}
+
+	spin_lock_irqsave(&catas_lock, flags);
+	list_del(&dev->catas_err.list);
+	spin_unlock_irqrestore(&catas_lock, flags);
+}
+
+int __init mthca_catas_init(void)
+{
+	INIT_LIST_HEAD(&catas_list);
+	INIT_WORK(&catas_work, catas_reset, NULL);
+	catas_wq = create_singlethread_workqueue("mthcacatas");
+	if (!catas_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+void mthca_catas_cleanup(void)
+{
+	destroy_workqueue(catas_wq);
 }
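
One pattern in catas_reset() worth calling out for reviewers: the
handler splices all queued devices onto a local list while holding
catas_lock, then drops the spinlock and does the slow per-device
restarts under mthca_device_mutex instead, so the spinlock is never
held across mthca_restart_one().  The shape of it (a generic sketch
with made-up names, not patch code):

	struct item {
		struct list_head list;
		/* ... payload ... */
	};

	static void process_pending(spinlock_t *lock, struct list_head *pending)
	{
		LIST_HEAD(local);
		struct item *it, *tmp;
		unsigned long flags;

		/* Grab everything queued so far; keep the hold time short. */
		spin_lock_irqsave(lock, flags);
		list_for_each_entry_safe(it, tmp, pending, list)
			list_move_tail(&it->list, &local);
		spin_unlock_irqrestore(lock, flags);

		/* Do the slow work with the spinlock dropped. */
		list_for_each_entry_safe(it, tmp, &local, list)
			handle_one(it);	/* hypothetical per-item handler */
	}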
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_dev.h linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_dev.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_dev.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_dev.h	2006-10-19 12:41:11.000000000 -0400
@@ -45,6 +45,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/timer.h>
 #include <linux/mutex.h>
+#include <linux/list.h>
 
 #include <asm/semaphore.h>
 
@@ -283,8 +284,11 @@ struct mthca_catas_err {
 	unsigned long		stop;
 	u32			size;
 	struct timer_list	timer;
+	struct list_head	list;
 };
 
+extern struct mutex mthca_device_mutex;
+
 struct mthca_dev {
 	struct ib_device  ib_dev;
 	struct pci_dev   *pdev;
@@ -450,6 +454,9 @@ void mthca_unregister_device(struct mthc
 
 void mthca_start_catas_poll(struct mthca_dev *dev);
 void mthca_stop_catas_poll(struct mthca_dev *dev);
+int mthca_restart_one(struct pci_dev *pdev);
+int mthca_catas_init(void);
+void mthca_catas_cleanup(void);
 
 int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar);
 void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_main.c linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_main.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_main.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_main.c	2006-10-19 12:41:11.000000000 -0400
@@ -80,6 +80,8 @@ static int tune_pci = 0;
 module_param(tune_pci, int, 0444);
 MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");
 
+struct mutex mthca_device_mutex;
+
 static const char mthca_version[] __devinitdata =
 	DRV_NAME ": Mellanox InfiniBand HCA driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -978,28 +980,15 @@ static struct {
 					MTHCA_FLAG_SINAI_OPT }
 };
 
-static int __devinit mthca_init_one(struct pci_dev *pdev,
-				    const struct pci_device_id *id)
+static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 {
-	static int mthca_version_printed = 0;
 	int ddr_hidden = 0;
 	int err;
 	struct mthca_dev *mdev;
 
-	if (!mthca_version_printed) {
-		printk(KERN_INFO "%s", mthca_version);
-		++mthca_version_printed;
-	}
-
 	printk(KERN_INFO PFX "Initializing %s\n",
 	       pci_name(pdev));
 
-	if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
-		printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
-		       pci_name(pdev), id->driver_data);
-		return -ENODEV;
-	}
-
 	err = pci_enable_device(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot enable PCI device, "
@@ -1065,7 +1054,7 @@ static int __devinit mthca_init_one(stru
 
 	mdev->pdev = pdev;
 
-	mdev->mthca_flags = mthca_hca_table[id->driver_data].flags;
+	mdev->mthca_flags = mthca_hca_table[hca_type].flags;
 	if (ddr_hidden)
 		mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
 
@@ -1099,13 +1088,13 @@ static int __devinit mthca_init_one(stru
 	if (err)
 		goto err_cmd;
 
-	if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
+	if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
 		mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
 			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
 			   (int) (mdev->fw_ver & 0xffff),
-			   (int) (mthca_hca_table[id->driver_data].latest_fw >> 32),
-			   (int) (mthca_hca_table[id->driver_data].latest_fw >> 16) & 0xffff,
-			   (int) (mthca_hca_table[id->driver_data].latest_fw & 0xffff));
+			   (int) (mthca_hca_table[hca_type].latest_fw >> 32),
+			   (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
+			   (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
 		mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
 	}
 
@@ -1122,6 +1111,7 @@ static int __devinit mthca_init_one(stru
 		goto err_unregister;
 
 	pci_set_drvdata(pdev, mdev);
+	mdev->hca_type = hca_type;
 
 	return 0;
 
@@ -1166,7 +1156,7 @@ err_disable_pdev:
 	return err;
 }
 
-static void __devexit mthca_remove_one(struct pci_dev *pdev)
+static void __mthca_remove_one(struct pci_dev *pdev)
 {
 	struct mthca_dev *mdev = pci_get_drvdata(pdev);
 	u8 status;
@@ -1211,6 +1201,49 @@ static void __devexit mthca_remove_one(s
 	}
 }
 
+static int __devinit mthca_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *id)
+{
+	static int mthca_version_printed = 0;
+	int rc;
+
+	mutex_lock(&mthca_device_mutex);
+	if (!mthca_version_printed) {
+		printk(KERN_INFO "%s", mthca_version);
+		++mthca_version_printed;
+	}
+
+	if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
+		printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
+		       pci_name(pdev), id->driver_data);
+		mutex_unlock(&mthca_device_mutex);
+		return -ENODEV;
+	}
+
+	rc = __mthca_init_one(pdev, id->driver_data);
+	mutex_unlock(&mthca_device_mutex);
+	return rc;
+}
+
+static void __devexit mthca_remove_one(struct pci_dev *pdev)
+{
+	mutex_lock(&mthca_device_mutex);
+	__mthca_remove_one(pdev);
+	mutex_unlock(&mthca_device_mutex);
+	return;
+}
+
+int mthca_restart_one(struct pci_dev *pdev)
+{
+	struct mthca_dev *mdev;
+
+	mdev = pci_get_drvdata(pdev);
+	if (!mdev)
+		return -ENODEV;
+	__mthca_remove_one(pdev);
+	return __mthca_init_one(pdev, mdev->hca_type);
+}
+
 static struct pci_device_id mthca_pci_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
 	  .driver_data = TAVOR },
@@ -1248,13 +1281,22 @@ static int __init mthca_init(void)
 {
 	int ret;
 
+	mutex_init(&mthca_device_mutex);
+	if (mthca_catas_init())
+		return -ENOMEM;
+
 	ret = pci_register_driver(&mthca_driver);
-	return ret < 0 ? ret : 0;
+	if (ret < 0) {
+		mthca_catas_cleanup();
+		return ret;
+	}
+	return 0;
 }
 
 static void __exit mthca_cleanup(void)
 {
 	pci_unregister_driver(&mthca_driver);
+	mthca_catas_cleanup();
 }
 
 module_init(mthca_init);
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_provider.c linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_provider.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_provider.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_provider.c	2006-10-19 12:41:11.000000000 -0400
@@ -179,6 +179,8 @@ static int mthca_query_port(struct ib_de
 	props->max_mtu           = out_mad->data[41] & 0xf;
 	props->active_mtu        = out_mad->data[36] >> 4;
 	props->subnet_timeout    = out_mad->data[51] & 0x1f;
+	props->max_vl_num        = out_mad->data[37] >> 4;
+	props->init_type_reply   = out_mad->data[41] >> 4;
 
  out:
 	kfree(in_mad);
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_qp.c linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_qp.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_qp.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_qp.c	2006-10-19 12:41:11.000000000 -0400
@@ -408,7 +408,7 @@ static void to_ib_ah_attr(struct mthca_d
 	ib_ah_attr->sl       	  = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
 	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
 	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
-						     path->static_rate & 0x7,
+						     path->static_rate & 0xf,
 						     ib_ah_attr->port_num);
 	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
 	if (ib_ah_attr->ah_flags) {
@@ -472,10 +472,14 @@ int mthca_query_qp(struct ib_qp *ibqp, s
 	if (qp->transport == RC || qp->transport == UC) {
 		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
 		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+		qp_attr->alt_pkey_index =
+			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+		qp_attr->alt_port_num 	= qp_attr->alt_ah_attr.port_num;
 	}
 
-	qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
-	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
+	qp_attr->port_num   =
+		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;
 
 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
 	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;
@@ -486,11 +490,9 @@ int mthca_query_qp(struct ib_qp *ibqp, s
 		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
 	qp_attr->min_rnr_timer 	    =
 		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
-	qp_attr->port_num 	    = qp_attr->ah_attr.port_num;
 	qp_attr->timeout 	    = context->pri_path.ackto >> 3;
 	qp_attr->retry_cnt 	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
 	qp_attr->rnr_retry 	    = context->pri_path.rnr_retry >> 5;
-	qp_attr->alt_port_num 	    = qp_attr->alt_ah_attr.port_num;
 	qp_attr->alt_timeout 	    = context->alt_path.ackto >> 3;
 	qp_init_attr->cap 	    = qp_attr->cap;
 
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_uar.c linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_uar.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/hw/mthca/mthca_uar.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/hw/mthca/mthca_uar.c	2006-10-19 12:41:11.000000000 -0400
@@ -60,7 +60,7 @@ int mthca_init_uar_table(struct mthca_de
 	ret = mthca_alloc_init(&dev->uar_table.alloc,
 			       dev->limits.num_uars,
 			       dev->limits.num_uars - 1,
-			       dev->limits.reserved_uars);
+			       dev->limits.reserved_uars + 1);
 	if (ret)
 		return ret;
 
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/Kconfig linux-2.6.18.noarch/drivers/infiniband/Kconfig
--- linux-2.6.18.noarch.OFED/drivers/infiniband/Kconfig	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/Kconfig	2006-10-17 10:15:06.000000000 -0400
@@ -40,10 +40,12 @@ source "drivers/infiniband/hw/ehca/Kconf
 
 source "drivers/infiniband/ulp/ipoib/Kconfig"
 
-source "drivers/infiniband/ulp/sdp/Kconfig"
-
 source "drivers/infiniband/ulp/srp/Kconfig"
 
 source "drivers/infiniband/ulp/iser/Kconfig"
 
+source "drivers/infiniband/ulp/sdp/Kconfig"
+
+source "drivers/infiniband/util/Kconfig"
+
 endmenu
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/Makefile linux-2.6.18.noarch/drivers/infiniband/Makefile
--- linux-2.6.18.noarch.OFED/drivers/infiniband/Makefile	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/Makefile	2006-10-19 12:41:10.000000000 -0400
@@ -1,8 +1,9 @@
 obj-$(CONFIG_INFINIBAND)		+= core/
 obj-$(CONFIG_INFINIBAND_MTHCA)		+= hw/mthca/
-obj-$(CONFIG_IPATH_CORE)		+= hw/ipath/
+obj-$(CONFIG_INFINIBAND_IPATH)		+= hw/ipath/
 obj-$(CONFIG_INFINIBAND_EHCA)		+= hw/ehca/
 obj-$(CONFIG_INFINIBAND_IPOIB)		+= ulp/ipoib/
-obj-$(CONFIG_INFINIBAND_SDP)		+= ulp/sdp/
 obj-$(CONFIG_INFINIBAND_SRP)		+= ulp/srp/
 obj-$(CONFIG_INFINIBAND_ISER)		+= ulp/iser/
+obj-$(CONFIG_INFINIBAND_SDP)		+= ulp/sdp/
+obj-$(CONFIG_INFINIBAND_MADEYE)		+= util/
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/ipoib/ipoib_fs.c linux-2.6.18.noarch/drivers/infiniband/ulp/ipoib/ipoib_fs.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/ipoib/ipoib_fs.c	2006-10-19 14:44:54.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/ipoib/ipoib_fs.c	2006-10-17 10:15:06.000000000 -0400
@@ -141,7 +141,7 @@ static int ipoib_mcg_open(struct inode *
 		return ret;
 
 	seq = file->private_data;
-	seq->private = inode->i_private;
+	seq->private = inode->u.generic_ip;
 
 	return 0;
 }
@@ -247,7 +247,7 @@ static int ipoib_path_open(struct inode 
 		return ret;
 
 	seq = file->private_data;
-	seq->private = inode->i_private;
+	seq->private = inode->u.generic_ip;
 
 	return 0;
 }
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/ipoib/ipoib_ib.c linux-2.6.18.noarch/drivers/infiniband/ulp/ipoib/ipoib_ib.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/ipoib/ipoib_ib.c	2006-10-19 12:41:11.000000000 -0400
@@ -619,8 +619,10 @@ void ipoib_ib_dev_flush(void *_dev)
 	 * The device could have been brought down between the start and when
 	 * we get here, don't bring it back up if it's not configured up
 	 */
-	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
 		ipoib_ib_dev_up(dev);
+		ipoib_mcast_restart_task(dev);
+	}
 
 	mutex_lock(&priv->vlan_mutex);
 
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/ipoib/ipoib_main.c linux-2.6.18.noarch/drivers/infiniband/ulp/ipoib/ipoib_main.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/ipoib/ipoib_main.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/ipoib/ipoib_main.c	2006-10-19 12:41:11.000000000 -0400
@@ -182,6 +182,8 @@ static int ipoib_change_mtu(struct net_d
 
 	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
 
+	queue_work(ipoib_workqueue, &priv->flush_task);
+
 	return 0;
 }
 
@@ -336,7 +338,8 @@ void ipoib_flush_paths(struct net_device
 	struct ipoib_path *path, *tp;
 	LIST_HEAD(remove_list);
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irq(&priv->tx_lock);
+	spin_lock(&priv->lock);
 
 	list_splice(&priv->path_list, &remove_list);
 	INIT_LIST_HEAD(&priv->path_list);
@@ -347,12 +350,15 @@ void ipoib_flush_paths(struct net_device
 	list_for_each_entry_safe(path, tp, &remove_list, list) {
 		if (path->query)
 			ib_sa_cancel_query(path->query_id, path->query);
-		spin_unlock_irq(&priv->lock);
+		spin_unlock(&priv->lock);
+		spin_unlock_irq(&priv->tx_lock);
 		wait_for_completion(&path->done);
 		path_free(dev, path);
-		spin_lock_irq(&priv->lock);
+		spin_lock_irq(&priv->tx_lock);
+		spin_lock(&priv->lock);
 	}
-	spin_unlock_irq(&priv->lock);
+	spin_unlock(&priv->lock);
+	spin_unlock_irq(&priv->tx_lock);
 }
 
 static void path_rec_completion(int status,
@@ -452,15 +458,39 @@ static int path_rec_start(struct net_dev
 			  struct ipoib_path *path)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU;
+
+	path->pathrec.mtu_selector = IB_SA_GT;
 
-	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
-		  IPOIB_GID_ARG(path->pathrec.dgid));
+	switch (roundup_pow_of_two(dev->mtu + IPOIB_ENCAP_LEN)) {
+	case 512:
+		path->pathrec.mtu = IB_MTU_256;
+		break;
+	case 1024:
+		path->pathrec.mtu = IB_MTU_512;
+		break;
+	case 2048:
+		path->pathrec.mtu = IB_MTU_1024;
+		break;
+	case 4096:
+		path->pathrec.mtu = IB_MTU_2048;
+		break;
+	default:
+		/* Wildcard everything */
+		comp_mask = 0;
+		path->pathrec.mtu = 0;
+		path->pathrec.mtu_selector = 0;
+	}
+
+	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT " MTU > %d\n",
+		  IPOIB_GID_ARG(path->pathrec.dgid),
+		  comp_mask ? ib_mtu_enum_to_int(path->pathrec.mtu) : 0);
 
 	init_completion(&path->done);
 
 	path->query_id =
 		ib_sa_path_rec_get(priv->ca, priv->port,
-				   &path->pathrec,
+				   &path->pathrec, comp_mask    |
 				   IB_SA_PATH_REC_DGID		|
 				   IB_SA_PATH_REC_SGID		|
 				   IB_SA_PATH_REC_NUMB_PATH	|
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-2.6.18.noarch/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/ipoib/ipoib_multicast.c	2006-10-19 12:41:11.000000000 -0400
@@ -326,7 +326,6 @@ ipoib_mcast_sendonly_join_complete(int s
 
 		/* Clear the busy flag so we try again */
 		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-		mcast->query = NULL;
 	}
 
 	complete(&mcast->done);
@@ -472,15 +471,25 @@ static void ipoib_mcast_join(struct net_
 
 	if (create) {
 		comp_mask |=
-			IB_SA_MCMEMBER_REC_QKEY		|
-			IB_SA_MCMEMBER_REC_SL		|
-			IB_SA_MCMEMBER_REC_FLOW_LABEL	|
-			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
+			IB_SA_MCMEMBER_REC_QKEY          |
+			IB_SA_MCMEMBER_REC_SL		 |
+			IB_SA_MCMEMBER_REC_FLOW_LABEL	 |
+			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
+			IB_SA_MCMEMBER_REC_RATE_SELECTOR |
+			IB_SA_MCMEMBER_REC_RATE          |
+			IB_SA_MCMEMBER_REC_HOP_LIMIT     |
+			IB_SA_MCMEMBER_REC_MTU_SELECTOR  |
+			IB_SA_MCMEMBER_REC_MTU;
 
 		rec.qkey	  = priv->broadcast->mcmember.qkey;
 		rec.sl		  = priv->broadcast->mcmember.sl;
 		rec.flow_label	  = priv->broadcast->mcmember.flow_label;
 		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+		rec.rate_selector = IB_SA_EQ;
+		rec.rate          = priv->broadcast->mcmember.rate;
+		rec.hop_limit     = priv->broadcast->mcmember.hop_limit;
+		rec.mtu_selector  = IB_SA_EQ;
+		rec.mtu           = priv->broadcast->mcmember.mtu;
 	}
 
 	init_completion(&mcast->done);
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/sdp/sdp_bcopy.c linux-2.6.18.noarch/drivers/infiniband/ulp/sdp/sdp_bcopy.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/sdp/sdp_bcopy.c	2006-10-19 12:41:11.000000000 -0400
@@ -37,6 +37,10 @@
 #include <rdma/rdma_cm.h>
 #include "sdp.h"
 
+static int rcvbuf_scale = 0x10;
+module_param_named(rcvbuf_scale, rcvbuf_scale, int, 0644);
+MODULE_PARM_DESC(rcvbuf_scale, "Receive buffer size scale factor.");
+
 /* Like tcp_fin */
 static void sdp_fin(struct sock *sk)
 {
@@ -72,7 +76,10 @@ void sdp_post_send(struct sdp_sock *ssk,
 	struct ib_send_wr *bad_wr;
 
 	h->mid = mid;
-	h->flags = 0; /* TODO: OOB */
+	if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG))
+		h->flags = SDP_OOB_PRES | SDP_OOB_PEND;
+	else
+		h->flags = 0;
 	h->bufs = htons(ssk->rx_head - ssk->rx_tail);
 	h->len = htonl(skb->len);
 	h->mseq = htonl(mseq);
@@ -86,7 +93,7 @@ void sdp_post_send(struct sdp_sock *ssk,
 			      skb->data, skb->len - skb->data_len,
 			      DMA_TO_DEVICE);
 	tx_req->mapping[0] = addr;
-	
+
 	/* TODO: proper error handling */
 	BUG_ON(dma_mapping_error(addr));
 
@@ -116,10 +123,14 @@ void sdp_post_send(struct sdp_sock *ssk,
 	if (unlikely(mid != SDP_MID_DATA))
 		ssk->tx_wr.send_flags |= IB_SEND_SOLICITED;
 	rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
-	BUG_ON(rc);
 	++ssk->tx_head;
 	--ssk->bufs;
 	ssk->remote_credits = ssk->rx_head - ssk->rx_tail;
+	if (unlikely(rc)) {
+		sdp_dbg(&ssk->isk.sk, "ib_post_send failed with status %d.\n", rc);
+		sdp_set_error(&ssk->isk.sk, -ECONNRESET);
+		wake_up(&ssk->wq);
+	}
 }
 
 struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
@@ -174,7 +185,7 @@ static void sdp_post_recv(struct sdp_soc
 	BUG_ON(!skb);
 	h = (struct sdp_bsdh *)skb_push(skb, sizeof *h);
 	for (i = 0; i < SDP_MAX_SEND_SKB_FRAGS; ++i) {
-		page = alloc_pages(GFP_KERNEL, 0);
+		page = alloc_pages(GFP_HIGHUSER, 0);
 		BUG_ON(!page);
 		frag = &skb_shinfo(skb)->frags[i];
 		frag->page                = page;
@@ -195,7 +206,7 @@ static void sdp_post_recv(struct sdp_soc
 	BUG_ON(dma_mapping_error(addr));
 
 	rx_req->mapping[0] = addr;
-	
+
 	/* TODO: proper error handling */
 	sge->addr = (u64)addr;
 	sge->length = skb_headlen(skb);
@@ -219,22 +230,22 @@ static void sdp_post_recv(struct sdp_soc
 	ssk->rx_wr.sg_list = ssk->ibsge;
 	ssk->rx_wr.num_sge = frags + 1;
 	rc = ib_post_recv(ssk->qp, &ssk->rx_wr, &bad_wr);
-	/* TODO */
-	BUG_ON(rc);
 	++ssk->rx_head;
+	if (unlikely(rc)) {
+		sdp_dbg(&ssk->isk.sk, "ib_post_recv failed with status %d\n", rc);
+		sdp_reset(&ssk->isk.sk);
+	}
 }
 
 void sdp_post_recvs(struct sdp_sock *ssk)
 {
-	int rmem = atomic_read(&ssk->isk.sk.sk_rmem_alloc);
-
 	if (unlikely(!ssk->id))
 		return;
 
 	while ((likely(ssk->rx_head - ssk->rx_tail < SDP_RX_SIZE) &&
 		(ssk->rx_head - ssk->rx_tail - SDP_MIN_BUFS) *
-		SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE + rmem <
-		ssk->isk.sk.sk_rcvbuf * 0x10) ||
+		SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE + ssk->rcv_nxt - ssk->copied_seq <
+		ssk->isk.sk.sk_rcvbuf * rcvbuf_scale) ||
 	       unlikely(ssk->rx_head - ssk->rx_tail < SDP_MIN_BUFS))
 		sdp_post_recv(ssk);
 }
@@ -271,11 +282,17 @@ struct sk_buff *sdp_recv_completion(stru
 static inline int sdp_sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int skb_len;
+	struct sdp_sock *ssk = sdp_sk(sk);
 
-	skb_set_owner_r(skb, sk);
+	/* not needed since sk_rmem_alloc is not currently used
+	 * TODO - remove this?
+	skb_set_owner_r(skb, sk); */
 
 	skb_len = skb->len;
 
+	TCP_SKB_CB(skb)->seq = ssk->rcv_nxt;
+	ssk->rcv_nxt += skb_len;
+
 	skb_queue_tail(&sk->sk_receive_queue, skb);
 
 	if (!sock_flag(sk, SOCK_DEAD))
@@ -285,23 +302,63 @@ static inline int sdp_sock_queue_rcv_skb
 
 static inline void update_send_head(struct sock *sk, struct sk_buff *skb)
 {
+	struct page *page;
 	sk->sk_send_head = skb->next;
-	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
+	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue) {
 		sk->sk_send_head = NULL;
+		page = sk->sk_sndmsg_page;
+		if (page) {
+			put_page(page);
+			sk->sk_sndmsg_page = NULL;
+		}
+	}
+}
+
+static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb)
+{
+	return (ssk->nonagle & TCP_NAGLE_OFF) ||
+		skb->next != (struct sk_buff *)&ssk->isk.sk.sk_write_queue ||
+		skb->len + sizeof(struct sdp_bsdh) >= ssk->xmit_size_goal ||
+		(ssk->tx_tail == ssk->tx_head &&
+		 !(ssk->nonagle & TCP_NAGLE_CORK)) ||
+		(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_PSH);
+}
+
+int sdp_post_credits(struct sdp_sock *ssk)
+{
+	if (likely(ssk->bufs > 1) &&
+	    likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+		struct sk_buff *skb;
+		skb = sk_stream_alloc_skb(&ssk->isk.sk,
+					  sizeof(struct sdp_bsdh),
+					  GFP_KERNEL);
+		if (!skb)
+			return -ENOMEM;
+		sdp_post_send(ssk, skb, SDP_MID_DATA);
+	}
+	return 0;
 }
 
 void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 {
-	/* TODO: nonagle */
+	/* TODO: nonagle? */
 	struct sk_buff *skb;
 	int c;
 
-	if (unlikely(!ssk->id))
+	if (unlikely(!ssk->id)) {
+		if (ssk->isk.sk.sk_send_head) {
+			sdp_dbg(&ssk->isk.sk,
+				"Send on socket without cmid ECONNRESET.\n");
+			/* TODO: flush send queue? */
+			sdp_reset(&ssk->isk.sk);
+		}
 		return;
+	}
 
 	while (ssk->bufs > SDP_MIN_BUFS &&
 	       ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE &&
-	       (skb = ssk->isk.sk.sk_send_head)) {
+	       (skb = ssk->isk.sk.sk_send_head) &&
+		sdp_nagle_off(ssk, skb)) {
 		update_send_head(&ssk->isk.sk, skb);
 		__skb_dequeue(&ssk->isk.sk.sk_write_queue);
 		sdp_post_send(ssk, skb, SDP_MID_DATA);
@@ -342,6 +399,7 @@ static void sdp_handle_wc(struct sdp_soc
 {
 	struct sk_buff *skb;
 	struct sdp_bsdh *h;
+	int pagesz, i;
 
 	if (wc->wr_id & SDP_OP_RECV) {
 		skb = sdp_recv_completion(ssk, wc->wr_id);
@@ -349,18 +407,18 @@ static void sdp_handle_wc(struct sdp_soc
 			return;
 
 		if (unlikely(wc->status)) {
-			if (wc->status != IB_WC_WR_FLUSH_ERR)
+			if (wc->status != IB_WC_WR_FLUSH_ERR) {
 				sdp_dbg(&ssk->isk.sk,
-					"Recv completion with error. "
-					"Status %d\n", wc->status);
+						"Recv completion with error. "
+						"Status %d\n", wc->status);
+				sdp_reset(&ssk->isk.sk);
+			}
 			__kfree_skb(skb);
-			sdp_set_error(&ssk->isk.sk, -ECONNRESET);
-			wake_up(&ssk->wq);
 		} else {
 			/* TODO: handle msg < bsdh */
-			sdp_dbg(&ssk->isk.sk,
-				"Recv completion. ID %d Length %d\n",
-				(int)wc->wr_id, wc->byte_len);
+			sdp_dbg_data(&ssk->isk.sk,
+				     "Recv completion. ID %d Length %d\n",
+				     (int)wc->wr_id, wc->byte_len);
 			skb->len = wc->byte_len;
 			skb->data_len = wc->byte_len - sizeof(struct sdp_bsdh);
 			if (unlikely(skb->data_len < 0)) {
@@ -375,17 +433,29 @@ static void sdp_handle_wc(struct sdp_soc
 			ssk->bufs = ntohl(h->mseq_ack) - ssk->tx_head + 1 +
 				ntohs(h->bufs);
 
+			pagesz = PAGE_ALIGN(skb->data_len);
+			skb_shinfo(skb)->nr_frags = pagesz / PAGE_SIZE;
+
+			for (i = skb_shinfo(skb)->nr_frags;
+			     i < SDP_MAX_SEND_SKB_FRAGS; ++i) {
+				put_page(skb_shinfo(skb)->frags[i].page);
+				skb->truesize -= PAGE_SIZE;
+			}
+
+			if (unlikely(h->flags & SDP_OOB_PEND))
+				sk_send_sigurg(&ssk->isk.sk);
+
 			if (likely(h->mid == SDP_MID_DATA) &&
 			    likely(skb->data_len > 0)) {
 				skb_pull(skb, sizeof(struct sdp_bsdh));
 				/* TODO: queue can fail? */
-				/* TODO: free unused fragments */
 				sdp_sock_queue_rcv_skb(&ssk->isk.sk, skb);
+				if (unlikely(h->flags & SDP_OOB_PRES))
+					sdp_urg(ssk, skb);
 			} else if (likely(h->mid == SDP_MID_DATA)) {
 				__kfree_skb(skb);
 			} else if (h->mid == SDP_MID_DISCONN) {
 				skb_pull(skb, sizeof(struct sdp_bsdh));
-				/* TODO: free unused fragments */
 				/* this will wake recvmsg */
 				sdp_sock_queue_rcv_skb(&ssk->isk.sk, skb);
 				sdp_fin(&ssk->isk.sk);
@@ -402,12 +472,13 @@ static void sdp_handle_wc(struct sdp_soc
 			return;
 		sk_stream_free_skb(&ssk->isk.sk, skb);
 		if (unlikely(wc->status)) {
-			if (wc->status != IB_WC_WR_FLUSH_ERR)
+			if (wc->status != IB_WC_WR_FLUSH_ERR) {
 				sdp_dbg(&ssk->isk.sk,
-					"Send completion with error. "
-					"Status %d\n", wc->status);
-			sdp_set_error(&ssk->isk.sk, -ECONNRESET);
-			wake_up(&ssk->wq);
+						"Send completion with error. "
+						"Status %d\n", wc->status);
+				sdp_set_error(&ssk->isk.sk, -ECONNRESET);
+				wake_up(&ssk->wq);
+			}
 		}
 
 		sk_stream_write_space(&ssk->isk.sk);
@@ -420,11 +491,9 @@ static void sdp_handle_wc(struct sdp_soc
 
 	if (ssk->time_wait && !ssk->isk.sk.sk_send_head &&
 	    ssk->tx_head == ssk->tx_tail) {
-		ssk->time_wait = 0;
-		ssk->isk.sk.sk_state = TCP_CLOSE;
 		sdp_dbg(&ssk->isk.sk, "%s: destroy in time wait state\n",
 			__func__);
-		queue_work(sdp_workqueue, &ssk->destroy_work);
+		sdp_time_wait_destroy_sk(ssk);
 	}
 }
 
@@ -435,35 +504,49 @@ void sdp_completion_handler(struct ib_cq
 	schedule_work(&ssk->work);
 }
 
+int sdp_poll_cq(struct sdp_sock *ssk, struct ib_cq *cq)
+{
+	int n, i;
+	int ret = -EAGAIN;
+	do {
+		n = ib_poll_cq(cq, SDP_NUM_WC, ssk->ibwc);
+		for (i = 0; i < n; ++i) {
+			sdp_handle_wc(ssk, ssk->ibwc + i);
+			ret = 0;
+		}
+	} while (n == SDP_NUM_WC);
+	return ret;
+}
+
 void sdp_work(void *data)
 {
 	struct sock *sk = (struct sock *)data;
 	struct sdp_sock *ssk = sdp_sk(sk);
 	struct ib_cq *cq;
-	int n, i;
 
-	sdp_dbg(sk, "%s\n", __func__);
+	sdp_dbg_data(sk, "%s\n", __func__);
 
+	lock_sock(sk);
 	cq = ssk->cq;
 	if (unlikely(!cq))
-		return;
+		goto out;
 
-	do {
-		lock_sock(sk);
-		n = ib_poll_cq(cq, SDP_NUM_WC, ssk->ibwc);
-		for (i = 0; i < n; ++i) {
-			sdp_handle_wc(ssk, ssk->ibwc + i);
-		}
-		release_sock(sk);
-	} while (n == SDP_NUM_WC);
+	if (unlikely(!ssk->poll_cq)) {
+		struct rdma_cm_id *id = ssk->id;
+		if (id && id->qp)
+			rdma_establish(id);
+		goto out;
+	}
+
+	sdp_poll_cq(ssk, cq);
+	release_sock(sk);
 	sk_stream_mem_reclaim(sk);
+	lock_sock(sk);
+	cq = ssk->cq;
+	if (unlikely(!cq))
+		goto out;
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-	do {
-		lock_sock(sk);
-		n = ib_poll_cq(cq, SDP_NUM_WC, ssk->ibwc);
-		for (i = 0; i < n; ++i) {
-			sdp_handle_wc(ssk, ssk->ibwc + i);
-		}
-		release_sock(sk);
-	} while (n == SDP_NUM_WC);
+	sdp_poll_cq(ssk, cq);
+out:
+	release_sock(sk);
 }
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/sdp/sdp_cma.c linux-2.6.18.noarch/drivers/infiniband/ulp/sdp/sdp_cma.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/sdp/sdp_cma.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/sdp/sdp_cma.c	2006-10-19 12:41:11.000000000 -0400
@@ -102,11 +102,17 @@ int sdp_init_qp(struct sock *sk, struct 
 	};
 	struct ib_device *device = id->device;
 	struct ib_cq *cq;
+	struct ib_mr *mr;
 	struct ib_pd *pd;
 	int rc;
 
 	sdp_dbg(sk, "%s\n", __func__);
 
+	sdp_sk(sk)->tx_head = 1;
+	sdp_sk(sk)->tx_tail = 1;
+	sdp_sk(sk)->rx_head = 1;
+	sdp_sk(sk)->rx_tail = 1;
+
 	sdp_sk(sk)->tx_ring = kmalloc(sizeof *sdp_sk(sk)->tx_ring * SDP_TX_SIZE,
 				      GFP_KERNEL);
 	if (!sdp_sk(sk)->tx_ring) {
@@ -131,14 +137,15 @@ int sdp_init_qp(struct sock *sk, struct 
 		sdp_warn(sk, "Unable to allocate PD: %d.\n", rc);
 		goto err_pd;
 	}
-	
-        sdp_sk(sk)->mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
-        if (IS_ERR(sdp_sk(sk)->mr)) {
-                rc = PTR_ERR(sdp_sk(sk)->mr);
+
+        mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+        if (IS_ERR(mr)) {
+                rc = PTR_ERR(mr);
 		sdp_warn(sk, "Unable to get dma MR: %d.\n", rc);
                 goto err_mr;
         }
 
+	sdp_sk(sk)->mr = mr;
 	INIT_WORK(&sdp_sk(sk)->work, sdp_work, sdp_sk(sk));
 
 	cq = ib_create_cq(device, sdp_completion_handler, sdp_cq_event_handler,
@@ -150,6 +157,8 @@ int sdp_init_qp(struct sock *sk, struct 
 		goto err_cq;
 	}
 
+	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
         qp_init_attr.send_cq = qp_init_attr.recv_cq = cq;
 
 	rc = rdma_create_qp(id, pd, &qp_init_attr);
@@ -237,9 +246,11 @@ int sdp_connect_handler(struct sock *sk,
 	return 0;
 }
 
-static int sdp_response_handler(struct sock *sk, struct rdma_cm_event *event)
+static int sdp_response_handler(struct sock *sk, struct rdma_cm_id *id,
+				struct rdma_cm_event *event)
 {
 	struct sdp_hah *h;
+	struct sockaddr_in *dst_addr;
 	sdp_dbg(sk, "%s\n", __func__);
 
 	sk->sk_state = TCP_ESTABLISHED;
@@ -259,10 +270,17 @@ static int sdp_response_handler(struct s
 		sdp_sk(sk)->bufs,
 		sdp_sk(sk)->xmit_size_goal);
 
+	sdp_sk(sk)->poll_cq = 1;
 	ib_req_notify_cq(sdp_sk(sk)->cq, IB_CQ_NEXT_COMP);
+	sdp_poll_cq(sdp_sk(sk), sdp_sk(sk)->cq);
 
 	sk->sk_state_change(sk);
 	sk_wake_async(sk, 0, POLL_OUT);
+
+	dst_addr = (struct sockaddr_in *)&id->route.addr.dst_addr;
+	inet_sk(sk)->dport = dst_addr->sin_port;
+	inet_sk(sk)->daddr = dst_addr->sin_addr.s_addr;
+
 	return 0;
 }
 
@@ -283,6 +301,10 @@ int sdp_connected_handler(struct sock *s
 		return 0;
 
 	lock_sock(parent);
+	if (!sdp_sk(parent)->id) { /* TODO: look at SOCK_DEAD? */
+		sdp_dbg(sk, "parent is going away.\n");
+		goto done;
+	}
 #if 0
 	/* TODO: backlog */
 	if (sk_acceptq_is_full(parent)) {
@@ -298,6 +320,7 @@ int sdp_connected_handler(struct sock *s
 
 	parent->sk_state_change(parent);
 	sk_wake_async(parent, 0, POLL_OUT);
+done:
 	release_sock(parent);
 
 	return 0;
@@ -316,7 +339,7 @@ int sdp_cma_handler(struct rdma_cm_id *i
 	struct sock *sk;
 	struct sdp_hah hah;
 	struct sdp_hh hh;
-	
+
 	int rc = 0;
 
 	sk = id->context;
@@ -360,7 +383,8 @@ int sdp_cma_handler(struct rdma_cm_id *i
 		hh.localrcvsz = hh.desremrcvsz = htonl(SDP_MAX_SEND_SKB_FRAGS *
 			PAGE_SIZE + sizeof(struct sdp_bsdh));
 		hh.max_adverts = 0x1;
-
+		inet_sk(sk)->saddr = inet_sk(sk)->rcv_saddr =
+			((struct sockaddr_in *)&id->route.addr.src_addr)->sin_addr.s_addr;
 		memset(&conn_param, 0, sizeof conn_param);
 		conn_param.private_data_len = sizeof hh;
 		conn_param.private_data = &hh;
@@ -405,11 +429,14 @@ int sdp_cma_handler(struct rdma_cm_id *i
 		break;
 	case RDMA_CM_EVENT_CONNECT_RESPONSE:
 		sdp_dbg(sk, "RDMA_CM_EVENT_CONNECT_RESPONSE\n");
-		rc = sdp_response_handler(sk, event);
+		rc = sdp_response_handler(sk, id, event);
 		if (rc)
 			rdma_reject(id, NULL, 0);
 		else
 			rc = rdma_accept(id, NULL);
+
+		if (!rc)
+			rc = sdp_post_credits(sdp_sk(sk));
 		break;
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 		sdp_dbg(sk, "RDMA_CM_EVENT_CONNECT_ERROR\n");
@@ -425,6 +452,8 @@ int sdp_cma_handler(struct rdma_cm_id *i
 		break;
 	case RDMA_CM_EVENT_ESTABLISHED:
 		sdp_dbg(sk, "RDMA_CM_EVENT_ESTABLISHED\n");
+		inet_sk(sk)->saddr = inet_sk(sk)->rcv_saddr =
+			((struct sockaddr_in *)&id->route.addr.src_addr)->sin_addr.s_addr;
 		rc = sdp_connected_handler(sk, event);
 		break;
 	case RDMA_CM_EVENT_DISCONNECTED:
@@ -452,8 +481,8 @@ int sdp_cma_handler(struct rdma_cm_id *i
 		sdp_sk(sk)->id = NULL;
 		id->qp = NULL;
 		id->context = NULL;
-		sdp_set_error(sk, rc);
 		parent = sdp_sk(sk)->parent;
+		sdp_reset_sk(sk, rc);
 	}
 
 	release_sock(sk);
@@ -463,13 +492,19 @@ int sdp_cma_handler(struct rdma_cm_id *i
 	if (parent) {
 		sdp_dbg(parent, "deleting child %d done. status %d\n", event->event, rc);
 		lock_sock(parent);
-		list_del_init(&sdp_sk(child)->backlog_queue);
-		if (!list_empty(&sdp_sk(child)->accept_queue)) {
-			list_del_init(&sdp_sk(child)->accept_queue);
-			sk_acceptq_removed(parent);
+		if (!sdp_sk(parent)->id) { /* TODO: look at SOCK_DEAD? */
+			sdp_dbg(sk, "parent is going away.\n");
+			child = NULL;
+			goto done;
 		}
+		if (!list_empty(&sdp_sk(child)->backlog_queue))
+			list_del_init(&sdp_sk(child)->backlog_queue);
+		else
+			child = NULL;
+done:
 		release_sock(parent);
-		sk_common_release(child);
+		if (child)
+			sk_common_release(child);
 	}
 	return rc;
 }
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/sdp/sdp.h linux-2.6.18.noarch/drivers/infiniband/ulp/sdp/sdp.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/sdp/sdp.h	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/sdp/sdp.h	2006-10-19 12:41:11.000000000 -0400
@@ -27,6 +27,18 @@ extern int sdp_debug_level;
 	do { (void) (priv); } while (0)
 #endif /* CONFIG_INFINIBAND_SDP_DEBUG */
 
+#ifdef CONFIG_INFINIBAND_SDP_DEBUG_DATA
+extern int sdp_data_debug_level;
+#define sdp_dbg_data(sk, format, arg...)                     \
+	do {                                                 \
+		if (sdp_data_debug_level > 0)                \
+		sdp_printk(KERN_DEBUG, sk, format , ## arg); \
+	} while (0)
+#else
+#define sdp_dbg_data(priv, format, arg...)                   \
+	do { (void) (priv); } while (0)
+#endif
+
 #define SDP_RESOLVE_TIMEOUT 1000
 #define SDP_ROUTE_TIMEOUT 1000
 #define SDP_RETRY_COUNT 5
@@ -47,6 +59,11 @@ enum sdp_mid {
 	SDP_MID_DATA = 0xFF,
 };
 
+enum sdp_flags {
+        SDP_OOB_PRES = 1 << 0,
+        SDP_OOB_PEND = 1 << 1,
+};
+
 enum {
 	SDP_MIN_BUFS = 2
 };
@@ -74,30 +91,35 @@ struct sdp_sock {
 	struct list_head accept_queue;
 	struct list_head backlog_queue;
 	struct sock *parent;
-	/* rdma specific */
-	struct rdma_cm_id *id;
-	struct ib_qp *qp;
-	struct ib_cq *cq;
-	struct ib_mr *mr;
-	struct device *dma_device;
-	/* Like tcp_sock */
-	__u16 urg_data;
-	int offset; /* like seq in tcp */
 
-	int xmit_size_goal;
-	int write_seq;
-	int pushed_seq;
-	int nonagle;
-
-	/* SDP specific */
+	struct work_struct work;
 	wait_queue_head_t wq;
 
 	struct work_struct time_wait_work;
 	struct work_struct destroy_work;
 
+	/* Like tcp_sock */
+	u16 urg_data;
+	u32 urg_seq;
+	u32 copied_seq;
+	u32 rcv_nxt;
+
+	int write_seq;
+	int pushed_seq;
+	int xmit_size_goal;
+	int nonagle;
+
 	int time_wait;
 
-	spinlock_t lock;
+	/* Data below will be reset on error */
+	/* rdma specific */
+	struct rdma_cm_id *id;
+	struct ib_qp *qp;
+	struct ib_cq *cq;
+	struct ib_mr *mr;
+	struct device *dma_device;
+
+	/* SDP specific */
 	struct sdp_buf *rx_ring;
 	struct ib_recv_wr rx_wr;
 	unsigned rx_head;
@@ -106,8 +128,8 @@ struct sdp_sock {
 	unsigned bufs;
 
 	int               remote_credits;
+	int 		  poll_cq;
 
-	spinlock_t        tx_lock;
 	struct sdp_buf   *tx_ring;
 	unsigned          tx_head;
 	unsigned          tx_tail;
@@ -115,7 +137,6 @@ struct sdp_sock {
 
 	struct ib_sge ibsge[SDP_MAX_SEND_SKB_FRAGS + 1];
 	struct ib_wc  ibwc[SDP_NUM_WC];
-	struct work_struct work;
 };
 
 extern struct proto sdp_proto;
@@ -151,15 +172,20 @@ static inline void sdp_set_state(struct 
 extern struct workqueue_struct *sdp_workqueue;
 
 int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *);
-void sdp_close_sk(struct sock *sk);
+void sdp_reset(struct sock *sk);
+void sdp_reset_sk(struct sock *sk, int rc);
+void sdp_time_wait_destroy_sk(struct sdp_sock *ssk);
 void sdp_completion_handler(struct ib_cq *cq, void *cq_context);
 void sdp_work(void *);
+int sdp_post_credits(struct sdp_sock *ssk);
 void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid);
 void sdp_post_recvs(struct sdp_sock *ssk);
+int sdp_poll_cq(struct sdp_sock *ssk, struct ib_cq *cq);
 void sdp_post_sends(struct sdp_sock *ssk, int nonagle);
 void sdp_destroy_work(void *data);
 void sdp_time_wait_work(void *data);
 struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id);
 struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq);
+void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb);
 
 #endif
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/sdp/sdp_main.c linux-2.6.18.noarch/drivers/infiniband/ulp/sdp/sdp_main.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/sdp/sdp_main.c	2006-10-19 14:45:03.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/sdp/sdp_main.c	2006-10-19 12:41:10.000000000 -0400
@@ -73,6 +73,42 @@ int sdp_debug_level;
 module_param_named(debug_level, sdp_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0.");
 #endif
+#ifdef CONFIG_INFINIBAND_SDP_DEBUG_DATA
+int sdp_data_debug_level;
+
+module_param_named(data_debug_level, sdp_data_debug_level, int, 0644);
+MODULE_PARM_DESC(data_debug_level, "Enable data path debug tracing if > 0.");
+#endif
+
+static int send_poll_hit;
+
+module_param_named(send_poll_hit, send_poll_hit, int, 0644);
+MODULE_PARM_DESC(send_poll_hit, "How many times send poll helped.");
+
+static int send_poll_miss;
+
+module_param_named(send_poll_miss, send_poll_miss, int, 0644);
+MODULE_PARM_DESC(send_poll_miss, "How many times send poll missed.");
+
+static int recv_poll_hit;
+
+module_param_named(recv_poll_hit, recv_poll_hit, int, 0644);
+MODULE_PARM_DESC(recv_poll_hit, "How many times recv poll helped.");
+
+static int recv_poll_miss;
+
+module_param_named(recv_poll_miss, recv_poll_miss, int, 0644);
+MODULE_PARM_DESC(recv_poll_miss, "How many times recv poll missed.");
+
+static int send_poll = 100;
+
+module_param_named(send_poll, send_poll, int, 0644);
+MODULE_PARM_DESC(send_poll, "How many times to poll send.");
+
+static int recv_poll = 1000;
+
+module_param_named(recv_poll, recv_poll, int, 0644);
+MODULE_PARM_DESC(recv_poll, "How many times to poll recv.");
 
 struct workqueue_struct *sdp_workqueue;
 
@@ -114,35 +150,15 @@ static int sdp_get_port(struct sock *sk,
 	return 0;
 }
 
-/* TODO: linger? */
-void sdp_close_sk(struct sock *sk)
+static void sdp_destroy_qp(struct sdp_sock *ssk)
 {
-	struct sdp_sock *ssk = sdp_sk(sk);
-	struct rdma_cm_id *id = NULL;
 	struct ib_pd *pd = NULL;
 	struct ib_cq *cq = NULL;
 
-	sdp_dbg(sk, "%s\n", __func__);
-
-	lock_sock(sk);
-
-	sk->sk_send_head = NULL;
-	skb_queue_purge(&sk->sk_write_queue);
-
-	id = ssk->id;
-	if (ssk->id) {
-		id->qp = NULL;
-		ssk->id = NULL;
-		release_sock(sk);
-		rdma_destroy_id(id);
-	} else
-		release_sock(sk);
-
 	if (ssk->qp) {
 		pd = ssk->qp->pd;
 		cq = ssk->cq;
-		sdp_sk(sk)->cq = NULL;
-		flush_scheduled_work();
+		ssk->cq = NULL;
 		ib_destroy_qp(ssk->qp);
 
 		while (ssk->rx_head != ssk->rx_tail) {
@@ -161,10 +177,8 @@ void sdp_close_sk(struct sock *sk)
 		}
 	}
 
-	if (cq) {
+	if (cq)
 		ib_destroy_cq(cq);
-		flush_scheduled_work();
-	}
 
 	if (ssk->mr)
 		ib_dereg_mr(ssk->mr);
@@ -172,12 +186,89 @@ void sdp_close_sk(struct sock *sk)
 	if (pd)
 		ib_dealloc_pd(pd);
 
+	kfree(ssk->rx_ring);
+	kfree(ssk->tx_ring);
+}
+
+void sdp_reset_sk(struct sock *sk, int rc)
+{
+	struct sdp_sock *ssk = sdp_sk(sk);
+
+	sdp_dbg(sk, "%s\n", __func__);
+
+	if (ssk->cq)
+		sdp_poll_cq(ssk, ssk->cq);
+
+	if (!(sk->sk_shutdown & RCV_SHUTDOWN))
+		sdp_set_error(sk, rc);
+
+	sdp_destroy_qp(ssk);
+
+	memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id));
+
+	if (ssk->time_wait) {
+		sdp_dbg(sk, "%s: destroy in time wait state\n", __func__);
+		sdp_time_wait_destroy_sk(ssk);
+	}
+
+	sk->sk_state_change(sk);
+}
+
+/* Like tcp_reset */
+/* When we get a reset (completion with error) we do this. */
+void sdp_reset(struct sock *sk)
+{
+	int err;
+
+	if (sk->sk_state != TCP_ESTABLISHED)
+		return;
+
+	/* We want the right error as BSD sees it (and indeed as we do). */
+
+	/* On fin we currently only set RCV_SHUTDOWN, so .. */
+	err = (sk->sk_shutdown & RCV_SHUTDOWN) ? EPIPE : ECONNRESET;
+
+	sdp_set_error(sk, -err);
+	wake_up(&sdp_sk(sk)->wq);
+	sk->sk_state_change(sk);
+}
+
+/* TODO: linger? */
+static void sdp_close_sk(struct sock *sk)
+{
+	struct sdp_sock *ssk = sdp_sk(sk);
+	struct rdma_cm_id *id = NULL;
+	sdp_dbg(sk, "%s\n", __func__);
+
+	lock_sock(sk);
+
+	sk->sk_send_head = NULL;
+	skb_queue_purge(&sk->sk_write_queue);
+	/*
+	 * If sendmsg cached page exists, toss it.
+	 */
+	if (sk->sk_sndmsg_page) {
+		__free_page(sk->sk_sndmsg_page);
+		sk->sk_sndmsg_page = NULL;
+	}
+
+	id = ssk->id;
+	if (ssk->id) {
+		id->qp = NULL;
+		ssk->id = NULL;
+		release_sock(sk);
+		rdma_destroy_id(id);
+		lock_sock(sk);
+	}
+
 	skb_queue_purge(&sk->sk_receive_queue);
 
-	kfree(sdp_sk(sk)->rx_ring);
-	kfree(sdp_sk(sk)->tx_ring);
+	sdp_destroy_qp(ssk);
 
 	sdp_dbg(sk, "%s done; releasing sock\n", __func__);
+	release_sock(sk);
+
+	flush_scheduled_work();
 }
 
 static void sdp_destruct(struct sock *sk)
@@ -189,13 +280,16 @@ static void sdp_destruct(struct sock *sk
 
 	sdp_close_sk(sk);
 
+	if (ssk->parent)
+		goto done;
+
 	list_for_each_entry_safe(s, t, &ssk->backlog_queue, backlog_queue) {
 		sk_common_release(&s->isk.sk);
 	}
 	list_for_each_entry_safe(s, t, &ssk->accept_queue, accept_queue) {
 		sk_common_release(&s->isk.sk);
 	}
-
+done:
 	sdp_dbg(sk, "%s done\n", __func__);
 }
 
@@ -279,6 +373,11 @@ static void sdp_close(struct sock *sk, l
 		sdp_post_sends(sdp_sk(sk), 0);
 	}
 
+	/* TODO: state should move to CLOSE or CLOSE_WAIT etc on disconnect.
+	   Since it currently doesn't, do it here to avoid blocking below. */
+	if (!sdp_sk(sk)->id)
+		sdp_set_state(sk, TCP_CLOSE);
+
 	sk_stream_wait_close(sk, timeout);
 
 adjudge_to_death:
@@ -357,6 +456,7 @@ static int sdp_connect(struct sock *sk, 
 		rc = sdp_get_port(sk, 0);
 		if (rc)
 			return rc;
+		inet_sk(sk)->sport = htons(inet_sk(sk)->num);
 	}
 
 	sdp_dbg(sk, "%s %u.%u.%u.%u:%hu -> %u.%u.%u.%u:%hu\n", __func__,
@@ -483,11 +583,18 @@ static struct sock *sdp_accept(struct so
 	newssk->parent = NULL;
 	sk_acceptq_removed(sk);
 	newsk = &newssk->isk.sk;
-	sdp_dbg(sk, "%s: ib_req_notify_cq\n", __func__);
-	ib_req_notify_cq(newssk->cq, IB_CQ_NEXT_COMP);
-	/* TODO: poll cq here */
 out:
 	release_sock(sk);
+	if (newsk) {
+		lock_sock(newsk);
+		if (newssk->cq) {
+			sdp_dbg(newsk, "%s: ib_req_notify_cq\n", __func__);
+			newssk->poll_cq = 1;
+			ib_req_notify_cq(newssk->cq, IB_CQ_NEXT_COMP);
+			sdp_poll_cq(newssk, newssk->cq);
+		}
+		release_sock(newsk);
+	}
 	sdp_dbg(sk, "%s: status %d sk %p newsk %p\n", __func__,
 		*err, sk, newsk);
 	return newsk;
@@ -498,15 +605,47 @@ out_err:
 	goto out;
 }
 
+/* Like tcp_ioctl */
 static int sdp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
+	struct sdp_sock *ssk = sdp_sk(sk);
+	int answ;
+
 	sdp_dbg(sk, "%s\n", __func__);
+
+	switch (cmd) {
+	case SIOCINQ:
+		if (sk->sk_state == TCP_LISTEN)
+			return -EINVAL;
+
+		lock_sock(sk);
+		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+			answ = 0;
+		else if (sock_flag(sk, SOCK_URGINLINE) ||
+			 !ssk->urg_data ||
+			 before(ssk->urg_seq, ssk->copied_seq) ||
+			 !before(ssk->urg_seq, ssk->rcv_nxt)) {
+			answ = ssk->rcv_nxt - ssk->copied_seq;
+
+			/* Subtract 1, if FIN is in queue. */
+			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
+				answ -=
+					((struct sk_buff *)sk->sk_receive_queue.prev)->h.raw[0]
+					== SDP_MID_DISCONN ? 1 : 0;
+		} else
+			answ = ssk->urg_seq - ssk->copied_seq;
+		release_sock(sk);
+		break;
+	case SIOCATMARK:
+		answ = ssk->urg_data && ssk->urg_seq == ssk->copied_seq;
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
 	/* TODO: Need to handle:
-	         case SIOCINQ:
-		 case SIOCOUTQ:
-		 case SIOCATMARK:
+	   case SIOCOUTQ:
 	 */
-	return -ENOIOCTLCMD;
+	return put_user(answ, (int __user *)arg);
 }
 
 void sdp_destroy_work(void *data)
@@ -541,6 +680,13 @@ void sdp_time_wait_work(void *data)
 	sock_put(data);
 }
 
+void sdp_time_wait_destroy_sk(struct sdp_sock *ssk)
+{
+	ssk->time_wait = 0;
+	ssk->isk.sk.sk_state = TCP_CLOSE;
+	queue_work(sdp_workqueue, &ssk->destroy_work);
+}
+
 static int sdp_init_sock(struct sock *sk)
 {
 	struct sdp_sock *ssk = sdp_sk(sk);
@@ -552,10 +698,6 @@ static int sdp_init_sock(struct sock *sk
 	INIT_WORK(&ssk->time_wait_work, sdp_time_wait_work, sk);
 	INIT_WORK(&ssk->destroy_work, sdp_destroy_work, sk);
 
-	ssk->tx_head = 1;
-	ssk->tx_tail = 1;
-	ssk->rx_head = 1;
-	ssk->rx_tail = 1;
 	sk->sk_route_caps |= NETIF_F_SG | NETIF_F_NO_CSUM;
 	return 0;
 }
@@ -581,8 +723,19 @@ static void sdp_shutdown(struct sock *sk
 	sdp_post_sends(ssk, 0);
 }
 
+static void sdp_mark_push(struct sdp_sock *ssk, struct sk_buff *skb)
+{
+	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+	ssk->pushed_seq = ssk->write_seq;
+}
+
 static inline void sdp_push_pending_frames(struct sock *sk)
 {
+	struct sk_buff *skb = sk->sk_send_head;
+	if (skb) {
+		sdp_mark_push(sdp_sk(sk), skb);
+		sdp_post_sends(sdp_sk(sk), 0);
+	}
 }
 
 /* SOL_SOCKET level options are handled by sock_setsockopt */
@@ -691,6 +844,33 @@ static int sdp_getsockopt(struct sock *s
 	return 0;
 }
 
+static inline int poll_recv_cq(struct sock *sk)
+{
+	int i;
+	if (sdp_sk(sk)->cq) {
+		for (i = 0; i < recv_poll; ++i)
+			if (!sdp_poll_cq(sdp_sk(sk), sdp_sk(sk)->cq)) {
+				++recv_poll_hit;
+				return 0;
+			}
+		++recv_poll_miss;
+	}
+	return 1;
+}
+
+static inline void poll_send_cq(struct sock *sk)
+{
+	int i;
+	if (sdp_sk(sk)->cq) {
+		for (i = 0; i < send_poll; ++i)
+			if (!sdp_poll_cq(sdp_sk(sk), sdp_sk(sk)->cq)) {
+				++send_poll_hit;
+				return;
+			}
+		++send_poll_miss;
+	}
+}
+
 /* Like tcp_recv_urg */
 /*
  *	Handle reading urgent data. BSD has very simple semantics for
@@ -703,6 +883,8 @@ static int sdp_recv_urg(struct sock *sk,
 {
 	struct sdp_sock *ssk = sdp_sk(sk);
 
+	poll_recv_cq(sk);
+
 	/* No URG data to read. */
 	if (sock_flag(sk, SOCK_URGINLINE) || !ssk->urg_data ||
 	    ssk->urg_data == TCP_URG_READ)
@@ -743,12 +925,6 @@ static int sdp_recv_urg(struct sock *sk,
 	return -EAGAIN;
 }
 
-static inline int sdp_has_urgent_data(struct sk_buff *skb)
-{
-	/* TODO: handle inline urgent data */
-	return 0;
-}
-
 static void sdp_rcv_space_adjust(struct sock *sk)
 {
 	sdp_post_recvs(sdp_sk(sk));
@@ -767,20 +943,24 @@ static int forced_push(struct sdp_sock *
 	return 0;
 }
 
-static void sdp_mark_push(struct sdp_sock *ssk, struct sk_buff *skb)
+static inline int select_size(struct sock *sk, struct sdp_sock *ssk)
 {
-	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
-	ssk->pushed_seq = ssk->write_seq;
+	return 0;
 }
 
-static inline int select_size(struct sock *sk, struct sdp_sock *ssk)
+static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags)
 {
-	return 0;
+	if (unlikely(flags & MSG_OOB)) {
+		struct sk_buff *skb = sk->sk_write_queue.prev;
+		TCP_SKB_CB(skb)->flags |= TCPCB_URG;
+	}
 }
 
 static inline void sdp_push(struct sock *sk, struct sdp_sock *ssk, int flags,
 			    int mss_now, int nonagle)
 {
+	if (sk->sk_send_head)
+		sdp_mark_urg(sk, ssk, flags);
 	sdp_post_sends(ssk, nonagle);
 }
 
@@ -816,7 +996,7 @@ int sdp_sendmsg(struct kiocb *iocb, stru
 	long timeo;
 
 	lock_sock(sk);
-	sdp_dbg(sk, "%s\n", __func__);
+	sdp_dbg_data(sk, "%s\n", __func__);
 
 	flags = msg->msg_flags;
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -883,6 +1063,13 @@ new_segment:
 			if (copy > seglen)
 				copy = seglen;
 
+			/* OOB data byte should be the last byte of
+			   the data payload */
+			if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_URG) &&
+			    !(flags & MSG_OOB)) {
+				sdp_mark_push(ssk, skb);
+				goto new_segment;
+			}
 			/* Where to copy to? */
 			if (skb_tailroom(skb) > 0) {
 				/* We have some space in skb head. Superb! */
@@ -968,6 +1155,7 @@ new_segment:
 
 			ssk->write_seq += copy;
 			TCP_SKB_CB(skb)->end_seq += copy;
+			/*unused: skb_shinfo(skb)->gso_segs = 0;*/
 
 			from += copy;
 			copied += copy;
@@ -1002,6 +1190,7 @@ wait_for_memory:
 out:
 	if (copied)
 		sdp_push(sk, ssk, flags, mss_now, ssk->nonagle);
+	poll_send_cq(sk);
 	release_sock(sk);
 	return copied;
 
@@ -1026,20 +1215,22 @@ out_err:
 /* Maybe use skb_recv_datagram here? */
 /* Note this does not seem to handle vectored messages. Relevant? */
 static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-					size_t len, int noblock, int flags, 
-					int *addr_len)
+		       size_t len, int noblock, int flags,
+		       int *addr_len)
 {
 	struct sk_buff *skb = NULL;
+	struct sdp_sock *ssk = sdp_sk(sk);
 	long timeo;
 	int target;
 	unsigned long used;
 	int err;
-	int offset = sdp_sk(sk)->offset;
+	u32 peek_seq;
+	u32 *seq;
 	int copied = 0;
-	int urg_data = 0;
+	int rc;
 
 	lock_sock(sk);
-	sdp_dbg(sk, "%s\n", __func__);
+	sdp_dbg_data(sk, "%s\n", __func__);
 
 	err = -ENOTCONN;
 	if (sk->sk_state == TCP_LISTEN)
@@ -1050,26 +1241,48 @@ static int sdp_recvmsg(struct kiocb *ioc
 	if (flags & MSG_OOB)
 		goto recv_urg;
 
+	seq = &ssk->copied_seq;
+	if (flags & MSG_PEEK) {
+		peek_seq = ssk->copied_seq;
+		seq = &peek_seq;
+	}
+
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
 
 	do {
+		u32 offset;
 
-                /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
-                if (urg_data) {
-                        if (copied)
-                                break;
-                        if (signal_pending(current)) {
-                                copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
-                                break;
-                        }
-                }
+		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
+		if (ssk->urg_data && ssk->urg_seq == *seq) {
+			if (copied)
+				break;
+			if (signal_pending(current)) {
+				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
+				break;
+			}
+		}
 
 		skb = skb_peek(&sk->sk_receive_queue);
-		if (skb) {
+		do {
+			if (!skb)
+				break;
+
 			if (skb->h.raw[0] == SDP_MID_DISCONN)
 				goto found_fin_ok;
-			goto found_ok_skb;
-		}
+
+			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
+				printk(KERN_INFO "recvmsg bug: copied %X "
+				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
+				break;
+			}
+
+			offset = *seq - TCP_SKB_CB(skb)->seq;
+			if (offset < skb->len)
+				goto found_ok_skb;
+
+			BUG_TRAP(flags & MSG_PEEK);
+			skb = skb->next;
+		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
 
 		if (copied >= target)
 			break;
@@ -1116,27 +1329,43 @@ static int sdp_recvmsg(struct kiocb *ioc
 			}
 		}
 
-		if (copied >= target) {
+		rc = poll_recv_cq(sk);
+
+		if (copied >= target && !recv_poll) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
 			lock_sock(sk);
-		} else {
-			sdp_dbg(sk, "%s: sk_wait_data %ld\n", __func__, timeo);
+		} else if (rc) {
+			sdp_dbg_data(sk, "%s: sk_wait_data %ld\n", __func__, timeo);
 			sk_wait_data(sk, &timeo);
 		}
 		continue;
 
 	found_ok_skb:
-		sdp_dbg(sk, "%s: found_ok_skb len %d\n", __func__, skb->len);
-		sdp_dbg(sk, "%s: len %Zd offset %d\n", __func__, len, offset);
-		sdp_dbg(sk, "%s: copied %d target %d\n", __func__, copied, target);
-		urg_data = sdp_has_urgent_data(skb);
+		sdp_dbg_data(sk, "%s: found_ok_skb len %d\n", __func__, skb->len);
+		sdp_dbg_data(sk, "%s: len %Zd offset %d\n", __func__, len, offset);
+		sdp_dbg_data(sk, "%s: copied %d target %d\n", __func__, copied, target);
 		used = skb->len - offset;
 		if (len < used)
-		       	used = len;
+			used = len;
 
-		sdp_dbg(sk, "%s: used %ld\n", __func__, used);
+		sdp_dbg_data(sk, "%s: used %ld\n", __func__, used);
 
+		if (ssk->urg_data) {
+			u32 urg_offset = ssk->urg_seq - *seq;
+			if (urg_offset < used) {
+				if (!urg_offset) {
+					if (!sock_flag(sk, SOCK_URGINLINE)) {
+						++*seq;
+						offset++;
+						used--;
+						if (!used)
+							goto skip_copy;
+					}
+				} else
+					used = urg_offset;
+			}
+		}
 		if (!(flags & MSG_TRUNC)) {
 			int err;
 			err = skb_copy_datagram_iovec(skb, offset,
@@ -1153,34 +1382,32 @@ static int sdp_recvmsg(struct kiocb *ioc
 			}
 		}
 
-                copied += used;
-                len -= used;
-		offset += used;
-		sdp_dbg(sk, "%s: done copied %d target %d\n", __func__, copied, target);
+		copied += used;
+		len -= used;
+		*seq += used;
 
-		sdp_rcv_space_adjust(sk);
+		sdp_dbg_data(sk, "%s: done copied %d target %d\n", __func__, copied, target);
 
-                if (offset < skb->len)
-			continue; /* TODO: break? */
+		sdp_rcv_space_adjust(sk);
+skip_copy:
+		if (ssk->urg_data && after(ssk->copied_seq, ssk->urg_seq))
+			ssk->urg_data = 0;
+		if (used + offset < skb->len)
+			continue;
+		offset = 0;
 
-                if (!(flags & MSG_PEEK))
+		if (!(flags & MSG_PEEK))
 			sk_eat_skb(sk, skb, 0);
 
-		offset = 0;
-		skb = NULL;
-
 		continue;
 found_fin_ok:
+		++*seq;
 		if (!(flags & MSG_PEEK))
 			sk_eat_skb(sk, skb, 0);
 
-		offset = 0;
-		skb = NULL;
 		break;
 	} while (len > 0);
 
-	sdp_sk(sk)->offset = skb && !(flags & MSG_PEEK) ? offset : 0;
-
 	release_sock(sk);
 	return copied;
 
@@ -1204,6 +1431,7 @@ static int sdp_listen(struct sock *sk, i
 		rc = sdp_get_port(sk, 0);
 		if (rc)
 			return rc;
+		inet_sk(sk)->sport = htons(inet_sk(sk)->num);
 	}
 
 	rc = rdma_listen(ssk->id, backlog);
@@ -1265,7 +1493,7 @@ static unsigned int sdp_poll(struct file
 			     struct poll_table_struct *wait)
 {
 	int mask;
-	sdp_dbg(socket->sk, "%s\n", __func__);
+	sdp_dbg_data(socket->sk, "%s\n", __func__);
 
 	mask = datagram_poll(file, socket, wait);
 	/* TODO: Slightly ugly: it would be nicer if there was function
@@ -1284,6 +1512,21 @@ static void sdp_enter_memory_pressure(vo
 	sdp_dbg(NULL, "%s\n", __func__);
 }
 
+void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb)
+{
+	struct sock *sk = &ssk->isk.sk;
+	u8 tmp;
+	u32 ptr = skb->len - 1;
+
+	ssk->urg_seq = TCP_SKB_CB(skb)->seq + ptr;
+
+	if (skb_copy_bits(skb, ptr, &tmp, 1))
+		BUG();
+	ssk->urg_data = TCP_URG_VALID | tmp;
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, 0);
+}
+
 static atomic_t sockets_allocated;
 static atomic_t memory_allocated;
 static atomic_t orphan_count;
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/srp/ib_srp.c linux-2.6.18.noarch/drivers/infiniband/ulp/srp/ib_srp.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/srp/ib_srp.c	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/srp/ib_srp.c	2006-10-19 12:45:48.000000000 -0400
@@ -329,40 +329,45 @@ static int srp_send_req(struct srp_targe
 	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
 	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 					      SRP_BUF_FORMAT_INDIRECT);
+
 	/*
 	 * In the published SRP specification (draft rev. 16a), the 
 	 * port identifier format is 8 bytes of ID extension followed
-	 * by 8 bytes of GUID.  Older drafts put the two halves in the
-	 * opposite order, so that the GUID comes first.
+	 * by 8 bytes of port_GUID.  Older drafts put the two halves in the
+	 * opposite order, so that the port_GUID comes first.
 	 *
 	 * Targets conforming to these obsolete drafts can be
 	 * recognized by the I/O Class they report.
 	 */
+
 	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
 		memcpy(req->priv.initiator_port_id,
-		       target->srp_host->initiator_port_id + 8, 8);
+		       &target->path.sgid.global.interface_id, 8);
 		memcpy(req->priv.initiator_port_id + 8,
-		       target->srp_host->initiator_port_id, 8);
+		       &target->initiator_ext, 8);
 		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
 		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
 	} else {
 		memcpy(req->priv.initiator_port_id,
-		       target->srp_host->initiator_port_id, 16);
+		       &target->initiator_ext, 8);
+		memcpy(req->priv.initiator_port_id + 8,
+		       &target->path.sgid.global.interface_id, 8);
 		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
 		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
 	}
 
 	/*
 	 * Topspin/Cisco SRP targets will reject our login unless we
-	 * zero out the first 8 bytes of our initiator port ID.  The
-	 * second 8 bytes must be our local node GUID, but we always
-	 * use that anyway.
+	 * zero out the first 8 bytes of our initiator port ID and set
+	 * the second 8 bytes to the local node GUID.
 	 */
 	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
 		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
 		       "activated for target GUID %016llx\n",
 		       (unsigned long long) be64_to_cpu(target->ioc_guid));
 		memset(req->priv.initiator_port_id, 0, 8);
+		memcpy(req->priv.initiator_port_id + 8,
+		       &target->srp_host->dev->dev->node_guid, 8);
 	}
 
 	status = ib_send_cm_req(target->cm_id, &req->param);
@@ -495,10 +500,10 @@ static void srp_reset_req(struct srp_tar
 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_cm_id *new_cm_id;
-	struct ib_qp_attr qp_attr;
 	struct srp_request *req, *tmp;
-	struct ib_wc wc;
 	int ret;
+	struct ib_cq *old_cq;
+	struct ib_qp *old_qp;
 
 	spin_lock_irq(target->scsi_host->host_lock);
 	if (target->state != SRP_TARGET_LIVE) {
@@ -522,17 +527,17 @@ static int srp_reconnect_target(struct s
 	ib_destroy_cm_id(target->cm_id);
 	target->cm_id = new_cm_id;
 
-	qp_attr.qp_state = IB_QPS_RESET;
-	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
-	if (ret)
-		goto err;
-
-	ret = srp_init_qp(target, target->qp);
-	if (ret)
+	old_qp = target->qp;
+	old_cq = target->cq;
+	ret = srp_create_target_ib(target);
+	if (ret) {
+		target->qp = old_qp;
+		target->cq = old_cq;
 		goto err;
+	}
 
-	while (ib_poll_cq(target->cq, 1, &wc) > 0)
-		; /* nothing */
+	ib_destroy_qp(old_qp);
+	ib_destroy_cq(old_cq);
 
 	spin_lock_irq(target->scsi_host->host_lock);
 	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
@@ -543,6 +548,7 @@ static int srp_reconnect_target(struct s
 	target->tx_head	 = 0;
 	target->tx_tail  = 0;
 
+	target->need_reset = 0;
 	ret = srp_connect_target(target);
 	if (ret)
 		goto err;
@@ -858,6 +864,7 @@ static void srp_completion(struct ib_cq 
 			printk(KERN_ERR PFX "failed %s status %d\n",
 			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
 			       wc.status);
+			target->need_reset = 1;
 			break;
 		}
 
@@ -1313,6 +1320,8 @@ static int srp_abort(struct scsi_cmnd *s
 
 	printk(KERN_ERR "SRP abort called\n");
 
+	if (target->need_reset)
+		return FAILED;
 	if (srp_find_req(target, scmnd, &req))
 		return FAILED;
 	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
@@ -1341,6 +1350,8 @@ static int srp_reset_device(struct scsi_
 
 	printk(KERN_ERR "SRP reset_device called\n");
 
+	if (target->need_reset)
+		return FAILED;
 	if (srp_find_req(target, scmnd, &req))
 		return FAILED;
 	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
@@ -1449,12 +1460,29 @@ static ssize_t show_zero_req_lim(struct 
 	return sprintf(buf, "%d\n", target->zero_req_lim);
 }
 
-static CLASS_DEVICE_ATTR(id_ext,	S_IRUGO, show_id_ext,		NULL);
-static CLASS_DEVICE_ATTR(ioc_guid,	S_IRUGO, show_ioc_guid,		NULL);
-static CLASS_DEVICE_ATTR(service_id,	S_IRUGO, show_service_id,	NULL);
-static CLASS_DEVICE_ATTR(pkey,		S_IRUGO, show_pkey,		NULL);
-static CLASS_DEVICE_ATTR(dgid,		S_IRUGO, show_dgid,		NULL);
-static CLASS_DEVICE_ATTR(zero_req_lim,	S_IRUGO, show_zero_req_lim,	NULL);
+static ssize_t show_local_ib_port(struct class_device *cdev, char *buf)
+{
+	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+
+	return sprintf(buf, "%d\n", target->srp_host->port);
+}
+
+static ssize_t show_local_ib_device(struct class_device *cdev, char *buf)
+{
+	struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+
+	return sprintf(buf, "%s\n", target->srp_host->dev->dev->name);
+}
+
+
+static CLASS_DEVICE_ATTR(id_ext,	  S_IRUGO, show_id_ext,		 NULL);
+static CLASS_DEVICE_ATTR(ioc_guid,	  S_IRUGO, show_ioc_guid,	 NULL);
+static CLASS_DEVICE_ATTR(service_id,	  S_IRUGO, show_service_id,	 NULL);
+static CLASS_DEVICE_ATTR(pkey,		  S_IRUGO, show_pkey,		 NULL);
+static CLASS_DEVICE_ATTR(dgid,		  S_IRUGO, show_dgid,		 NULL);
+static CLASS_DEVICE_ATTR(zero_req_lim,	  S_IRUGO, show_zero_req_lim,	 NULL);
+static CLASS_DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,	 NULL);
+static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
 
 static struct class_device_attribute *srp_host_attrs[] = {
 	&class_device_attr_id_ext,
@@ -1463,6 +1491,8 @@ static struct class_device_attribute *sr
 	&class_device_attr_pkey,
 	&class_device_attr_dgid,
 	&class_device_attr_zero_req_lim,
+	&class_device_attr_local_ib_port,
+	&class_device_attr_local_ib_device,
 	NULL
 };
 
@@ -1532,6 +1562,7 @@ enum {
 	SRP_OPT_MAX_SECT	= 1 << 5,
 	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
 	SRP_OPT_IO_CLASS	= 1 << 7,
+	SRP_OPT_INITIATOR_EXT	= 1 << 8,
 	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
 				   SRP_OPT_IOC_GUID	|
 				   SRP_OPT_DGID		|
@@ -1548,6 +1579,7 @@ static match_table_t srp_opt_tokens = {
 	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
 	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
 	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
+	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
 	{ SRP_OPT_ERR,			NULL 			}
 };
 
@@ -1647,6 +1679,12 @@ static int srp_parse_options(const char 
 			target->io_class = token;
 			break;
 
+		case SRP_OPT_INITIATOR_EXT:
+			p = match_strdup(args);
+			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+			kfree(p);
+			break;
+
 		default:
 			printk(KERN_WARNING PFX "unknown parameter or missing value "
 			       "'%s' in target creation request\n", p);
@@ -1731,6 +1769,7 @@ static ssize_t srp_create_target(struct 
 		goto err_free;
 	}
 
+	target->need_reset = 0;
 	ret = srp_connect_target(target);
 	if (ret) {
 		printk(KERN_ERR PFX "Connection failed\n");
@@ -1794,9 +1833,6 @@ static struct srp_host *srp_add_port(str
 	host->dev  = device;
 	host->port = port;
 
-	host->initiator_port_id[7] = port;
-	memcpy(host->initiator_port_id + 8, &device->dev->node_guid, 8);
-
 	host->class_dev.class = &srp_class;
 	host->class_dev.dev   = device->dev->dma_device;
 	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/srp/ib_srp.h linux-2.6.18.noarch/drivers/infiniband/ulp/srp/ib_srp.h
--- linux-2.6.18.noarch.OFED/drivers/infiniband/ulp/srp/ib_srp.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/drivers/infiniband/ulp/srp/ib_srp.h	2006-10-19 12:41:11.000000000 -0400
@@ -91,7 +91,6 @@ struct srp_device {
 };
 
 struct srp_host {
-	u8			initiator_port_id[16];
 	struct srp_device      *dev;
 	u8			port;
 	struct class_device	class_dev;
@@ -122,6 +121,7 @@ struct srp_target_port {
 	__be64			id_ext;
 	__be64			ioc_guid;
 	__be64			service_id;
+	__be64			initiator_ext;
 	u16			io_class;
 	struct srp_host	       *srp_host;
 	struct Scsi_Host       *scsi_host;
@@ -158,6 +158,7 @@ struct srp_target_port {
 	struct completion	done;
 	int			status;
 	enum srp_target_state	state;
+	int			need_reset;
 };
 
 struct srp_iu {
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/util/Kconfig linux-2.6.18.noarch/drivers/infiniband/util/Kconfig
--- linux-2.6.18.noarch.OFED/drivers/infiniband/util/Kconfig	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/util/Kconfig	2006-10-17 10:15:06.000000000 -0400
@@ -0,0 +1,6 @@
+config INFINIBAND_MADEYE
+	tristate "MAD debug viewer for InfiniBand"
+	depends on INFINIBAND
+	---help---
+	  Prints sent and received MADs on QP 0/1 for debugging.
+
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/util/madeye.c linux-2.6.18.noarch/drivers/infiniband/util/madeye.c
--- linux-2.6.18.noarch.OFED/drivers/infiniband/util/madeye.c	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/util/madeye.c	2006-10-17 10:15:06.000000000 -0400
@@ -0,0 +1,590 @@
+/*
+ * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2005, 2006 Voltaire Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_sa.h>
+
+MODULE_AUTHOR("Sean Hefty");
+MODULE_DESCRIPTION("InfiniBand MAD viewer");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static void madeye_remove_one(struct ib_device *device);
+static void madeye_add_one(struct ib_device *device);
+
+static struct ib_client madeye_client = {
+	.name   = "madeye",
+	.add    = madeye_add_one,
+	.remove = madeye_remove_one
+};
+
+struct madeye_port {
+	struct ib_mad_agent *smi_agent;
+	struct ib_mad_agent *gsi_agent;
+};
+
+static int smp = 1;
+static int gmp = 1;
+static int mgmt_class = 0;
+static int attr_id = 0;
+static int data = 0;
+
+module_param(smp, int, 0444);
+module_param(gmp, int, 0444);
+module_param(mgmt_class, int, 0444);
+module_param(attr_id, int, 0444);
+module_param(data, int, 0444);
+
+MODULE_PARM_DESC(smp, "Display all SMPs (default=1)");
+MODULE_PARM_DESC(gmp, "Display all GMPs (default=1)");
+MODULE_PARM_DESC(mgmt_class, "Display all MADs of specified class (default=0)");
+MODULE_PARM_DESC(attr_id, "Display all MADs of specified attribute ID (default=0)");
+MODULE_PARM_DESC(data, "Display data area of MADs (default=0)");
+
+static char * get_class_name(u8 mgmt_class)
+{
+	switch(mgmt_class) {
+	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+		return "LID routed SMP";
+	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+		return "Directed route SMP";
+	case IB_MGMT_CLASS_SUBN_ADM:
+		return "Subnet admin.";
+	case IB_MGMT_CLASS_PERF_MGMT:
+		return "Perf. mgmt.";
+	case IB_MGMT_CLASS_BM:
+		return "Baseboard mgmt.";
+	case IB_MGMT_CLASS_DEVICE_MGMT:
+		return "Device mgmt.";
+	case IB_MGMT_CLASS_CM:
+		return "Comm. mgmt.";
+	case IB_MGMT_CLASS_SNMP:
+		return "SNMP";
+	default:
+		return "Unknown vendor/application";
+	}
+}
+
+static char * get_method_name(u8 mgmt_class, u8 method)
+{
+	switch(method) {
+	case IB_MGMT_METHOD_GET:
+		return "Get";
+	case IB_MGMT_METHOD_SET:
+		return "Set";
+	case IB_MGMT_METHOD_GET_RESP:
+		return "Get response";
+	case IB_MGMT_METHOD_SEND:
+		return "Send";
+	case IB_MGMT_METHOD_SEND | IB_MGMT_METHOD_RESP:
+		return "Send response";
+	case IB_MGMT_METHOD_TRAP:
+		return "Trap";
+	case IB_MGMT_METHOD_REPORT:
+		return "Report";
+	case IB_MGMT_METHOD_REPORT_RESP:
+		return "Report response";
+	case IB_MGMT_METHOD_TRAP_REPRESS:
+		return "Trap repress";
+	default:
+		break;
+	}
+
+	switch (mgmt_class) {
+	case IB_MGMT_CLASS_SUBN_ADM:
+		switch (method) {
+		case IB_SA_METHOD_GET_TABLE:
+			return "Get table";
+		case IB_SA_METHOD_GET_TABLE_RESP:
+			return "Get table response";
+		case IB_SA_METHOD_DELETE:
+			return "Delete";
+		case IB_SA_METHOD_DELETE_RESP:
+			return "Delete response";
+		case IB_SA_METHOD_GET_MULTI:
+			return "Get Multi";
+		case IB_SA_METHOD_GET_MULTI_RESP:
+			return "Get Multi response";
+		case IB_SA_METHOD_GET_TRACE_TBL:
+			return "Get Trace Table";
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	return "Unknown";
+}
+
+static void print_status_details(u16 status)
+{
+	if (status & 0x0001)
+		printk("               busy\n");
+	if (status & 0x0002)
+		printk("               redirection required\n");
+	switch((status & 0x001C) >> 2) {
+	case 1:
+		printk("               bad version\n");
+		break;
+	case 2:
+		printk("               method not supported\n");
+		break;
+	case 3:
+		printk("               method/attribute combo not supported\n");
+		break;
+	case 7:
+		printk("               invalid attribute/modifier value\n");
+		break;
+	}
+}
+
+static char * get_sa_attr(__be16 attr)
+{
+	switch(attr) {
+	case IB_SA_ATTR_CLASS_PORTINFO:
+		return "Class Port Info";
+	case IB_SA_ATTR_NOTICE:
+		return "Notice";
+	case IB_SA_ATTR_INFORM_INFO:
+		return "Inform Info";
+	case IB_SA_ATTR_NODE_REC:
+		return "Node Record";
+	case IB_SA_ATTR_PORT_INFO_REC:
+		return "PortInfo Record";
+	case IB_SA_ATTR_SL2VL_REC:
+		return "SL to VL Record";
+	case IB_SA_ATTR_SWITCH_REC:
+		return "Switch Record";
+	case IB_SA_ATTR_LINEAR_FDB_REC:
+		return "Linear FDB Record";
+	case IB_SA_ATTR_RANDOM_FDB_REC:
+		return "Random FDB Record";
+	case IB_SA_ATTR_MCAST_FDB_REC:
+		return "Multicast FDB Record";
+	case IB_SA_ATTR_SM_INFO_REC:
+		return "SM Info Record";
+	case IB_SA_ATTR_LINK_REC:
+		return "Link Record";
+	case IB_SA_ATTR_GUID_INFO_REC:
+		return "Guid Info Record";
+	case IB_SA_ATTR_SERVICE_REC:
+		return "Service Record";
+	case IB_SA_ATTR_PARTITION_REC:
+		return "Partition Record";
+	case IB_SA_ATTR_PATH_REC:
+		return "Path Record";
+	case IB_SA_ATTR_VL_ARB_REC:
+		return "VL Arb Record";
+	case IB_SA_ATTR_MC_MEMBER_REC:
+		return "MC Member Record";
+	case IB_SA_ATTR_TRACE_REC:
+		return "Trace Record";
+	case IB_SA_ATTR_MULTI_PATH_REC:
+		return "Multi Path Record";
+	case IB_SA_ATTR_SERVICE_ASSOC_REC:
+		return "Service Assoc Record";
+	case IB_SA_ATTR_INFORM_INFO_REC:
+		return "Inform Info Record";
+	default:
+		return "";
+	}
+}
+
+static void print_mad_hdr(struct ib_mad_hdr *mad_hdr)
+{
+	printk("MAD version....0x%01x\n", mad_hdr->base_version);
+	printk("Class..........0x%01x (%s)\n", mad_hdr->mgmt_class,
+	       get_class_name(mad_hdr->mgmt_class));
+	printk("Class version..0x%01x\n", mad_hdr->class_version);
+	printk("Method.........0x%01x (%s)\n", mad_hdr->method,
+	       get_method_name(mad_hdr->mgmt_class, mad_hdr->method));
+	printk("Status.........0x%02x\n", be16_to_cpu(mad_hdr->status));
+	if (mad_hdr->status)
+		print_status_details(be16_to_cpu(mad_hdr->status));
+	printk("Class specific.0x%02x\n", be16_to_cpu(mad_hdr->class_specific));
+	printk("Trans ID.......0x%llx\n", mad_hdr->tid);
+	if (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+		printk("Attr ID........0x%02x (%s)\n",
+		       be16_to_cpu(mad_hdr->attr_id),
+		       get_sa_attr(be16_to_cpu(mad_hdr->attr_id)));
+	else
+		printk("Attr ID........0x%02x\n",
+		       be16_to_cpu(mad_hdr->attr_id));
+	printk("Attr modifier..0x%04x\n", be32_to_cpu(mad_hdr->attr_mod));
+}
+
+static char * get_rmpp_type(u8 rmpp_type)
+{
+	switch (rmpp_type) {
+	case IB_MGMT_RMPP_TYPE_DATA:
+		return "Data";
+	case IB_MGMT_RMPP_TYPE_ACK:
+		return "Ack";
+	case IB_MGMT_RMPP_TYPE_STOP:
+		return "Stop";
+	case IB_MGMT_RMPP_TYPE_ABORT:
+		return "Abort";
+	default:
+		return "Unknown";
+	}
+}
+
+static char * get_rmpp_flags(u8 rmpp_flags)
+{
+	if (rmpp_flags & IB_MGMT_RMPP_FLAG_ACTIVE)
+		if (rmpp_flags & IB_MGMT_RMPP_FLAG_FIRST)
+			if (rmpp_flags & IB_MGMT_RMPP_FLAG_LAST)
+				return "Active - First & Last";
+			else
+				return "Active - First";
+		else
+			if (rmpp_flags & IB_MGMT_RMPP_FLAG_LAST)
+				return "Active - Last";
+			else
+				return "Active";
+	else
+		return "Inactive";
+}
+
+static void print_rmpp_hdr(struct ib_rmpp_hdr *rmpp_hdr)
+{
+	printk("RMPP version...0x%01x\n", rmpp_hdr->rmpp_version);
+	printk("RMPP type......0x%01x (%s)\n", rmpp_hdr->rmpp_type,
+	       get_rmpp_type(rmpp_hdr->rmpp_type));
+	printk("RMPP RRespTime.0x%01x\n", ib_get_rmpp_resptime(rmpp_hdr));
+	printk("RMPP flags.....0x%01x (%s)\n", ib_get_rmpp_flags(rmpp_hdr),
+	       get_rmpp_flags(ib_get_rmpp_flags(rmpp_hdr)));
+	printk("RMPP status....0x%01x\n", rmpp_hdr->rmpp_status);
+	printk("Seg number.....0x%04x\n", be32_to_cpu(rmpp_hdr->seg_num));
+	switch (rmpp_hdr->rmpp_type) {
+	case IB_MGMT_RMPP_TYPE_DATA:
+		printk("Payload len....0x%04x\n",
+		       be32_to_cpu(rmpp_hdr->paylen_newwin));
+		break;
+	case IB_MGMT_RMPP_TYPE_ACK:
+		printk("New window.....0x%04x\n",
+		       be32_to_cpu(rmpp_hdr->paylen_newwin));
+		break;
+	default:
+		printk("Data 2.........0x%04x\n",
+		       be32_to_cpu(rmpp_hdr->paylen_newwin));
+		break;
+	}
+}
+
+static char * get_smp_attr(__be16 attr)
+{
+	switch (attr) {
+	case IB_SMP_ATTR_NOTICE:
+		return "notice";
+	case IB_SMP_ATTR_NODE_DESC:
+		return "node description";
+	case IB_SMP_ATTR_NODE_INFO:
+		return "node info";
+	case IB_SMP_ATTR_SWITCH_INFO:
+		return "switch info";
+	case IB_SMP_ATTR_GUID_INFO:
+		return "GUID info";
+	case IB_SMP_ATTR_PORT_INFO:
+		return "port info";
+	case IB_SMP_ATTR_PKEY_TABLE:
+		return "pkey table";
+	case IB_SMP_ATTR_SL_TO_VL_TABLE:
+		return "SL to VL table";
+	case IB_SMP_ATTR_VL_ARB_TABLE:
+		return "VL arbitration table";
+	case IB_SMP_ATTR_LINEAR_FORWARD_TABLE:
+		return "linear forwarding table";
+	case IB_SMP_ATTR_RANDOM_FORWARD_TABLE:
+		return "random forward table";
+	case IB_SMP_ATTR_MCAST_FORWARD_TABLE:
+		return "multicast forward table";
+	case IB_SMP_ATTR_SM_INFO:
+		return "SM info";
+	case IB_SMP_ATTR_VENDOR_DIAG:
+		return "vendor diags";
+	case IB_SMP_ATTR_LED_INFO:
+		return "LED info";
+	default:
+		return "";
+	}
+}
+
+static void print_smp(struct ib_smp *smp)
+{
+	int i;
+
+	printk("MAD version....0x%01x\n", smp->base_version);
+	printk("Class..........0x%01x (%s)\n", smp->mgmt_class,
+	       get_class_name(smp->mgmt_class));
+	printk("Class version..0x%01x\n", smp->class_version);
+	printk("Method.........0x%01x (%s)\n", smp->method,
+	       get_method_name(smp->mgmt_class, smp->method));
+	printk("Status.........0x%02x\n", be16_to_cpu(smp->status));
+	if (smp->status)
+		print_status_details(be16_to_cpu(smp->status));
+	printk("Hop pointer....0x%01x\n", smp->hop_ptr);
+	printk("Hop counter....0x%01x\n", smp->hop_cnt);
+	printk("Trans ID.......0x%llx\n", smp->tid);
+	printk("Attr ID........0x%02x (%s)\n", be16_to_cpu(smp->attr_id),
+		get_smp_attr(smp->attr_id));
+	printk("Attr modifier..0x%04x\n", be32_to_cpu(smp->attr_mod));
+
+	printk("Mkey...........0x%llx\n", be64_to_cpu(smp->mkey));
+	printk("DR SLID........0x%02x\n", be16_to_cpu(smp->dr_slid));
+	printk("DR DLID........0x%02x", be16_to_cpu(smp->dr_dlid));
+
+	if (data) {
+		for (i = 0; i < IB_SMP_DATA_SIZE; i++) {
+			if (i % 16 == 0) 
+				printk("\nSMP Data.......");
+			printk("%01x ", smp->data[i]);
+		}
+		for (i = 0; i < IB_SMP_MAX_PATH_HOPS; i++) {
+			if (i % 16 == 0) 
+				printk("\nInitial path...");
+			printk("%01x ", smp->initial_path[i]);
+		}
+		for (i = 0; i < IB_SMP_MAX_PATH_HOPS; i++) {
+			if (i % 16 == 0) 
+				printk("\nReturn path....");
+			printk("%01x ", smp->return_path[i]);
+		}
+	}
+	printk("\n");
+}
+
+static void snoop_smi_handler(struct ib_mad_agent *mad_agent,
+			      struct ib_mad_send_buf *send_buf,
+			      struct ib_mad_send_wc *mad_send_wc)
+{
+	struct ib_mad_hdr *hdr = send_buf->mad;
+
+	if (!smp && hdr->mgmt_class != mgmt_class)
+		return;
+	if (attr_id && hdr->attr_id != attr_id)
+		return;
+
+	printk("Madeye:sent SMP\n");
+	print_smp(send_buf->mad);
+}
+
+static void recv_smi_handler(struct ib_mad_agent *mad_agent,
+			     struct ib_mad_recv_wc *mad_recv_wc)
+{
+	if (!smp && mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class != mgmt_class)
+		return;
+	if (attr_id && mad_recv_wc->recv_buf.mad->mad_hdr.attr_id != attr_id)
+		return;
+
+	printk("Madeye:recv SMP\n");
+	print_smp((struct ib_smp *)&mad_recv_wc->recv_buf.mad->mad_hdr);
+}
+
+static int is_rmpp_mad(struct ib_mad_hdr *mad_hdr)
+{
+	if (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
+		switch (mad_hdr->method) {
+		case IB_SA_METHOD_GET_TABLE:
+		case IB_SA_METHOD_GET_TABLE_RESP:
+		case IB_SA_METHOD_GET_MULTI_RESP:
+			return 1;
+		default:
+			break;
+		}
+	} else if ((mad_hdr->mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+		   (mad_hdr->mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+		return 1;
+
+	return 0;
+}
+
+static void snoop_gsi_handler(struct ib_mad_agent *mad_agent,
+			      struct ib_mad_send_buf *send_buf,
+			      struct ib_mad_send_wc *mad_send_wc)
+{
+	struct ib_mad_hdr *hdr = send_buf->mad;
+
+	if (!gmp && hdr->mgmt_class != mgmt_class)
+		return;
+	if (attr_id && hdr->attr_id != attr_id)
+		return;
+
+	printk("Madeye:sent GMP\n");
+	print_mad_hdr(hdr);
+
+	if (is_rmpp_mad(hdr))
+		print_rmpp_hdr(&((struct ib_rmpp_mad *) hdr)->rmpp_hdr);
+}
+
+static void recv_gsi_handler(struct ib_mad_agent *mad_agent,
+			     struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_mad_hdr *hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
+	struct ib_rmpp_mad *mad = NULL;
+	struct ib_sa_mad *sa_mad;
+	struct ib_vendor_mad *vendor_mad;
+	u8 *mad_data;
+	int i, j;
+
+	if (!gmp && hdr->mgmt_class != mgmt_class)
+		return;
+	if (attr_id && mad_recv_wc->recv_buf.mad->mad_hdr.attr_id != attr_id)
+		return;
+
+	printk("Madeye:recv GMP\n");
+	print_mad_hdr(hdr);
+
+	if (is_rmpp_mad(hdr)) {
+		mad = (struct ib_rmpp_mad *) hdr;
+		print_rmpp_hdr(&mad->rmpp_hdr);
+	}
+
+	if (data) {
+		if (hdr->mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
+			j = IB_MGMT_SA_DATA;
+			/* Display SA header */
+			if (is_rmpp_mad(hdr) &&
+			    mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
+				return;
+			sa_mad = (struct ib_sa_mad *)
+				 mad_recv_wc->recv_buf.mad;
+			mad_data = sa_mad->data;
+		} else {
+			if (is_rmpp_mad(hdr)) {
+				j = IB_MGMT_VENDOR_DATA;
+				/* Display OUI */
+				vendor_mad = (struct ib_vendor_mad *)
+					     mad_recv_wc->recv_buf.mad;
+				printk("Vendor OUI......%01x %01x %01x\n",
+					vendor_mad->oui[0],
+					vendor_mad->oui[1],
+					vendor_mad->oui[2]);
+				mad_data = vendor_mad->data;
+			} else {
+				j = IB_MGMT_MAD_DATA;
+				mad_data = mad_recv_wc->recv_buf.mad->data;
+			}
+		}
+		for (i = 0; i < j; i++) {
+			if (i % 16 == 0) 
+				printk("\nData...........");
+			printk("%01x ", mad_data[i]);
+		}
+		printk("\n");
+	}
+}
+
+static void madeye_add_one(struct ib_device *device)
+{
+	struct madeye_port *port;
+	int reg_flags;
+	u8 i, s, e;
+
+	if (device->node_type == IB_NODE_SWITCH) {
+		s = 0;
+		e = 0;
+	} else {
+		s = 1;
+		e = device->phys_port_cnt;
+	}
+
+	port = kmalloc(sizeof *port * (e + 1), GFP_KERNEL);
+	if (!port)
+		goto out;
+
+	reg_flags = IB_MAD_SNOOP_SEND_COMPLETIONS | IB_MAD_SNOOP_RECVS;
+	for (i = s; i <= e; i++) {
+		port[i].smi_agent = ib_register_mad_snoop(device, i,
+							  IB_QPT_SMI,
+							  reg_flags,
+							  snoop_smi_handler,
+							  recv_smi_handler,
+							  &port[i]);
+		port[i].gsi_agent = ib_register_mad_snoop(device, i,
+							  IB_QPT_GSI,
+							  reg_flags,
+							  snoop_gsi_handler,
+							  recv_gsi_handler,
+							  &port[i]);
+	}
+
+out:
+	ib_set_client_data(device, &madeye_client, port);
+}
+
+static void madeye_remove_one(struct ib_device *device)
+{
+	struct madeye_port *port;
+	int i, s, e;
+
+	port = (struct madeye_port *)
+		ib_get_client_data(device, &madeye_client);
+	if (!port)
+		return;
+
+	if (device->node_type == IB_NODE_SWITCH) {
+		s = 0;
+		e = 0;
+	} else {
+		s = 1;
+		e = device->phys_port_cnt;
+	}
+
+	for (i = s; i <= e; i++) {
+		if (!IS_ERR(port[i].smi_agent))
+			ib_unregister_mad_agent(port[i].smi_agent);
+		if (!IS_ERR(port[i].gsi_agent))
+			ib_unregister_mad_agent(port[i].gsi_agent);
+	}
+	kfree(port);
+}
+
+static int __init ib_madeye_init(void)
+{
+	return ib_register_client(&madeye_client);
+}
+
+static void __exit ib_madeye_cleanup(void)
+{
+	ib_unregister_client(&madeye_client);
+}
+
+module_init(ib_madeye_init);
+module_exit(ib_madeye_cleanup);
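
The double-negative tests in the four snoop handlers above are easy to
misread, so here is a condensed restatement of the filter they all
apply; madeye_would_print() is purely illustrative and not part of the
module, and smp, gmp, mgmt_class, and attr_id are the module
parameters defined above:

/* A MAD is printed when its class passes the smp/gmp/mgmt_class test
 * and, if attr_id is nonzero, its attribute ID matches (compared raw,
 * exactly as the handlers do). */
static int madeye_would_print(int on_smi_qp, struct ib_mad_hdr *hdr)
{
	int class_ok = on_smi_qp ? (smp || hdr->mgmt_class == mgmt_class)
				 : (gmp || hdr->mgmt_class == mgmt_class);

	return class_ok && (!attr_id || hdr->attr_id == attr_id);
}
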
diff -uprN linux-2.6.18.noarch.OFED/drivers/infiniband/util/Makefile linux-2.6.18.noarch/drivers/infiniband/util/Makefile
--- linux-2.6.18.noarch.OFED/drivers/infiniband/util/Makefile	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/drivers/infiniband/util/Makefile	2006-10-17 10:15:06.000000000 -0400
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_MADEYE)	+= ib_madeye.o
+
+ib_madeye-y := madeye.o
diff -uprN linux-2.6.18.noarch.OFED/include/rdma/ib_sa.h linux-2.6.18.noarch/include/rdma/ib_sa.h
--- linux-2.6.18.noarch.OFED/include/rdma/ib_sa.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/include/rdma/ib_sa.h	2006-10-19 12:41:10.000000000 -0400
@@ -79,8 +79,8 @@ enum {
 };
 
 enum ib_sa_selector {
-	IB_SA_GTE  = 0,
-	IB_SA_LTE  = 1,
+	IB_SA_GT   = 0,
+	IB_SA_LT   = 1,
 	IB_SA_EQ   = 2,
 	/*
 	 * The meaning of "best" depends on the attribute: for
diff -uprN linux-2.6.18.noarch.OFED/include/rdma/Kbuild linux-2.6.18.noarch/include/rdma/Kbuild
--- linux-2.6.18.noarch.OFED/include/rdma/Kbuild	2006-10-19 14:44:38.000000000 -0400
+++ linux-2.6.18.noarch/include/rdma/Kbuild	2006-10-17 10:15:06.000000000 -0400
@@ -1 +1 @@
-header-y += ib_user_mad.h
+header-y := ib_user_mad.h
diff -uprN linux-2.6.18.noarch.OFED/include/rdma/rdma_cm.h linux-2.6.18.noarch/include/rdma/rdma_cm.h
--- linux-2.6.18.noarch.OFED/include/rdma/rdma_cm.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/include/rdma/rdma_cm.h	2006-10-19 12:41:11.000000000 -0400
@@ -241,6 +241,16 @@ int rdma_listen(struct rdma_cm_id *id, i
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
 /**
+ * rdma_establish - Forces a connection into the established state.
+ * @id: Connection identifier to transition to established.
+ *
+ * This routine should be invoked by users who receive messages on a
+ * QP before being notified that the connection has been established by the
+ * RDMA CM.
+ */
+int rdma_establish(struct rdma_cm_id *id);
+
+/**
  * rdma_reject - Called to reject a connection request or response.
  */
 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
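
A hedged sketch of the intended rdma_establish() call site; the
connection structure and its fields are assumptions for illustration,
not part of this header:

#include <rdma/rdma_cm.h>

struct my_conn {			/* illustrative ULP state */
	struct rdma_cm_id *cm_id;
	int established;
};

/* Called when data arrives before RDMA_CM_EVENT_ESTABLISHED has been
 * delivered: force the CM state forward so later CM processing stays
 * consistent with the traffic already flowing on the QP. */
static void my_recv_completion(struct my_conn *conn)
{
	if (!conn->established && !rdma_establish(conn->cm_id))
		conn->established = 1;
}
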
diff -uprN linux-2.6.18.noarch.OFED/include/rdma/rdma_cm_ib.h linux-2.6.18.noarch/include/rdma/rdma_cm_ib.h
--- linux-2.6.18.noarch.OFED/include/rdma/rdma_cm_ib.h	2006-09-19 23:42:06.000000000 -0400
+++ linux-2.6.18.noarch/include/rdma/rdma_cm_ib.h	2006-10-17 10:15:06.000000000 -0400
@@ -44,4 +44,26 @@
 int rdma_set_ib_paths(struct rdma_cm_id *id,
 		      struct ib_sa_path_rec *path_rec, int num_paths);
 
+struct ib_cm_req_opt {
+	u8	remote_cm_response_timeout;
+	u8	local_cm_response_timeout;
+	u8	max_cm_retries;
+};
+
+/**
+ * rdma_get_ib_req_info - Retrieves the current IB CM REQ / SIDR REQ values
+ *   that will be used when connecting or performing service ID resolution.
+ * @id: Connection identifier associated with the request.
+ * @info: Current values for CM REQ messages.
+ */
+int rdma_get_ib_req_info(struct rdma_cm_id *id, struct ib_cm_req_opt *info);
+
+/**
+ * rdma_set_ib_req_info - Sets the current IB CM REQ / SIDR REQ values
+ *   that will be used when connecting or performing service ID resolution.
+ * @id: Connection identifier associated with the request.
+ * @info: New values for CM REQ messages.
+ */
+int rdma_set_ib_req_info(struct rdma_cm_id *id, struct ib_cm_req_opt *info);
+
 #endif /* RDMA_CM_IB_H */
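
The expected pattern is read-modify-write around rdma_connect().  A
minimal sketch, assuming an already-created id; the helper name and
the retry value are arbitrary illustrations:

#include <rdma/rdma_cm_ib.h>

/* Raise the CM retry count before connecting; 15 is the IB-defined
 * maximum for max_cm_retries. */
static int bump_cm_retries(struct rdma_cm_id *id)
{
	struct ib_cm_req_opt info;
	int ret;

	ret = rdma_get_ib_req_info(id, &info);
	if (ret)
		return ret;

	info.max_cm_retries = 15;
	return rdma_set_ib_req_info(id, &info);
}
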
diff -uprN linux-2.6.18.noarch.OFED/include/rdma/rdma_user_cm.h linux-2.6.18.noarch/include/rdma/rdma_user_cm.h
--- linux-2.6.18.noarch.OFED/include/rdma/rdma_user_cm.h	1969-12-31 19:00:00.000000000 -0500
+++ linux-2.6.18.noarch/include/rdma/rdma_user_cm.h	2006-10-17 10:15:06.000000000 -0400
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2005 Intel Corporation.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_CM_H
+#define RDMA_USER_CM_H
+
+#include <linux/types.h>
+#include <linux/in6.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_user_sa.h>
+
+#define RDMA_USER_CM_ABI_VERSION	1
+
+#define RDMA_MAX_PRIVATE_DATA		256
+
+enum {
+	RDMA_USER_CM_CMD_CREATE_ID,
+	RDMA_USER_CM_CMD_DESTROY_ID,
+	RDMA_USER_CM_CMD_BIND_ADDR,
+	RDMA_USER_CM_CMD_RESOLVE_ADDR,
+	RDMA_USER_CM_CMD_RESOLVE_ROUTE,
+	RDMA_USER_CM_CMD_QUERY_ROUTE,
+	RDMA_USER_CM_CMD_CONNECT,
+	RDMA_USER_CM_CMD_LISTEN,
+	RDMA_USER_CM_CMD_ACCEPT,
+	RDMA_USER_CM_CMD_REJECT,
+	RDMA_USER_CM_CMD_DISCONNECT,
+	RDMA_USER_CM_CMD_INIT_QP_ATTR,
+	RDMA_USER_CM_CMD_GET_EVENT,
+	RDMA_USER_CM_CMD_GET_OPTION,
+	RDMA_USER_CM_CMD_SET_OPTION,
+};
+
+/*
+ * command ABI structures.
+ */
+struct rdma_ucm_cmd_hdr {
+	__u32 cmd;
+	__u16 in;
+	__u16 out;
+};
+
+struct rdma_ucm_create_id {
+	__u64 uid;
+	__u64 response;
+};
+
+struct rdma_ucm_create_id_resp {
+	__u32 id;
+};
+
+struct rdma_ucm_destroy_id {
+	__u64 response;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_destroy_id_resp {
+	__u32 events_reported;
+};
+
+struct rdma_ucm_bind_addr {
+	__u64 response;
+	struct sockaddr_in6 addr;
+	__u32 id;
+};
+
+struct rdma_ucm_resolve_addr {
+	struct sockaddr_in6 src_addr;
+	struct sockaddr_in6 dst_addr;
+	__u32 id;
+	__u32 timeout_ms;
+};
+
+struct rdma_ucm_resolve_route {
+	__u32 id;
+	__u32 timeout_ms;
+};
+
+struct rdma_ucm_query_route {
+	__u64 response;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_query_route_resp {
+	__u64 node_guid;
+	struct ib_user_path_rec ib_route[2];
+	struct sockaddr_in6 src_addr;
+	struct sockaddr_in6 dst_addr;
+	__u32 num_paths;
+	__u8 port_num;
+	__u8 reserved[3];
+};
+
+struct rdma_ucm_conn_param {
+	__u32 qp_num;
+	__u32 qp_type;
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+	__u8  private_data_len;
+	__u8  srq;
+	__u8  responder_resources;
+	__u8  initiator_depth;
+	__u8  flow_control;
+	__u8  retry_count;
+	__u8  rnr_retry_count;
+	__u8  valid;
+};
+
+struct rdma_ucm_connect {
+	struct rdma_ucm_conn_param conn_param;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_listen {
+	__u32 id;
+	__u32 backlog;
+};
+
+struct rdma_ucm_accept {
+	__u64 uid;
+	struct rdma_ucm_conn_param conn_param;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_reject {
+	__u32 id;
+	__u8  private_data_len;
+	__u8  reserved[3];
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+};
+
+struct rdma_ucm_disconnect {
+	__u32 id;
+};
+
+struct rdma_ucm_init_qp_attr {
+	__u64 response;
+	__u32 id;
+	__u32 qp_state;
+};
+
+struct rdma_ucm_get_event {
+	__u64 response;
+};
+
+struct rdma_ucm_event_resp {
+	__u64 uid;
+	__u32 id;
+	__u32 event;
+	__u32 status;
+	__u8  private_data_len;
+	__u8  reserved[3];
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+};
+
+struct rdma_ucm_get_option {
+	__u64 response;
+	__u64 optval;
+	__u32 id;
+	__u32 level;
+	__u32 optname;
+	__u32 optlen;
+};
+
+/* Protocol levels for get/set options. */
+enum {
+	RDMA_PROTO_IP = 0,
+	RDMA_PROTO_IB = 1,
+};
+
+/* IB specific option names for get/set. */
+enum {
+	IB_PATH_OPTIONS = 1,
+	IB_CM_REQ_OPTIONS = 2,
+};
+
+struct rdma_ucm_get_option_resp {
+	__u32 optlen;
+};
+
+struct rdma_ucm_set_option {
+	__u64 optval;
+	__u32 id;
+	__u32 level;
+	__u32 optname;
+	__u32 optlen;
+};
+
+#endif /* RDMA_USER_CM_H */
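
Userspace drives this ABI by writing an rdma_ucm_cmd_hdr followed by
the command payload to the rdma_cm character device; the kernel copies
any response back through the embedded pointer.  A hedged sketch of
creating an ID (the device path is the conventional udev location, an
assumption here):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <rdma/rdma_user_cm.h>

int main(void)
{
	struct {
		struct rdma_ucm_cmd_hdr hdr;
		struct rdma_ucm_create_id cmd;
	} msg;
	struct rdma_ucm_create_id_resp resp;
	int fd = open("/dev/infiniband/rdma_cm", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&msg, 0, sizeof msg);
	msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
	msg.hdr.in = sizeof msg.cmd;		/* payload after the header */
	msg.hdr.out = sizeof resp;
	msg.cmd.uid = 1;			/* opaque user context */
	msg.cmd.response = (__u64) (unsigned long) &resp;

	if (write(fd, &msg, sizeof msg) != sizeof msg) {
		perror("write");
		close(fd);
		return 1;
	}
	printf("created cm id %u\n", resp.id);
	close(fd);
	return 0;
}
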
--- openib-1.1/drivers/infiniband/hw/ipath/ipath_fs.c.inode	2006-10-19 23:28:14.000000000 -0400
+++ openib-1.1/drivers/infiniband/hw/ipath/ipath_fs.c	2006-10-19 23:28:17.000000000 -0400
@@ -66,10 +66,9 @@ static int ipathfs_mknod(struct inode *d
 	inode->i_mode = mode;
 	inode->i_uid = 0;
 	inode->i_gid = 0;
-	inode->i_blksize = PAGE_CACHE_SIZE;
 	inode->i_blocks = 0;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	inode->u.generic_ip = data;
+	inode->i_private = data;
 	if ((mode & S_IFMT) == S_IFDIR) {
 		inode->i_op = &simple_dir_inode_operations;
 		inode->i_nlink++;
@@ -132,7 +131,7 @@ static ssize_t atomic_counters_read(stru
 	u16 i;
 	struct ipath_devdata *dd;
 
-	dd = file->f_dentry->d_inode->u.generic_ip;
+	dd = file->f_dentry->d_inode->i_private;
 
 	for (i = 0; i < NUM_COUNTERS; i++)
 		counters[i] = ipath_snap_cntr(dd, i);
@@ -152,7 +151,7 @@ static ssize_t atomic_node_info_read(str
 	struct ipath_devdata *dd;
 	u64 guid;
 
-	dd = file->f_dentry->d_inode->u.generic_ip;
+	dd = file->f_dentry->d_inode->i_private;
 
 	guid = be64_to_cpu(dd->ipath_guid);
 
@@ -191,7 +190,7 @@ static ssize_t atomic_port_info_read(str
 	u32 tmp, tmp2;
 	struct ipath_devdata *dd;
 
-	dd = file->f_dentry->d_inode->u.generic_ip;
+	dd = file->f_dentry->d_inode->i_private;
 
 	/* so we only initialize non-zero fields. */
 	memset(portinfo, 0, sizeof portinfo);
@@ -338,7 +337,7 @@ static ssize_t flash_read(struct file *f
 		goto bail;
 	}
 
-	dd = file->f_dentry->d_inode->u.generic_ip;
+	dd = file->f_dentry->d_inode->i_private;
 	if (ipath_eeprom_read(dd, pos, tmp, count)) {
 		ipath_dev_err(dd, "failed to read from flash\n");
 		ret = -ENXIO;
@@ -394,7 +393,7 @@ static ssize_t flash_write(struct file *
 		goto bail_tmp;
 	}
 
-	dd = file->f_dentry->d_inode->u.generic_ip;
+	dd = file->f_dentry->d_inode->i_private;
 	if (ipath_eeprom_write(dd, pos, tmp, count)) {
 		ret = -ENXIO;
 		ipath_dev_err(dd, "failed to write to flash\n");
--- linux-2.6.18.x86_64/drivers/infiniband/ulp/ipoib/ipoib_fs.c.inode	2006-10-20 00:50:14.000000000 -0400
+++ linux-2.6.18.x86_64/drivers/infiniband/ulp/ipoib/ipoib_fs.c	2006-10-20 00:52:50.000000000 -0400
@@ -141,7 +141,7 @@ static int ipoib_mcg_open(struct inode *
 		return ret;
 
 	seq = file->private_data;
-	seq->private = inode->u.generic_ip;
+	seq->private = inode->i_private;
 
 	return 0;
 }
@@ -247,7 +247,7 @@ static int ipoib_path_open(struct inode 
 		return ret;
 
 	seq = file->private_data;
-	seq->private = inode->u.generic_ip;
+	seq->private = inode->i_private;
 
 	return 0;
 }
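
The same one-line conversion applies anywhere 2.6.18-era code stashed
per-file driver data in the old inode union.  A minimal sketch of the
replacement idiom (my_open() and struct my_dev are illustrative):

#include <linux/fs.h>

struct my_dev;				/* opaque driver state */

static int my_open(struct inode *inode, struct file *file)
{
	/* Was: dev = inode->u.generic_ip; the dedicated i_private
	 * pointer replaces the removed union member. */
	struct my_dev *dev = inode->i_private;

	file->private_data = dev;
	return 0;
}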