kernel-2.6.18-238.el5.src.rpm

From: Doug Ledford <dledford@redhat.com>
Date: Tue, 1 Dec 2009 19:33:21 -0500
Subject: [net] mlx4: update to recent version with SRIOV support
Message-id: <1259696003-21028-7-git-send-email-dledford@redhat.com>
Patchwork-id: 21591
O-Subject: [Patch RHEL5.5] [mlx4] Update to recent version with SRIOV support
Bugzilla: 503113 512162 520674 527499 529396 534158
RH-Acked-by: David S. Miller <davem@redhat.com>

This updates the mlx4_core, mlx4_ib, and mlx4_en drivers to the most
recent version, including SRIOV support.

Resolves: bz527499, bz503113, bz520674, bz529396, bz512162, bz534158

This should also likely resolve bz456130, but that would need testing by the
reporter to know for sure.  The reporter's requested changes were *not* made,
but other changes to the internal driver ring handling might render this bug
resolved via a different path.

Signed-off-by: Doug Ledford <dledford@redhat.com>

diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4a9dfc2..1c5cc2d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -465,13 +465,15 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
 				     &to_mqp(ibqp)->mqp, gid->raw,
 				     !!(to_mqp(ibqp)->flags &
-					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+				     MLX4_PROT_IB_IPV6);
 }
 
 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
-				     &to_mqp(ibqp)->mqp, gid->raw);
+				     &to_mqp(ibqp)->mqp, gid->raw,
+				     MLX4_PROT_IB_IPV6);
 }
 
 static int init_node_data(struct mlx4_ib_dev *dev)
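
For orientation before the larger hunks below: the multicast attach/detach
entry points now take an explicit protocol argument so the core can steer IB
and Ethernet groups separately under SRIOV. A minimal caller sketch following
the signatures in the hunk above (the helper name and its arguments are
illustrative, not part of the patch):

/* Hedged sketch: join and leave a group with the widened API. "qp" and
 * "raw_gid" are assumed to be an initialized mlx4 QP and a 16-byte
 * multicast GID. */
static int example_mcast_join_leave(struct mlx4_dev *dev, struct mlx4_qp *qp,
				    u8 *raw_gid, int block_loopback)
{
	int err;

	err = mlx4_multicast_attach(dev, qp, raw_gid, block_loopback,
				    MLX4_PROT_IB_IPV6);
	if (err)
		return err;

	/* ... traffic flows while attached ... */

	return mlx4_multicast_detach(dev, qp, raw_gid, MLX4_PROT_IB_IPV6);
}
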
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index e26d6d3..e52f728 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -10,5 +10,5 @@ mlx4_core-y :=	alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
 
 obj-$(CONFIG_MLX4_EN)			+= mlx4_en.o
 
-mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
-	     en_resources.o en_netdev.o en_frag.o en_lro.o
+mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
+	     en_resources.o en_netdev.o en_frag.o en_lro.o en_selftest.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 5538db1..8566312 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -172,6 +172,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 	return 0;
 }
 
+/* Like bitmap_init, but doesn't require 'num' to be a power of 2 or
+ * a non-trivial mask */
+int mlx4_bitmap_init_no_mask(struct mlx4_bitmap *bitmap, u32 num,
+			     u32 reserved_bot, u32 reserved_top)
+{
+	u32 num_rounded = roundup_pow_of_two(num);
+	return mlx4_bitmap_init(bitmap, num_rounded, num_rounded - 1,
+				reserved_bot, num_rounded - num + reserved_top);
+}
+
 void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
 {
 	kfree(bitmap->table);
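
The new no-mask variant rounds the requested count up to a power of two and
parks the slack in the reserved-top region, so callers get exactly 'num'
usable entries. A standalone userspace illustration of that arithmetic,
mirroring mlx4_bitmap_init_no_mask() above (not kernel code):

#include <stdio.h>
#include <stdint.h>

static uint32_t roundup_pow_of_two_u32(uint32_t n)
{
	uint32_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	uint32_t num = 1000, reserved_top = 0;
	uint32_t num_rounded = roundup_pow_of_two_u32(num);

	/* Matches the call: mlx4_bitmap_init(bitmap, num_rounded,
	 * num_rounded - 1, reserved_bot, num_rounded - num + reserved_top) */
	printf("size=%u mask=0x%x reserved_top=%u\n",
	       num_rounded, num_rounded - 1, num_rounded - num + reserved_top);
	/* -> size=1024 mask=0x3ff reserved_top=24 */
	return 0;
}
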
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index f094ee0..4fe7152 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -66,11 +66,17 @@ static void poll_catas(unsigned long dev_ptr)
 {
 	struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
 	struct mlx4_priv *priv = mlx4_priv(dev);
+	int err = 0;
 
 	if (readl(priv->catas_err.map)) {
 		dump_err_buf(dev);
+		err++;
+	}
+	err += priv->catas_state;
 
+	if (err) {
 		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
+		priv->catas_state = 1;
 
 		if (internal_err_reset) {
 			spin_lock(&catas_lock);
@@ -92,6 +98,9 @@ static void catas_reset(struct work_struct *work)
 	LIST_HEAD(tlist);
 	int ret;
 
+	if (!mutex_trylock(&drv_mutex))
+		return;
+
 	spin_lock_irq(&catas_lock);
 	list_splice_init(&catas_list, &tlist);
 	spin_unlock_irq(&catas_lock);
@@ -104,6 +113,7 @@ static void catas_reset(struct work_struct *work)
 		else
 			mlx4_dbg(dev, "Reset succeeded\n");
 	}
+	mutex_unlock(&drv_mutex);
 }
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev)
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index afe9ec0..0f30cae 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -36,11 +36,10 @@
 #include <linux/pci.h>
 #include <linux/errno.h>
 
-#include <linux/mlx4/cmd.h>
-
 #include <asm/io.h>
 
 #include "mlx4.h"
+#include "fw.h"
 
 #define CMD_POLL_TOKEN 0xffff
 
@@ -141,6 +140,46 @@ static int mlx4_status_to_errno(u8 status)
 	return trans_table[status];
 }
 
+static int comm_pending(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	u32 status = readl(&priv->mfunc.comm->slave_read);
+
+	return (swab32(status) >> 30) != priv->cmd.comm_toggle;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, unsigned long timeout)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	unsigned long end;
+	u32 val;
+
+	/* First, verify that the master reports correct status */
+	if (comm_pending(dev)) {
+		mlx4_warn(dev, "Communication channel is not idle\n");
+		return -EAGAIN;
+	}
+
+	/* Write command */
+	if (cmd == MLX4_COMM_CMD_RESET)
+		priv->cmd.comm_toggle = 0;
+	else if (++priv->cmd.comm_toggle > 2)
+		priv->cmd.comm_toggle = 1;
+	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 30);
+	__raw_writel((__force u32) cpu_to_be32(val), &priv->mfunc.comm->slave_write);
+	wmb();
+
+	end = msecs_to_jiffies(timeout) + jiffies;
+	while (comm_pending(dev) && time_before(jiffies, end))
+		cond_resched();
+
+	if (comm_pending(dev)) {
+		mlx4_warn(dev, "Communication channel timed out\n");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
 static int cmd_pending(struct mlx4_dev *dev)
 {
 	u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -208,6 +247,33 @@ out:
 	return ret;
 }
 
+static int mlx4_slave_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+			 int out_is_imm, u32 in_modifier, u8 op_modifier,
+			 u16 op, unsigned long timeout)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_vhcr *vhcr = priv->mfunc.vhcr;
+	int ret;
+
+	down(&priv->cmd.poll_sem);
+	vhcr->in_param = in_param;
+	vhcr->out_param = out_param ? *out_param : 0;
+	vhcr->in_modifier = in_modifier;
+	vhcr->timeout = timeout;
+	vhcr->op = op;
+	vhcr->token = CMD_POLL_TOKEN;
+	vhcr->op_modifier = op_modifier;
+	vhcr->errno = 0;
+	ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, MLX4_COMM_TIME + timeout);
+	if (!ret) {
+		if (out_is_imm)
+			*out_param = vhcr->out_param;
+		ret = vhcr->errno;
+	}
+	up(&priv->cmd.poll_sem);
+	return ret;
+}
+
 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 			 int out_is_imm, u32 in_modifier, u8 op_modifier,
 			 u16 op, unsigned long timeout)
@@ -319,15 +385,888 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 	       int out_is_imm, u32 in_modifier, u8 op_modifier,
 	       u16 op, unsigned long timeout)
 {
-	if (mlx4_priv(dev)->cmd.use_events)
-		return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int err;
+
+	if (priv->catas_state)
+		return 0;
+
+	if (priv->cmd.use_events)
+		err = mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
 				     in_modifier, op_modifier, op, timeout);
+	else {
+		if (mlx4_is_slave(dev))
+			err = mlx4_slave_cmd_poll(dev, in_param, out_param, out_is_imm,
+						  in_modifier, op_modifier, op, timeout);
+		else
+			err = mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
+					    in_modifier, op_modifier, op, timeout);
+	}
+	if (err == -EBUSY || err == -ETIMEDOUT) {
+		if (++priv->tout_counter >= 3) {
+			mlx4_err(dev, "%d commands failed to execute, FW is "
+				      "dead, resetting\n", priv->tout_counter);
+			priv->catas_state = 1;
+		}
+	}
 	else
-		return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
-				     in_modifier, op_modifier, op, timeout);
+		priv->tout_counter = 0;
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(__mlx4_cmd);
 
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+			   int slave, u64 slave_addr,
+			   int size, int is_read)
+{
+	u64 in_param;
+	u64 out_param;
+	int slave_id = slave + 1; /* index 0 is reserved for the master */
+
+	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+	    (slave_id & ~0x7f) | (size & 0xff)) {
+		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+			      "master_addr:0x%llx slave_id:%d size:%d\n",
+			      slave_addr, master_addr, slave_id, size);
+		return -EINVAL;
+	}
+
+	if (is_read) {
+		in_param = (u64) slave_id | slave_addr;
+		out_param = master_addr;
+	} else {
+		in_param = master_addr;
+		out_param = (u64) slave_id | slave_addr;
+	}
+
+	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+					   MLX4_CMD_ACCESS_MEM,
+					   MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_RESOURCE_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						       struct mlx4_cmd_mailbox *inbox,
+						       struct mlx4_cmd_mailbox *outbox)
+{
+	u32 param1 = *((u32 *) &vhcr->in_param);
+	u32 param2 = *(((u32 *) &vhcr->in_param) + 1);
+	int ret;
+
+#if 0
+	char *res[] = {"QP", "CQ", "SRQ", "MPT", "MTT"};
+	mlx4_warn(dev, "resource wrapper - %s (mode: %s) type:%s param1:%d param2:%d\n",
+			vhcr->op == MLX4_CMD_ALLOC_RES ? "allocate" : "free",
+			vhcr->op_modifier == ICM_RESERVE ? "reserve" :
+				(vhcr->op_modifier == ICM_ALLOC ? "alloc" : "reserve+alloc"),
+			res[vhcr->in_modifier], param1, param2);
+#endif
+
+	vhcr->errno = 0;
+	switch (vhcr->in_modifier) {
+	case RES_QP:
+		switch (vhcr->op_modifier) {
+		case ICM_RESERVE:
+			if (vhcr->op == MLX4_CMD_ALLOC_RES) {
+				vhcr->errno = mlx4_qp_reserve_range(dev, param1, param2, &ret);
+				if (!vhcr->errno)
+					vhcr->out_param = ret;
+			} else {
+				mlx4_qp_release_range(dev, param1, param2);
+			}
+			break;
+		case ICM_ALLOC:
+			if (vhcr->op == MLX4_CMD_ALLOC_RES)
+				vhcr->errno = mlx4_qp_alloc_icm(dev, param1);
+			else
+				mlx4_qp_free_icm(dev, param1);
+			break;
+		default:
+			vhcr->errno = -EINVAL;
+		}
+		break;
+	case RES_CQ:
+		if (vhcr->op == MLX4_CMD_ALLOC_RES) {
+			vhcr->errno = mlx4_cq_alloc_icm(dev, &ret);
+			if (!vhcr->errno)
+				vhcr->out_param = ret;
+		} else
+			mlx4_cq_free_icm(dev, param1);
+		break;
+	case RES_SRQ:
+		if (vhcr->op == MLX4_CMD_ALLOC_RES) {
+			vhcr->errno = mlx4_srq_alloc_icm(dev, &ret);
+			if (!vhcr->errno)
+				vhcr->out_param = ret;
+		} else
+			mlx4_srq_free_icm(dev, param1);
+		break;
+	case RES_MPT:
+		switch (vhcr->op_modifier) {
+		case ICM_RESERVE:
+			if (vhcr->op == MLX4_CMD_ALLOC_RES) {
+				ret = mlx4_mr_reserve(dev);
+				if (ret == -1)
+					vhcr->errno = -ENOMEM;
+				else
+					vhcr->out_param = ret;
+			} else
+				mlx4_mr_release(dev, param1);
+			break;
+		case ICM_ALLOC:
+			if (vhcr->op == MLX4_CMD_ALLOC_RES)
+				vhcr->errno = mlx4_mr_alloc_icm(dev, param1);
+			else
+				mlx4_mr_free_icm(dev, param1);
+			break;
+		default:
+			vhcr->errno = -EINVAL;
+		}
+		break;
+	case RES_MTT:
+		if (vhcr->op == MLX4_CMD_ALLOC_RES) {
+			ret = mlx4_alloc_mtt_range(dev, param1 /* order */);
+			if (ret == -1)
+				vhcr->errno = -ENOMEM;
+			else
+				vhcr->out_param = ret;
+		} else
+			mlx4_free_mtt_range(dev, param1 /* first */, param2 /* order */);
+		break;
+	case RES_MAC:
+		switch (vhcr->op) {
+		case MLX4_CMD_ALLOC_RES:
+			ret = mlx4_register_mac(dev, vhcr->op_modifier,
+						vhcr->in_param, (int *) &vhcr->out_param);
+			vhcr->errno = ret;
+			break;
+		case MLX4_CMD_FREE_RES:
+			mlx4_unregister_mac(dev, vhcr->op_modifier, vhcr->in_param);
+			break;
+		case MLX4_CMD_REPLACE_RES:
+			ret = mlx4_replace_mac(dev, vhcr->op_modifier,
+					       vhcr->out_param, vhcr->in_param);
+			vhcr->errno = ret;
+			break;
+		default:
+			vhcr->errno = -EINVAL;
+		}
+		break;
+	default:
+		vhcr->errno = -EINVAL;
+	}
+	return 0;
+}
+
+static struct mlx4_cmd_info {
+	u8 opcode;
+	bool has_inbox;
+	bool has_outbox;
+	bool out_is_imm;
+	int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+					    struct mlx4_cmd_mailbox *inbox);
+	int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+					     struct mlx4_cmd_mailbox *inbox,
+					     struct mlx4_cmd_mailbox *outbox);
+} cmd_info[] = {
+	{
+		.opcode = MLX4_CMD_QUERY_FW,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_SLAVE_CAP,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_SLAVE_CAP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_ADAPTER,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_GET_SLAVE_SQP,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_GET_SLAVE_SQP_wrapper
+	},
+
+	{
+		.opcode = MLX4_CMD_INIT_PORT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_INIT_PORT_wrapper},
+	{
+		.opcode = MLX4_CMD_CLOSE_PORT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm  = false,
+		.verify = NULL,
+		.wrapper = mlx4_CLOSE_PORT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_PORT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_QUERY_PORT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SET_PORT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_SET_PORT_wrapper
+	},
+
+	{
+		.opcode = MLX4_CMD_SW2HW_EQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /*need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_NOP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_ALLOC_RES,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = true,
+		.verify = NULL,
+		.wrapper = mlx4_RESOURCE_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_FREE_RES,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_RESOURCE_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_REPLACE_RES,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = true,
+		.verify = NULL,
+		.wrapper = mlx4_RESOURCE_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_GET_EVENT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = true,
+		.verify = NULL,
+		.wrapper = mlx4_GET_EVENT_wrapper
+	},
+
+	{
+		.opcode = MLX4_CMD_SW2HW_MPT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_MPT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_MPT,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_READ_MTT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_WRITE_MTT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = mlx4_WRITE_MTT_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SYNC_TPT,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+
+	{
+		.opcode = MLX4_CMD_HW2SW_EQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_EQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_CQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_CQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_CQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_MODIFY_CQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = true,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_SW2HW_SRQ,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_HW2SW_SRQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_SRQ,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_ARM_SRQ,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_RST2INIT_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_INIT2RTR_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_RTR2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_RTS2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_SQERR2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_2ERR_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_RTS2SQD_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_SQD2SQD_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_SQD2RTS_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_2RST_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_QUERY_QP,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_INIT2INIT_QP,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_SUSPEND_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_UNSUSPEND_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+	{
+		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_CONF_SPECIAL_QP_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_MAD_IFC,
+		.has_inbox = true,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+
+	/* Native multicast commands are not available for guests */
+	{
+		.opcode = MLX4_CMD_MCAST_ATTACH,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_MCAST_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_DIAG_RPRT,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL, /* need verifier */
+		.wrapper = NULL
+	},
+
+	/* Ethernet specific commands */
+	{
+		.opcode = MLX4_CMD_SET_VLAN_FLTR,
+		.has_inbox = true,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_SET_MCAST_FLTR,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
+	},
+	{
+		.opcode = MLX4_CMD_DUMP_ETH_STATS,
+		.has_inbox = false,
+		.has_outbox = true,
+		.out_is_imm = false,
+		.verify = NULL,
+		.wrapper = NULL /* need wrapper*/
+	},
+};
+
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cmd_info *cmd = NULL;
+	struct mlx4_vhcr *vhcr = priv->mfunc.vhcr;
+	struct mlx4_cmd_mailbox *inbox = NULL;
+	struct mlx4_cmd_mailbox *outbox = NULL;
+	u64 in_param;
+	u64 out_param;
+	int ret;
+	int i;
+
+	/* DMA in the vHCR */
+	ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+			      priv->mfunc.master.slave_state[slave].vhcr_dma,
+			      ALIGN(sizeof(struct mlx4_vhcr),
+				    MLX4_ACCESS_MEM_ALIGN), 1);
+	if (ret) {
+		mlx4_err(dev, "Failed reading vhcr\n");
+		return ret;
+	}
+
+	/* Lookup command */
+	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+		if (vhcr->op == cmd_info[i].opcode) {
+			cmd = &cmd_info[i];
+			break;
+		}
+	}
+	if (!cmd) {
+		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+							      vhcr->op, slave);
+		vhcr->errno = -EINVAL;
+		goto out_status;
+	}
+
+	/* Read inbox */
+	if (cmd->has_inbox) {
+		inbox = mlx4_alloc_cmd_mailbox(dev);
+		if (IS_ERR(inbox)) {
+			ret = PTR_ERR(inbox);
+			inbox = NULL;
+			goto out;
+		}
+
+		/* FIXME: add mailbox size per-command */
+		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+				      vhcr->in_param,
+				      MLX4_MAILBOX_SIZE, 1);
+		if (ret) {
+			mlx4_err(dev, "Failed reading inbox\n");
+			goto out;
+		}
+	}
+
+	/* Apply permission and bound checks if applicable */
+	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+		mlx4_warn(dev, "Command:0x%x failed protection checks\n", vhcr->op);
+		vhcr->errno = -EPERM;
+		goto out_status;
+	}
+
+	/* Allocate outbox */
+	if (cmd->has_outbox) {
+		outbox = mlx4_alloc_cmd_mailbox(dev);
+		if (IS_ERR(outbox)) {
+			ret = PTR_ERR(outbox);
+			outbox = NULL;
+			goto out;
+		}
+	}
+
+	/* Execute the command! */
+	if (cmd->wrapper)
+		vhcr->errno = cmd->wrapper(dev, slave, vhcr, inbox, outbox);
+	else {
+		in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+		out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+		vhcr->errno = __mlx4_cmd(dev, in_param, &out_param,
+							cmd->out_is_imm,
+							vhcr->in_modifier,
+							vhcr->op_modifier,
+							vhcr->op,
+							vhcr->timeout);
+		if (cmd->out_is_imm)
+			vhcr->out_param = out_param;
+	}
+
+	/* Write outbox if command completed successfully */
+	if (cmd->has_outbox && !vhcr->errno) {
+		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+				      vhcr->out_param,
+				      MLX4_MAILBOX_SIZE, 0);
+		if (ret) {
+			mlx4_err(dev, "Failed writing outbox\n");
+			goto out;
+		}
+	}
+
+out_status:
+	/* DMA back vhcr result */
+	ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+			      priv->mfunc.master.slave_state[slave].vhcr_dma,
+			      ALIGN(sizeof(struct mlx4_vhcr),
+				    MLX4_ACCESS_MEM_ALIGN), 0);
+	if (ret)
+		mlx4_err(dev, "Failed writing vhcr result\n");
+
+	if (vhcr->errno)
+		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d\n",
+							vhcr->op, slave, vhcr->errno);
+	/* Fall through... */
+
+out:
+	mlx4_free_cmd_mailbox(dev, inbox);
+	mlx4_free_cmd_mailbox(dev, outbox);
+	return ret;
+}
+
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, u16 param, u8 toggle)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+	u8 toggle_next;
+	u32 reply;
+
+	if (cmd == MLX4_COMM_CMD_RESET) {
+		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+		goto reset_slave;
+	}
+
+	/* Increment next toggle token */
+	toggle_next = slave_state[slave].comm_toggle + 1;
+	if (toggle_next > 2)
+		toggle_next = 1;
+	if (toggle != toggle_next) {
+		mlx4_warn(dev, "Incorrect token:%d from slave:%d expected:%d\n",
+							toggle, slave, toggle_next);
+		goto reset_slave;
+	}
+
+	switch (cmd) {
+	case MLX4_COMM_CMD_VHCR0:
+		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+			goto reset_slave;
+		slave_state[slave].vhcr_dma = ((u64) param) << 48;
+		break;
+	case MLX4_COMM_CMD_VHCR1:
+		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+			goto reset_slave;
+		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+		break;
+	case MLX4_COMM_CMD_VHCR2:
+		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+			goto reset_slave;
+		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+		break;
+	case MLX4_COMM_CMD_VHCR_EN:
+		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+			goto reset_slave;
+		slave_state[slave].vhcr_dma |= param;
+		break;
+	case MLX4_COMM_CMD_VHCR_POST:
+		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+			goto reset_slave;
+		if (mlx4_master_process_vhcr(dev, slave)) {
+			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave.\n", slave);
+			goto reset_slave;
+		}
+		break;
+	default:
+		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+		goto reset_slave;
+	}
+
+	slave_state[slave].last_cmd = cmd;
+	slave_state[slave].comm_toggle = toggle_next;
+	reply = (u32) toggle_next << 30;
+	__raw_writel((__force u32) cpu_to_be32(reply),
+		     &priv->mfunc.comm[slave].slave_read);
+	wmb();
+	return;
+
+reset_slave:
+	/* FIXME: cleanup any slave resources */
+	slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+	slave_state[slave].comm_toggle = 0;
+	__raw_writel((__force u32) 0, &priv->mfunc.comm[slave].slave_write);
+	__raw_writel((__force u32) 0, &priv->mfunc.comm[slave].slave_read);
+	wmb();
+}
+
+/* master command processing */
+static void mlx4_master_poll_comm(struct work_struct *work)
+{
+	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct mlx4_mfunc *mfunc = container_of(delay, struct mlx4_mfunc, comm_work);
+	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+	struct mlx4_dev *dev = &priv->dev;
+	u32 comm_cmd;
+	int polled = 0;
+	int i;
+
+	/* Give each slave a chance for one command */
+	for (i = 0; i < dev->num_slaves; i++) {
+		comm_cmd = swab32(readl(&priv->mfunc.comm[i].slave_write));
+		if (comm_cmd >> 30 != priv->mfunc.master.slave_state[i].comm_toggle) {
+			mlx4_master_do_cmd(dev, i, comm_cmd >> 16, comm_cmd, comm_cmd >> 30);
+			polled = 1;
+		}
+	}
+	queue_delayed_work(priv->mfunc.comm_wq, &priv->mfunc.comm_work,
+						polled ? 0 : HZ / 10);
+}
+
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i, port;
+
+	priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+					    &priv->mfunc.vhcr_dma,
+					    GFP_KERNEL);
+	if (!priv->mfunc.vhcr) {
+		mlx4_err(dev, "Couldn't allocate vhcr.\n");
+		return -ENOMEM;
+	}
+
+	if (mlx4_is_master(dev))
+		priv->mfunc.comm = ioremap(pci_resource_start(dev->pdev,
+							    priv->fw.comm_bar) +
+								priv->fw.comm_base,
+							    MLX4_COMM_PAGESIZE);
+	else
+		priv->mfunc.comm = ioremap(pci_resource_start(dev->pdev, 0) +
+							    MLX4_SLAVE_COMM_BASE,
+							    MLX4_COMM_PAGESIZE);
+	if (!priv->mfunc.comm) {
+		mlx4_err(dev, "Couldn't map communication vector.\n");
+		goto err_vhcr;
+	}
+
+	if (mlx4_is_master(dev)) {
+		priv->mfunc.master.slave_state = kzalloc(dev->num_slaves *
+					   sizeof(struct mlx4_slave_state),
+					   GFP_KERNEL);
+		if (!priv->mfunc.master.slave_state)
+			goto err_comm;
+
+		for (i = 0; i < dev->num_slaves; ++i) {
+			priv->mfunc.master.slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+			for (port = 1; port <= MLX4_MAX_PORTS; port++)
+				INIT_LIST_HEAD(&priv->mfunc.master.slave_state[i].mcast_filters[port]);
+			spin_lock_init(&priv->mfunc.master.slave_state[i].lock);
+		}
+
+		INIT_DELAYED_WORK(&priv->mfunc.comm_work, mlx4_master_poll_comm);
+		priv->mfunc.comm_wq = create_singlethread_workqueue("mlx4_comm");
+		if (!priv->mfunc.comm_wq) {
+			kfree(priv->mfunc.master.slave_state);
+			goto err_comm;
+		}
+	} else {
+		priv->cmd.comm_toggle = 0;
+		INIT_DELAYED_WORK(&priv->mfunc.comm_work, mlx4_slave_async_eq_poll);
+		priv->mfunc.comm_wq = create_singlethread_workqueue("mlx4_event");
+		if (!priv->mfunc.comm_wq)
+			goto err_comm;
+	}
+	return 0;
+
+err_comm:
+	iounmap(priv->mfunc.comm);
+err_vhcr:
+	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+					     priv->mfunc.vhcr,
+					     priv->mfunc.vhcr_dma);
+	priv->mfunc.vhcr = NULL;
+	return -ENOMEM;
+}
+
 int mlx4_cmd_init(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -337,22 +1276,48 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
 	priv->cmd.use_events = 0;
 	priv->cmd.toggle     = 1;
 
-	priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
-				MLX4_HCR_SIZE);
-	if (!priv->cmd.hcr) {
-		mlx4_err(dev, "Couldn't map command register.");
-		return -ENOMEM;
+	priv->cmd.hcr = NULL;
+	priv->mfunc.vhcr = NULL;
+
+	if (!mlx4_is_slave(dev)) {
+		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+					((dev->flags & MLX4_FLAG_SRIOV) ?
+						MLX4_HCR_SRIOV_BASE :
+						MLX4_HCR_BASE),
+					MLX4_HCR_SIZE);
+		if (!priv->cmd.hcr) {
+			mlx4_err(dev, "Couldn't map command register.\n");
+			return -ENOMEM;
+		}
 	}
 
 	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
 					 MLX4_MAILBOX_SIZE,
 					 MLX4_MAILBOX_SIZE, 0);
-	if (!priv->cmd.pool) {
-		iounmap(priv->cmd.hcr);
-		return -ENOMEM;
-	}
+	if (!priv->cmd.pool)
+		goto err_hcr;
 
 	return 0;
+
+err_hcr:
+	if (!mlx4_is_slave(dev))
+		iounmap(priv->cmd.hcr);
+	return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	if (priv->mfunc.vhcr) {
+		destroy_workqueue(priv->mfunc.comm_wq);
+		kfree(priv->mfunc.master.slave_state);
+		iounmap(priv->mfunc.comm);
+		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+						     priv->mfunc.vhcr,
+						     priv->mfunc.vhcr_dma);
+		priv->mfunc.vhcr = NULL;
+	}
 }
 
 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -360,7 +1325,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	pci_pool_destroy(priv->cmd.pool);
-	iounmap(priv->cmd.hcr);
+
+	if (!mlx4_is_slave(dev))
+		iounmap(priv->cmd.hcr);
 }
 
 /*
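
The comm channel added above multiplexes everything through one 32-bit
doorbell word per slave: bits 31:30 carry a toggle (0 is reserved for reset,
then it cycles 1 -> 2 -> 1), bits 23:16 the command (a u8), and bits 15:0 an
immediate parameter; the master acknowledges by echoing the toggle into
slave_read. A userspace model of that packing (the command value is a
stand-in; the real MLX4_COMM_CMD_* values live in the headers):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t toggle = 1;	/* cycles 1 -> 2 -> 1; 0 means reset */
	uint8_t cmd = 0x5;	/* stand-in for a MLX4_COMM_CMD_* value */
	uint16_t param = 0;

	/* Slave side: val = param | (cmd << 16) | (toggle << 30) */
	uint32_t val = param | ((uint32_t)cmd << 16) | ((uint32_t)toggle << 30);

	/* Master side: mlx4_master_do_cmd(dev, i, val >> 16, val, val >> 30) */
	printf("cmd=0x%x param=0x%x toggle=%u\n",
	       (val >> 16) & 0xff, val & 0xffff, val >> 30);
	return 0;
}
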
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 31a5190..76c46e5 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -60,7 +60,8 @@ struct mlx4_cq_context {
 	__be32			solicit_producer_index;
 	__be32			consumer_index;
 	__be32			producer_index;
-	u32			reserved4[2];
+	__be32			pd; /* for sriov guest id */
+	u32			reserved4;
 	__be64			db_rec_addr;
 };
 
@@ -203,34 +204,90 @@ static int mlx4_find_least_loaded_vector(struct mlx4_priv *priv)
 	return index;
 }
 
-int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
-		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
-		  unsigned vector, int collapsed)
+int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cq_table *cq_table = &priv->cq_table;
-	struct mlx4_cmd_mailbox *mailbox;
-	struct mlx4_cq_context *cq_context;
-	u64 mtt_addr;
+	u64 out_param;
 	int err;
 
-	cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
-	if (cq->cqn == -1)
+	if (mlx4_is_slave(dev)) {
+		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+						       ICM_RESERVE_AND_ALLOC,
+						       MLX4_CMD_ALLOC_RES,
+						       MLX4_CMD_TIME_CLASS_A);
+		if (err) {
+			*cqn = -1;
+			return err;
+		} else {
+			*cqn = out_param;
+			return 0;
+		}
+	}
+
+	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+	if (*cqn == -1)
 		return -ENOMEM;
 
-	err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
+	err = mlx4_table_get(dev, &cq_table->table, *cqn);
 	if (err)
 		goto err_out;
 
-	err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
 	if (err)
 		goto err_put;
+	return 0;
+
+err_put:
+	mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+	mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+	return err;
+}
+
+void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cq_table *cq_table = &priv->cq_table;
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &in_param) = cqn;
+		*(((u32 *) &in_param) + 1) = 0;
+		err = mlx4_cmd(dev, in_param, RES_CQ, ICM_RESERVE_AND_ALLOC,
+						      MLX4_CMD_FREE_RES,
+						      MLX4_CMD_TIME_CLASS_A);
+		if (err)
+			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+	} else {
+		mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+		mlx4_table_put(dev, &cq_table->table, cqn);
+		mlx4_bitmap_free(&cq_table->bitmap, cqn);
+	}
+}
+
+int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
+		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
+		  unsigned vector, int collapsed)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_cq_table *cq_table = &priv->cq_table;
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_cq_context *cq_context;
+	u64 mtt_addr;
+	int err;
+
+	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
+	if (err)
+		return err;
 
 	spin_lock_irq(&cq_table->lock);
 	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
 	spin_unlock_irq(&cq_table->lock);
 	if (err)
-		goto err_cmpt_put;
+		goto err_icm;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
@@ -259,6 +316,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 	mtt_addr = mlx4_mtt_addr(dev, mtt);
 	cq_context->mtt_base_addr_h = mtt_addr >> 32;
 	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+	cq_context->pd = cpu_to_be32(dev->caps.pd_base << dev->caps.slave_pd_shift);
 	cq_context->db_rec_addr     = cpu_to_be64(db_rec);
 
 	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
@@ -280,14 +338,8 @@ err_radix:
 	radix_tree_delete(&cq_table->tree, cq->cqn);
 	spin_unlock_irq(&cq_table->lock);
 
-err_cmpt_put:
-	mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
-	mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
-	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+	mlx4_cq_free_icm(dev, cq->cqn);
 
 	return err;
 }
@@ -314,8 +366,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
 		complete(&cq->free);
 	wait_for_completion(&cq->free);
 
-	mlx4_table_put(dev, &cq_table->table, cq->cqn);
-	mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+	mlx4_cq_free_icm(dev, cq->cqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_free);
 
@@ -326,6 +377,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
 	spin_lock_init(&cq_table->lock);
 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+	if (mlx4_is_slave(dev))
+		return 0;
 
 	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
 			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -337,6 +390,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
 {
+	if (mlx4_is_slave(dev))
+		return;
 	/* Nothing to do to clean up radix_tree */
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
 }
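
Worth noting for reviewers: on the slave path above, resource arguments are
packed as two host-order 32-bit words inside the immediate u64 (written by
mlx4_cq_free_icm() here, read back as param1/param2 by mlx4_RESOURCE_wrapper()
in cmd.c). Master and slave run on the same host, so the layout round-trips.
A standalone model of that convention:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t in_param = 0;
	uint32_t cqn = 42, second = 0, param1, param2;

	/* Equivalent of: *((u32 *)&in_param) = cqn;
	 *                *(((u32 *)&in_param) + 1) = 0;
	 * (memcpy stands in for the aliasing casts the kernel code uses) */
	memcpy(&in_param, &cqn, sizeof(cqn));
	memcpy((char *)&in_param + 4, &second, sizeof(second));

	memcpy(&param1, &in_param, sizeof(param1));
	memcpy(&param2, (char *)&in_param + 4, sizeof(param2));
	printf("param1=%u param2=%u\n", param1, param2);	/* -> 42 0 */
	return 0;
}
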
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 440633a..54a59ec 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -70,13 +70,16 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 
 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
 				cq->buf_size, 2 * PAGE_SIZE);
-	if (err)
+	if (err) {
+		en_err(priv, "Failed to allocate CQ buffer\n");
 		return err;
+	}
 
 	err = mlx4_en_map_buffer(&cq->wqres.buf);
-	if (err)
+	if (err) {
+		en_err(priv, "Failed to map CQ buffer\n");
 		mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	else
+	} else
 		cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
 
 	return err;
@@ -94,6 +97,9 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	*cq->mcq.arm_db    = 0;
 	memset(cq->buf, 0, cq->buf_size);
 
+	if (!cq->is_tx)
+		cq->size = priv->rx_ring[cq->ring].actual_size;
+
 	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
 			    cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
 	if (err)
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
new file mode 100644
index 0000000..e0ff25f
--- /dev/null
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+
+static void
+mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+
+	sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
+	strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
+	sprintf(drvinfo->fw_version, "%d.%d.%d",
+		(u16) (mdev->dev->caps.fw_ver >> 32),
+		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
+		(u16) (mdev->dev->caps.fw_ver & 0xffff));
+	strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+	drvinfo->n_stats = 0;
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+static u32 mlx4_en_get_tso(struct net_device *dev)
+{
+	return (dev->features & NETIF_F_TSO) != 0;
+}
+
+static int mlx4_en_set_tso(struct net_device *dev, u32 data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	if (data) {
+		if (!priv->mdev->LSO_support)
+			return -EPERM;
+		dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+	} else
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+	return 0;
+}
+
+static u32 mlx4_en_get_rx_csum(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	return priv->rx_csum;
+}
+
+static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	priv->rx_csum = (data != 0);
+	return 0;
+}
+
+static const char main_strings[][ETH_GSTRING_LEN] = {
+	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
+	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+	"tx_heartbeat_errors", "tx_window_errors",
+
+	/* port statistics */
+	"lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
+	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+	"rx_csum_good", "rx_csum_none", "tx_chksum_offload",
+
+	/* packet statistics */
+	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
+	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
+	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
+	"tx_prio_6", "tx_prio_7",
+};
+#define NUM_MAIN_STATS	21
+#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
+
+static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
+	"Interrupt Test",
+	"Link Test",
+	"Speed Test",
+	"Register Test",
+	"Loopback Test",
+};
+
+static u32 mlx4_en_get_msglevel(struct net_device *dev)
+{
+	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
+}
+
+static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
+{
+	((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
+}
+
+static void mlx4_en_get_wol(struct net_device *netdev,
+			    struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts = 0;
+
+	return;
+}
+
+static int mlx4_en_get_sset_count(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
+}
+
+static int mlx4_en_self_test_count(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
+}
+
+static void mlx4_en_get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, uint64_t *data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int index = 0;
+	int i;
+
+	spin_lock_bh(&priv->stats_lock);
+
+	for (i = 0; i < NUM_MAIN_STATS; i++)
+		data[index++] = ((unsigned long *) &priv->stats)[i];
+	for (i = 0; i < NUM_PORT_STATS; i++)
+		data[index++] = ((unsigned long *) &priv->port_stats)[i];
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		data[index++] = priv->tx_ring[i].packets;
+		data[index++] = priv->tx_ring[i].bytes;
+	}
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		data[index++] = priv->rx_ring[i].packets;
+		data[index++] = priv->rx_ring[i].bytes;
+	}
+	for (i = 0; i < NUM_PKT_STATS; i++)
+		data[index++] = ((unsigned long *) &priv->pkstats)[i];
+	spin_unlock_bh(&priv->stats_lock);
+
+}
+
+static void mlx4_en_self_test(struct net_device *dev,
+			      struct ethtool_test *etest, u64 *buf)
+{
+	mlx4_en_ex_selftest(dev, &etest->flags, buf);
+}
+
+static void mlx4_en_get_strings(struct net_device *dev,
+				uint32_t stringset, uint8_t *data)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int index = 0;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
+			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+		if (priv->mdev->dev->caps.loopback_support)
+			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
+				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+		break;
+
+	case ETH_SS_STATS:
+		/* Add main counters */
+		for (i = 0; i < NUM_MAIN_STATS; i++)
+			strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
+		for (i = 0; i < NUM_PORT_STATS; i++)
+			strcpy(data + (index++) * ETH_GSTRING_LEN,
+				main_strings[i + NUM_MAIN_STATS]);
+		for (i = 0; i < priv->tx_ring_num; i++) {
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"tx%d_packets", i);
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"tx%d_bytes", i);
+		}
+		for (i = 0; i < priv->rx_ring_num; i++) {
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"rx%d_packets", i);
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"rx%d_bytes", i);
+		}
+		for (i = 0; i < NUM_PKT_STATS; i++)
+			strcpy(data + (index++) * ETH_GSTRING_LEN,
+				main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
+		break;
+	}
+}
+
+static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int trans_type;
+
+	cmd->autoneg = AUTONEG_DISABLE;
+	cmd->supported = SUPPORTED_10000baseT_Full;
+	cmd->advertising = ADVERTISED_10000baseT_Full;
+
+	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+		return -ENOMEM;
+
+	trans_type = priv->port_state.transciver;
+	if (netif_carrier_ok(dev)) {
+		cmd->speed = priv->port_state.link_speed;
+		cmd->duplex = DUPLEX_FULL;
+	} else {
+		cmd->speed = -1;
+		cmd->duplex = -1;
+	}
+
+	if (trans_type > 0 && trans_type <= 0xC) {
+		cmd->port = PORT_FIBRE;
+		cmd->transceiver = XCVR_EXTERNAL;
+		cmd->supported |= SUPPORTED_FIBRE;
+		cmd->advertising |= ADVERTISED_FIBRE;
+	} else if (trans_type == 0x80 || trans_type == 0) {
+		cmd->port = PORT_TP;
+		cmd->transceiver = XCVR_INTERNAL;
+		cmd->supported |= SUPPORTED_TP;
+		cmd->advertising |= ADVERTISED_TP;
+	} else  {
+		cmd->port = -1;
+		cmd->transceiver = -1;
+	}
+	return 0;
+}
+
+static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	if ((cmd->autoneg == AUTONEG_ENABLE) ||
+	    (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
+		return -EINVAL;
+
+	/* Nothing to change */
+	return 0;
+}
+
+static int mlx4_en_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	coal->tx_coalesce_usecs = 0;
+	coal->tx_max_coalesced_frames = 0;
+	coal->rx_coalesce_usecs = priv->rx_usecs;
+	coal->rx_max_coalesced_frames = priv->rx_frames;
+
+	coal->pkt_rate_low = priv->pkt_rate_low;
+	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
+	coal->pkt_rate_high = priv->pkt_rate_high;
+	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
+	coal->rate_sample_interval = priv->sample_interval;
+	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+	return 0;
+}
+
+static int mlx4_en_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int err, i;
+
+	priv->rx_frames = (coal->rx_max_coalesced_frames ==
+			   MLX4_EN_AUTO_CONF) ?
+				MLX4_EN_RX_COAL_TARGET /
+				priv->dev->mtu + 1 :
+				coal->rx_max_coalesced_frames;
+	priv->rx_usecs = (coal->rx_coalesce_usecs ==
+			  MLX4_EN_AUTO_CONF) ?
+				MLX4_EN_RX_COAL_TIME :
+				coal->rx_coalesce_usecs;
+
+	/* Set adaptive coalescing params */
+	priv->pkt_rate_low = coal->pkt_rate_low;
+	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
+	priv->pkt_rate_high = coal->pkt_rate_high;
+	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
+	priv->sample_interval = coal->rate_sample_interval;
+	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+	priv->last_moder_time = MLX4_EN_AUTO_CONF;
+	if (priv->adaptive_rx_coal)
+		return 0;
+
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		priv->rx_cq[i].moder_cnt = priv->rx_frames;
+		priv->rx_cq[i].moder_time = priv->rx_usecs;
+		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int mlx4_en_set_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *pause)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	priv->prof->tx_pause = pause->tx_pause != 0;
+	priv->prof->rx_pause = pause->rx_pause != 0;
+	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+				    priv->rx_skb_size + ETH_FCS_LEN,
+				    priv->prof->tx_pause,
+				    priv->prof->tx_ppp,
+				    priv->prof->rx_pause,
+				    priv->prof->rx_ppp);
+	if (err)
+		en_err(priv, "Failed setting pause params\n");
+
+	return err;
+}
+
+static void mlx4_en_get_pauseparam(struct net_device *dev,
+				 struct ethtool_pauseparam *pause)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	pause->tx_pause = priv->prof->tx_pause;
+	pause->rx_pause = priv->prof->rx_pause;
+}
+
+static int mlx4_en_set_ringparam(struct net_device *dev,
+				 struct ethtool_ringparam *param)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	u32 rx_size, tx_size;
+	int port_up = 0;
+	int err = 0;
+
+	if (param->rx_jumbo_pending || param->rx_mini_pending)
+		return -EINVAL;
+
+	rx_size = roundup_pow_of_two(param->rx_pending);
+	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
+	tx_size = roundup_pow_of_two(param->tx_pending);
+	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
+
+	if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
+					priv->rx_ring[0].size) &&
+	    tx_size == priv->tx_ring[0].size)
+		return 0;
+
+	mutex_lock(&mdev->state_lock);
+	if (priv->port_up) {
+		port_up = 1;
+		mlx4_en_stop_port(dev);
+	}
+
+	mlx4_en_free_resources(priv);
+
+	priv->prof->tx_ring_size = tx_size;
+	priv->prof->rx_ring_size = rx_size;
+
+	err = mlx4_en_alloc_resources(priv);
+	if (err) {
+		en_err(priv, "Failed reallocating port resources\n");
+		goto out;
+	}
+	if (port_up) {
+		err = mlx4_en_start_port(dev);
+		if (err)
+			en_err(priv, "Failed starting port\n");
+	}
+
+out:
+	mutex_unlock(&mdev->state_lock);
+	return err;
+}
+
+void mlx4_en_get_ringparam(struct net_device *dev, struct ethtool_ringparam *param)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	memset(param, 0, sizeof(*param));
+	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
+	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
+	param->rx_pending = priv->port_up ?
+		priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
+	param->tx_pending = priv->tx_ring[0].size;
+}
+
+const struct ethtool_ops mlx4_en_ethtool_ops = {
+	.get_drvinfo = mlx4_en_get_drvinfo,
+	.get_settings = mlx4_en_get_settings,
+	.set_settings = mlx4_en_set_settings,
+#ifdef NETIF_F_TSO
+	.get_tso = mlx4_en_get_tso,
+	.set_tso = mlx4_en_set_tso,
+#endif
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_link = ethtool_op_get_link,
+	.get_rx_csum = mlx4_en_get_rx_csum,
+	.set_rx_csum = mlx4_en_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = ethtool_op_set_tx_csum,
+	.get_strings = mlx4_en_get_strings,
+	.get_stats_count = mlx4_en_get_sset_count,
+	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
+	.self_test_count = mlx4_en_self_test_count,
+	.self_test = mlx4_en_self_test,
+	.get_wol = mlx4_en_get_wol,
+	.get_msglevel = mlx4_en_get_msglevel,
+	.set_msglevel = mlx4_en_set_msglevel,
+	.get_coalesce = mlx4_en_get_coalesce,
+	.set_coalesce = mlx4_en_set_coalesce,
+	.get_pauseparam = mlx4_en_get_pauseparam,
+	.set_pauseparam = mlx4_en_set_pauseparam,
+	.get_ringparam = mlx4_en_get_ringparam,
+	.set_ringparam = mlx4_en_set_ringparam,
+};
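
The ops table itself is presumably attached to each netdev in en_netdev.c,
which falls outside this excerpt; on kernels of this vintage that hookup is
typically one line during device init (an assumption, not shown in this
patch):

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
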
diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
index 79122c7..808b030 100644
--- a/drivers/net/mlx4/en_lro.c
+++ b/drivers/net/mlx4/en_lro.c
@@ -44,12 +44,12 @@
 /* LRO hash function - using sum of source and destination port LSBs is
  * good enough */
 #define LRO_INDEX(th, size) \
-	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
+	((*((u8 *) &th->source + 1) + *((u8 *) &th->dest + 1)) & (size - 1))
 
 /* #define CONFIG_MLX4_EN_DEBUG_LRO */
 
 #ifdef CONFIG_MLX4_EN_DEBUG_LRO
-static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
+static void mlx4_en_lro_validate(struct mlx4_en_priv *priv, struct mlx4_en_lro *lro)
 {
 	int i;
 	int size, size2;
@@ -85,7 +85,7 @@ static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *
 
 		if (size2 != len2) {
 			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
+				 "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
 			return;
 		}
 		size += size2;
@@ -97,8 +97,8 @@ static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *
 }
 #endif /* MLX4_EN_DEBUG_LRO */
 
-static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
+static void mlx4_en_lro_flush_single(struct mlx4_en_priv *priv,
+		   struct mlx4_en_rx_ring *ring, struct mlx4_en_lro *lro)
 {
 	struct sk_buff *skb = lro->skb;
 	struct iphdr *iph = (struct iphdr *) skb->data;
@@ -156,7 +156,7 @@ static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
 	hlist_add_head(&lro->node, &ring->lro_free);
 }
 
-void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
+void mlx4_en_lro_flush(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, u8 all)
 {
 	struct mlx4_en_lro *lro;
 	struct hlist_node *node, *tmp;
@@ -192,7 +192,7 @@ static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
 						page_alloc,
 						data_len + hlen);
 	if (!nr_frags) {
-		mlx4_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
+		en_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
 		return -ENOMEM;
 	}
 
@@ -230,7 +230,7 @@ static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *m
 	struct hlist_head *list = &ring->lro_hash[index];
 
 	hlist_for_each_entry(lro, node, list, node) {
-		if (lro->sport_dport == *((u32*) &th->source) &&
+		if (lro->sport_dport == *((u32 *) &th->source) &&
 		    lro->saddr == iph->saddr &&
 		    lro->daddr == iph->daddr)
 			return lro;
@@ -313,7 +313,7 @@ int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
 	/* We only handle aligned timestamp options */
 	tcp_hlen = (th->doff << 2);
 	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-		ts = (u32*) (th + 1);
+		ts = (u32 *) (th + 1);
 		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
 					  (TCPOPT_NOP << 16) |
 					  (TCPOPT_TIMESTAMP << 8) |
@@ -323,7 +323,6 @@ int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
 		tsecr = ts[2];
 	} else if (tcp_hlen != sizeof(*th))
 		goto sync_device;
-	
 
 	/* At this point we know we have a TCP packet that is likely to be
 	 * eligible for LRO. Therefore, see now if we have an oustanding
@@ -344,7 +343,7 @@ int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
 	/* Get ip length and verify that the frame is big enough */
 	ip_len = ntohs(iph->tot_len);
 	if (unlikely(length < ETH_HLEN + ip_len)) {
-		mlx4_warn(mdev, "Cannot LRO - ip payload exceeds frame!\n");
+		en_warn(priv, "Cannot LRO - ip payload exceeds frame!\n");
 		goto sync_device;
 	}
 
@@ -442,7 +441,7 @@ new_session:
 			/* Initialize session */
 			lro->saddr = iph->saddr;
 			lro->daddr = iph->daddr;
-			lro->sport_dport = *((u32*) &th->source);
+			lro->sport_dport = *((u32 *) &th->source);
 
 			lro->next_seq = seq + tcp_data_len;
 			lro->tot_len = ip_len;
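
As the comment at the top of this file says, the LRO hash sums the low-order
bytes of the two ports: with ports stored in network byte order, byte 1 of
each __be16 is the port's LSB on any host. A standalone check of LRO_INDEX()
(port and table-size values are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t sport = htons(80), dport = htons(34567);
	unsigned int size = 64;	/* hash table size, power of two */
	uint8_t s = *((uint8_t *)&sport + 1);	/* 80    & 0xff = 0x50 */
	uint8_t d = *((uint8_t *)&dport + 1);	/* 34567 & 0xff = 0x07 */

	printf("index=%u\n", (s + d) & (size - 1));	/* -> index=23 */
	return 0;
}
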
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index ad12e6f..cfb31df 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -52,6 +52,58 @@ static const char mlx4_en_version[] __devinitdata =
 	DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
 
+
+#define MLX4_EN_PARM_INT(X, def_val, desc) \
+	static unsigned int X = def_val;\
+	module_param(X, uint, 0444); \
+	MODULE_PARM_DESC(X, desc);
+
+
+/*
+ * Device scope module parameters
+ */
+
+
+/* Use an XOR rather than a Toeplitz hash function for RSS */
+MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
+
+/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
+MLX4_EN_PARM_INT(rss_mask, 0x5, "RSS hash type bitmask");
+
+/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
+MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
+		 "Number of LRO sessions per ring or disabled (0)");
+
+/* Allow reassembly of fragmented IP packets */
+MLX4_EN_PARM_INT(ip_reasm, 1, "Allow reassembly of fragmented IP packets (!0)");
+
+/* Priority pausing */
+MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
+			   " Per priority bit mask");
+MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
+			   " Per priority bit mask");
+
+int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
+{
+	struct mlx4_en_profile *params = &mdev->profile;
+	int i;
+
+	params->rss_xor = (rss_xor != 0);
+	params->rss_mask = rss_mask & 0x1f;
+	params->num_lro = min_t(int, num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS);
+	params->ip_reasm = ip_reasm;
+	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
+		params->prof[i].rx_pause = 1;
+		params->prof[i].rx_ppp = pfcrx;
+		params->prof[i].tx_pause = 1;
+		params->prof[i].tx_ppp = pfctx;
+		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+	}
+
+	return 0;
+}
+
 static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
 			  enum mlx4_dev_event event, int port)
 {
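
For reference, a single invocation of the module-parameter macro above
expands as follows (sketch derived from the macro body itself):

	/* MLX4_EN_PARM_INT(rss_mask, 0x5, "RSS hash type bitmask") becomes: */
	static unsigned int rss_mask = 0x5;
	module_param(rss_mask, uint, 0444);	/* world-readable, not writable */
	MODULE_PARM_DESC(rss_mask, "RSS hash type bitmask");
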
@@ -142,13 +194,13 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 		mlx4_warn(mdev, "LSO not supported, please upgrade to later "
 				"FW version to enable LSO\n");
 
-	if(mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
-			 MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
-			 0, 0, &mdev->mr)){
+	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
+			  MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
+			  0, 0, &mdev->mr)) {
 		mlx4_err(mdev, "Failed allocating memory region\n");
 		goto err_uar;
 	}
-	if(mlx4_mr_enable(mdev->dev, &mdev->mr)){
+	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
 		mlx4_err(mdev, "Failed enabling memory region\n");
 		goto err_mr;
 	}
@@ -169,12 +221,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	 * number of completion vextors + 1 (for default ring)
 	 * and MAX_RX_RINGS */
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
-			  mdev->profile.prof[i].tx_ring_num, i);
-		mdev->profile.prof[i].rx_ring_num =
-			min_t(int, dev->caps.num_comp_vectors + 1, MAX_RX_RINGS);
-		mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
-			  mdev->profile.prof[i].rx_ring_num, i);
+		mdev->profile.prof[i].tx_ring_num = MLX4_EN_NUM_HASH_RINGS + 1 +
+					(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+		mdev->profile.prof[i].rx_ring_num = max_t(int,
+			min_t(int, roundup_pow_of_two(dev->caps.num_comp_vectors) + 1,
+			MAX_RX_RINGS), MIN_RX_RINGS);
 	}
 
 	/* Create our own workqueue for reset/multicast tasks
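
As a worked example of the new ring sizing (hypothetical numbers; the
MIN/MAX constants are assumed to come from mlx4_en.h):

	/* HCA reports 6 completion vectors; assume MIN_RX_RINGS == 4
	 * and MAX_RX_RINGS == 16 */
	rx_ring_num = max_t(int,
			    min_t(int, roundup_pow_of_two(6) + 1, /* 8+1 = 9 */
				  MAX_RX_RINGS),
			    MIN_RX_RINGS);			   /* => 9 */
	/* and with pfcrx == 0: tx_ring_num = MLX4_EN_NUM_HASH_RINGS + 1 */
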
@@ -196,28 +247,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	/* Create a netdev for each port */
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
 		mlx4_info(mdev, "Activating port:%d\n", i);
-		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
+		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
 			mdev->pndev[i] = NULL;
-			goto err_free_netdev;
-		}
 	}
 	return mdev;
 
-
-err_free_netdev:
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-		if (mdev->pndev[i])
-			mlx4_en_destroy_netdev(mdev->pndev[i]);
-	}
-
-	mutex_lock(&mdev->state_lock);
-	mdev->device_up = false;
-	mutex_unlock(&mdev->state_lock);
-	flush_workqueue(mdev->workqueue);
-
-	/* Stop event queue before we drop down to release shared SW state */
-	destroy_workqueue(mdev->workqueue);
-
 err_mr:
 	mlx4_mr_free(dev, &mdev->mr);
 err_uar:
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 450b918..cfee71f 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -51,14 +51,14 @@ static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
 
-	mlx4_dbg(HW, priv, "Regsitering VLAN group:%p\n", grp);
+	en_dbg(HW, priv, "Regsitering VLAN group:%p\n", grp);
 	priv->vlgrp = grp;
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -72,15 +72,15 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 	if (!priv->vlgrp)
 		return;
 
-	mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
-		 vid, vlan_group_get_device(priv->vlgrp, vid));
+	en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
+	       vid, vlan_group_get_device(priv->vlgrp, vid));
 
 	/* Add VID to port VLAN filter */
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -94,9 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	if (!priv->vlgrp)
 		return;
 
-	mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
-		 "entry:%p)\n", vid, priv->vlgrp,
-		 vlan_group_get_device(priv->vlgrp, vid));
+	en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
+	       vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
 	vlan_group_set_device(priv->vlgrp, vid, NULL);
 
 	/* Remove VID from port VLAN filter */
@@ -104,12 +103,12 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
 
-static u64 mlx4_en_mac_to_u64(u8 *addr)
+u64 mlx4_en_mac_to_u64(u8 *addr)
 {
 	u64 mac = 0;
 	int i;
@@ -127,7 +126,7 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct sockaddr *saddr = addr;
 
-	if (!is_valid_ether_addr(saddr->sa_data)) 
+	if (!is_valid_ether_addr(saddr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
@@ -146,13 +145,13 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		/* Remove old MAC and insert the new one */
-		mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
-		err = mlx4_register_mac(mdev->dev, priv->port,
-					priv->mac, &priv->mac_index);
+		err = mlx4_replace_mac(mdev->dev, priv->port,
+				       priv->base_qpn, priv->mac);
 		if (err)
-			mlx4_err(mdev, "Failed changing HW MAC address\n");
+			en_err(priv, "Failed changing HW MAC address\n");
 	} else
-		mlx4_dbg(HW, priv, "Port is down, exiting...\n");
+		en_dbg(HW, priv, "Port is down while "
+				 "registering mac, exiting...\n");
 
 	mutex_unlock(&mdev->state_lock);
 }
@@ -174,15 +173,15 @@ static void mlx4_en_clear_list(struct net_device *dev)
 static void mlx4_en_cache_mclist(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct dev_mc_list *mclist;
 	struct dev_mc_list *tmp;
 	struct dev_mc_list *plist = NULL;
 
+	mlx4_en_clear_list(dev);
 	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
 		tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
 		if (!tmp) {
-			mlx4_err(mdev, "failed to allocate multicast list\n");
+			en_err(priv, "failed to allocate multicast list\n");
 			mlx4_en_clear_list(dev);
 			return;
 		}
@@ -215,17 +214,18 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	struct net_device *dev = priv->dev;
 	struct dev_mc_list *mclist;
 	u64 mcast_addr = 0;
+	u8 mc_list[16] = {0};
 	int err;
 
 	mutex_lock(&mdev->state_lock);
 	if (!mdev->device_up) {
-		mlx4_dbg(HW, priv, "Card is not up, ignoring "
-				   "multicast change.\n");
+		en_dbg(HW, priv, "Card is not up, "
+				 "ignoring multicast change.\n");
 		goto out;
 	}
 	if (!priv->port_up) {
-		mlx4_dbg(HW, priv, "Port is down, ignoring "
-				   "multicast change.\n");
+		en_dbg(HW, priv, "Port is down, "
+				 "ignoring  multicast change.\n");
 		goto out;
 	}
 
@@ -236,29 +236,27 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	if (dev->flags & IFF_PROMISC) {
 		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
 			if (netif_msg_rx_status(priv))
-				mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
-					  priv->port);
+				en_warn(priv, "Entering promiscuous mode\n");
 			priv->flags |= MLX4_EN_FLAG_PROMISC;
 
 			/* Enable promiscouos mode */
 			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 						     priv->base_qpn, 1);
 			if (err)
-				mlx4_err(mdev, "Failed enabling "
-					 "promiscous mode\n");
+				en_err(priv, "Failed enabling "
+					     "promiscous mode\n");
 
 			/* Disable port multicast filter (unconditionally) */
 			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 						  0, MLX4_MCAST_DISABLE);
 			if (err)
-				mlx4_err(mdev, "Failed disabling "
-					 "multicast filter\n");
+				en_err(priv, "Failed disabling "
+					     "multicast filter\n");
 
 			/* Disable port VLAN filter */
 			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
 			if (err)
-				mlx4_err(mdev, "Failed disabling "
-					 "VLAN filter\n");
+				en_err(priv, "Failed disabling VLAN filter\n");
 		}
 		goto out;
 	}
@@ -268,8 +266,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	 */
 
 	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
-		if (netif_msg_rx_status(priv)) 
-			mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
+		if (netif_msg_rx_status(priv))
+			en_warn(priv, "Port:%d leaving promiscuous mode\n",
 				  priv->port);
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
@@ -277,12 +275,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 					     priv->base_qpn, 0);
 		if (err)
-			mlx4_err(mdev, "Failed disabling promiscous mode\n");
+			en_err(priv, "Failed disabling promiscous mode\n");
 
 		/* Enable port VLAN filter */
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed enabling VLAN filter\n");
+			en_err(priv, "Failed enabling VLAN filter\n");
 	}
 
 	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
@@ -290,13 +288,19 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
-			mlx4_err(mdev, "Failed disabling multicast filter\n");
+			en_err(priv, "Failed disabling multicast filter\n");
 	} else {
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
-			mlx4_err(mdev, "Failed disabling multicast filter\n");
+			en_err(priv, "Failed disabling multicast filter\n");
 
+		/* Detach our qp from all the multicast addresses */
+		for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
+			memcpy(&mc_list[10], mclist->dmi_addr, ETH_ALEN);
+			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, MLX4_PROT_ETH);
+		}
 		/* Flush mcast filter and init it with broadcast address */
 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
 				    1, MLX4_MCAST_CONFIG);
@@ -307,6 +311,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		mlx4_en_cache_mclist(dev);
 		netif_tx_unlock_bh(dev);
 		for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
+			memcpy(&mc_list[10], mclist->dmi_addr, ETH_ALEN);
+			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, 0, MLX4_PROT_ETH);
 			mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
 			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
 					    mcast_addr, 0, MLX4_MCAST_CONFIG);
@@ -314,9 +321,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_ENABLE);
 		if (err)
-			mlx4_err(mdev, "Failed enabling multicast filter\n");
-
-		mlx4_en_clear_list(dev);
+			en_err(priv, "Failed enabling multicast filter\n");
 	}
 out:
 	mutex_unlock(&mdev->state_lock);
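
The u8 mc_list[16] scratch buffer used in these hunks is a GID-sized
array; for Ethernet the MAC occupies the last six bytes, which is why
every copy lands at offset 10. A minimal sketch (the helper name is
ours, not the driver's):

	/* Build the 16-byte GID passed to mlx4_multicast_attach()/
	 * mlx4_multicast_detach() for an Ethernet address */
	static void en_mac_to_gid(u8 gid[16], const u8 *mac)
	{
		memset(gid, 0, 16);
		memcpy(&gid[10], mac, ETH_ALEN);	/* bytes 10..15 */
	}
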
@@ -350,10 +355,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	if (netif_msg_timer(priv))
-		mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
+		en_warn(priv, "Tx timeout called\n");
 
 	priv->port_stats.tx_timeout++;
-	mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+	en_dbg(DRV, priv, "Scheduling watchdog\n");
 	queue_work(mdev->workqueue, &priv->watchdog_task);
 }
 
@@ -382,8 +387,8 @@ void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	 */
 	priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1;
 	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
-	mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
-			     "rx_frames:%d rx_usecs:%d\n",
+	en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
+			   "rx_frames:%d rx_usecs:%d\n",
 		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
 
 	/* Setup cq moderation params */
@@ -416,7 +421,6 @@ void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 {
 	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_cq *cq;
 	unsigned long packets;
 	unsigned long rate;
@@ -460,7 +464,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 		    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
 			moder_time = priv->rx_usecs_high;
 		} else {
-			if (rate < priv->pkt_rate_low)
+			if (rate < priv->pkt_rate_low ||
+			    avg_pkt_size < MLX4_EN_AVG_PKT_SMALL)
 				moder_time = priv->rx_usecs_low;
 			else if (rate > priv->pkt_rate_high)
 				moder_time = priv->rx_usecs_high;
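
Restated, the new test biases small-packet (latency-sensitive) traffic
toward the low moderation bound (our paraphrase, not literal driver code):

	if (rate < priv->pkt_rate_low ||
	    avg_pkt_size < MLX4_EN_AVG_PKT_SMALL)	/* e.g. ping-like */
		moder_time = priv->rx_usecs_low;	/* react quickly */
	else if (rate > priv->pkt_rate_high)
		moder_time = priv->rx_usecs_high;	/* batch bulk flows */
	/* rates in between are handled by the else branch below this hunk */
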
@@ -476,11 +481,11 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 		moder_time = priv->rx_usecs;
 	}
 
-	mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
-		 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
+	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+	       tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
 
-	mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
-		 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
+	en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+	       "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
 		 priv->last_moder_time, moder_time, period, packets,
 		 avg_pkt_size, rate);
 
@@ -491,8 +496,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 			cq->moder_time = moder_time;
 			err = mlx4_en_set_cq_moder(priv, cq);
 			if (err) {
-				mlx4_err(mdev, "Failed modifying moderation for cq:%d "
-					 "on port:%d\n", i, priv->port);
+				en_err(priv, "Failed modifying moderation for cq:%d\n", i);
 				break;
 			}
 		}
@@ -515,8 +519,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
 
 	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
 	if (err)
-		mlx4_dbg(HW, priv, "Could not update stats for "
-				   "port:%d\n", priv->port);
+		en_dbg(HW, priv, "Could not update stats \n");
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up) {
@@ -540,12 +543,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
 	 * report to system log */
 	if (priv->last_link_state != linkstate) {
 		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
-			if (netif_msg_link(priv))
-				mlx4_info(mdev, "Port %d - link down\n", priv->port);
+			en_info(priv, "Link Down\n");
 			netif_carrier_off(priv->dev);
 		} else {
-			if (netif_msg_link(priv))
-				mlx4_info(mdev, "Port %d - link up\n", priv->port);
+			en_info(priv, "Link Up\n");
 			netif_carrier_on(priv->dev);
 		}
 	}
@@ -560,55 +561,63 @@ int mlx4_en_start_port(struct net_device *dev)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_cq *cq;
 	struct mlx4_en_tx_ring *tx_ring;
-	struct mlx4_en_rx_ring *rx_ring;
 	int rx_index = 0;
 	int tx_index = 0;
 	int err = 0;
 	int i;
 	int j;
+	u8 mc_list[16] = {0};
 
 	if (priv->port_up) {
-		mlx4_dbg(DRV, priv, "start port called while port already up\n");
+		en_dbg(DRV, priv, "start port called while port already up\n");
 		return 0;
 	}
 
 	/* Calculate Rx buf size */
 	dev->mtu = min(dev->mtu, priv->max_mtu);
 	mlx4_en_calc_rx_buf(dev);
-	mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+
 	/* Configure rx cq's and rings */
+	err = mlx4_en_activate_rx_rings(priv);
+	if (err) {
+		en_err(priv, "Failed to activate RX rings\n");
+		return err;
+	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		cq = &priv->rx_cq[i];
-		rx_ring = &priv->rx_ring[i];
 
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed activating Rx CQ\n");
+			en_err(priv, "Failed activating Rx CQ\n");
 			goto cq_err;
 		}
 		for (j = 0; j < cq->size; j++)
 			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
-
+		priv->rx_ring[i].cqn = cq->mcq.cqn;
 		++rx_index;
 	}
 
-	err = mlx4_en_activate_rx_rings(priv);
-	if (err){
-		mlx4_err(mdev, "Failed to activate RX rings\n");
+	/* Set port mac number */
+	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+	err = mlx4_register_mac(mdev->dev, priv->port,
+				priv->mac, &priv->base_qpn);
+	if (err) {
+		en_err(priv, "Failed setting port mac\n");
 		goto cq_err;
 	}
 
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
-		mlx4_err(mdev, "Failed configuring rss steering\n");
-		goto rx_err;
+		en_err(priv, "Failed configuring rss steering\n");
+		goto mac_err;
 	}
 
 	/* Configure tx cq's and rings */
@@ -617,25 +626,24 @@ int mlx4_en_start_port(struct net_device *dev)
 		cq = &priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed allocating Tx CQ\n");
+			en_err(priv, "Failed allocating Tx CQ\n");
 			goto tx_err;
 		}
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
-		mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d "
-				"to -1\n", i);
+		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d "
+				  "to -1\n", i);
 		cq->buf->wqe_index = 0xffff;
 
 		/* Configure ring */
 		tx_ring = &priv->tx_ring[i];
-		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
-					       priv->rx_ring[0].srq.srqn);
+		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
 		if (err) {
-			mlx4_err(mdev, "Failed allocating Tx ring\n");
+			en_err(priv, "Failed allocating Tx ring\n");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
@@ -663,32 +671,31 @@ int mlx4_en_start_port(struct net_device *dev)
 				    priv->prof->rx_pause,
 				    priv->prof->rx_ppp);
 	if (err) {
-		mlx4_err(mdev, "Failed setting port general configurations"
+		en_err(priv, "Failed setting port general configurations"
 			       " for port %d, with error %d\n", priv->port, err);
 		goto tx_err;
 	}
 	/* Set default qp number */
 	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
 	if (err) {
-		mlx4_err(mdev, "Failed setting default qp numbers\n");
-		goto tx_err;
-	}
-	/* Set port mac number */
-	err = mlx4_register_mac(mdev->dev, priv->port,
-				priv->mac, &priv->mac_index);
-	if (err) {
-		mlx4_err(mdev, "Failed setting port mac\n");
+		en_err(priv, "Failed setting default qp numbers\n");
 		goto tx_err;
 	}
 
 	/* Init port */
-	mlx4_dbg(HW, priv, "Initializing port\n");
+	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
 	if (err) {
-		mlx4_err(mdev, "Failed Initializing port\n");
-		goto mac_err;
+		en_err(priv, "Failed Initializing port\n");
+		goto tx_err;
 	}
 
+	/* Attach rx QP to broadcast address */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+				  0, MLX4_PROT_ETH))
+		mlx4_warn(mdev, "Failed Attaching Broadcast\n");
+
 	/* Schedule multicast task to populate multicast list */
 	queue_work(mdev->workqueue, &priv->mcast_task);
 
@@ -696,8 +703,6 @@ int mlx4_en_start_port(struct net_device *dev)
 	netif_start_queue(dev);
 	return 0;
 
-mac_err:
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
 tx_err:
 	while (tx_index--) {
 		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
@@ -705,12 +710,13 @@ tx_err:
 	}
 
 	mlx4_en_release_rss_steer(priv);
-rx_err:
-	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+mac_err:
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
 cq_err:
-	while (rx_index--)	
+	while (rx_index--)
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+	for (i = 0; i < priv->rx_ring_num; i++)
+		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
 
 	return err; /* need to close devices */
 }
@@ -720,11 +726,12 @@ void mlx4_en_stop_port(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct dev_mc_list *mclist;
 	int i;
+	u8 mc_list[16] = {0};
 
 	if (!priv->port_up) {
-		mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
-			 priv->port);
+		en_dbg(DRV, priv, "stop port called while port already down\n");
 		return;
 	}
 	netif_stop_queue(dev);
@@ -734,11 +741,19 @@ void mlx4_en_stop_port(struct net_device *dev)
 	priv->port_up = false;
 	netif_tx_unlock_bh(dev);
 
-	/* close port*/
-	mlx4_CLOSE_PORT(mdev->dev, priv->port);
+	/* Detach All multicasts */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+			      MLX4_PROT_ETH);
+	for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
+		memcpy(&mc_list[10], mclist->dmi_addr, ETH_ALEN);
+		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+				      mc_list, MLX4_PROT_ETH);
+	}
+	mlx4_en_clear_list(dev);
 
 	/* Unregister Mac address for the port */
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
 
 	/* Free TX Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
@@ -758,6 +773,9 @@ void mlx4_en_stop_port(struct net_device *dev)
 		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
 	}
+
+	/* close port*/
+	mlx4_CLOSE_PORT(mdev->dev, priv->port);
 }
 
 static void mlx4_en_restart(struct work_struct *work)
@@ -767,13 +785,13 @@ static void mlx4_en_restart(struct work_struct *work)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
 
-	mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+	en_dbg(DRV, priv, "Watchdog task called \n");
 
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		mlx4_en_stop_port(dev);
 		if (mlx4_en_start_port(dev))
-		    mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+		    en_err(priv, "Failed restarting port\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -789,18 +807,18 @@ int mlx4_en_open(struct net_device *dev)
 	mutex_lock(&mdev->state_lock);
 
 	if (!mdev->device_up) {
-		mlx4_err(mdev, "Cannot open - device down/disabled\n");
+		en_err(priv, "Cannot open - device down/disabled\n");
 		err = -EBUSY;
 		goto out;
 	}
 
 	/* Reset HW statistics and performance counters */
 	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
-		mlx4_dbg(HW, priv, "Failed dumping statistics\n");
+		en_dbg(HW, priv, "Failed dumping statistics\n");
 
 	memset(&priv->stats, 0, sizeof(priv->stats));
 	memset(&priv->pstats, 0, sizeof(priv->pstats));
-	
+
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		priv->tx_ring[i].bytes = 0;
 		priv->tx_ring[i].packets = 0;
@@ -813,7 +831,7 @@ int mlx4_en_open(struct net_device *dev)
 	mlx4_en_set_default_moderation(priv);
 	err = mlx4_en_start_port(dev);
 	if (err)
-		mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
+		en_err(priv, "Failed starting port:%d\n", priv->port);
 
 out:
 	mutex_unlock(&mdev->state_lock);
@@ -826,8 +844,7 @@ int mlx4_en_close(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	if (netif_msg_ifdown(priv))
-		mlx4_info(mdev, "Close called for port:%d\n", priv->port);
+	en_dbg(IFDOWN, priv, "Close port called\n");
 
 	mutex_lock(&mdev->state_lock);
 
@@ -859,7 +876,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
 
@@ -870,7 +886,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 			goto err;
 
 		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
-					   prof->tx_ring_size, TXBB_SIZE)) 
+					   prof->tx_ring_size, TXBB_SIZE))
 			goto err;
 	}
 
@@ -893,7 +909,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 	return 0;
 
 err:
-	mlx4_err(mdev, "Failed to allocate NIC resources\n");
+	en_err(priv, "Failed to allocate NIC resources\n");
 	return -ENOMEM;
 }
 
@@ -903,7 +919,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
 	/* Unregsiter device - this will close the port if it was up */
 	if (priv->registered)
@@ -913,7 +929,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
 
 	cancel_delayed_work(&priv->stats_task);
-        cancel_delayed_work(&priv->refill_task);
 	/* flush any pending task for this netdev */
 	flush_workqueue(mdev->workqueue);
 
@@ -932,11 +947,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err = 0;
 
-	mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
 		 dev->mtu, new_mtu);
 
 	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
-		mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
+		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
 		return -EPERM;
 	}
 	dev->mtu = new_mtu;
@@ -946,13 +961,13 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 		if (!mdev->device_up) {
 			/* NIC is probably restarting - let watchdog task reset
 			 * the port */
-			mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
+			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 		} else {
 			mlx4_en_stop_port(dev);
 			mlx4_en_set_default_moderation(priv);
 			err = mlx4_en_start_port(dev);
 			if (err) {
-				mlx4_err(mdev, "Failed restarting port:%d\n",
+				en_err(priv, "Failed restarting port:%d\n",
 					 priv->port);
 				queue_work(mdev->workqueue, &priv->watchdog_task);
 			}
@@ -999,7 +1014,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	spin_lock_init(&priv->stats_lock);
 	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
 	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
-	INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
 	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
 	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
 	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
@@ -1008,7 +1022,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
 	priv->mac = mdev->dev->caps.def_mac[priv->port];
 	if (ILLEGAL_MAC(priv->mac)) {
-		mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
 			 priv->port, priv->mac);
 		err = -EINVAL;
 		goto out;
@@ -1018,21 +1032,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	if (err)
 		goto out;
 
-	/* Populate Rx default RSS mappings */
-	mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
-						RSS_FACTOR, priv->rx_ring_num);
 	/* Allocate page for receive rings */
 	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
 				MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate page for rx qps\n");
+		en_err(priv, "Failed to allocate page for rx qps\n");
 		goto out;
 	}
-	priv->allocated = 1;
+	priv->allocated = true;
 
 	/* Populate Tx priority mappings */
-	mlx4_en_set_prio_map(priv, priv->tx_prio_map,
-			     prof->tx_ring_num - MLX4_EN_NUM_HASH_RINGS);
+	mlx4_en_set_prio_map(priv, priv->tx_prio_map);
 
 	/*
 	 * Initialize netdev entry points
@@ -1057,9 +1067,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
 	/* Set defualt MAC */
 	dev->addr_len = ETH_ALEN;
-	for (i = 0; i < ETH_ALEN; i++)
-		dev->dev_addr[ETH_ALEN - 1 - i] =
-		(u8) (priv->mac >> (8 * i));
+	for (i = 0; i < ETH_ALEN; i++) {
+		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+		dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+	}
 
 	/*
 	 * Set driver features
@@ -1077,15 +1088,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		dev->features |= NETIF_F_TSO6;
 	}
 
+	dev->tx_queue_len = 100;
+
 	mdev->pndev[port] = dev;
 
 	netif_carrier_off(dev);
 	err = register_netdev(dev);
 	if (err) {
-		mlx4_err(mdev, "Netdev registration failed\n");
+		mlx4_err(mdev, "Netdev registration failed for port %d\n", port);
 		goto out;
 	}
-	priv->registered = 1;
+
+	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+
+	priv->registered = true;
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 	return 0;
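
The address loop above unpacks the 48-bit burned MAC from its u64 form,
most significant byte first; a worked example with a hypothetical address:

	/* mac == 0x0002c9abcdefULL, i.e. 00:02:c9:ab:cd:ef */
	for (i = 0; i < ETH_ALEN; i++)
		addr[ETH_ALEN - 1 - i] = (u8) (mac >> (8 * i));
	/* addr[] == { 0x00, 0x02, 0xc9, 0xab, 0xcd, 0xef } */
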
 
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
deleted file mode 100644
index 0b2b7c0..0000000
--- a/drivers/net/mlx4/en_params.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-
-#include "mlx4_en.h"
-#include "en_port.h"
-
-#define MLX4_EN_PARM_INT(X, def_val, desc) \
-	static unsigned int X = def_val;\
-	module_param(X , uint, 0444); \
-	MODULE_PARM_DESC(X, desc);
-
-
-/*
- * Device scope module parameters
- */
-
-
-/* Use a XOR rathern than Toeplitz hash function for RSS */
-MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
-
-/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
-MLX4_EN_PARM_INT(rss_mask, 0x5, "RSS hash type bitmask");
-
-/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
-MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
-		 "Number of LRO sessions per ring or disabled (0)");
-
-/* Allow reassembly of fragmented IP packets */
-MLX4_EN_PARM_INT(ip_reasm, 1, "Allow reassembly of fragmented IP packets (!0)");
-
-/* Priority pausing */
-MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
-			   " Per priority bit mask");
-MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
-			   " Per priority bit mask");
-
-int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-{
-	struct mlx4_en_profile *params = &mdev->profile;
-	int i;
-
-	params->rss_xor = (rss_xor != 0);
-	params->rss_mask = rss_mask & 0x1f;
-	params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
-	params->ip_reasm = ip_reasm;
-	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
-		params->prof[i].rx_pause = 1;
-		params->prof[i].rx_ppp = pfcrx;
-		params->prof[i].tx_pause = 1;
-		params->prof[i].tx_ppp = pfctx;
-		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
-		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
-		params->prof[i].tx_ring_num = MLX4_EN_NUM_HASH_RINGS + 1 +
-			(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
-	}
-
-	return 0;
-}
-
-
-/*
- * Ethtool support
- */
-
-static void
-mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
-
-	sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
-	strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
-	sprintf(drvinfo->fw_version, "%d.%d.%d",
-		(u16) (mdev->dev->caps.fw_ver >> 32),
-		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
-		(u16) (mdev->dev->caps.fw_ver & 0xffff));
-	strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
-	drvinfo->n_stats = 0;
-	drvinfo->regdump_len = 0;
-	drvinfo->eedump_len = 0;
-}
-
-static u32 mlx4_en_get_tso(struct net_device *dev)
-{
-	return (dev->features & NETIF_F_TSO) != 0;
-}
-
-static int mlx4_en_set_tso(struct net_device *dev, u32 data)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-
-	if (data) {
-		if (!priv->mdev->LSO_support)
-			return -EPERM;
-		dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
-	} else
-		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-	return 0;
-}
-
-static u32 mlx4_en_get_rx_csum(struct net_device *dev)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	return priv->rx_csum;
-}
-
-static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	priv->rx_csum = (data != 0);
-	return 0;
-}
-
-static const char main_strings[][ETH_GSTRING_LEN] = {
-	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
-	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
-	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
-	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
-	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
-	"tx_heartbeat_errors", "tx_window_errors",
-
-	/* port statistics */
-	"lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
-	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
-	"rx_csum_good", "rx_csum_none", "tx_chksum_offload",
-
-	/* packet statistics */
-	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
-	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
-	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
-	"tx_prio_6", "tx_prio_7",
-};
-#define NUM_MAIN_STATS	21
-#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
-
-static u32 mlx4_en_get_msglevel(struct net_device *dev)
-{
-	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
-}
-
-static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
-{
-	((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
-}
-
-static void mlx4_en_get_wol(struct net_device *netdev,
-			    struct ethtool_wolinfo *wol)
-{
-	wol->supported = 0;
-	wol->wolopts = 0;
-
-	return;
-}
-
-static int mlx4_en_get_sset_count(struct net_device *dev)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-
-	return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
-}
-
-static void mlx4_en_get_ethtool_stats(struct net_device *dev,
-		struct ethtool_stats *stats, uint64_t *data)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int index = 0;
-	int i;
-
-	spin_lock_bh(&priv->stats_lock);
-
-	for (i = 0; i < NUM_MAIN_STATS; i++)
-		data[index++] = ((unsigned long *) &priv->stats)[i];
-	for (i = 0; i < NUM_PORT_STATS; i++)
-		data[index++] = ((unsigned long *) &priv->port_stats)[i];
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		data[index++] = priv->tx_ring[i].packets;
-		data[index++] = priv->tx_ring[i].bytes;
-	}
-	for (i = 0; i < priv->rx_ring_num; i++) {
-		data[index++] = priv->rx_ring[i].packets;
-		data[index++] = priv->rx_ring[i].bytes;
-	}
-	for (i = 0; i < NUM_PKT_STATS; i++)
-		data[index++] = ((unsigned long *) &priv->pkstats)[i];
-	spin_unlock_bh(&priv->stats_lock);
-
-}
-
-static void mlx4_en_get_strings(struct net_device *dev,
-				uint32_t stringset, uint8_t *data)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int index = 0;
-	int i;
-
-	if (stringset != ETH_SS_STATS)
-		return;
-
-	/* Add main counters */
-	for (i = 0; i < NUM_MAIN_STATS; i++)
-		strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
-	for (i = 0; i< NUM_PORT_STATS; i++)
-		strcpy(data + (index++) * ETH_GSTRING_LEN,
-			main_strings[i + NUM_MAIN_STATS]);
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		sprintf(data + (index++) * ETH_GSTRING_LEN,
-			"tx%d_packets", i);
-		sprintf(data + (index++) * ETH_GSTRING_LEN,
-			"tx%d_bytes", i);
-	}
-	for (i = 0; i < priv->rx_ring_num; i++) {
-		sprintf(data + (index++) * ETH_GSTRING_LEN,
-			"rx%d_packets", i);
-		sprintf(data + (index++) * ETH_GSTRING_LEN,
-			"rx%d_bytes", i);
-	}
-	for (i = 0; i< NUM_PKT_STATS; i++)
-		strcpy(data + (index++) * ETH_GSTRING_LEN,
-			main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
-}
-
-static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int trans_type = priv->mdev->dev->caps.trans_type[priv->port];
-
-	cmd->autoneg = AUTONEG_DISABLE;
-	cmd->supported = SUPPORTED_10000baseT_Full;
-	cmd->advertising = ADVERTISED_10000baseT_Full;
-	if (netif_carrier_ok(dev)) {
-		cmd->speed = SPEED_10000;
-		cmd->duplex = DUPLEX_FULL;
-	} else {
-		cmd->speed = -1;
-		cmd->duplex = -1;
-	}
-
-	if (trans_type > 0 && trans_type <= 0xC) {
-		cmd->port = PORT_FIBRE;
-		cmd->transceiver = XCVR_EXTERNAL;
-		cmd->supported |= SUPPORTED_FIBRE;
-		cmd->advertising |= ADVERTISED_FIBRE;
-	} else if (trans_type == 0x80 || trans_type == 0) {
-		cmd->port = PORT_TP;
-		cmd->transceiver = XCVR_INTERNAL;
-		cmd->supported |= SUPPORTED_TP;
-		cmd->advertising |= ADVERTISED_TP;
-	} else  {
-		cmd->port = -1;
-		cmd->transceiver = -1;
-	}
-	return 0;
-}
-
-static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	if ((cmd->autoneg == AUTONEG_ENABLE) ||
-	    (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
-		return -EINVAL;
-
-	/* Nothing to change */
-	return 0;
-}
-
-static int mlx4_en_get_coalesce(struct net_device *dev,
-			      struct ethtool_coalesce *coal)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-
-	coal->tx_coalesce_usecs = 0;
-	coal->tx_max_coalesced_frames = 0;
-	coal->rx_coalesce_usecs = priv->rx_usecs;
-	coal->rx_max_coalesced_frames = priv->rx_frames;
-
-	coal->pkt_rate_low = priv->pkt_rate_low;
-	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
-	coal->pkt_rate_high = priv->pkt_rate_high;
-	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
-	coal->rate_sample_interval = priv->sample_interval;
-	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
-	return 0;
-}
-
-static int mlx4_en_set_coalesce(struct net_device *dev,
-			      struct ethtool_coalesce *coal)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int err, i;
-
-	priv->rx_frames = (coal->rx_max_coalesced_frames ==
-			   MLX4_EN_AUTO_CONF) ?
-				MLX4_EN_RX_COAL_TARGET /
-				priv->dev->mtu + 1 :
-				coal->rx_max_coalesced_frames;
-	priv->rx_usecs = (coal->rx_coalesce_usecs ==
-			  MLX4_EN_AUTO_CONF) ?
-				MLX4_EN_RX_COAL_TIME :
-				coal->rx_coalesce_usecs;
-
-	/* Set adaptive coalescing params */
-	priv->pkt_rate_low = coal->pkt_rate_low;
-	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
-	priv->pkt_rate_high = coal->pkt_rate_high;
-	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
-	priv->sample_interval = coal->rate_sample_interval;
-	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
-	priv->last_moder_time = MLX4_EN_AUTO_CONF;
-	if (priv->adaptive_rx_coal)
-		return 0;
-
-	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_cq[i].moder_cnt = priv->rx_frames;
-		priv->rx_cq[i].moder_time = priv->rx_usecs;
-		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-static int mlx4_en_set_pauseparam(struct net_device *dev,
-				struct ethtool_pauseparam *pause)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
-	int err;
-
-	priv->prof->tx_pause = pause->tx_pause != 0;
-	priv->prof->rx_pause = pause->rx_pause != 0;
-	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-				    priv->rx_skb_size + ETH_FCS_LEN,
-				    priv->prof->tx_pause,
-				    priv->prof->tx_ppp,
-				    priv->prof->rx_pause,
-				    priv->prof->rx_ppp);
-	if (err)
-		mlx4_err(mdev, "Failed setting pause params to\n");
-
-	return err;
-}
-
-static void mlx4_en_get_pauseparam(struct net_device *dev,
-				 struct ethtool_pauseparam *pause)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-
-	pause->tx_pause = priv->prof->tx_pause;
-	pause->rx_pause = priv->prof->rx_pause;
-}
-
-static int mlx4_en_set_ringparam(struct net_device *dev,
-				 struct ethtool_ringparam *param)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
-	u32 rx_size, tx_size;
-	int port_up = 0;
-	int err = 0;
-
-	if (param->rx_jumbo_pending || param->rx_mini_pending)
-		return -EINVAL;
-
-	rx_size = roundup_pow_of_two(param->rx_pending);
-	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
-	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
-	tx_size = roundup_pow_of_two(param->tx_pending);
-	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
-	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
-
-	if (rx_size == priv->prof->rx_ring_size &&
-	    tx_size == priv->prof->tx_ring_size)
-		return 0;
-
-	mutex_lock(&mdev->state_lock);
-	if (priv->port_up) {
-		port_up = 1;
-		mlx4_en_stop_port(dev);
-	}
-
-	mlx4_en_free_resources(priv);
-
-	priv->prof->tx_ring_size = tx_size;
-	priv->prof->rx_ring_size = rx_size;
-
-	err = mlx4_en_alloc_resources(priv);
-	if (err) {
-		mlx4_err(mdev, "Failed reallocating port resources\n");
-		goto out;
-	}
-	if (port_up) {
-		err = mlx4_en_start_port(dev);
-		if (err)
-			mlx4_err(mdev, "Failed starting port\n");
-	}
-
-out:
-	mutex_unlock(&mdev->state_lock);
-	return err;
-}
-
-void mlx4_en_get_ringparam(struct net_device *dev, struct ethtool_ringparam *param)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
-
-	memset(param, 0, sizeof(*param));
-	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
-	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
-	param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
-	param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
-}
-
-const struct ethtool_ops mlx4_en_ethtool_ops = {
-	.get_drvinfo = mlx4_en_get_drvinfo,
-	.get_settings = mlx4_en_get_settings,
-	.set_settings = mlx4_en_set_settings,
-#ifdef NETIF_F_TSO
-	.get_tso = mlx4_en_get_tso,
-	.set_tso = mlx4_en_set_tso,
-#endif
-	.get_sg = ethtool_op_get_sg,
-	.set_sg = ethtool_op_set_sg,
-	.get_link = ethtool_op_get_link,
-	.get_rx_csum = mlx4_en_get_rx_csum,
-	.set_rx_csum = mlx4_en_set_rx_csum,
-	.get_tx_csum = ethtool_op_get_tx_csum,
-	.set_tx_csum = ethtool_op_set_tx_csum,
-	.get_strings = mlx4_en_get_strings,
-	.get_stats_count = mlx4_en_get_sset_count,
-	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
-	.get_wol = mlx4_en_get_wol,
-	.get_msglevel = mlx4_en_get_msglevel,
-	.set_msglevel = mlx4_en_set_msglevel,
-	.get_coalesce = mlx4_en_get_coalesce,
-	.set_coalesce = mlx4_en_set_coalesce,
-	.get_pauseparam = mlx4_en_get_pauseparam,
-	.set_pauseparam = mlx4_en_set_pauseparam,
-	.get_ringparam = mlx4_en_get_ringparam,
-	.set_ringparam = mlx4_en_set_ringparam,
-};
-
-
-
-
-
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index a29abe8..41b972a 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -41,48 +41,6 @@
 #include "mlx4_en.h"
 
 
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
-			u64 mac, u64 clear, u8 mode)
-{
-	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
-			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
-int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
-{
-	struct mlx4_cmd_mailbox *mailbox;
-	struct mlx4_set_vlan_fltr_mbox *filter;
-	int i;
-	int j;
-	int index = 0;
-	u32 entry;
-	int err = 0;
-
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox))
-		return PTR_ERR(mailbox);
-
-	filter = mailbox->buf;
-	if (grp) {
-		memset(filter, 0, sizeof *filter);
-		for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
-			entry = 0;
-			for (j = 0; j < 32; j++)
-				if (vlan_group_get_device(grp, index++))
-					entry |= 1 << j;
-			filter->entry[i] = cpu_to_be32(entry);
-		}
-	} else {
-		/* When no vlans are configured we block all vlans */
-		memset(filter, 0, sizeof(*filter));
-	}
-	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
-		       MLX4_CMD_TIME_CLASS_B);
-	mlx4_free_cmd_mailbox(dev, mailbox);
-	return err;
-}
-
-
 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
 			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
 {
@@ -127,8 +85,9 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 	memset(context, 0, sizeof *context);
 
 	context->base_qpn = cpu_to_be32(base_qpn);
+	context->n_mac = 0x7;
 	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
-	context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
+	context->mcast = cpu_to_be32(MCAST_DIRECT << SET_PORT_MC_PROMISC_SHIFT | base_qpn);
 	context->intra_no_vlan = 0;
 	context->no_vlan = MLX4_NO_VLAN_IDX;
 	context->intra_vlan_miss = 0;
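
With SET_PORT_MC_PROMISC_SHIFT == 30 and MCAST_DIRECT == 1 (both in
en_port.h below), the mcast word above works out to (our reading):

	/* multicast steering mode in bits 31:30, base QPN in the low bits */
	context->mcast = cpu_to_be32(1 << 30 | base_qpn);
	/* i.e. cpu_to_be32(0x40000000 | base_qpn) */
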
@@ -142,6 +101,38 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 	return err;
 }
 
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
+{
+	struct mlx4_en_query_port_context *qport_context;
+	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+	struct mlx4_en_port_state *state = &priv->port_state;
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	memset(mailbox->buf, 0, sizeof(*qport_context));
+	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
+			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+	if (err)
+		goto out;
+	qport_context = mailbox->buf;
+
+	/* This command is always accessed from Ethtool context
+	 * already synchronized, no need for locking */
+	state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
+	if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
+	    MLX4_EN_1G_SPEED)
+		state->link_speed = 1000;
+	else
+		state->link_speed = 10000;
+	state->transciver = qport_context->transceiver;
+
+out:
+	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+	return err;
+}
 
 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 {
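
A worked decode of the link fields above, using the masks defined in
en_port.h below (sample values hypothetical):

	u8 link_up = 0x80, link_speed = 0x02;	/* sample mailbox bytes */

	link_state = !!(link_up & MLX4_EN_LINK_UP_MASK);	/* 1 (up) */
	speed = ((link_speed & MLX4_EN_SPEED_MASK) == MLX4_EN_1G_SPEED)
			? 1000 : 10000;				/* 1000 */
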
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index 6e87843..e2a1c14 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -35,15 +35,17 @@
 #define _MLX4_EN_PORT_H_
 
 
-#define SET_PORT_GEN_ALL_VALID	0x7
-#define SET_PORT_PROMISC_SHIFT	31
+#define SET_PORT_GEN_ALL_VALID		0x7
+#define SET_PORT_PROMISC_SHIFT		31
+#define SET_PORT_MC_PROMISC_SHIFT	30
 
 enum {
-	MLX4_CMD_SET_VLAN_FLTR  = 0x47,
-	MLX4_CMD_SET_MCAST_FLTR = 0x48,
-	MLX4_CMD_DUMP_ETH_STATS = 0x49,
+	MCAST_DIRECT_ONLY	= 0,
+	MCAST_DIRECT		= 1,
+	MCAST_DEFAULT		= 2
 };
 
+
 struct mlx4_set_port_general_context {
 	u8 reserved[3];
 	u8 flags;
@@ -59,24 +61,23 @@ struct mlx4_set_port_general_context {
 
 struct mlx4_set_port_rqp_calc_context {
 	__be32 base_qpn;
+	u8 reserved;
+	u8 n_mac;
+	u8 n_vlan;
+	u8 n_prio;
 	__be32 flags;
-	u8 reserved[3];
+	u8 reserved2[3];
 	u8 mac_miss;
 	u8 intra_no_vlan;
 	u8 no_vlan;
 	u8 intra_vlan_miss;
 	u8 vlan_miss;
-	u8 reserved4[3];
+	u8 reserved3[3];
 	u8 no_vlan_prio;
 	__be32 promisc;
 	__be32 mcast;
 };
 
-#define VLAN_FLTR_SIZE	128
-struct mlx4_set_vlan_fltr_mbox {
-	__be32 entry[VLAN_FLTR_SIZE];
-};
-
 
 enum {
 	MLX4_MCAST_CONFIG       = 0,
@@ -84,6 +85,20 @@ enum {
 	MLX4_MCAST_ENABLE       = 2,
 };
 
+struct mlx4_en_query_port_context {
+	u8 link_up;
+#define MLX4_EN_LINK_UP_MASK	0x80
+	u8 reserved;
+	__be16 mtu;
+	u8 reserved2;
+	u8 link_speed;
+#define MLX4_EN_SPEED_MASK	0x3
+#define MLX4_EN_1G_SPEED	0x2
+	u16 reserved3[5];
+	__be64 mac;
+	u8 transceiver;
+};
+
 
 struct mlx4_en_stat_out_mbox {
 	/* Received frames with a length of 64 octets */
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
index f14ed14..df3ff7e 100644
--- a/drivers/net/mlx4/en_resources.c
+++ b/drivers/net/mlx4/en_resources.c
@@ -37,7 +37,7 @@
 #include "mlx4_en.h"
 
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
-			     int is_tx, int rss, int qpn, int cqn, int srqn,
+			     int is_tx, int rss, int qpn, int cqn,
 			     struct mlx4_qp_context *context)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -46,11 +46,13 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 	context->flags = cpu_to_be32(7 << 16 | rss << 13);
 	context->pd = cpu_to_be32(mdev->priv_pdn);
 	context->mtu_msgmax = 0xff;
-	context->rq_size_stride = 0;
+	if (!is_tx && !rss)
+		context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
 	if (is_tx)
 		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
 	else
-		context->sq_size_stride = 1;
+		context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
 	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
 	context->local_qpn = cpu_to_be32(qpn);
 	context->pri_path.ackto = 1 & 0x07;
@@ -59,8 +61,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 	context->cqn_send = cpu_to_be32(cqn);
 	context->cqn_recv = cpu_to_be32(cqn);
 	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
-	if (!rss)
-		context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
 }
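
The rq/sq size-stride encoding above packs log2(size) into bits 7:3 and
(log2(stride) - 4) into bits 2:0 of one byte; a worked example for a
1024-entry RX ring with a 64-byte stride (numbers hypothetical):

	/* ilog2(1024) == 10, ilog2(64) == 6 */
	context->rq_size_stride = 10 << 3 | (6 - 4);	/* 0x52 */
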
 
 
@@ -71,7 +71,7 @@ int mlx4_en_map_buffer(struct mlx4_buf *buf)
 
 	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
 		return 0;
-	
+
 	pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index dc79768..f21389c 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -41,16 +41,6 @@
 
 #include "mlx4_en.h"
 
-static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
-{
-	int offset = n << ring->srq.wqe_shift;
-	return ring->buf + offset;
-}
-
-static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
-{
-	return;
-}
 
 static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
 			      struct mlx4_en_rx_desc *rx_desc,
@@ -103,8 +93,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 			goto out;
 
 		page_alloc->offset = priv->frag_info[i].frag_align;
-		mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
-			 i, page_alloc->page);
+		en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+		       i, page_alloc->page);
 	}
 	return 0;
 
@@ -125,8 +115,8 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
 
 	for (i = 0; i < priv->num_frags; i++) {
 		page_alloc = &ring->page_alloc[i];
-		mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
-			 i, page_count(page_alloc->page));
+		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+		       i, page_count(page_alloc->page));
 
 		put_page(page_alloc->page);
 		page_alloc->page = NULL;
@@ -139,8 +129,6 @@ mlx4_en_init_rx_desc_skb(struct mlx4_en_priv *priv,
 {
 	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
 
-	/* Pre-link descriptor */
-	rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
 	rx_desc->data->byte_count = cpu_to_be32(priv->rx_skb_size);
 	rx_desc->data->lkey = cpu_to_be32(priv->mdev->mr.key);
 }
@@ -154,9 +142,6 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
 	int possible_frags;
 	int i;
 
-	/* Pre-link descriptor */
-	rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
-
 	/* Set size and memtype fields */
 	for (i = 0; i < priv->num_frags; i++) {
 		skb_frags[i].size = priv->frag_info[i].frag_size;
@@ -179,15 +164,20 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
 static int
 mlx4_en_alloc_rx_skb(struct mlx4_en_priv *priv,
 		     struct mlx4_en_rx_desc *rx_desc,
-		     struct sk_buff **pskb)
+		     struct sk_buff **pskb, int unmap)
 {
+	struct mlx4_en_dev *mdev = priv->mdev;
 	dma_addr_t dma;
 	int size = priv->rx_skb_size + NET_IP_ALIGN;
-	struct sk_buff *new_skb = alloc_skb(size, GFP_ATOMIC);
+	struct sk_buff *new_skb = dev_alloc_skb(size);
 
 	if (unlikely(new_skb == NULL))
 		return -ENOMEM;
 
+	if (unmap)
+		pci_unmap_single(mdev->pdev, be64_to_cpu(rx_desc->data->addr),
+				 be32_to_cpu(rx_desc->data->byte_count),
+				 PCI_DMA_FROMDEVICE);
 	new_skb->dev = priv->dev;
 	skb_reserve(new_skb, NET_IP_ALIGN);
 	dma = pci_map_single(priv->mdev->pdev, new_skb->data, size, DMA_FROM_DEVICE);
@@ -203,7 +193,7 @@ mlx4_en_prepare_rx_desc_skb(struct mlx4_en_priv *priv,
 	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
 	struct sk_buff **pskb = (struct sk_buff **) ring->rx_info + index;
 
-	return mlx4_en_alloc_rx_skb(priv, rx_desc, pskb);
+	return mlx4_en_alloc_rx_skb(priv, rx_desc, pskb, 0);
 }
 
 static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
@@ -231,13 +221,45 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
 	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
 }
 
-static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
+static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+				 struct mlx4_en_rx_ring *ring,
+				 int index)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct skb_frag_struct *skb_frags;
+	struct sk_buff *skb;
+	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
+	dma_addr_t dma;
+	int nr;
+
+	if (ring->use_frags) {
+		skb_frags = ring->rx_info + (index << priv->log_rx_info);
+		for (nr = 0; nr < priv->num_frags; nr++) {
+			en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+			dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+			en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+			pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+					 PCI_DMA_FROMDEVICE);
+			put_page(skb_frags[nr].page);
+		}
+	} else {
+		skb = *((struct sk_buff **) ring->rx_info + index);
+		dma = be64_to_cpu(rx_desc->data->addr);
+		pci_unmap_single(mdev->pdev, dma,
+				 priv->rx_skb_size + NET_IP_ALIGN,
+				 PCI_DMA_FROMDEVICE);
+		kfree_skb(skb);
+	}
+}
+
+static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
+{
 	struct mlx4_en_rx_ring *ring;
 	int ring_ind;
 	int buf_ind;
 	int err;
+	int new_size;
 
 	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
 		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
@@ -250,128 +272,54 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 				err = mlx4_en_prepare_rx_desc_skb(priv, ring,
 								  ring->actual_size);
 			if (err) {
-				mlx4_err(mdev, "Failed to allocate "
-					       "enough rx buffers\n");
-				return -ENOMEM;
+				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
+					en_err(priv, "Failed to allocate "
+						     "enough rx buffers\n");
+					return -ENOMEM;
+				} else {
+					new_size = rounddown_pow_of_two(ring->actual_size);
+					en_warn(priv, "Only %d buffers allocated, "
+						      "reducing ring size to %d\n",
+						ring->actual_size, new_size);
+					goto reduce_rings;
+				}
 			}
 			ring->actual_size++;
 			ring->prod++;
 		}
 	}
-out:
 	return 0;
-}
 
-static int mlx4_en_fill_rx_buf(struct net_device *dev,
-			       struct mlx4_en_rx_ring *ring)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int num = 0;
-	int err;
-
-	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-		if (ring->use_frags)
-			err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
-						      ring->size_mask);
-		else
-			err = mlx4_en_prepare_rx_desc_skb(priv, ring, ring->prod &
-							  ring->size_mask);
-		if (err) {
-			if (netif_msg_rx_err(priv))
-				mlx4_warn(priv->mdev,
-					  "Failed preparing rx descriptor\n");
-			priv->port_stats.rx_alloc_failed++;
-			break;
+reduce_rings:
+	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+		ring = &priv->rx_ring[ring_ind];
+		while (ring->actual_size > new_size) {
+			ring->actual_size--;
+			ring->prod--;
+			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
 		}
-		++num;
-		++ring->prod;
 	}
-	if ((u32) (ring->prod - ring->cons) == ring->size)
-		ring->full = 1;
-
-	return num;
+	return 0;
 }
 
 static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 				struct mlx4_en_rx_ring *ring)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct skb_frag_struct *skb_frags;
-	struct sk_buff *skb;
-	struct mlx4_en_rx_desc *rx_desc;
-	dma_addr_t dma;
 	int index;
-	int nr;
 
-	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
-			ring->cons, ring->prod);
+	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+	       ring->cons, ring->prod);
 
 	/* Unmap and free Rx buffers */
-	BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
+	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
 	while (ring->cons != ring->prod) {
 		index = ring->cons & ring->size_mask;
-		rx_desc = ring->buf + (index << ring->log_stride);
-		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
-
-		if (ring->use_frags) {
-			skb_frags = ring->rx_info + (index << priv->log_rx_info);
-			for (nr = 0; nr < priv->num_frags; nr++) {
-				mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
-				dma = be64_to_cpu(rx_desc->data[nr].addr);
-
-				mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
-				pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
-						 PCI_DMA_FROMDEVICE);
-				put_page(skb_frags[nr].page);
-			}
-		} else {
-			skb = *((struct sk_buff **) ring->rx_info + index);
-			dma = be64_to_cpu(rx_desc->data->addr);
-			pci_unmap_single(mdev->pdev, dma,
-					 priv->rx_skb_size + NET_IP_ALIGN,
-					 PCI_DMA_FROMDEVICE);
-			kfree_skb(skb);
-		}
+		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+		mlx4_en_free_rx_desc(priv, ring, index);
 		++ring->cons;
 	}
 }
 
-
-void mlx4_en_rx_refill(struct work_struct *work)
-{
-	struct delayed_work *delay = container_of(work, struct delayed_work, work);
-	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
-						 refill_task);
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct net_device *dev = priv->dev;
-	struct mlx4_en_rx_ring *ring;
-	int need_refill = 0;
-	int i;
-
-	mutex_lock(&mdev->state_lock);
-	if (!mdev->device_up || !priv->port_up)
-		goto out;
-
-	/* We only get here if there are no receive buffers, so we can't race
-	 * with Rx interrupts while filling buffers */
-	for (i = 0; i < priv->rx_ring_num; i++) {
-		ring = &priv->rx_ring[i];
-		if (ring->need_refill) {
-			if (mlx4_en_fill_rx_buf(dev, ring)) {
-				ring->need_refill = 0;
-				mlx4_en_update_rx_prod_db(ring);
-			} else
-				need_refill = 1;
-		}
-	}
-	if (need_refill)
-		queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
-
-out:
-	mutex_unlock(&mdev->state_lock);
-}
-
-
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring *ring, u32 size)
 {
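
For reference (not part of the patch): the refill logic above relies on each ring holding a power-of-two number of descriptors, so slots are located with a mask instead of a modulo; that is why a partial allocation failure shrinks the ring with rounddown_pow_of_two() and why mlx4_en_activate_rx_rings() later recomputes size_mask from actual_size. A minimal sketch of the invariant, with illustrative names:

	#include <linux/types.h>

	/* With actual_size a power of two and size_mask == actual_size - 1,
	 * the free-running prod/cons counters always map to a valid slot
	 * and wrap-around costs nothing. */
	static inline u32 ring_slot(u32 counter, u32 size_mask)
	{
		return counter & size_mask;
	}
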
@@ -379,9 +327,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	int err;
 	int tmp;
 
-	/* Sanity check SRQ size before proceeding */
-	if (size >= mdev->dev->caps.max_srq_wqes)
-		return -EINVAL;
 
 	ring->prod = 0;
 	ring->cons = 0;
@@ -391,7 +336,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 					  DS_SIZE * (ring->use_frags ?
 						     MLX4_EN_MAX_RX_FRAGS : 1));
 	ring->log_stride = ffs(ring->stride) - 1;
-	ring->buf_size = ring->size * ring->stride;
+	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
 
 	if (ring->use_frags)
 		tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
@@ -401,10 +346,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	ring->rx_info = vmalloc(tmp);
 	if (!ring->rx_info) {
-		mlx4_err(mdev, "Failed allocating rx_info ring\n");
+		en_err(priv, "Failed allocating rx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
 		 ring->rx_info, tmp);
 
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
@@ -414,15 +359,18 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map RX buffer\n");
+		en_err(priv, "Failed to map RX buffer\n");
 		goto err_hwq;
 	}
 	ring->buf = ring->wqres.buf.direct.buf;
 
 	/* Allocate LRO sessions */
-	if (mdev->profile.num_lro && mlx4_en_lro_init(ring, mdev->profile.num_lro)) {
-		mlx4_err(mdev, "Failed allocating lro sessions\n");
-		goto err_map;
+	if (mdev->profile.num_lro) {
+		err =  mlx4_en_lro_init(ring, mdev->profile.num_lro);
+		if (err) {
+			en_err(priv, "Failed allocating lro sessions\n");
+			goto err_map;
+		}
 	}
 
 	return 0;
@@ -439,15 +387,12 @@ err_ring:
 
 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct mlx4_wqe_srq_next_seg *next;
 	struct mlx4_en_rx_ring *ring;
 	int i;
 	int ring_ind;
 	int err;
 	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
 					DS_SIZE * priv->num_frags);
-	int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
 		ring = &priv->rx_ring[ring_ind];
@@ -459,9 +404,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 
 		if (ring->use_frags)
 			ring->stride = stride;
+		if (ring->stride <= TXBB_SIZE)
+			ring->buf += TXBB_SIZE;
+
 		ring->log_stride = ffs(ring->stride) - 1;
 		ring->buf_size = ring->size * ring->stride;
-	
+
 		memset(ring->rx_info, 0, sizeof(*(ring->rx_info)));
 		memset(ring->buf, 0, ring->buf_size);
 		mlx4_en_update_rx_prod_db(ring);
@@ -474,7 +422,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 			/* Initialize page allocators */
 			err = mlx4_en_init_allocator(priv, ring);
 			if (err) {
-				 mlx4_err(mdev, "Failed initializing ring allocator\n");
+				 en_err(priv, "Failed initializing ring allocator\n");
 				 ring_ind--;
 				 goto err_allocator;
 			}
@@ -482,9 +430,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 			for (i = 0; i < ring->size; i++)
 				mlx4_en_init_rx_desc_skb(priv, ring, i);
 		}
-	
-		/* Fill Rx buffers */
-		ring->full = 0;
 	}
 	err = mlx4_en_fill_rx_buffers(priv);
 	if (err)
@@ -493,39 +438,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
 		ring = &priv->rx_ring[ring_ind];
 
+		ring->size_mask = ring->actual_size - 1;
 		mlx4_en_update_rx_prod_db(ring);
-	
-		/* Configure SRQ representing the ring */
-		ring->srq.max    = ring->size;
-		ring->srq.max_gs = max_gs;
-		ring->srq.wqe_shift = ilog2(ring->stride);
-	
-		for (i = 0; i < ring->srq.max; ++i) {
-			next = get_wqe(ring, i);
-			next->next_wqe_index =
-			cpu_to_be16((i + 1) & (ring->srq.max - 1));
-		}
-	
-		err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn,
-				     &ring->wqres.mtt, ring->wqres.db.dma,
-				     &ring->srq);
-		if (err){
-			mlx4_err(mdev, "Failed to allocate srq\n");
-			ring_ind--;
-			goto err_srq;
-		}
-		ring->srq.event = mlx4_en_srq_event;
 	}
 
 	return 0;
 
-err_srq:
-	while (ring_ind >= 0) {
-		ring = &priv->rx_ring[ring_ind];
-		mlx4_srq_free(mdev->dev, &ring->srq);
-		ring_ind--;
-	}
-
 err_buffers:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
 		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
@@ -548,7 +466,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 	if (mdev->profile.num_lro)
 		mlx4_en_lro_destroy(ring);
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
-	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
 }
@@ -556,10 +474,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
 				struct mlx4_en_rx_ring *ring)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-
-	mlx4_srq_free(mdev->dev, &ring->srq);
 	mlx4_en_free_rx_buf(priv, ring);
+	if (ring->stride <= TXBB_SIZE)
+		ring->buf -= TXBB_SIZE;
 	if (ring->use_frags)
 		mlx4_en_destroy_allocator(priv, ring);
 }
@@ -595,7 +512,7 @@ int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 			goto fail;
 
 		/* Unmap buffer */
-		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+		pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
 				 PCI_DMA_FROMDEVICE);
 	}
 	/* Adjust size of last fragment to match actual length */
@@ -628,7 +545,7 @@ struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
 	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
 	if (!skb) {
-		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
+		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
 		return NULL;
 	}
 	skb->dev = priv->dev;
@@ -676,47 +593,20 @@ struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 	return skb;
 }
 
-static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
-			      struct mlx4_en_rx_ring *ring,
-			      int from, int to, int num)
-{
-	struct skb_frag_struct *skb_frags_from;
-	struct skb_frag_struct *skb_frags_to;
-	struct mlx4_en_rx_desc *rx_desc_from;
-	struct mlx4_en_rx_desc *rx_desc_to;
-	int from_index, to_index;
-	int nr, i;
-
-	for (i = 0; i < num; i++) {
-		from_index = (from + i) & ring->size_mask;
-		to_index = (to + i) & ring->size_mask;
-		skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
-		skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
-		rx_desc_from = ring->buf + (from_index << ring->log_stride);
-		rx_desc_to = ring->buf + (to_index << ring->log_stride);
-
-		for (nr = 0; nr < priv->num_frags; nr++) {
-			skb_frags_to[nr].page = skb_frags_from[nr].page;
-			skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
-			rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
-		}
-	}
-}
-
 static inline int invalid_cqe(struct mlx4_en_priv *priv,
 			      struct mlx4_cqe *cqe)
 {
 	/* Drop packet on bad receive or bad checksum */
 	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 		     MLX4_CQE_OPCODE_ERROR)) {
-		mlx4_err(priv->mdev, "CQE completed in error - vendor "
+		en_err(priv, "CQE completed in error - vendor "
 			 "syndrom:%d syndrom:%d\n",
 			 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
 			 ((struct mlx4_err_cqe *) cqe)->syndrome);
 		return 1;
 	}
 	if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
-		mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+		en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
 		return 1;;
 	}
 
@@ -738,6 +628,7 @@ mlx4_en_get_rx_skb(struct mlx4_en_priv *priv,
 		if (unlikely(!skb))
 			return NULL;
 
+		skb->dev = priv->dev;
 		skb_reserve(skb, NET_IP_ALIGN);
 		/* We are copying all relevant data to the skb - temporarily
 		 * synch buffers for the copy */
@@ -750,12 +641,8 @@ mlx4_en_get_rx_skb(struct mlx4_en_priv *priv,
 
 	} else {
 		skb = *pskb;
-		if (unlikely(mlx4_en_alloc_rx_skb(priv, rx_desc, pskb)))
+		if (unlikely(mlx4_en_alloc_rx_skb(priv, rx_desc, pskb, 1)))
 			return NULL;
-
-		pci_unmap_single(mdev->pdev, be64_to_cpu(rx_desc->data->addr),
-				 be32_to_cpu(rx_desc->data->byte_count),
-				 PCI_DMA_FROMDEVICE);
 	}
 
 	skb->tail += length;
@@ -764,6 +651,22 @@ mlx4_en_get_rx_skb(struct mlx4_en_priv *priv,
 	return skb;
 }
 
+static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
+{
+	int i;
+	int offset = ETH_HLEN;
+
+	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
+		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
+			goto out_loopback;
+	}
+	/* Loopback found */
+	priv->loopback_ok = 1;
+
+out_loopback:
+	dev_kfree_skb_any(skb);
+}
+
 int mlx4_en_process_rx_cq_skb(struct net_device *dev,
 			      struct mlx4_en_cq *cq, int budget)
 {
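
validate_loopback() is the receive half of the self-test protocol introduced in en_selftest.c further down: the transmitter fills the payload with a byte ramp, and the receiver accepts the probe only if every payload byte past the Ethernet header matches. The two halves, condensed from this patch:

	/* TX side (mlx4_en_test_loopback_xmit): */
	for (i = 0; i < packet_size; ++i)
		packet[i] = (unsigned char)(i & 0xff);

	/* RX side (validate_loopback): bail out on the first mismatch */
	for (i = 0, offset = ETH_HLEN; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++)
		if (*(skb->data + offset) != (unsigned char)(i & 0xff))
			goto out_loopback;	/* not our probe packet */
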
@@ -810,8 +713,15 @@ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
 		ring->packets++;
 
 		skb = mlx4_en_get_rx_skb(priv, rx_desc, pskb, length);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			priv->stats.rx_dropped++;
+			goto next;
+		}
+
+		if (unlikely(priv->validate_loopback)) {
+			validate_loopback(priv, skb);
 			goto next;
+		}
 		skb->protocol = eth_type_trans(skb, dev);
 
 		if (likely(priv->rx_csum && cqe->checksum == 0xffff)) {
@@ -853,8 +763,6 @@ out:
 	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
 	ring->cons = cq->mcq.cons_index;
 	ring->prod += polled; /* Polled descriptors were realocated in place */
-	if (unlikely(!ring->full))
-		mlx4_en_fill_rx_buf(dev, ring);
 	mlx4_en_update_rx_prod_db(ring);
 	return polled;
 }
@@ -932,6 +840,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			goto next;
 		}
 
+		if (unlikely(priv->validate_loopback)) {
+			validate_loopback(priv, skb);
+			goto next;
+		}
+
 		skb->ip_summed = ip_summed;
 		skb->protocol = eth_type_trans(skb, dev);
 
@@ -968,11 +881,6 @@ out:
 	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
 	ring->cons = cq->mcq.cons_index;
 	ring->prod += polled; /* Polled descriptors were realocated in place */
-	if (unlikely(!ring->full)) {
-		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
-				  ring->prod - polled, polled);
-		mlx4_en_fill_rx_buf(dev, ring);
-	}
 	mlx4_en_update_rx_prod_db(ring);
 	return polled;
 }
@@ -1022,7 +930,7 @@ static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16
 	u16 res = MLX4_EN_ALLOC_SIZE % stride;
 	u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
 
-	mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+	en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
 			    "res:%d offset:%d\n", stride, align, res, offset);
 	return offset;
 }
@@ -1067,10 +975,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 	priv->rx_skb_size = eff_mtu;
 	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
 
-	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
 		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
 	for (i = 0; i < priv->num_frags; i++) {
-		mlx4_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
+		en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
 				"stride:%d last_offset:%d\n", i,
 				priv->frag_info[i].frag_size,
 				priv->frag_info[i].frag_prefix_size,
@@ -1082,25 +990,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 
 /* RSS related functions */
 
-/* Calculate rss size and map each entry in rss table to rx ring */
-void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
-				 struct mlx4_en_rss_map *rss_map,
-				 int num_entries, int num_rings)
-{
-	int i;
-
-	rss_map->size = roundup_pow_of_two(num_entries);
-	mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
-		 rss_map->size);
-
-	for (i = 0; i < rss_map->size; i++) {
-		rss_map->map[i] = i % num_rings;
-		mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
-	}
-}
-
-static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
-				 int qpn, int srqn, int cqn,
+static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
+				 struct mlx4_en_rx_ring *ring,
 				 enum mlx4_qp_state *state,
 				 struct mlx4_qp *qp)
 {
@@ -1110,26 +1001,29 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
 
 	context = kmalloc(sizeof *context , GFP_KERNEL);
 	if (!context) {
-		mlx4_err(mdev, "Failed to allocate qp context\n");
+		en_err(priv, "Failed to allocate qp context\n");
 		return -ENOMEM;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
+		en_err(priv, "Failed to allocate qp #%x\n", qpn);
 		goto out;
 		return err;
 	}
 	qp->event = mlx4_en_sqp_event;
 
 	memset(context, 0, sizeof *context);
-	mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
+	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
+				qpn, ring->cqn, context);
+	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
 
-	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
+	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
 	if (err) {
 		mlx4_qp_remove(mdev->dev, qp);
 		mlx4_qp_free(mdev->dev, qp);
 	}
+	mlx4_en_update_rx_prod_db(ring);
 out:
 	kfree(context);
 	return err;
@@ -1145,24 +1039,23 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	void *ptr;
 	int rss_xor = mdev->profile.rss_xor;
 	u8 rss_mask = mdev->profile.rss_mask;
-	int i, srqn, qpn, cqn;
+	int i, qpn;
 	int err = 0;
 	int good_qps = 0;
 
-	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
-	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
-				    rss_map->size, &rss_map->base_qpn);
+	en_dbg(DRV, priv, "Configuring rss steering\n");
+	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
+				    roundup_pow_of_two(priv->rx_ring_num),
+				    &rss_map->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
-			 rss_map->size, priv->port);
+		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
 		return err;
 	}
 
-	for (i = 0; i < rss_map->size; i++) {
-		cqn = priv->rx_ring[rss_map->map[i]].cqn;
-		srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
+	for (i = 0; i < priv->rx_ring_num; i++) {
 		qpn = rss_map->base_qpn + i;
-		err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
+		err = mlx4_en_config_rss_qp(priv, qpn,
+					    &priv->rx_ring[i],
 					    &rss_map->state[i],
 					    &rss_map->qps[i]);
 		if (err)
@@ -1172,24 +1065,18 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 
 	/* Configure RSS indirection qp */
-	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
-	if (err) {
-		mlx4_err(mdev, "Failed to reserve range for RSS "
-			       "indirection qp\n");
-		goto rss_err;
-	}
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
-		goto reserve_err;
+		en_err(priv, "Failed to allocate RSS indirection QP\n");
+		goto rss_err;
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-				priv->rx_ring[0].cqn, 0, &context);
+				priv->rx_ring[0].cqn, &context);
 
 	ptr = ((void *) &context) + 0x3c;
 	rss_context = (struct mlx4_en_rss_context *) ptr;
-	rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size - 1) << 24 |
+	rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num - 1) << 24 |
 					    (rss_map->base_qpn + 1));
 	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
 	rss_context->hash_fn = rss_xor & 0x3;
@@ -1207,8 +1094,6 @@ indir_err:
 		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
-reserve_err:
-	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
 rss_err:
 	for (i = 0; i < good_qps; i++) {
 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -1216,7 +1101,7 @@ rss_err:
 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
 	}
-	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
+	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
 	return err;
 }
 
@@ -1230,15 +1115,14 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
 		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
-	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
 
-	for (i = 0; i < rss_map->size; i++) {
+	for (i = 0; i < priv->rx_ring_num; i++) {
 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
 			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
 	}
-	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
+	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
 }
 
 
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
new file mode 100644
index 0000000..a715e47
--- /dev/null
+++ b/drivers/net/mlx4/en_selftest.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/mlx4/driver.h>
+
+#include "mlx4_en.h"
+
+
+static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
+{
+	return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
+			MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
+{
+	struct sk_buff *skb;
+	struct ethhdr *ethh;
+	unsigned char *packet;
+	unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
+	unsigned int i;
+	int err;
+
+
+	/* build the pkt before xmit */
+	skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
+	if (!skb) {
+		en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
+		return -ENOMEM;
+	}
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
+	packet	= (unsigned char *)skb_put(skb, packet_size);
+	memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
+	memset(ethh->h_source, 0, ETH_ALEN);
+	ethh->h_proto = htons(ETH_P_ARP);
+	skb_set_mac_header(skb, 0);
+	for (i = 0; i < packet_size; ++i)	/* fill our packet */
+		packet[i] = (unsigned char)(i & 0xff);
+
+	/* xmit the pkt */
+	err = mlx4_en_xmit(skb, priv->dev);
+	return err;
+}
+
+static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
+{
+	u32 loopback_ok = 0;
+	int i;
+
+	priv->loopback_ok = 0;
+	priv->validate_loopback = 1;
+
+	/* xmit */
+	if (mlx4_en_test_loopback_xmit(priv)) {
+		en_err(priv, "Transmitting loopback packet failed\n");
+		goto mlx4_en_test_loopback_exit;
+	}
+
+	/* polling for result */
+	for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
+		msleep(MLX4_EN_LOOPBACK_TIMEOUT);
+		if (priv->loopback_ok) {
+			loopback_ok = 1;
+			break;
+		}
+	}
+	if (!loopback_ok)
+		en_err(priv, "Loopback packet didn't arrive\n");
+
+mlx4_en_test_loopback_exit:
+
+	priv->validate_loopback = 0;
+	return !loopback_ok;
+}
+
+
+static int mlx4_en_test_link(struct mlx4_en_priv *priv)
+{
+	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+		return -ENOMEM;
+	if (priv->port_state.link_state == 1)
+		return 0;
+	else
+		return 1;
+}
+
+static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
+{
+
+	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+		return -ENOMEM;
+
+	/* The device currently only supports 10G speed */
+	if (priv->port_state.link_speed != SPEED_10000)
+		return priv->port_state.link_speed;
+	return 0;
+}
+
+
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring *tx_ring;
+	int i, running;
+
+	memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
+
+	if (*flags & ETH_TEST_FL_OFFLINE) {
+		/* disable the interface */
+		running = netif_running(dev);
+
+		if (running) {
+			netif_tx_disable(dev);
+			dev->trans_start = jiffies;
+		}
+retry_tx:
+		/* Wait until all tx queues are empty.
+		 * There should not be any additional incoming traffic
+		 * since we turned the carrier off */
+		msleep(200);
+		for (i = 0; i < priv->tx_ring_num && running; i++) {
+			tx_ring = &priv->tx_ring[i];
+			if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
+				goto retry_tx;
+		}
+
+		if (priv->mdev->dev->caps.loopback_support) {
+			buf[3] = mlx4_en_test_registers(priv);
+			buf[4] = mlx4_en_test_loopback(priv);
+		}
+
+		if (running)
+			netif_wake_queue(dev);
+
+	}
+	buf[0] = mlx4_test_interrupts(mdev->dev);
+	buf[1] = mlx4_en_test_link(priv);
+	buf[2] = mlx4_en_test_speed(priv);
+
+	for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
+		if (buf[i])
+			*flags |= ETH_TEST_FL_FAILED;
+	}
+}
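
mlx4_en_ex_selftest() takes the ethtool test flags and a result array rather than the raw ethtool operation signature, so the glue in en_ethtool.c (changed elsewhere in this update, not shown in this excerpt) presumably adapts it along these lines -- a hypothetical sketch, not code from the patch:

	/* Assumed adapter; the real hookup lives in en_ethtool.c. */
	static void mlx4_en_self_test(struct net_device *dev,
				      struct ethtool_test *etest, u64 *buf)
	{
		mlx4_en_ex_selftest(dev, &etest->flags, buf);
	}

With that in place, "ethtool -t ethX offline" drives the full set: buf[0..2] (interrupts, link, speed) always run, while buf[3..4] (registers, loopback) require the offline flag plus hardware loopback support.
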
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 9203626..66fabba 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -71,15 +71,15 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc(tmp);
 	if (!ring->tx_info) {
-		mlx4_err(mdev, "Failed allocating tx_info ring\n");
+		en_err(priv, "Failed allocating tx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
 
 	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
 	if (!ring->bounce_buf) {
-		mlx4_err(mdev, "Failed allocating bounce buffer\n");
+		en_err(priv, "Failed allocating bounce buffer\n");
 		err = -ENOMEM;
 		goto err_tx;
 	}
@@ -88,31 +88,31 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
 				 2 * PAGE_SIZE);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating hwq resources\n");
+		en_err(priv, "Failed allocating hwq resources\n");
 		goto err_bounce;
 	}
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map TX buffer\n");
+		en_err(priv, "Failed to map TX buffer\n");
 		goto err_hwq_res;
 	}
 
 	ring->buf = ring->wqres.buf.direct.buf;
 
-	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
 		 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
 		 ring->buf_size, ring->wqres.buf.direct.map);
 
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
+		en_err(priv, "Failed reserving qp for tx ring.\n");
 		goto err_map;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
+		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
 		goto err_reserve;
 	}
 	ring->qp.event = mlx4_en_sqp_event;
@@ -138,7 +138,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
@@ -153,7 +153,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
-			     int cq, int srqn)
+			     int cq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
@@ -171,7 +171,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 	ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
 
 	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
-				ring->cqn, srqn, &ring->context);
+				ring->cqn, &ring->context);
 
 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 			       &ring->qp, &ring->qp_state);
@@ -252,6 +252,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 				pci_unmap_page(mdev->pdev,
 					(dma_addr_t) be64_to_cpu(data->addr),
 					 frag->size, PCI_DMA_TODEVICE);
+				++data;
 			}
 		}
 		/* Stamp the freed descriptor */
@@ -277,7 +278,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 
 	/* Skip last polled descriptor */
 	ring->cons += ring->last_nr_txbb;
-	mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
 		 ring->cons, ring->prod);
 
 	if ((u32) (ring->prod - ring->cons) > ring->size) {
@@ -295,37 +296,25 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	}
 
 	if (cnt)
-		mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
 
 	return cnt;
 }
 
-void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
+void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map)
 {
-	int block = 8 / ring_num;
-	int extra = 8 - (block * ring_num);
-	int num = 0;
-	u16 ring = MLX4_EN_NUM_HASH_RINGS + 1;
+	u16 ring;
 	int prio;
 
-	if (ring_num == 1) {
+	if (!priv->prof->rx_ppp || priv->prof->tx_ring_num < 8) {
 		for (prio = 0; prio < 8; prio++)
 			prio_map[prio] = 0;
 		return;
 	}
 
-	for (prio = 0; prio < 8; prio++) {
-		if (extra && (num == block + 1)) {
-			ring++;
-			num = 0;
-			extra--;
-		} else if (!extra && (num == block)) {
-			ring++;
-			num = 0;
-		}
+	for (prio = 7, ring = priv->prof->tx_ring_num; prio > 0; prio--, ring--) {
 		prio_map[prio] = ring;
-		mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
-		num++;
+		en_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
 	}
 }
 
@@ -429,7 +418,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 
 	INC_PERF_COUNTER(priv->pstats.tx_poll);
 
-	if (!spin_trylock(&ring->comp_lock)){
+	if (!spin_trylock_irq(&ring->comp_lock)){
 		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
 		return;
 	}
@@ -442,7 +431,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 	if (inflight && priv->port_up)
 		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
 
-	spin_unlock(&ring->comp_lock);
+	spin_unlock_irq(&ring->comp_lock);
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
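
The move from spin_trylock() to spin_trylock_irq() here and in mlx4_en_xmit_poll() below suggests comp_lock can also be taken from hard interrupt context (the TX completion handler); with the plain trylock, an interrupt arriving on the CPU that holds the lock could spin forever. Sketch of the assumed hazard:

	/* CPU0, timer/xmit context         CPU0, hard-irq context
	 * --------------------------       ----------------------
	 * spin_lock(&ring->comp_lock);
	 *        <-- TX completion irq preempts the holder -->
	 *                                  spin_lock(&ring->comp_lock);
	 *                                  (deadlock: never released)
	 *
	 * The _irq variants keep local interrupts off while the lock is
	 * held, closing the window. */
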
@@ -485,9 +474,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock(&ring->comp_lock)) {
+		if (spin_trylock_irq(&ring->comp_lock)) {
 			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock(&ring->comp_lock);
+			spin_unlock_irq(&ring->comp_lock);
 		}
 }
 
@@ -498,7 +487,7 @@ static void *get_frag_ptr(struct sk_buff *skb)
 	void *ptr;
 
 	ptr = page_address(page);
-        if (unlikely(!ptr))
+	if (unlikely(!ptr))
 		return NULL;
 
 	return ptr + frag->page_offset;
@@ -542,7 +531,6 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			 int *lso_header_size)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	int real_size;
 
 	if (skb_is_gso(skb)) {
@@ -556,17 +544,10 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 				real_size += DS_SIZE;
 			else {
 				if (netif_msg_tx_err(priv))
-					mlx4_warn(mdev, "Non-linear headers\n");
-				dev_kfree_skb_any(skb);
+					en_warn(priv, "Non-linear headers\n");
 				return 0;
 			}
 		}
-		if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
-			if (netif_msg_tx_err(priv))
-				mlx4_warn(mdev, "LSO header size too big\n");
-			dev_kfree_skb_any(skb);
-			return 0;
-		}
 	} else {
 		*lso_header_size = 0;
 		if (!is_inline(skb, NULL))
@@ -601,7 +582,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 				fragptr +=  spc - skb_headlen(skb);
 			}
 			inl = (void *) (inl + 1) + spc;
-			memcpy(((void *)(inl + 1)), fragptr, skb->len -spc);
+			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
 		} else {
 			skb_copy_from_linear_data(skb, inl + 1, spc);
 			inl = (void *) (inl + 1) + spc;
@@ -610,7 +591,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 			if (skb_shinfo(skb)->nr_frags)
 				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
 					fragptr, skb_shinfo(skb)->frags[0].size);
-                }
+		}
 
 		wmb();
 		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
@@ -644,7 +625,7 @@ static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 		return MLX4_EN_NUM_HASH_RINGS;
 
 	hash_index = be32_to_cpu(iph->daddr) & MLX4_EN_TX_HASH_MASK;
-	switch(iph->protocol) {
+	switch (iph->protocol) {
 	case IPPROTO_UDP:
 		break;
 	case IPPROTO_TCP:
@@ -656,18 +637,19 @@ static int mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 	}
 
 	entry = &priv->tx_hash[hash_index];
-	if (skb->len > MLX4_EN_SMALL_PKT_SIZE)
-		entry->big_pkts++;
-	else
-		entry->small_pkts++;
-
-	if(unlikely(!(++entry->cnt))) {
+	if (unlikely(!entry->cnt)) {
 		tx_ind = hash_index & (MLX4_EN_NUM_HASH_RINGS / 2 - 1);
-		if (2 * entry->big_pkts > entry->small_pkts)
+		if (2 * entry->small_pkts > entry->big_pkts)
 			tx_ind += MLX4_EN_NUM_HASH_RINGS / 2;
 		entry->small_pkts = entry->big_pkts = 0;
 		entry->ring = tx_ind;
 	}
+
+	entry->cnt++;
+	if (skb->len > MLX4_EN_SMALL_PKT_SIZE)
+		entry->big_pkts++;
+	else
+		entry->small_pkts++;
 	return entry->ring;
 }
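
The rewritten selection logic compresses into a small policy, restated here as a reading of the diff (not new code):

	/* - hash on the destination IP into MLX4_EN_NUM_HASH_RINGS buckets
	 *   (TCP gets extra handling in code between these hunks);
	 * - when a bucket's counter is zero (first use, or after it wraps),
	 *   re-home the bucket: lower half of the hash rings by default,
	 *   upper half when small packets dominated the previous epoch
	 *   (2 * small_pkts > big_pkts);
	 * - the small/big packet stats reset at each re-homing. */
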
 
@@ -681,6 +663,9 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct mlx4_wqe_data_seg *data;
 	struct skb_frag_struct *frag;
 	struct mlx4_en_tx_info *tx_info;
+	struct ethhdr *ethh;
+	u64 mac;
+	u32 mac_l, mac_h;
 	int tx_ind = 0;
 	int nr_txbb;
 	int desc_size;
@@ -693,22 +678,17 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	int lso_header_size;
 	void *fragptr;
 
-	if (unlikely(!skb->len)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
 	real_size = get_real_size(skb, dev, &lso_header_size);
 	if (unlikely(!real_size))
-		return NETDEV_TX_OK;
+		goto tx_drop;
 
 	/* Allign descriptor to TXBB size */
 	desc_size = ALIGN(real_size, TXBB_SIZE);
 	nr_txbb = desc_size / TXBB_SIZE;
 	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "Oversized header or SG list\n");
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
+			en_warn(priv, "Oversized header or SG list\n");
+		goto tx_drop;
 	}
 
 	tx_ind = mlx4_en_select_queue(dev, skb);
@@ -717,7 +697,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		vlan_tag = vlan_tx_tag_get(skb);
 
 	/* Check available TXBBs And 2K spare for prefetch */
-        if (unlikely(((int)(ring->prod - ring->cons)) >
+	if (unlikely(((int)(ring->prod - ring->cons)) >
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 		/* every full Tx ring stops queue.
 		 * TODO: implement multi-queue support (per-queue stop) */
@@ -731,14 +711,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	/* Now that we know what Tx ring to use */
-	if (unlikely(!priv->port_up)) {
-		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "xmit: port down!\n");
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
 	/* Track current inflight packets for performance analysis */
 	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
 			 (u32) (ring->prod - ring->cons - 1));
@@ -771,6 +743,19 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		priv->port_stats.tx_chksum_offload++;
 	}
 
+	if (unlikely(priv->validate_loopback)) {
+		/* Copy dst mac address to wqe */
+		skb_reset_mac_header(skb);
+		ethh = eth_hdr(skb);
+		if (ethh && ethh->h_dest) {
+			mac = mlx4_en_mac_to_u64(ethh->h_dest);
+			mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+			mac_l = (u32) (mac & 0xffffffff);
+			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+			tx_desc->ctrl.imm = cpu_to_be32(mac_l);
+		}
+	}
+
 	/* Handle LSO (TSO) packets */
 	if (lso_header_size) {
 		/* Mark opcode as LSO */
@@ -806,7 +791,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
 
-        /* valid only for none inline segments */
+	/* valid only for non-inline segments */
 	tx_info->data_offset = (void *) data - (void *) tx_desc;
 
 	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
@@ -864,5 +849,10 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	mlx4_en_xmit_poll(priv, tx_ind);
 
 	return 0;
+
+tx_drop:
+	dev_kfree_skb_any(skb);
+	priv->stats.tx_dropped++;
+	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 99aa04c..7e05b56 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -157,6 +157,77 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
 	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
 
+void mlx4_slave_event(struct mlx4_dev *dev, int slave, u8 type, u8 port, u32 param)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *ctx = &priv->mfunc.master.slave_state[slave];
+	unsigned long flags;
+
+	if (ctx->last_cmd != MLX4_COMM_CMD_VHCR_POST) {
+		mlx4_warn(dev, "received event for inactive slave:%d\n", slave);
+		return;
+	}
+
+	/* Unconditionally add the new event - during overflows, we drop the
+	 * oldest events */
+	spin_lock_irqsave(&ctx->lock, flags);
+	ctx->eq[ctx->eq_pi & MLX4_MFUNC_EQE_MASK].type = type;
+	ctx->eq[ctx->eq_pi & MLX4_MFUNC_EQE_MASK].port = port;
+	ctx->eq[ctx->eq_pi & MLX4_MFUNC_EQE_MASK].param = param;
+	++ctx->eq_pi;
+	spin_unlock_irqrestore(&ctx->lock, flags);
+}
+
+static void mlx4_slave_event_all(struct mlx4_dev *dev, u8 type, u8 port, u32 param)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+
+	for (i = 0; i < dev->num_slaves; ++i)
+		if (priv->mfunc.master.slave_state[i].last_cmd == MLX4_COMM_CMD_VHCR_POST)
+			mlx4_slave_event(dev, i, type, port, param);
+}
+
+int mlx4_GET_EVENT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						 struct mlx4_cmd_mailbox *inbox,
+						 struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_slave_state *ctx = &priv->mfunc.master.slave_state[slave];
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+	if (ctx->eq_ci == ctx->eq_pi) {
+		vhcr->out_param = MLX4_EVENT_TYPE_NONE;
+	} else if ((u16) (ctx->eq_pi - ctx->eq_ci) > MLX4_MFUNC_MAX_EQES) {
+		ctx->eq_ci = ctx->eq_pi - MLX4_MFUNC_MAX_EQES;
+		vhcr->out_param = MLX4_EVENT_TYPE_EQ_OVERFLOW;
+	} else {
+		vhcr->out_param = ctx->eq[ctx->eq_ci & MLX4_MFUNC_EQE_MASK].type |
+				  ((u64) ctx->eq[ctx->eq_ci & MLX4_MFUNC_EQE_MASK].port << 8) |
+				  ((u64) ctx->eq[ctx->eq_ci & MLX4_MFUNC_EQE_MASK].param << 32);
+		++ctx->eq_ci;
+	}
+	spin_unlock_irqrestore(&ctx->lock, flags);
+	return 0;
+}
+
+static int mlx4_GET_EVENT(struct mlx4_dev *dev, struct mlx4_slave_eqe *eqe)
+{
+	int ret;
+	u64 out_param;
+
+	ret = mlx4_cmd_imm(dev, 0, &out_param, 0, 0, MLX4_CMD_GET_EVENT,
+						     MLX4_CMD_TIME_CLASS_A);
+	if (!ret) {
+		eqe->type = out_param & 0xff;
+		eqe->port = (out_param >> 8) & 0xff;
+		eqe->param = out_param >> 32;
+	} else
+		mlx4_err(dev, "Failed retrieving event\n");
+	return ret;
+}
+
 static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
 	struct mlx4_eqe *eqe;
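
The master-to-slave event path above is a lock-protected circular buffer with drop-oldest overflow semantics, drained by slaves through the GET_EVENT command; each event rides in the command's 64-bit immediate out_param. The packing used by mlx4_GET_EVENT_wrapper() and unpacked by mlx4_GET_EVENT(), restated:

	/* bits  0..7   type  (MLX4_EVENT_TYPE_*)
	 * bits  8..15  port
	 * bits 32..63  param (qpn/srqn/cqn, or a port-event code) */
	static u64 pack_slave_eqe(u8 type, u8 port, u32 param)
	{
		return type | ((u64) port << 8) | ((u64) param << 32);
	}
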
@@ -186,14 +257,26 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
 		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
-			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
-				      eqe->type);
+			if (mlx4_is_master(dev)) {
+				/* TODO: forward only to slave owning the QP */
+				mlx4_slave_event(dev, 0, eqe->type, 0,
+					      be32_to_cpu(eqe->event.qp.qpn) &
+					      0xffffff);
+			} else
+				mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+						   0xffffff, eqe->type);
 			break;
 
 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
-			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
-				      eqe->type);
+			if (mlx4_is_master(dev)) {
+				/* TODO: forward only to slave owning the SRQ */
+				mlx4_slave_event(dev, 0, eqe->type, 0,
+					      be32_to_cpu(eqe->event.srq.srqn) &
+					      0xffffff);
+			} else
+				mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+						    0xffffff, eqe->type);
 			break;
 
 		case MLX4_EVENT_TYPE_CMD:
@@ -208,10 +291,18 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
 				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
 						    port);
+				if (mlx4_is_master(dev)) {
+					mlx4_slave_event_all(dev, MLX4_EVENT_TYPE_PORT_CHANGE,
+							     port, MLX4_DEV_EVENT_PORT_DOWN);
+				}
 				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
 			} else {
 				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
 						    port);
+				if (mlx4_is_master(dev)) {
+					mlx4_slave_event_all(dev, MLX4_EVENT_TYPE_PORT_CHANGE,
+							     port, MLX4_DEV_EVENT_PORT_UP);
+				}
 				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
 			}
 			break;
@@ -221,8 +312,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 				  eqe->event.cq_err.syndrome == 1 ?
 				  "overrun" : "access violation",
 				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
-			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
-				      eqe->type);
+			if (mlx4_is_master(dev)) {
+				/* TODO: forward only to slave owning the CQ */
+				mlx4_slave_event(dev, 0, eqe->type, 0,
+					      be32_to_cpu(eqe->event.cq_err.cqn));
+			} else
+				mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+									   eqe->type);
 			break;
 
 		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
@@ -263,6 +359,72 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	return eqes_found;
 }
 
+static void mlx4_update_sqp(struct mlx4_dev *dev)
+{
+	if (!dev->caps.sqp_demux) {
+		mlx4_warn(dev, "unexpected update_sqp event\n");
+		return;
+	}
+	if (mlx4_GET_SLAVE_SQP(dev, mlx4_priv(dev)->mfunc.demux_sqp,
+			       dev->caps.sqp_demux))
+		mlx4_warn(dev, "couldn't update sqp\n");
+}
+
+void mlx4_slave_async_eq_poll(struct work_struct *work)
+{
+	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct mlx4_mfunc *mfunc = container_of(delay, struct mlx4_mfunc, comm_work);
+	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+	struct mlx4_dev *dev = &priv->dev;
+	struct mlx4_slave_eqe eqe;
+	int ret;
+	int i;
+
+	for (i = 0; i < MLX4_MFUNC_MAX_EQES; i++) {
+		ret = mlx4_GET_EVENT(dev, &eqe);
+		if (ret || eqe.type == MLX4_EVENT_TYPE_NONE)
+			break;
+
+		switch (eqe.type) {
+		case MLX4_EVENT_TYPE_PATH_MIG:
+		case MLX4_EVENT_TYPE_COMM_EST:
+		case MLX4_EVENT_TYPE_SQ_DRAINED:
+		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
+		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
+		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
+		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
+			mlx4_qp_event(dev, eqe.param, eqe.type);
+			break;
+
+		case MLX4_EVENT_TYPE_SRQ_LIMIT:
+		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
+			mlx4_srq_event(dev, eqe.param, eqe.type);
+			break;
+
+		case MLX4_EVENT_TYPE_PORT_CHANGE:
+			mlx4_dispatch_event(dev, eqe.param, eqe.port);
+			break;
+
+		case MLX4_EVENT_TYPE_CQ_ERROR:
+			mlx4_cq_event(dev, eqe.param, eqe.type);
+			break;
+
+		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
+			mlx4_warn(dev, "slave async EQ overrun\n");
+			break;
+
+		case MLX4_EVENT_TYPE_SQP_UPDATE:
+			mlx4_update_sqp(dev);
+			break;
+
+		default:
+			mlx4_warn(dev, "Unhandled event:%02x\n", eqe.type);
+		}
+	}
+	queue_delayed_work(priv->mfunc.comm_wq, &priv->mfunc.comm_work, HZ);
+}
+
 static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
 {
 	struct mlx4_dev *dev = dev_ptr;
@@ -314,9 +476,20 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int index;
+	int offset;
 
-	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
+	/* CX1: slave EQ DBs are located in the comm channel page */
+	if (mlx4_is_slave(dev) || mlx4_is_master(dev)) {
+		if (eq->eqn - dev->caps.reserved_eqs >= MLX4_MFUNC_EQ_NUM) {
+			mlx4_err(dev, "eqn:%d doorbell out of range (reserved:%d)\n",
+				 eq->eqn, dev->caps.reserved_eqs);
+			return NULL;
+		}
+		offset = 0x800 + (eq->eqn - dev->caps.reserved_eqs) * 8;
+		return ((void *) priv->mfunc.comm) + offset;
+	}
 
+	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
 	if (!priv->eq_table.uar_map[index]) {
 		priv->eq_table.uar_map[index] =
 			ioremap(pci_resource_start(dev->pdev, 2) +
@@ -571,28 +744,30 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 	int err;
 	int i;
 
-	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
-			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
+	err = mlx4_bitmap_init_no_mask(&priv->eq_table.bitmap, dev->caps.num_eqs,
+				       dev->caps.reserved_eqs, 0);
 	if (err)
 		return err;
 
 	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
 		priv->eq_table.uar_map[i] = NULL;
 
-	err = mlx4_map_clr_int(dev);
-	if (err)
-		goto err_out_free;
+	if (!mlx4_is_slave(dev)) {
+		err = mlx4_map_clr_int(dev);
+		if (err)
+			goto err_out_free;
 
-	priv->eq_table.clr_mask =
-		swab32(1 << (priv->eq_table.inta_pin & 31));
-	priv->eq_table.clr_int  = priv->clr_base +
-		(priv->eq_table.inta_pin < 32 ? 4 : 0);
+		priv->eq_table.clr_mask =
+			swab32(1 << (priv->eq_table.inta_pin & 31));
+		priv->eq_table.clr_int  = priv->clr_base +
+			(priv->eq_table.inta_pin < 32 ? 4 : 0);
+	}
 
+	req_eqs = dev->caps.num_comp_vectors;
 	dev->caps.num_comp_vectors = 0;
-	req_eqs = (dev->flags & MLX4_FLAG_MSI_X) ? num_online_cpus() : 1;
 	while (req_eqs) {
 		err = mlx4_create_eq(
-			dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
+			dev, dev->caps.num_cqs - dev->caps.reserved_cqs + MLX4_NUM_SPARE_EQE,
 			(dev->flags & MLX4_FLAG_MSI_X) ?
 			(MLX4_EQ_COMP_CPU0 + dev->caps.num_comp_vectors) : 0,
 			&priv->eq_table.eq[MLX4_EQ_COMP_CPU0 +
@@ -604,16 +779,18 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		req_eqs--;
 	}
 
-	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
-			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
-			     &priv->eq_table.eq[MLX4_EQ_ASYNC]);
-	if (err)
-		goto err_out_comp;
+	if (!mlx4_is_slave(dev)) {
+		err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+				     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
+				     &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+		if (err)
+			goto err_out_comp;
+	}
 
 	if (dev->flags & MLX4_FLAG_MSI_X) {
 		static char eq_name[MLX4_NUM_EQ][20];
 
-		for (i = 0; i < MLX4_EQ_COMP_CPU0 +
+		for (i = !!mlx4_is_slave(dev); i < MLX4_EQ_COMP_CPU0 +
 		      dev->caps.num_comp_vectors; ++i) {
 			if (i == 0)
 				snprintf(eq_name[0], 20, DRV_NAME "(async)");
@@ -639,25 +816,29 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		priv->eq_table.have_irq = 1;
 	}
 
-	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
-			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
-	if (err)
-		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
-			   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
+	if (!mlx4_is_slave(dev)) { /* hw async events cannot be shared */
+		err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+				  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+		if (err)
+			mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
+				   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
+	}
 
-	for (i = 0; i < MLX4_EQ_COMP_CPU0 + dev->caps.num_comp_vectors; ++i)
+	for (i = !!mlx4_is_slave(dev); i < MLX4_EQ_COMP_CPU0 + dev->caps.num_comp_vectors; ++i)
 		eq_set_ci(&priv->eq_table.eq[i], 1);
 
 	return 0;
 
 err_out_async:
-	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+	if (!mlx4_is_slave(dev))
+		mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
 
 err_out_comp:
-	for (i = 0; i < dev->caps.num_comp_vectors; ++i)
+	for (i = !!mlx4_is_slave(dev); i < dev->caps.num_comp_vectors; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP_CPU0 + i]);
 
-	mlx4_unmap_clr_int(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_unmap_clr_int(dev);
 	mlx4_free_irqs(dev);
 
 err_out_free:
@@ -670,15 +851,17 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
-	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
-		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+	if (!mlx4_is_slave(dev))
+		mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
+			    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
 
 	mlx4_free_irqs(dev);
 
 	for (i = 0; i < MLX4_EQ_COMP_CPU0 + dev->caps.num_comp_vectors; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
-	mlx4_unmap_clr_int(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_unmap_clr_int(dev);
 
 	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
 		if (priv->eq_table.uar_map[i])
@@ -686,3 +869,47 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 
 	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
 }
+
+/* A test that verifies that we can accept interrupts on all
+ * the irq vectors of the device.
+ * Interrupts are checked using the NOP command.
+ */
+int mlx4_test_interrupts(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+	int err;
+
+	err = mlx4_NOP(dev);
+	/* When not in MSI_X, there is only one irq to check */
+	if (!(dev->flags & MLX4_FLAG_MSI_X))
+		return err;
+
+	/* Loop over all completion vectors; for each vector, check
+	 * whether it works by mapping command completions to that vector
+	 * and issuing a NOP command.
+	 */
+	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+		/* Temporary use polling for command completions */
+		mlx4_cmd_use_polling(dev);
+
+		/* Map asynchronous events to this completion vector's EQ */
+		err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+				  priv->eq_table.eq[MLX4_EQ_COMP_CPU0 + i].eqn);
+		if (err) {
+			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+			mlx4_cmd_use_events(dev);
+			break;
+		}
+
+		/* Go back to using events */
+		mlx4_cmd_use_events(dev);
+		err = mlx4_NOP(dev);
+	}
+
+	/* Return to default */
+	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_test_interrupts);
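
The per-vector trick in mlx4_test_interrupts(): command completions are themselves delivered as asynchronous events, so remapping the async event mask onto completion vector i and issuing a NOP in event mode succeeds only if that vector's interrupt actually fires. Condensed from the loop above:

	mlx4_cmd_use_polling(dev);	/* remap without depending on irqs */
	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,	/* 0 = map */
			  priv->eq_table.eq[MLX4_EQ_COMP_CPU0 + i].eqn);
	mlx4_cmd_use_events(dev);	/* the NOP completion now needs the irq */
	err = mlx4_NOP(dev);		/* fails if vector i is dead */
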
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index fe2ff64..33e35d3 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -32,6 +32,7 @@
  * SOFTWARE.
  */
 
+#include <linux/etherdevice.h>
 #include <linux/mlx4/cmd.h>
 
 #include "fw.h"
@@ -133,6 +134,89 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
 	return err;
 }
 
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							  struct mlx4_cmd_mailbox *inbox,
+							  struct mlx4_cmd_mailbox *outbox)
+{
+	return mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT,
+					   MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_QUERY_SLAVE_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						       struct mlx4_cmd_mailbox *inbox,
+						       struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_caps *caps = outbox->buf;
+	u8 rand_mac[6];
+	int i, j;
+
+	memcpy(caps, &dev->caps, sizeof *caps);
+
+	/* CX1: blue-flame not supported in vfs */
+	caps->bf_reg_size = 0;
+	caps->bf_regs_per_page = 0;
+
+	/* CX1:
+	 * On ingress, slave0 gets full ownership over qp1, and demultiplexes GSI mads
+	 * on behalf of other slaves.
+	 * On egress, slave>0 must tunnel their GSI mads for validation by the qp1 owner */
+	if (slave) {
+		caps->sqp_demux = 0;
+		for (i = 1; i <= dev->caps.num_ports; ++i) {
+			caps->gid_table_len[i] = 1;
+			caps->pkey_table_len[i] = 1;
+			random_ether_addr(rand_mac);
+			caps->def_mac[i] = 0;
+			for (j = 0; j < ETH_ALEN; j++)
+				caps->def_mac[i] |= ((u64) rand_mac[j] << (8 * j));
+		}
+	} else {
+		caps->sqp_demux = dev->num_slaves;
+		for (i = 1; i <= dev->caps.num_ports; ++i) {
+			caps->gid_table_len[i] = dev->num_slaves;
+			caps->pkey_table_len[i] = 1;
+		}
+	}
+
+	/* Slave functions allocate themselves EQs, UARs, and PDs.
+	 * - num is the maximum resource index
+	 * - reserved is the minimum resource index */
+	caps->num_eqs = dev->caps.reserved_eqs + MLX4_MFUNC_EQ_NUM * (slave + 2);
+	caps->reserved_eqs = dev->caps.reserved_eqs + MLX4_MFUNC_EQ_NUM * (slave + 1);
+	caps->num_uars = dev->caps.num_uars * (slave + 1);
+	caps->reserved_uars = max_t(int, dev->caps.num_uars * slave, dev->caps.reserved_uars);
+
+	/* PDs have the same range in every guest; the distinction is in the msbs,
+	 * which contain the guest ID (vf + 1) */
+	caps->pd_base = slave + 1;
+
+	/* All other resources are allocated by the master, but we still report
+	 * 'num' and 'reserved' capabilities as follows:
+	 * - num remains the maximum resource index
+	 * - 'num - reserved' is the total available objects of a resource, but
+	 *   resource indices may be less than 'reserved'
+	 * TODO: set per-resource quotas */
+	return 0;
+}
+
+int mlx4_QUERY_SLAVE_CAP(struct mlx4_dev *dev, struct mlx4_caps *caps)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_SLAVE_CAP,
+			   MLX4_CMD_TIME_CLASS_A);
+	if (!err)
+		memcpy(caps, mailbox->buf, sizeof *caps);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
 	struct mlx4_cmd_mailbox *mailbox;
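
The EQ arithmetic in mlx4_QUERY_SLAVE_CAP_wrapper() hands every function a private window of MLX4_MFUNC_EQ_NUM event queues just above the firmware-reserved ones, since each function allocates only from [reserved_eqs, num_eqs). A worked example, assuming reserved_eqs = 16 and MLX4_MFUNC_EQ_NUM = 4 (neither value appears in this excerpt):

	/* caps->reserved_eqs = 16 + 4 * (slave + 1)
	 * caps->num_eqs      = 16 + 4 * (slave + 2)
	 *
	 *   slave 0: [20, 24) -> EQs 20..23
	 *   slave 1: [24, 28) -> EQs 24..27
	 *   slave 2: [28, 32) -> EQs 28..31 */
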
@@ -178,6 +262,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
+#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET	0x43
 #define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
 #define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
@@ -236,7 +321,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
 	dev_cap->max_mpts = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
-	dev_cap->reserved_eqs = 1 << (field & 0xf);
+	dev_cap->reserved_eqs = field & 0xf;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
 	dev_cap->max_eqs = 1 << (field & 0xf);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
@@ -268,6 +353,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev_cap->max_msg_sz = 1 << (field & 0x1f);
 	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
 	dev_cap->stat_rate_support = stat_rate;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
+	dev_cap->loopback_support = field & 0x1;
 	MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
 	dev_cap->reserved_uars = field >> 4;
@@ -407,9 +494,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
 	 * we can't use any EQs whose doorbell falls on that page,
 	 * even if the EQ itself isn't reserved.
+	 * CX1: in multi-function mode, EQ doorbells are virtualized so the whole EQ
+	 * range is available: the master and each of the slaves get 4 EQ doorbells
+	 * starting from the first non-reserved EQ.
 	 */
-	dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
-				    dev_cap->reserved_eqs);
+	if (!mlx4_is_mfunc(dev))
+		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
+					    dev_cap->reserved_eqs);
 
 	mlx4_dbg(dev, "Max ICM size %lld MB\n",
 		 (unsigned long long) dev_cap->max_icm_sz >> 20);
@@ -564,6 +655,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 #define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
 #define QUERY_FW_CLR_INT_BAR_OFFSET    0x28
 
+#define QUERY_FW_COMM_BASE_OFFSET      0x40
+#define QUERY_FW_COMM_BAR_OFFSET       0x48
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
@@ -624,6 +718,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
 	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
 
+	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
+	fw->comm_bar = (fw->comm_bar >> 6) * 2;
+	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", fw->comm_bar,
+								     fw->comm_base);
 	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
 
 	/*
@@ -812,6 +911,29 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	return err;
 }
 
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							 struct mlx4_cmd_mailbox *inbox,
+							 struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int port;
+	int err;
+
+	port = vhcr->in_modifier;
+	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+		return 0;
+
+	/* Enable port only if it was previously disabled */
+	if (!priv->mfunc.master.init_port_ref[port]) {
+		err = mlx4_INIT_PORT(dev, port);
+		if (err)
+			return err;
+	}
+	++priv->mfunc.master.init_port_ref[port];
+	priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
+	return 0;
+}
+
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -866,6 +988,30 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 }
 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
 
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							  struct mlx4_cmd_mailbox *inbox,
+							  struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int port;
+	int err;
+
+	port = vhcr->in_modifier;
+	if (!(priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)))
+		return 0;
+
+	/* CX1: master doesn't have interfaces - close port if this slave is
+	 * the last user */
+	if (priv->mfunc.master.init_port_ref[port] == 1) {
+		err = mlx4_CLOSE_PORT(dev, port);
+		if (err)
+			return err;
+	}
+	--priv->mfunc.master.init_port_ref[port];
+	priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+	return 0;
+}
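
Taken together, the two wrappers keep a per-port reference count on the
master, and the per-slave init_port_mask makes repeated calls idempotent:
the firmware INIT_PORT/CLOSE_PORT commands fire only on the 0->1 and 1->0
transitions. An illustrative trace (two slaves, one port, assuming the
wrappers above):

	slave 0 INIT_PORT   -> ref 0->1, firmware INIT_PORT issued
	slave 1 INIT_PORT   -> ref 1->2, no firmware command
	slave 1 INIT_PORT   -> bit already set in init_port_mask, no-op
	slave 0 CLOSE_PORT  -> ref 2->1, no firmware command
	slave 1 CLOSE_PORT  -> ref 1->0, firmware CLOSE_PORT issued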
+
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
 {
 	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index cabcb87..05ca09c 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -78,6 +78,7 @@ struct mlx4_dev_cap {
 	int wavelength[MLX4_MAX_PORTS + 1];
 	u64 trans_code[MLX4_MAX_PORTS + 1];
 	u16 stat_rate_support;
+	int loopback_support;
 	u32 flags;
 	int reserved_uars;
 	int uar_size;
@@ -164,6 +165,10 @@ struct mlx4_set_ib_param {
 };
 
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_SLAVE_CAP(struct mlx4_dev *dev, struct mlx4_caps *caps);
+int mlx4_QUERY_SLAVE_CAP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						    struct mlx4_cmd_mailbox *inbox,
+						    struct mlx4_cmd_mailbox *outbox);
 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_FA(struct mlx4_dev *dev);
 int mlx4_RUN_FW(struct mlx4_dev *dev);
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index cb72e28..f2c9f4d 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -88,8 +88,11 @@ int mlx4_register_interface(struct mlx4_interface *intf)
 	mutex_lock(&intf_mutex);
 
 	list_add_tail(&intf->list, &intf_list);
-	list_for_each_entry(priv, &dev_list, dev_list)
-		mlx4_add_device(intf, priv);
+	list_for_each_entry(priv, &dev_list, dev_list) {
+		/* CX1: master cannot run interfaces */
+		if (!mlx4_is_master(&priv->dev))
+			mlx4_add_device(intf, priv);
+	}
 
 	mutex_unlock(&intf_mutex);
 
@@ -165,11 +168,14 @@ int mlx4_register_device(struct mlx4_dev *dev)
 	mutex_lock(&intf_mutex);
 
 	list_add_tail(&priv->dev_list, &dev_list);
-	list_for_each_entry(intf, &intf_list, list)
-		mlx4_add_device(intf, priv);
-
+	/* CX1: master cannot run interfaces */
+	if (!mlx4_is_master(dev)) {
+		list_for_each_entry(intf, &intf_list, list)
+			mlx4_add_device(intf, priv);
+	}
 	mutex_unlock(&intf_mutex);
-	mlx4_start_catas_poll(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_start_catas_poll(dev);
 
 	mlx4_start_sense(dev);
 
@@ -183,7 +189,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
 
 	mlx4_stop_sense(dev);
 
-	mlx4_stop_catas_poll(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_stop_catas_poll(dev);
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1ea81e9..977f56f 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -75,10 +75,29 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
 
 #endif /* CONFIG_PCI_MSI */
 
+#ifdef CONFIG_PCI_IOV
+
+static int sr_iov;
+module_param(sr_iov, int, 0444);
+MODULE_PARM_DESC(sr_iov, "enable #sr_iov functions if sr_iov > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0444);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (sr_iov > 0)");
+
+#else /* CONFIG_PCI_IOV */
+
+#define sr_iov 0
+#define probe_vf 0
+
+#endif /* CONFIG_PCI_IOV */
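
Both knobs are ordinary module parameters, so SR-IOV is requested at load
time; for example (hypothetical values):

	modprobe mlx4_core sr_iov=4 probe_vf=1

would enable four virtual functions and have the host driver probe only PCI
function 1, leaving the remaining VFs to guests (per the pre-ARI assumption
in __mlx4_init_one() below, where VFs appear as functions 1..N of the PF's
device).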
+
 static char mlx4_version[] __devinitdata =
 	DRV_NAME ": Mellanox ConnectX core driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
 
+struct mutex drv_mutex;
+
 static struct mlx4_profile default_profile = {
 	.num_qp		= 1 << 18,
 	.num_srq	= 1 << 16,
@@ -143,7 +162,7 @@ static void process_mod_param_profile(void)
 				  default_profile.num_mtt);
 }
 
-static int log_num_mac = 2;
+static int log_num_mac = 7;
 module_param_named(log_num_mac, log_num_mac, int, 0444);
 MODULE_PARM_DESC(log_num_mac, "Log 2 Max number of MACs per ETH port (1-7)");
 
@@ -260,6 +279,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
 	}
 
+	dev->caps.uar_page_size	     = PAGE_SIZE;
 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
 	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
@@ -287,7 +307,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.reserved_mtts	     = DIV_ROUND_UP(dev_cap->reserved_mtts,
 						    dev->caps.mtts_per_seg);
 	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
-	dev->caps.reserved_uars	     = dev_cap->reserved_uars;
+
+	/* The first 128 UARs are used for EQ doorbells */
+	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
 	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
 	dev->caps.mtt_entry_sz	     = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
@@ -296,6 +318,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
 	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
+	dev->caps.loopback_support   = dev_cap->loopback_support;
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 
 	dev->caps.log_num_macs  = log_num_mac;
@@ -337,6 +360,70 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR];
 
+	/* CX1: vf0 takes the role of GSI multiplexing instead of the master */
+	dev->caps.sqp_demux = 0;
+	return 0;
+}
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+	int err;
+	u32 page_size;
+
+	err = mlx4_QUERY_SLAVE_CAP(dev, &dev->caps);
+	if (err)
+		return err;
+
+	page_size = ~dev->caps.page_size_cap + 1;
+	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+	if (page_size > PAGE_SIZE) {
+		mlx4_err(dev, "HCA minimum page size of %d bigger than "
+			 "kernel PAGE_SIZE of %ld, aborting.\n",
+			 page_size, PAGE_SIZE);
+		return -ENODEV;
+	}
+
+	/* TODO: relax this assumption */
+	if (dev->caps.uar_page_size != PAGE_SIZE) {
+		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+			 dev->caps.uar_page_size, PAGE_SIZE);
+		return -ENODEV;
+	}
+
+	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+		return -ENODEV;
+	}
+
+	if (dev->caps.uar_page_size * (dev->caps.num_uars -
+				       dev->caps.reserved_uars) >
+				       pci_resource_len(dev->pdev, 2)) {
+		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+			 "PCI resource 2 size of 0x%llx, aborting.\n",
+			 dev->caps.uar_page_size * dev->caps.num_uars,
+			 (unsigned long long) pci_resource_len(dev->pdev, 2));
+		return -ENODEV;
+	}
+
+	/* Adjust eq number */
+	if (dev->caps.num_eqs - dev->caps.reserved_eqs > num_possible_cpus() + 1)
+		dev->caps.num_eqs = dev->caps.reserved_eqs + num_possible_cpus() + 1;
+
+#if 0
+	mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+	mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+					  dev->caps.num_uars, dev->caps.reserved_uars,
+					  dev->caps.uar_page_size * dev->caps.num_uars,
+					  pci_resource_len(dev->pdev, 2));
+	mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+						       dev->caps.reserved_eqs);
+	mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+							dev->caps.num_pds,
+							dev->caps.reserved_pds,
+							dev->caps.slave_pd_shift,
+							dev->caps.pd_base);
+#endif
 	return 0;
 }
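
The page_size computation above is the usual two's-complement trick: for a
capability mask of the form 0xff...f000, ~mask + 1 isolates the lowest set
bit, i.e. the smallest page size the HCA supports. A worked example:

	page_size_cap = 0xfffff000	/* 4KB pages and up supported */
	~0xfffff000 + 1 = 0x00001000	/* minimum HCA page size: 4096 */

so a kernel with PAGE_SIZE >= 4096 passes the check, and anything smaller
aborts with -ENODEV.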
 
@@ -836,12 +923,56 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
 }
 
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	down(&priv->cmd.poll_sem);
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+		mlx4_warn(dev, "Failed to close slave function.\n");
+	up(&priv->cmd.poll_sem);
+}
+
 static void mlx4_close_hca(struct mlx4_dev *dev)
 {
-	mlx4_CLOSE_HCA(dev, 0);
-	mlx4_free_icms(dev);
-	mlx4_UNMAP_FA(dev);
-	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+	if (mlx4_is_slave(dev))
+		mlx4_slave_exit(dev);
+	else {
+		mlx4_CLOSE_HCA(dev, 0);
+		mlx4_free_icms(dev);
+		mlx4_UNMAP_FA(dev);
+		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+	}
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	u64 dma = (u64) priv->mfunc.vhcr_dma;
+
+	down(&priv->cmd.poll_sem);
+	mlx4_warn(dev, "Sending reset\n");
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+		goto err;
+	mlx4_warn(dev, "Sending vhcr0\n");
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+						    MLX4_COMM_TIME))
+		goto err;
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+						    MLX4_COMM_TIME))
+		goto err;
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+						    MLX4_COMM_TIME))
+		goto err;
+	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+		goto err;
+	up(&priv->cmd.poll_sem);
+	return 0;
+
+err:
+	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+	up(&priv->cmd.poll_sem);
+	return -EIO;
 }
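
Each comm-channel command carries only a 16-bit parameter (see the
mlx4_comm_cmd() prototype in mlx4.h), so the slave hands the 64-bit VHCR
DMA address to the master in four pieces:

	MLX4_COMM_CMD_VHCR0    <- bits 63..48  (dma >> 48)
	MLX4_COMM_CMD_VHCR1    <- bits 47..32  (dma >> 32)
	MLX4_COMM_CMD_VHCR2    <- bits 31..16  (dma >> 16)
	MLX4_COMM_CMD_VHCR_EN  <- bits 15..0   (dma), which also enables the VHCR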
 
 static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -857,65 +988,79 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 	int err;
 	int i;
 
-	err = mlx4_QUERY_FW(dev);
-	if (err) {
-		if (err == -EACCES)
-			mlx4_dbg(dev, "Function disabled, please upgrade "
-				      "to multi function driver.\n");
-		else
-			mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
-		return err;
-	}
+	if (!mlx4_is_slave(dev)) {
+		err = mlx4_QUERY_FW(dev);
+		if (err) {
+			if (err == -EACCES)
+				mlx4_dbg(dev, "Function disabled, please upgrade "
+					      "to multi function driver.\n");
+			else
+				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+			return err;
+		}
 
-	err = mlx4_load_fw(dev);
-	if (err) {
-		mlx4_err(dev, "Failed to start FW, aborting.\n");
-		return err;
-	}
+		err = mlx4_load_fw(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to start FW, aborting.\n");
+			return err;
+		}
 
-	mlx4_cfg.log_pg_sz_m = 1;
-	mlx4_cfg.log_pg_sz = 0;
-	err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
-	if (err)
-		mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+		mlx4_cfg.log_pg_sz_m = 1;
+		mlx4_cfg.log_pg_sz = 0;
+		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+		if (err)
+			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
 
-	err = mlx4_dev_cap(dev, &dev_cap);
-	if (err) {
-		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
-		goto err_stop_fw;
-	}
+		err = mlx4_dev_cap(dev, &dev_cap);
+		if (err) {
+			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+			goto err_stop_fw;
+		}
 
-	process_mod_param_profile();
-	profile = default_profile;
+		process_mod_param_profile();
+		profile = default_profile;
 
-	list_for_each_entry(config, &config_list, list) {
-		if (config->pdev == dev->pdev) {
-			for (i = 1; i <= dev->caps.num_ports; i++) {
-				dev->caps.possible_type[i] = config->port_type[i];
-				if (config->port_type[i] != MLX4_PORT_TYPE_AUTO)
-					dev->caps.port_type[i] = config->port_type[i];
+		list_for_each_entry(config, &config_list, list) {
+			if (config->pdev == dev->pdev) {
+				for (i = 1; i <= dev->caps.num_ports; i++) {
+					dev->caps.possible_type[i] = config->port_type[i];
+					if (config->port_type[i] != MLX4_PORT_TYPE_AUTO)
+						dev->caps.port_type[i] = config->port_type[i];
+				}
 			}
 		}
-	}
 
-	mlx4_set_port_mask(dev);
+		mlx4_set_port_mask(dev);
 
-	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
-	if ((long long) icm_size < 0) {
-		err = icm_size;
-		goto err_stop_fw;
-	}
+		icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
+		if ((long long) icm_size < 0) {
+			err = icm_size;
+			goto err_stop_fw;
+		}
 
-	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
 
-	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
-	if (err)
-		goto err_stop_fw;
+		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+		if (err)
+			goto err_stop_fw;
 
-	err = mlx4_INIT_HCA(dev, &init_hca);
-	if (err) {
-		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
-		goto err_free_icm;
+		err = mlx4_INIT_HCA(dev, &init_hca);
+		if (err) {
+			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+			goto err_free_icm;
+		}
+	} else {
+		err = mlx4_init_slave(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to initialize slave\n");
+			return err;
+		}
+
+		err = mlx4_slave_cap(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to obtain slave caps\n");
+			goto err_close;
+		}
 	}
 
 	err = mlx4_QUERY_ADAPTER(dev, &adapter);
@@ -933,12 +1078,14 @@ err_close:
 	mlx4_close_hca(dev);
 
 err_free_icm:
-	mlx4_free_icms(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_free_icms(dev);
 
 err_stop_fw:
-	mlx4_UNMAP_FA(dev);
-	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
+	if (!mlx4_is_slave(dev)) {
+		mlx4_UNMAP_FA(dev);
+		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+	}
 	return err;
 }
 
@@ -956,26 +1103,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 		return err;
 	}
 
-	err = mlx4_uar_alloc(dev, &priv->driver_uar);
-	if (err) {
-		mlx4_err(dev, "Failed to allocate driver access region, "
-			 "aborting.\n");
-		goto err_uar_table_free;
-	}
-
-	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
-	if (!priv->kar) {
-		mlx4_err(dev, "Couldn't map kernel access region, "
-			 "aborting.\n");
-		err = -ENOMEM;
-		goto err_uar_free;
-	}
-
 	err = mlx4_init_pd_table(dev);
 	if (err) {
 		mlx4_err(dev, "Failed to initialize "
 			 "protection domain table, aborting.\n");
-		goto err_kar_unmap;
+		goto err_uar_table_free;
 	}
 
 	err = mlx4_init_mr_table(dev);
@@ -992,11 +1124,14 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 		goto err_mr_table_free;
 	}
 
-	err = mlx4_cmd_use_events(dev);
-	if (err) {
-		mlx4_err(dev, "Failed to switch to event-driven "
-			 "firmware commands, aborting.\n");
-		goto err_eq_table_free;
+	/* CX1: no comm channel events */
+	if (!mlx4_is_master(dev) && !mlx4_is_slave(dev)) {
+		err = mlx4_cmd_use_events(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to switch to event-driven "
+				 "firmware commands, aborting.\n");
+			goto err_eq_table_free;
+		}
 	}
 
 	err = mlx4_NOP(dev);
@@ -1046,22 +1181,23 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 		goto err_qp_table_free;
 	}
 
-	for (port = 1; port <= dev->caps.num_ports; port++) {
-		ib_port_default_caps = 0;
-		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
-		if (err)
-			mlx4_warn(dev, "failed to get port %d default "
-				  "ib capabilities (%d). Continuing with "
-				  "caps = 0\n", port, err);
-		dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-		err = mlx4_SET_PORT(dev, port);
-		if (err) {
-			mlx4_err(dev, "Failed to set port %d, aborting\n",
-				port);
-			goto err_mcg_table_free;
+	if (!mlx4_is_slave(dev)) {
+		for (port = 1; port <= dev->caps.num_ports; port++) {
+			ib_port_default_caps = 0;
+			err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
+			if (err)
+				mlx4_warn(dev, "failed to get port %d default "
+					  "ib capabilities (%d). Continuing with "
+					  "caps = 0\n", port, err);
+			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+			err = mlx4_SET_PORT(dev, port);
+			if (err) {
+				mlx4_err(dev, "Failed to set port %d, aborting\n",
+					port);
+				goto err_mcg_table_free;
+			}
 		}
 	}
-
 	return 0;
 
 err_mcg_table_free:
@@ -1077,7 +1213,8 @@ err_cq_table_free:
 	mlx4_cleanup_cq_table(dev);
 
 err_cmd_poll:
-	mlx4_cmd_use_polling(dev);
+	if (!mlx4_is_master(dev) && !mlx4_is_slave(dev))
+		mlx4_cmd_use_polling(dev);
 
 err_eq_table_free:
 	mlx4_cleanup_eq_table(dev);
@@ -1088,12 +1225,6 @@ err_mr_table_free:
 err_pd_table_free:
 	mlx4_cleanup_pd_table(dev);
 
-err_kar_unmap:
-	iounmap(priv->kar);
-
-err_uar_free:
-	mlx4_uar_free(dev, &priv->driver_uar);
-
 err_uar_table_free:
 	mlx4_cleanup_uar_table(dev);
 	return err;
@@ -1108,26 +1239,40 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 	int i;
 
 	if (msi_x) {
+		/* The master uses only an event EQ;
+		 * each slave has one completion EQ */
+		if (mlx4_is_mfunc(dev))
+			needed_vectors = 1;
+		else
+			needed_vectors = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+					       MLX4_EQ_COMP_CPU0 + num_online_cpus());
 		for (i = 0; i < needed_vectors; ++i)
 			entries[i].entry = i;
 
+retry:
 		err = pci_enable_msix(dev->pdev, entries, needed_vectors);
 		if (err) {
-			if (err > 0)
-				mlx4_info(dev, "Only %d MSI-X vectors "
-					  "available, need %d. Not using MSI-X\n",
-					  err, needed_vectors);
+			if (err > 1) {
+				mlx4_info(dev, "Only %d MSI-X vectors "
+					  "available, need %d. Trying again\n",
+					  err, needed_vectors);
+				needed_vectors = err;
+				goto retry;
+			}
 			goto no_msi;
 		}
 
+		dev->caps.num_comp_vectors = needed_vectors - !mlx4_is_slave(dev);
 		for (i = 0; i < needed_vectors; ++i)
 			priv->eq_table.eq[i].irq = entries[i].vector;
 
 		dev->flags |= MLX4_FLAG_MSI_X;
+		dev->caps.num_msix = needed_vectors;
 		return;
 	}
 
 no_msi:
+	dev->caps.num_comp_vectors = mlx4_is_master(dev) ? 0 : 1;
 	for (i = 0; i < needed_vectors; ++i)
 		priv->eq_table.eq[i].irq = dev->pdev->irq;
 }
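
The retry loop relies on the old pci_enable_msix() contract: a positive
return value is not an error but the number of vectors the platform can
actually supply, so the driver shrinks its request to that count and tries
again; only when a single vector (or a real error) comes back does it fall
through to INTx.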
@@ -1141,9 +1286,12 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 
 	info->dev = dev;
 	info->port = port;
-	mlx4_init_mac_table(dev, &info->mac_table);
-	mlx4_init_vlan_table(dev, &info->vlan_table);
-
+	if (!mlx4_is_slave(dev)) {
+		mlx4_init_mac_table(dev, &info->mac_table);
+		mlx4_init_vlan_table(dev, &info->vlan_table);
+		info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+			(port - 1) * (1 << log_num_mac);
+	}
 	sprintf(info->dev_name, "mlx4_port%d", port);
 	memcpy(&info->port_attr.attr, &attr, sizeof(attr));
 	info->port_attr.show = show_port_type;
@@ -1191,10 +1339,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	/*
-	 * Check for BARs.  We expect 0: 1MB
+	 * Check for BARs.
 	 */
-	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
-	    pci_resource_len(pdev, 0) != 1 << 20) {
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
 		err = -ENODEV;
 		goto err_disable_pdev;
@@ -1258,36 +1405,81 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_LIST_HEAD(&priv->pgdir_list);
 	mutex_init(&priv->pgdir_mutex);
 
-	/*
-	 * Now reset the HCA before we touch the PCI capabilities or
-	 * attempt a firmware command, since a boot ROM may have left
-	 * the HCA in an undefined state.
-	 */
-	err = mlx4_reset(dev);
-	if (err) {
-		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
-		goto err_free_dev;
+
+	/* Detect if this device is a virtual function */
+	if (sr_iov && id->driver_data & MLX4_VF) {
+		/* When acting as pf, we normally skip vfs unless explicitly
+		 * requested to probe them.
+		 * TODO: add ARI support */
+		if (PCI_FUNC(pdev->devfn) > probe_vf) {
+			mlx4_warn(dev, "Skipping virtual function:%d\n",
+						PCI_FUNC(pdev->devfn));
+			err = -ENODEV;
+			goto err_free_dev;
+		}
+		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+		dev->flags |= MLX4_FLAG_SLAVE;
+	}
+
+	/* We reset the device and enable SRIOV only for physical devices */
+	if (!mlx4_is_slave(dev)) {
+		/*
+		 * Now reset the HCA before we touch the PCI capabilities or
+		 * attempt a firmware command, since a boot ROM may have left
+		 * the HCA in an undefined state.
+		 */
+		err = mlx4_reset(dev);
+		if (err) {
+			mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+			goto err_free_dev;
+		}
+		if (sr_iov) {
+			mlx4_warn(dev, "Enabling sriov with:%d vfs\n", sr_iov);
+			if (pci_enable_sriov(pdev, sr_iov)) {
+				mlx4_err(dev, "Failed to enable sriov, aborting.\n");
+				goto err_free_dev;
+			}
+			mlx4_warn(dev, "Running in master mode\n");
+			dev->flags |= MLX4_FLAG_SRIOV | MLX4_FLAG_MASTER;
+			dev->num_slaves = sr_iov;
+		}
 	}
 
 	if (mlx4_cmd_init(dev)) {
 		mlx4_err(dev, "Failed to init command interface, aborting.\n");
-		goto err_free_dev;
+		goto err_sriov;
 	}
 
-	err = mlx4_init_hca(dev);
-	if (err) {
-		if (err == -EACCES) {
-			dev->flags |= MLX4_FLAG_NOT_PRIME;
-			pci_set_drvdata(pdev, dev);
-			return 0;
-		} else
+	/* In slave functions, the communication channel must be initialized before
+	 * posting commands */
+	if (mlx4_is_slave(dev)) {
+		if (mlx4_multi_func_init(dev)) {
+			mlx4_err(dev, "Failed to init slave mfunc interface, aborting.\n");
 			goto err_cmd;
+		}
+	}
+
+	err = mlx4_init_hca(dev);
+	if (err)
+		goto err_cmd;
+
+	/* In master functions, the communication channel must be initialized after obtaining
+	 * its address from fw */
+	if (mlx4_is_master(dev)) {
+		if (mlx4_multi_func_init(dev)) {
+			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
+			goto err_close;
+		}
 	}
 
 	mlx4_enable_msi_x(dev);
+	if (mlx4_is_slave(dev) && !(dev->flags & MLX4_FLAG_MSI_X)) {
+		mlx4_err(dev, "INTx is not supported in slave mode, aborting.\n");
+		goto err_close;
+	}
 
 	err = mlx4_setup_hca(dev);
-	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && !mlx4_is_slave(dev)) {
 		dev->flags &= ~MLX4_FLAG_MSI_X;
 		pci_disable_msix(pdev);
 		err = mlx4_setup_hca(dev);
@@ -1318,6 +1510,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	pci_set_drvdata(pdev, dev);
 
+	/* Start serving the comm channel:
+	 * - in the master function: poll for commands
+	 * - in slave functions: poll for events
+	 * TODO - enable comm channel interrupts */
+	if (mlx4_is_master(dev) || mlx4_is_slave(dev))
+		queue_delayed_work(priv->mfunc.comm_wq, &priv->mfunc.comm_work, 0);
 	return 0;
 
 err_sense:
@@ -1332,7 +1530,8 @@ err_port:
 	mlx4_cleanup_qp_table(dev);
 	mlx4_cleanup_srq_table(dev);
 	mlx4_cleanup_cq_table(dev);
-	mlx4_cmd_use_polling(dev);
+	if (!mlx4_is_master(dev) && !mlx4_is_slave(dev))
+		mlx4_cmd_use_polling(dev);
 	mlx4_cleanup_eq_table(dev);
 	mlx4_cleanup_mr_table(dev);
 	mlx4_cleanup_pd_table(dev);
@@ -1347,6 +1546,12 @@ err_close:
 err_cmd:
 	mlx4_cmd_cleanup(dev);
 
+err_sriov:
+	if (mlx4_is_master(dev) || mlx4_is_slave(dev))
+		mlx4_multi_func_cleanup(dev);
+	if (sr_iov && (dev->flags & MLX4_FLAG_SRIOV))
+		pci_disable_sriov(pdev);
+
 err_free_dev:
 	kfree(priv);
 
@@ -1382,34 +1587,40 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	int p;
 
 	if (dev) {
-		if (dev->flags & MLX4_FLAG_NOT_PRIME)
-			goto cmd_cleanup;
+		/* Stop serving commands and events over comm channel */
+		if (mlx4_is_master(dev) || mlx4_is_slave(dev))
+			cancel_delayed_work_sync(&priv->mfunc.comm_work);
 		mlx4_sense_cleanup(dev);
 		mlx4_unregister_device(dev);
 		device_remove_file(&dev->pdev->dev, &priv->trigger_attr);
 		for (p = 1; p <= dev->caps.num_ports; p++) {
 			mlx4_cleanup_port_info(&priv->port[p]);
-			mlx4_CLOSE_PORT(dev, p);
+			if (!mlx4_is_slave(dev))
+				mlx4_CLOSE_PORT(dev, p);
 		}
 		mlx4_cleanup_mcg_table(dev);
 		mlx4_cleanup_qp_table(dev);
 		mlx4_cleanup_srq_table(dev);
 		mlx4_cleanup_cq_table(dev);
-		mlx4_cmd_use_polling(dev);
+		if (!mlx4_is_master(dev) && !mlx4_is_slave(dev))
+			mlx4_cmd_use_polling(dev);
 		mlx4_cleanup_eq_table(dev);
 		mlx4_cleanup_mr_table(dev);
 		mlx4_cleanup_pd_table(dev);
 
-		iounmap(priv->kar);
-		mlx4_uar_free(dev, &priv->driver_uar);
 		mlx4_cleanup_uar_table(dev);
 		mlx4_close_hca(dev);
+		if (mlx4_is_master(dev) || mlx4_is_slave(dev))
+			mlx4_multi_func_cleanup(dev);
 
 		if (dev->flags & MLX4_FLAG_MSI_X)
 			pci_disable_msix(pdev);
 
-cmd_cleanup:
 		mlx4_cmd_cleanup(dev);
+		if (sr_iov && (dev->flags & MLX4_FLAG_SRIOV)) {
+			mlx4_warn(dev, "Disabling sriov\n");
+			pci_disable_sriov(pdev);
+		}
 		kfree(priv);
 		pci_release_region(pdev, 2);
 		pci_release_region(pdev, 0);
@@ -1425,16 +1636,25 @@ int mlx4_restart_one(struct pci_dev *pdev)
 }
 
 static struct pci_device_id mlx4_pci_table[] = {
-	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
-	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
-	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
-	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon"EN 10GigE */
-	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon"EN 10GigE + Gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25408 "YATIR" EN 10GigE */
-	{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25408 "YATIR" EN 10GigE + Gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 "ConnectX 10GigE, PCIe, 2.0 5Gt/s] */
+	{ MLX4_VDEVICE(MELLANOX, 0x6340, 0) }, /* MT25408 "Hermon" SDR */
+	{ MLX4_VDEVICE(MELLANOX, 0x6341, MLX4_VF) }, /* MT25408 "Hermon" SDR VF */
+	{ MLX4_VDEVICE(MELLANOX, 0x634a, 0) }, /* MT25408 "Hermon" DDR */
+	{ MLX4_VDEVICE(MELLANOX, 0x634b, MLX4_VF) }, /* MT25408 "Hermon" DDR VF */
+	{ MLX4_VDEVICE(MELLANOX, 0x6354, 0) }, /* MT25408 "Hermon" QDR */
+	{ MLX4_VDEVICE(MELLANOX, 0x6732, 0) }, /* MT25408 "Hermon" DDR PCIe gen2 */
+	{ MLX4_VDEVICE(MELLANOX, 0x6733, MLX4_VF) }, /* MT25408 "Hermon" DDR PCIe gen2 VF */
+	{ MLX4_VDEVICE(MELLANOX, 0x673c, 0) }, /* MT25408 "Hermon" QDR PCIe gen2 */
+	{ MLX4_VDEVICE(MELLANOX, 0x673d, MLX4_VF) }, /* MT25408 "Hermon" QDR PCIe gen2 VF */
+	{ MLX4_VDEVICE(MELLANOX, 0x6368, 0) }, /* MT25408 "Hermon" EN 10GigE */
+	{ MLX4_VDEVICE(MELLANOX, 0x6369, MLX4_VF) }, /* MT25408 "Hermon" EN 10GigE VF */
+	{ MLX4_VDEVICE(MELLANOX, 0x6750, 0) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+	{ MLX4_VDEVICE(MELLANOX, 0x6751, MLX4_VF) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 VF */
+	{ MLX4_VDEVICE(MELLANOX, 0x6372, 0) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+	{ MLX4_VDEVICE(MELLANOX, 0x675a, 0) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+	{ MLX4_VDEVICE(MELLANOX, 0x6764, 0) }, /* MT26468 ConnectX EN 10GigE PCIe gen2 */
+	{ MLX4_VDEVICE(MELLANOX, 0x6765, MLX4_VF) }, /* MT26468 ConnectX EN 10GigE PCIe gen2 VF */
+	{ MLX4_VDEVICE(MELLANOX, 0x6746, 0) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+	{ MLX4_VDEVICE(MELLANOX, 0x676e, 0) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
 	{ 0, }
 };
 
@@ -1471,6 +1691,8 @@ static int __init mlx4_init(void)
 {
 	int ret;
 
+	mutex_init(&drv_mutex);
+
 	if (mlx4_verify_params())
 		return -EINVAL;
 
@@ -1484,7 +1706,9 @@ static int __init mlx4_init(void)
 
 static void __exit mlx4_cleanup(void)
 {
+	mutex_lock(&drv_mutex);
 	pci_unregister_driver(&mlx4_driver);
+	mutex_unlock(&drv_mutex);
 	mlx4_catas_cleanup();
 	mlx4_config_cleanup();
 }
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index ad62dca..7de7990 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -97,7 +97,8 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
  * entry in hash chain and *mgm holds end of hash chain.
  */
 static int find_mgm(struct mlx4_dev *dev,
-		    u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
+		    u8 *gid, enum mlx4_protocol prot,
+		    struct mlx4_cmd_mailbox *mgm_mailbox,
 		    u16 *hash, int *prev, int *index)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -146,8 +147,9 @@ static int find_mgm(struct mlx4_dev *dev,
 			return err;
 		}
 
-		if (!memcmp(mgm->gid, gid, 16))
-			return err;
+		if (!memcmp(mgm->gid, gid, 16) &&
+		    (prot == be32_to_cpu(mgm->members_count) >> 30))
+			return err;
 
 		*prev = *index;
 		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
@@ -157,8 +159,51 @@ static int find_mgm(struct mlx4_dev *dev,
 	return err;
 }
 
+int mlx4_MCAST_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						     struct mlx4_cmd_mailbox *inbox,
+						     struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_qp qp; /* dummy for calling attach/detach */
+
+	qp.qpn = vhcr->in_modifier & 0xffffff;
+	if (vhcr->op_modifier)
+		return mlx4_multicast_attach(dev, &qp, inbox->buf,
+					     vhcr->in_modifier >> 31,
+					     (vhcr->in_modifier >> 28) & 0x7);
+	else
+		return mlx4_multicast_detach(dev, &qp, inbox->buf,
+					     (vhcr->in_modifier >> 28) & 0x7);
+}
+
+static int mlx4_MCAST(struct mlx4_dev *dev, struct mlx4_qp *qp,
+		      u8 gid[16], u8 attach, u8 block_loopback,
+		      enum mlx4_protocol prot)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+	int qpn;
+
+	if (!mlx4_is_slave(dev))
+		return -EBADF;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	memcpy(mailbox->buf, gid, 16);
+	qpn = qp->qpn;
+	qpn |= (prot << 28);
+	if (attach && block_loopback)
+		qpn |= (1 << 31);
+
+	err = mlx4_cmd(dev, mailbox->dma, qpn, attach, MLX4_CMD_MCAST_ATTACH,
+						       MLX4_CMD_TIME_CLASS_A);
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
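
The para-virtualized attach/detach thus rides on a single MCAST_ATTACH
command whose in_modifier multiplexes several fields; the wrapper above
undoes exactly the packing that mlx4_MCAST() applies on the slave:

	bit  31      block loopback (attach only)
	bits 30..28  protocol (enum mlx4_protocol)
	bits 27..24  unused
	bits 23..0   QP number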
+
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback)
+			  int block_mcast_loopback, enum mlx4_protocol prot)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
@@ -170,6 +215,9 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 	int i;
 	int err;
 
+	if (mlx4_is_slave(dev))
+		return mlx4_MCAST(dev, qp, gid, 1, block_mcast_loopback, prot);
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
@@ -177,7 +225,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
 	mutex_lock(&priv->mcg_table.mutex);
 
-	err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+	err = find_mgm(dev, gid, prot, mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
 
@@ -199,7 +247,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 		memcpy(mgm->gid, gid, 16);
 	}
 
-	members_count = be32_to_cpu(mgm->members_count);
+	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	if (members_count == MLX4_QP_PER_MGM) {
 		mlx4_err(dev, "MGM at index %x is full.\n", index);
 		err = -ENOMEM;
@@ -216,7 +264,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 	mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
 					       (!!mlx4_blck_lb << MGM_BLCK_LB_BIT));
 
-	mgm->members_count       = cpu_to_be32(members_count);
+	mgm->members_count       = cpu_to_be32(members_count | ((u32) prot << 30));
 
 	err = mlx4_WRITE_MCG(dev, index, mailbox);
 	if (err)
@@ -251,7 +299,8 @@ out:
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+						enum mlx4_protocol prot)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
@@ -262,6 +311,9 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
 	int i, loc;
 	int err;
 
+	if (mlx4_is_slave(dev))
+		return mlx4_MCAST(dev, qp, gid, 0, 0, prot);
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
@@ -269,7 +321,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
 
 	mutex_lock(&priv->mcg_table.mutex);
 
-	err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+	err = find_mgm(dev, gid, prot, mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
 
@@ -288,7 +340,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
 		goto out;
 	}
 
-	members_count = be32_to_cpu(mgm->members_count);
+	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	for (loc = -1, i = 0; i < members_count; ++i)
 		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
 			loc = i;
@@ -300,7 +352,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
 	}
 
 
-	mgm->members_count = cpu_to_be32(--members_count);
+	mgm->members_count = cpu_to_be32(--members_count | ((u32) prot << 30));
 	mgm->qp[loc]       = mgm->qp[i - 1];
 	mgm->qp[i - 1]     = 0;
 
@@ -365,6 +417,10 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
 
+	/* Nothing to do for slaves - mcg handling is para-virtualized */
+	if (mlx4_is_slave(dev))
+		return 0;
+
 	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
 			       dev->caps.num_amgms - 1, 0, 0);
 	if (err)
@@ -377,5 +433,7 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
 {
+	if (mlx4_is_slave(dev))
+		return;
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
 }
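
With these changes the 32-bit members_count word of an MGM entry is shared
between the member count and a protocol tag, which is why the attach/detach
paths above mask reads with 0xffffff and re-or the protocol on every write:

	bits 31..30  protocol (enum mlx4_protocol)
	bits 29..24  reserved
	bits 23..0   number of attached QPs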
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 2cd8b78..94b7302 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -46,6 +46,8 @@
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
+#include <rdma/ib_verbs.h>
 
 #define DRV_NAME	"mlx4_core"
 #define PFX		DRV_NAME ": "
@@ -54,8 +56,11 @@
 
 enum {
 	MLX4_HCR_BASE		= 0x80680,
+	MLX4_HCR_SRIOV_BASE	= 0x4080680, /* good for SRIOV FW only */
 	MLX4_HCR_SIZE		= 0x0001c,
-	MLX4_CLR_INT_SIZE	= 0x00008
+	MLX4_CLR_INT_SIZE	= 0x00008,
+	MLX4_SLAVE_COMM_BASE	= 0x0,
+	MLX4_COMM_PAGESIZE	= 0x1000
 };
 
 enum {
@@ -71,7 +76,8 @@ enum {
 };
 
 enum {
-	MLX4_NUM_PDS		= 1 << 15
+	MLX4_NUM_PDS		= 1 << 15,
+	MLX4_SLAVE_PD_SHIFT	= 17, /* the 7 msbs encode the slave id */
 };
 
 enum {
@@ -87,6 +93,41 @@ enum {
 	MLX4_NUM_CMPTS		= MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
 };
 
+#define MLX4_COMM_TIME		10000
+enum {
+	MLX4_COMM_CMD_RESET,
+	MLX4_COMM_CMD_VHCR0,
+	MLX4_COMM_CMD_VHCR1,
+	MLX4_COMM_CMD_VHCR2,
+	MLX4_COMM_CMD_VHCR_EN,
+	MLX4_COMM_CMD_VHCR_POST
+};
+
+enum mlx4_resource {
+	RES_QP,
+	RES_CQ,
+	RES_SRQ,
+	RES_MPT,
+	RES_MTT,
+	RES_MAC,
+	RES_VLAN,
+	RES_MCAST
+};
+
+enum mlx4_alloc_mode {
+	ICM_RESERVE_AND_ALLOC,
+	ICM_RESERVE,
+	ICM_ALLOC,
+	ICM_MAC_VLAN,
+};
+
+enum {
+	MLX4_MFUNC_MAX		= 64,
+	MLX4_MFUNC_EQ_NUM	= 4,
+	MLX4_MFUNC_MAX_EQES     = 8,
+	MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
+};
+
 #ifdef CONFIG_MLX4_DEBUG
 extern int mlx4_debug_level;
 
@@ -111,6 +152,10 @@ extern int mlx4_debug_level;
 
 extern int mlx4_blck_lb;
 
+#define MLX4_VF					(1 << 0)
+#define MLX4_VDEVICE(vendor, device, flags)	\
+	PCI_VDEVICE(vendor, device), (flags)
+
 struct mlx4_bitmap {
 	u32			last;
 	u32			top;
@@ -167,12 +212,80 @@ struct mlx4_profile {
 struct mlx4_fw {
 	u64			clr_int_base;
 	u64			catas_offset;
+	u64			comm_base;
 	struct mlx4_icm	       *fw_icm;
 	struct mlx4_icm	       *aux_icm;
 	u32			catas_size;
 	u16			fw_pages;
 	u8			clr_int_bar;
 	u8			catas_bar;
+	u8			comm_bar;
+};
+
+struct mlx4_comm {
+	u32			slave_write;
+	u32			slave_read;
+};
+
+struct mlx4_slave_eqe {
+	u8 type;
+	u8 port;
+	u32 param;
+};
+
+struct mlx4_mcast_entry {
+	struct list_head list;
+	u64 addr;
+};
+
+#define VLAN_FLTR_SIZE	128
+struct mlx4_vlan_fltr {
+	__be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_slave_state {
+	u8 comm_toggle;
+	u8 last_cmd;
+	u8 init_port_mask;
+	dma_addr_t vhcr_dma;
+	u16 mtu[MLX4_MAX_PORTS + 1];
+	__be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+	struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+	struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+	struct mlx4_vlan_fltr vlan_filter[MLX4_MAX_PORTS + 1];
+	u16 eq_pi;
+	u16 eq_ci;
+	int sqp_start;
+	spinlock_t lock;
+};
+
+struct mlx4_mfunc_master_ctx {
+	struct mlx4_slave_state *slave_state;
+	int			init_port_ref[MLX4_MAX_PORTS + 1];
+	u16			max_mtu[MLX4_MAX_PORTS + 1];
+	int			disable_mcast_ref[MLX4_MAX_PORTS + 1];
+};
+
+struct mlx4_vhcr {
+	u64	in_param;
+	u64	out_param;
+	u32	in_modifier;
+	u32	timeout;
+	u16	op;
+	u16	token;
+	u8	op_modifier;
+	int	errno;
+};
+
+struct mlx4_mfunc {
+	struct mlx4_comm __iomem       *comm;
+	struct workqueue_struct	       *comm_wq;
+	struct delayed_work	        comm_work;
+	struct mlx4_vhcr	       *vhcr;
+	dma_addr_t			vhcr_dma;
+
+	struct mlx4_mfunc_master_ctx	master;
+	u32				demux_sqp[MLX4_MFUNC_MAX];
 };
 
 struct mlx4_cmd {
@@ -188,6 +301,7 @@ struct mlx4_cmd {
 	u16			token_mask;
 	u8			use_events;
 	u8			toggle;
+	u8			comm_toggle;
 };
 
 struct mlx4_uar_table {
@@ -260,20 +374,15 @@ struct mlx4_catas_err {
 
 struct mlx4_mac_table {
 #define MLX4_MAX_MAC_NUM	128
-#define MLX4_MAC_MASK		0xffffffffffff
-#define MLX4_MAC_VALID_SHIFT	63
 #define MLX4_MAC_TABLE_SIZE	(MLX4_MAX_MAC_NUM << 3)
 	__be64 entries[MLX4_MAX_MAC_NUM];
-	int refs[MLX4_MAX_MAC_NUM];
-	struct semaphore mac_sem;
+	struct mutex mutex;
 	int total;
 	int max;
 };
 
 struct mlx4_vlan_table {
 #define MLX4_MAX_VLAN_NUM	128
-#define MLX4_VLAN_MASK		0xfff
-#define MLX4_VLAN_VALID		(1 << 31)
 #define MLX4_VLAN_TABLE_SIZE	(MLX4_MAX_VLAN_NUM << 2)
 	__be32 entries[MLX4_MAX_VLAN_NUM];
 	int refs[MLX4_MAX_VLAN_NUM];
@@ -290,6 +399,7 @@ struct mlx4_port_info {
 	enum mlx4_port_type	tmp_type;
 	struct mlx4_mac_table	mac_table;
 	struct mlx4_vlan_table	vlan_table;
+	int			base_qpn;
 };
 
 struct mlx4_sense {
@@ -301,6 +411,8 @@ struct mlx4_sense {
 	u32			resched;
 };
 
+extern struct mutex drv_mutex;
+
 struct mlx4_priv {
 	struct mlx4_dev		dev;
 
@@ -313,6 +425,7 @@ struct mlx4_priv {
 
 	struct mlx4_fw		fw;
 	struct mlx4_cmd		cmd;
+	struct mlx4_mfunc	mfunc;
 
 	struct mlx4_bitmap	pd_bitmap;
 	struct mlx4_uar_table	uar_table;
@@ -324,11 +437,11 @@ struct mlx4_priv {
 	struct mlx4_mcg_table	mcg_table;
 
 	struct mlx4_catas_err	catas_err;
+	u32			catas_state;
+	u32			tout_counter;
 
 	void __iomem	       *clr_base;
 
-	struct mlx4_uar		driver_uar;
-	void __iomem	       *kar;
 	struct mlx4_port_info	port[MLX4_MAX_PORTS + 1];
 	struct device_attribute trigger_attr;
 	int                     trig;
@@ -350,10 +463,16 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
 int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 		     u32 reserved_bot, u32 reserved_top);
+int mlx4_bitmap_init_no_mask(struct mlx4_bitmap *bitmap, u32 num,
+			     u32 reserved_bot, u32 reserved_top);
 void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
 
 int mlx4_reset(struct mlx4_dev *dev);
 
+void mlx4_slave_event(struct mlx4_dev *dev, int slave, u8 type, u8 port, u32 param);
+int mlx4_GET_EVENT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox);
+
 int mlx4_init_pd_table(struct mlx4_dev *dev);
 int mlx4_init_uar_table(struct mlx4_dev *dev);
 int mlx4_init_mr_table(struct mlx4_dev *dev);
@@ -372,6 +491,22 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
 
+int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int mlx4_mr_reserve(struct mlx4_dev *dev);
+void mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						 struct mlx4_cmd_mailbox *inbox,
+						 struct mlx4_cmd_mailbox *outbox);
+
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
 int mlx4_catas_init(void);
@@ -389,15 +524,20 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 		      struct mlx4_dev_cap *dev_cap,
 		      struct mlx4_init_hca_param *init_hca);
 
+void mlx4_slave_async_eq_poll(struct work_struct *work);
 int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
 void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
 
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
 int mlx4_cmd_use_events(struct mlx4_dev *dev);
 void mlx4_cmd_use_polling(struct mlx4_dev *dev);
 
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, unsigned long timeout);
+
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
 void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
 
@@ -423,6 +563,36 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
 
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							struct mlx4_cmd_mailbox *inbox,
+							struct mlx4_cmd_mailbox *outbox);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							 struct mlx4_cmd_mailbox *inbox,
+							 struct mlx4_cmd_mailbox *outbox);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							  struct mlx4_cmd_mailbox *inbox,
+							  struct mlx4_cmd_mailbox *outbox);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							  struct mlx4_cmd_mailbox *inbox,
+							  struct mlx4_cmd_mailbox *outbox);
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
 
+int mlx4_CONF_SPECIAL_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						       struct mlx4_cmd_mailbox *inbox,
+						       struct mlx4_cmd_mailbox *outbox);
+int mlx4_GET_SLAVE_SQP(struct mlx4_dev *dev, u32 *sqp, int num);
+int mlx4_GET_SLAVE_SQP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							  struct mlx4_cmd_mailbox *inbox,
+							  struct mlx4_cmd_mailbox *outbox);
+
+int mlx4_MCAST_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						     struct mlx4_cmd_mailbox *inbox,
+						     struct mlx4_cmd_mailbox *outbox);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox);
+
 #endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index d31e5e9..abda02c 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -45,20 +45,36 @@
 #include <linux/mlx4/cq.h>
 #include <linux/mlx4/srq.h>
 #include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
 
 #include "en_port.h"
 
 #define DRV_NAME	"mlx4_en"
-#define DRV_VERSION	"1.4.1"
-#define DRV_RELDATE	"April 2009"
-
+#define DRV_VERSION	"1.4.2.2"
+#define DRV_RELDATE	"Nov 2009"
 
 #define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
-#define mlx4_dbg(mlevel, priv, format, arg...)	\
+#define en_print(level, priv, format, arg...)			\
+	{							\
+	if ((priv)->registered)					\
+		printk(level "%s: %s: " format, DRV_NAME,	\
+			(priv->dev)->name, ## arg);		\
+	else							\
+		printk(level "%s: %s: Port %d: " format,	\
+			DRV_NAME, (&priv->mdev->pdev->dev)->bus_id, \
+			(priv)->port, ## arg);			\
+	}
+
+#define en_dbg(mlevel, priv, format, arg...)	\
 	if (NETIF_MSG_##mlevel & priv->msg_enable) \
-	printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
-		(&priv->mdev->pdev->dev)->bus_id , ## arg)
+		en_print(KERN_DEBUG, priv, format, ## arg)
+#define en_warn(priv, format, arg...) \
+	en_print(KERN_WARNING, priv, format, ## arg)
+#define en_err(priv, format, arg...) \
+	en_print(KERN_ERR, priv, format, ## arg)
+#define en_info(priv, format, arg...) \
+	en_print(KERN_INFO, priv, format, ## arg)
 
 #define mlx4_err(mdev, format, arg...) \
 	printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
@@ -78,12 +94,10 @@
 #define MLX4_EN_PAGE_SHIFT	12
 #define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
 #define MAX_TX_RINGS		(MLX4_EN_NUM_HASH_RINGS + MLX4_EN_NUM_PPP_RINGS + 1)
-#define MAX_RX_RINGS		17
-#define MAX_RSS_MAP_SIZE	64
-#define RSS_FACTOR		1
+#define MAX_RX_RINGS		9
+#define MIN_RX_RINGS		5
 #define TXBB_SIZE		64
 #define HEADROOM		(2048 / TXBB_SIZE + 1)
-#define MAX_LSO_HDR_SIZE	92
 #define STAMP_STRIDE		64
 #define STAMP_DWORDS		(STAMP_STRIDE / 4)
 #define STAMP_SHIFT		31
@@ -127,7 +141,7 @@ enum {
 #define MLX4_EN_SMALL_PKT_SIZE		64
 #define MLX4_EN_TX_HASH_SIZE		256
 #define MLX4_EN_TX_HASH_MASK		(MLX4_EN_TX_HASH_SIZE - 1)
-#define MLX4_EN_NUM_HASH_RINGS		8
+#define MLX4_EN_NUM_HASH_RINGS		4
 #define MLX4_EN_NUM_PPP_RINGS		8
 #define MLX4_EN_DEF_TX_RING_SIZE	512
 #define MLX4_EN_DEF_RX_RING_SIZE  	1024
@@ -146,6 +160,7 @@ enum {
 #define MLX4_EN_RX_SIZE_THRESH		1024
 #define MLX4_EN_RX_RATE_THRESH		(1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
 #define MLX4_EN_SAMPLE_INTERVAL		0
+#define MLX4_EN_AVG_PKT_SMALL		256
 
 #define MLX4_EN_AUTO_CONF	0xffff
 
@@ -161,9 +176,13 @@ enum {
 
 #define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
 #define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)
+#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
 
 #define MLX4_EN_MIN_MTU		46
-#define ETH_BCAST		0xffffffffffff
+#define ETH_BCAST		0xffffffffffffULL
+
+#define MLX4_EN_LOOPBACK_RETRIES	5
+#define MLX4_EN_LOOPBACK_TIMEOUT	1000
 
 #ifdef MLX4_EN_PERF_STAT
 /* Number of samples to 'average' */
@@ -204,6 +223,7 @@ enum cq_type {
 #define ROUNDUP_LOG2(x)		ilog2(roundup_pow_of_two(x))
 #define XNOR(x, y)		(!(x) == !(y))
 #define ILLEGAL_MAC(addr)	(addr == 0xffffffffffff || addr == 0x0)
+#define rounddown_pow_of_two(n)	((n == 1) ? 0 : (1UL << ilog2(n)))
 
 
 struct mlx4_en_tx_info {
@@ -278,7 +298,6 @@ struct mlx4_en_ipfrag {
 };
 
 struct mlx4_en_rx_desc {
-	struct mlx4_wqe_srq_next_seg next;
 	/* actual number of entries depends on rx ring stride */
 	struct mlx4_wqe_data_seg data[0];
 };
@@ -312,7 +331,6 @@ struct mlx4_en_lro {
 
 
 struct mlx4_en_rx_ring {
-	struct mlx4_srq srq;
 	struct mlx4_hwq_resources wqres;
 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
 	struct mlx4_en_lro lro;
@@ -328,8 +346,6 @@ struct mlx4_en_rx_ring {
 	u32 prod;
 	u32 cons;
 	u32 buf_size;
-	int need_refill;
-	int full;
 	void *buf;
 	void *rx_info;
 	unsigned long bytes;
@@ -343,7 +359,7 @@ static inline int mlx4_en_can_lro(__be16 status)
 {
 	return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4	|
 				     MLX4_CQE_STATUS_IPV4F	|
-                                     MLX4_CQE_STATUS_IPV6	|
+				     MLX4_CQE_STATUS_IPV6	|
 				     MLX4_CQE_STATUS_IPV4OPT	|
 				     MLX4_CQE_STATUS_TCP	|
 				     MLX4_CQE_STATUS_UDP	|
@@ -416,11 +432,9 @@ struct mlx4_en_dev {
 
 
 struct mlx4_en_rss_map {
-	int size;
 	int base_qpn;
-	u16 map[MAX_RSS_MAP_SIZE];
-	struct mlx4_qp qps[MAX_RSS_MAP_SIZE];
-	enum mlx4_qp_state state[MAX_RSS_MAP_SIZE];
+	struct mlx4_qp qps[MAX_RX_RINGS];
+	enum mlx4_qp_state state[MAX_RX_RINGS];
 	struct mlx4_qp indir_qp;
 	enum mlx4_qp_state indir_state;
 };
@@ -434,6 +448,12 @@ struct mlx4_en_rss_context {
 	__be32 rss_key[10];
 };
 
+struct mlx4_en_port_state {
+	int link_state;
+	int link_speed;
+	int transciver;
+};
+
 struct mlx4_en_pkt_stats {
 	unsigned long broadcast;
 	unsigned long rx_prio[8];
@@ -489,6 +509,7 @@ struct mlx4_en_priv {
 	struct vlan_group *vlgrp;
 	struct net_device_stats stats;
 	struct net_device_stats ret_stats;
+	struct mlx4_en_port_state port_state;
 	spinlock_t stats_lock;
 
 	unsigned long last_moder_packets;
@@ -507,14 +528,16 @@ struct mlx4_en_priv {
 	u16 sample_interval;
 	u16 adaptive_rx_coal;
 	u32 msg_enable;
+	u32 loopback_ok;
+	u32 validate_loopback;
 
 	struct mlx4_hwq_resources res;
 	int link_state;
 	int last_link_state;
 	bool port_up;
 	int port;
-	int registered;
-	int allocated;
+	bool registered;
+	bool allocated;
 	int rx_csum;
 	u64 mac;
 	int mac_index;
@@ -539,7 +562,6 @@ struct mlx4_en_priv {
 	struct mlx4_en_tx_hash_entry tx_hash[MLX4_EN_TX_HASH_SIZE];
 	struct work_struct mcast_task;
 	struct work_struct mac_task;
-	struct delayed_work refill_task;
 	struct work_struct watchdog_task;
 	struct work_struct linkstate_task;
 	struct delayed_work stats_task;
@@ -564,8 +586,6 @@ void mlx4_en_stop_port(struct net_device *dev);
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
 
-int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
-
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 		      int entries, int ring, enum cq_type mode);
 void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -584,7 +604,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ri
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
-			     int cq, int srqn);
+			     int cq);
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 				struct mlx4_en_tx_ring *ring);
 
@@ -603,7 +623,7 @@ int mlx4_en_process_rx_cq_skb(struct net_device *dev,
 			      int budget);
 int mlx4_en_poll_rx_cq(struct net_device *poll_dev, int *budget);
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
-			     int is_tx, int rss, int qpn, int cqn, int srqn,
+			     int is_tx, int rss, int qpn, int cqn,
 			     struct mlx4_qp_context *context);
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
 int mlx4_en_map_buffer(struct mlx4_buf *buf);
@@ -622,11 +642,7 @@ struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 			       struct mlx4_en_rx_alloc *page_alloc,
 			       unsigned int length);
 
-void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
-				 struct mlx4_en_rss_map *rss_map,
-				 int num_entries, int num_rings);
-
-void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
+void mlx4_en_lro_flush(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, u8 all);
 int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
 		   struct mlx4_en_rx_desc *rx_desc,
 		   struct skb_frag_struct *skb_frags,
@@ -634,14 +650,12 @@ int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
 void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
 int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
 
-void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
+void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map);
 int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
 void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
 int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
-void mlx4_en_rx_refill(struct work_struct *work);
 void mlx4_en_rx_irq(struct mlx4_cq *mcq);
 
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
 int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
 			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
@@ -649,6 +663,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 			   u8 promisc);
 
 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
+
+#define MLX4_EN_NUM_SELF_TEST	5
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
+u64 mlx4_en_mac_to_u64(u8 *addr);
 
 /*
  * Globals
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 3c0e802..f896dd2 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -183,10 +183,26 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
 	kfree(buddy->num_free);
 }
 
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+	u64 in_param;
+	u64 out_param;
 	u32 seg;
+	int err;
+
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &in_param) = order;
+		*(((u32 *) &in_param) + 1) = 0;
+		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+						       ICM_RESERVE_AND_ALLOC,
+						       MLX4_CMD_ALLOC_RES,
+						       MLX4_CMD_TIME_CLASS_A);
+		if (err)
+			return -1;
+		else
+			return out_param;
+	}
 
 	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
 	if (seg == -1)
@@ -224,16 +240,33 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_init);
 
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+	u64 in_param;
+	int err;
 
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &in_param) = first_seg;
+		*(((u32 *) &in_param) + 1) = order;
+		err = mlx4_cmd(dev, in_param, RES_MTT, ICM_RESERVE_AND_ALLOC,
+						       MLX4_CMD_FREE_RES,
+						       MLX4_CMD_TIME_CLASS_A);
+		if (err)
+			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n", first_seg, order);
+	} else {
+		mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, order);
+		mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg,
+					     first_seg + (1 << order) - 1);
+	}
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
 	if (mtt->order < 0)
 		return;
 
-	mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-	mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
-			     mtt->first_seg + (1 << mtt->order) - 1);
+	mlx4_free_mtt_range(dev, mtt->first_seg, mtt->order);
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
 
@@ -304,21 +337,117 @@ int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
 
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						 struct mlx4_cmd_mailbox *inbox,
+						 struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_mtt mtt;
+	u64 *page_list = inbox->buf;
+	int i;
+
+	/* Call the SW implementation of write_mtt:
+	 * - Prepare a dummy mtt struct
+	 * - Translate inbox contents to simple addresses in host endianness */
+	mtt.first_seg = 0;
+	mtt.order = 0;
+	mtt.page_shift = 0;
+	for (i = 0; i < vhcr->in_modifier; ++i)
+		page_list[i + 2] = be64_to_cpu(page_list[i + 2]) & ~1ULL;
+	vhcr->errno = mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]),
+						vhcr->in_modifier,
+						page_list + 2);
+	return 0;
+}
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+			  int num_entries)
+{
+	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+			MLX4_CMD_TIME_CLASS_A);
+}
+
+int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_slave(dev)) {
+		err = mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, ICM_RESERVE,
+								MLX4_CMD_ALLOC_RES,
+								MLX4_CMD_TIME_CLASS_A);
+		if (err)
+			return -1;
+		return out_param;
+	}
+	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
+void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &in_param) = index;
+		*(((u32 *) &in_param) + 1) = 0;
+		err = mlx4_cmd(dev, in_param, RES_MPT, ICM_RESERVE,
+						       MLX4_CMD_FREE_RES,
+						       MLX4_CMD_TIME_CLASS_A);
+		if (err)
+			mlx4_warn(dev, "Failed to release mr index:%d\n", index);
+	} else
+		mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+	u64 param;
+
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &param) = index;
+		*(((u32 *) &param) + 1) = 0;
+		return mlx4_cmd_imm(dev, param, &param, RES_MPT, ICM_ALLOC,
+							MLX4_CMD_ALLOC_RES,
+							MLX4_CMD_TIME_CLASS_A);
+	} else
+		return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &in_param) = index;
+		*(((u32 *) &in_param) + 1) = 0;
+		err = mlx4_cmd(dev, in_param, RES_MPT, ICM_ALLOC,
+						       MLX4_CMD_FREE_RES,
+						       MLX4_CMD_TIME_CLASS_A);
+		if (err)
+			mlx4_warn(dev, "Failed to free icm of mr index:%d\n", index);
+	} else
+		mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
 		  int npages, int page_shift, struct mlx4_mr *mr)
 {
-	struct mlx4_priv *priv = mlx4_priv(dev);
 	u32 index;
 	int err;
 
-	index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+	index = mlx4_mr_reserve(dev);
 	if (index == -1)
 		return -ENOMEM;
 
 	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
 				     access, npages, page_shift, mr);
 	if (err)
-		mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+		mlx4_mr_release(dev, index);
 
 	return err;
 }
@@ -342,20 +471,19 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
 
 void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-	struct mlx4_priv *priv = mlx4_priv(dev);
 	mlx4_mr_free_reserved(dev, mr);
-	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+	mlx4_mr_release(dev, key_to_hw_index(mr->key));
+	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_free);
 
 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_mpt_entry *mpt_entry;
 	int err;
 
-	err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
 	if (err)
 		return err;
 
@@ -413,7 +541,7 @@ err_cmd:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
 err_table:
-	mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -451,24 +579,45 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 		   int start_index, int npages, u64 *page_list)
 {
+	struct mlx4_cmd_mailbox *mailbox = NULL;
 	int chunk;
-	int err;
+	int err = 0;
+	__be64 *inbox = NULL;
+	int i;
 
 	if (mtt->order < 0)
 		return -EINVAL;
 
+	if (mlx4_is_slave(dev)) {
+		mailbox = mlx4_alloc_cmd_mailbox(dev);
+		if (IS_ERR(mailbox))
+			return PTR_ERR(mailbox);
+		inbox = mailbox->buf;
+	}
+
 	while (npages > 0) {
-		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
-		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
+		if (mlx4_is_slave(dev)) {
+			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - MLX4_MTT_ENTRY_PER_SEG, npages);
+			inbox[0] = cpu_to_be64(mtt->first_seg * MLX4_MTT_ENTRY_PER_SEG + start_index);
+			inbox[1] = 0;
+			for (i = 0; i < chunk; ++i)
+				inbox[i + 2] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
+			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+		} else {
+			chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
+			err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
+		}
 		if (err)
-			return err;
+			goto out;
 
 		npages      -= chunk;
 		start_index += chunk;
 		page_list   += chunk;
 	}
-
-	return 0;
+out:
+	if (mlx4_is_slave(dev))
+		mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_write_mtt);
 
@@ -504,6 +653,10 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
 	if (!is_power_of_2(dev->caps.num_mpts))
 		return -EINVAL;
 
+	/* Nothing to do for slaves - all MR handling is forwarded to the master */
+	if (mlx4_is_slave(dev))
+		return 0;
+
 	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
 			       ~0, dev->caps.reserved_mrws, 0);
 	if (err)
@@ -538,6 +691,8 @@ void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
 {
 	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
+	if (mlx4_is_slave(dev))
+		return;
 	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
 	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
 }
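
A note on the slave paths above (the idiom repeats in qp.c and srq.c below):
two 32-bit arguments are passed to the master by type-punning them into the
immediate u64 parameter, e.g. *((u32 *) &in_param) = order. On a
little-endian host that is equivalent to the explicit sketch below; the cast
form is endianness-dependent, which matters if these wrappers ever run
big-endian (the helper name is illustrative, not part of the patch):

/* Pack two u32 command arguments into one immediate u64: first
 * argument in the low dword, second in the high dword, matching
 * the little-endian layout of the cast-based idiom. */
static inline u64 example_pack_vhcr_param(u32 first, u32 second)
{
	return (u64) first | ((u64) second << 32);
}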
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 26d1a7a..c7161ec 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -46,7 +46,7 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
 	*pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
 	if (*pdn == -1)
 		return -ENOMEM;
-
+	*pdn |= dev->caps.pd_base << dev->caps.slave_pd_shift;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
@@ -73,12 +73,18 @@ void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
 
 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
 {
+	int offset;
+
 	uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
 	if (uar->index == -1)
 		return -ENOMEM;
 
-	uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
-
+	if (mlx4_is_slave(dev))
+		offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+				       dev->caps.uar_page_size);
+	else
+		offset = uar->index;
+	uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
@@ -91,6 +97,10 @@ EXPORT_SYMBOL_GPL(mlx4_uar_free);
 
 int mlx4_init_uar_table(struct mlx4_dev *dev)
 {
+	/* CX1: master doesn't have UARs */
+	if (mlx4_is_master(dev))
+		return 0;
+
 	if (dev->caps.num_uars <= 128) {
 		mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
 			 dev->caps.num_uars);
@@ -98,12 +108,14 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
 		return -ENODEV;
 	}
 
-	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
-				dev->caps.num_uars, dev->caps.num_uars - 1,
-				max(128, dev->caps.reserved_uars), 0);
+	return mlx4_bitmap_init_no_mask(&mlx4_priv(dev)->uar_table.bitmap,
+					dev->caps.num_uars,
+					dev->caps.reserved_uars, 0);
 }
 
 void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
 {
+	if (mlx4_is_master(dev))
+		return;
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
 }
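
A slave's UAR indices come from the globally shared bitmap, but its BAR 2
maps only its own slice of UAR space, so mlx4_uar_alloc() above wraps the
index by the number of UAR pages the BAR holds. A sketch of the arithmetic
(the parameters stand in for pci_resource_len(dev->pdev, 2) and
dev->caps.uar_page_size):

/* E.g. a 1 MB BAR with 4 KB UAR pages holds 256 pages, so a global
 * index of 300 lands at offset 300 % 256 = 44 within the window. */
static int example_slave_uar_offset(int index, long bar2_len,
				    int uar_page_size)
{
	return index % (int) (bar2_len / uar_page_size);
}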
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index b93fb6c..ced3bd8 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -33,25 +33,31 @@
 
 #include <linux/errno.h>
 #include <linux/if_ether.h>
+#include <linux/if_vlan.h>
 
 #include <linux/mlx4/cmd.h>
 
 #include "mlx4.h"
+#include "en_port.h"
 
 int mlx4_ib_set_4k_mtu = 0;
 module_param_named(set_4k_mtu, mlx4_ib_set_4k_mtu, int, 0444);
 MODULE_PARM_DESC(set_4k_mtu, "attempt to set 4K MTU to all ConnectX ports");
 
+#define MLX4_MAC_VALID		(1ull << 63)
+#define MLX4_MAC_MASK		0xffffffffffffULL
+
+#define MLX4_VLAN_VALID		(1u << 31)
+#define MLX4_VLAN_MASK		0xfff
+
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
 {
 	int i;
 
-	sema_init(&table->mac_sem, 1);
-	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+	mutex_init(&table->mutex);
+	for (i = 0; i < MLX4_MAX_MAC_NUM; i++)
 		table->entries[i] = 0;
-		table->refs[i] = 0;
-	}
-	table->max = 1 << dev->caps.log_num_macs;
+	table->max   = 1 << dev->caps.log_num_macs;
 	table->total = 0;
 }
 
@@ -68,7 +74,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
 	table->total = 0;
 }
 
-static int mlx4_SET_PORT_mac_table(struct mlx4_dev *dev, u8 port,
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
 				   __be64 *entries)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -89,29 +95,37 @@ static int mlx4_SET_PORT_mac_table(struct mlx4_dev *dev, u8 port,
 	return err;
 }
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 {
-	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_table *table = &info->mac_table;
+	u64 out_param;
 	int i, err = 0;
 	int free = -1;
-	u64 valid = 1;
 
-	mlx4_dbg(dev, "Registering mac : 0x%llx\n", mac);
-	down(&table->mac_sem);
+	if (mlx4_is_slave(dev)) {
+		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, port,
+				   MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A);
+		if (!err)
+			*qpn = out_param;
+		return err;
+	}
+
+	mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+	mutex_lock(&table->mutex);
 	for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
-		if (free < 0 && !table->refs[i]) {
+		if (free < 0 && !table->entries[i]) {
 			free = i;
 			continue;
 		}
 
 		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
-			/* Mac already registered, increase refernce count */
-			*index = i;
-			++table->refs[i];
+			/* MAC already registered; must not have duplicates */
+			err = -EEXIST;
 			goto out;
 		}
 	}
-	mlx4_dbg(dev, "Free mac index is %d\n", free);
+	mlx4_dbg(dev, "Free MAC index is %d\n", free);
 
 	if (table->total == table->max) {
 		/* No free mac entries */
@@ -120,47 +134,92 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
 	}
 
 	/* Register new MAC */
-	table->refs[free] = 1;
-	table->entries[free] = cpu_to_be64(mac | valid << MLX4_MAC_VALID_SHIFT);
+	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
 
-	err = mlx4_SET_PORT_mac_table(dev, port, table->entries);
+	err = mlx4_set_port_mac_table(dev, port, table->entries);
 	if (unlikely(err)) {
-		mlx4_err(dev, "Failed adding mac: 0x%llx\n", mac);
-		table->refs[free] = 0;
+		mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
 		table->entries[free] = 0;
 		goto out;
 	}
 
-	*index = free;
+	*qpn = info->base_qpn + free;
 	++table->total;
 out:
-	up(&table->mac_sem);
+	mutex_unlock(&table->mutex);
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_register_mac);
 
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
+static int validate_index(struct mlx4_dev *dev,
+			  struct mlx4_mac_table *table, int index)
 {
-	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+	int err = 0;
 
-	down(&table->mac_sem);
-	if (!table->refs[index]) {
-		mlx4_warn(dev, "No mac entry for index %d\n", index);
-		goto out;
+	if (index < 0 || index >= table->max || !table->entries[index]) {
+		mlx4_warn(dev, "No valid Mac entry for the given index\n");
+		err = -EINVAL;
 	}
-	if (--table->refs[index]) {
-		mlx4_warn(dev, "Have more references for index %d,"
-			  "no need to modify mac table\n", index);
-		goto out;
+	return err;
+}
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_table *table = &info->mac_table;
+	int index = qpn - info->base_qpn;
+
+	if (mlx4_is_slave(dev)) {
+		mlx4_cmd(dev, qpn, RES_MAC, port,
+			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A);
+		return;
 	}
+
+	mutex_lock(&table->mutex);
+
+	if (validate_index(dev, table, index))
+		goto out;
+
 	table->entries[index] = 0;
-	mlx4_SET_PORT_mac_table(dev, port, table->entries);
+	mlx4_set_port_mac_table(dev, port, table->entries);
 	--table->total;
 out:
-	up(&table->mac_sem);
+	mutex_unlock(&table->mutex);
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
 
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_table *table = &info->mac_table;
+	int index = qpn - info->base_qpn;
+	int err;
+
+	if (mlx4_is_slave(dev)) {
+		err = mlx4_cmd_imm(dev, new_mac, (u64 *) &qpn, RES_MAC, port,
+				   MLX4_CMD_REPLACE_RES, MLX4_CMD_TIME_CLASS_A);
+		return err;
+	}
+
+	mutex_lock(&table->mutex);
+
+	err = validate_index(dev, table, index);
+	if (err)
+		goto out;
+
+	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
+
+	err = mlx4_set_port_mac_table(dev, port, table->entries);
+	if (unlikely(err)) {
+		mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+		table->entries[index] = 0;
+	}
+out:
+	mutex_unlock(&table->mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
 static int mlx4_SET_PORT_vlan_table(struct mlx4_dev *dev, u8 port,
 				    __be32 *entries)
 {
@@ -294,6 +353,129 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
 	return err;
 }
 
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							struct mlx4_cmd_mailbox *inbox,
+							struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_port_info *port_info;
+	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+	struct mlx4_set_port_rqp_calc_context *qpn_context;
+	struct mlx4_set_port_general_context *gen_context;
+	int reset_qkey_viols;
+	int port;
+	int is_eth;
+	u32 in_modifier;
+	u32 promisc;
+	u16 mtu, prev_mtu;
+	int err;
+	int i;
+	__be32 agg_cap_mask;
+	__be32 slave_cap_mask;
+	__be32 new_cap_mask;
+
+	port = vhcr->in_modifier & 0xff;
+	in_modifier = vhcr->in_modifier >> 8;
+	is_eth = vhcr->op_modifier;
+	port_info = &priv->port[port];
+
+	/* All slaves can perform SET_PORT operations; we just need to make
+	 * sure the shared resources stay unchanged */
+	if (is_eth) {
+		switch (in_modifier) {
+		case MLX4_SET_PORT_RQP_CALC:
+			qpn_context = inbox->buf;
+			qpn_context->base_qpn = cpu_to_be32(port_info->base_qpn);
+			qpn_context->n_mac = 0x7;
+			promisc = be32_to_cpu(qpn_context->promisc) >>
+				SET_PORT_PROMISC_SHIFT;
+			qpn_context->promisc = cpu_to_be32(
+				promisc << SET_PORT_PROMISC_SHIFT |
+				port_info->base_qpn);
+			promisc = be32_to_cpu(qpn_context->mcast) >>
+				SET_PORT_MC_PROMISC_SHIFT;
+			qpn_context->mcast = cpu_to_be32(
+				promisc << SET_PORT_MC_PROMISC_SHIFT |
+				port_info->base_qpn);
+			break;
+		case MLX4_SET_PORT_GENERAL:
+			gen_context = inbox->buf;
+			/* MTU is configured as the max MTU among all
+			 * the functions on the port. */
+			mtu = be16_to_cpu(gen_context->mtu);
+			mtu = max_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+			prev_mtu = slave_st->mtu[port];
+			slave_st->mtu[port] = mtu;
+			if (mtu > master->max_mtu[port])
+				master->max_mtu[port] = mtu;
+			if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
+				slave_st->mtu[port] = mtu;
+				master->max_mtu[port] = mtu;
+				for (i = 0; i < dev->num_slaves; i++) {
+					master->max_mtu[port] =
+						max(master->max_mtu[port],
+						    master->slave_state[i].mtu[port]);
+				}
+			}
+
+			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+			break;
+		}
+		return mlx4_cmd(dev, inbox->dma, vhcr->in_modifier,
+						 vhcr->op_modifier,
+						 MLX4_CMD_SET_PORT,
+						 MLX4_CMD_TIME_CLASS_B);
+	}
+
+	/* For IB, we only consider:
+	 * - The capability mask, which is set to the aggregate of all slave function
+	 *   capabilities
+	 * - The QKey violation counter - reset according to each request.
+	 */
+
+	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
+		new_cap_mask = ((__be32 *) inbox->buf)[2];
+	} else {
+		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
+		new_cap_mask = ((__be32 *) inbox->buf)[1];
+	}
+
+	/* CX1: only slave0 has access to qp0 */
+	if (slave && (new_cap_mask & cpu_to_be32(IB_PORT_SM))) {
+		mlx4_warn(dev, "denying sm port capability for slave:%d\n", slave);
+		return -EINVAL;
+	}
+
+	agg_cap_mask = 0;
+	slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
+	for (i = 0; i < dev->num_slaves; i++)
+		agg_cap_mask |= priv->mfunc.master.slave_state[i].ib_cap_mask[port];
+
+#if 0
+	mlx4_warn(dev, "old_slave_cap:0x%x slave_cap:0x%x cap:0x%x qkey_reset:%d\n",
+			slave_cap_mask, priv->mfunc.master.slave_state[slave].ib_cap_mask[port],
+			agg_cap_mask, reset_qkey_viols);
+#endif
+
+	memset(inbox->buf, 0, 256);
+	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+		*(u8 *) inbox->buf	   = !!reset_qkey_viols << 6;
+		((__be32 *) inbox->buf)[2] = agg_cap_mask;
+	} else {
+		((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
+		((__be32 *) inbox->buf)[1] = agg_cap_mask;
+	}
+
+	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B);
+	if (err)
+		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = slave_cap_mask;
+	return err;
+}
+
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -318,3 +500,199 @@ out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return err;
 }
+
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+				struct mlx4_cmd_mailbox *inbox,
+				struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int port = vhcr->in_modifier;
+	int err = 0;
+	u64 addr = vhcr->in_param & 0xffffffffffffULL;
+	u64 clear = vhcr->in_param >> 63;
+	struct mlx4_mcast_entry *entry, *tmp;
+	struct mlx4_slave_state *s_state = &priv->mfunc.master.slave_state[slave];
+	int i;
+
+	switch (vhcr->op_modifier) {
+	case MLX4_MCAST_DISABLE:
+		/* The multicast filter is disabled only once;
+		 * if some other function has already done it, the
+		 * operation is ignored */
+		if (!(priv->mfunc.master.disable_mcast_ref[port]++))
+			err = mlx4_cmd(dev, 0, port, MLX4_MCAST_DISABLE,
+					MLX4_CMD_SET_MCAST_FLTR,
+					MLX4_CMD_TIME_CLASS_B);
+		break;
+	case MLX4_MCAST_ENABLE:
+		/* We enable the multicast filter only if all functions
+		 * have the filter enabled */
+		if (!(--priv->mfunc.master.disable_mcast_ref[port]))
+			err = mlx4_cmd(dev, 0, port, MLX4_MCAST_ENABLE,
+					MLX4_CMD_SET_MCAST_FLTR,
+					MLX4_CMD_TIME_CLASS_B);
+		break;
+	case MLX4_MCAST_CONFIG:
+		if (clear) {
+			/* Disable the multicast filter while updating it */
+			if (!priv->mfunc.master.disable_mcast_ref[port]) {
+				err = mlx4_cmd(dev, 0, port, MLX4_MCAST_DISABLE,
+						MLX4_CMD_SET_MCAST_FLTR,
+						MLX4_CMD_TIME_CLASS_B);
+				if (err) {
+					mlx4_warn(dev, "Failed to disable multicast "
+						       "filter\n");
+					goto out;
+				}
+			}
+			/* Clear the multicast filter */
+			err = mlx4_cmd(dev, clear << 63, port,
+				       MLX4_MCAST_CONFIG,
+				       MLX4_CMD_SET_MCAST_FLTR,
+				       MLX4_CMD_TIME_CLASS_B);
+			if (err) {
+				mlx4_warn(dev, "Failed clearing the multicast filter\n");
+				goto out;
+			}
+
+			/* Clear the multicast addresses for the given slave */
+			list_for_each_entry_safe(entry, tmp,
+						 &s_state->mcast_filters[port],
+						 list) {
+				list_del(&entry->list);
+				kfree(entry);
+			}
+
+			/* Assign all the multicast addresses that still exist */
+			for (i = 0; i < dev->num_slaves; i++) {
+				list_for_each_entry(entry,
+					&priv->mfunc.master.slave_state[i].mcast_filters[port],
+					list) {
+					if (mlx4_cmd(dev, entry->addr, port,
+						     MLX4_MCAST_CONFIG,
+						     MLX4_CMD_SET_MCAST_FLTR,
+						     MLX4_CMD_TIME_CLASS_B))
+						mlx4_warn(dev, "Failed to reconfigure "
+							  "multicast address: 0x%llx\n",
+							  entry->addr);
+				}
+			}
+			/* Enable the filter */
+			if (!priv->mfunc.master.disable_mcast_ref[port]) {
+				err = mlx4_cmd(dev, 0, port, MLX4_MCAST_ENABLE,
+						MLX4_CMD_SET_MCAST_FLTR,
+						MLX4_CMD_TIME_CLASS_B);
+				if (err) {
+					mlx4_warn(dev, "Failed to enable multicast "
+						       "filter\n");
+					goto out;
+				}
+			}
+		}
+		/* Add the new address if one was given */
+		if (addr) {
+			entry = kzalloc(sizeof (struct mlx4_mcast_entry),
+					GFP_KERNEL);
+			if (!entry) {
+				mlx4_warn(dev, "Failed to allocate entry for "
+					       "muticast address\n");
+				err = -ENOMEM;
+				goto out;
+			}
+			INIT_LIST_HEAD(&entry->list);
+			entry->addr = addr;
+			list_add_tail(&entry->list, &s_state->mcast_filters[port]);
+			err = mlx4_cmd(dev, addr, port, MLX4_MCAST_CONFIG,
+				       MLX4_CMD_SET_MCAST_FLTR,
+				       MLX4_CMD_TIME_CLASS_B);
+			if (err)
+				mlx4_warn(dev, "Failed to add the new address:"
+					       "0x%llx\n", addr);
+		}
+		break;
+	default:
+		mlx4_warn(dev, "SET_MCAST_FILTER called with illegal modifier\n");
+		err = -EINVAL;
+	}
+out:
+	return err;
+}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+			u64 mac, u64 clear, u8 mode)
+{
+	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
+
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_vlan_fltr *filter;
+	struct mlx4_slave_state *s_state = &priv->mfunc.master.slave_state[slave];
+	int port = vhcr->in_modifier;
+	int i, j, err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	/* Update the slave's VLAN filter */
+	memcpy(s_state->vlan_filter[port].entry, inbox->buf,
+	       sizeof(struct mlx4_vlan_fltr));
+
+	/* We configure the VLAN filter to allow the VLANs of
+	 * all slaves */
+	filter = mailbox->buf;
+	memset(filter, 0, sizeof(*filter));
+	for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
+		for (j = 0; j < dev->num_slaves; j++) {
+			s_state = &priv->mfunc.master.slave_state[j];
+			filter->entry[i] |= s_state->vlan_filter[port].entry[i];
+		}
+	}
+	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
+		       MLX4_CMD_TIME_CLASS_B);
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_vlan_fltr *filter;
+	int i;
+	int j;
+	int index = 0;
+	u32 entry;
+	int err = 0;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	filter = mailbox->buf;
+	if (grp) {
+		memset(filter, 0, sizeof *filter);
+		for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
+			entry = 0;
+			for (j = 0; j < 32; j++)
+				if (vlan_group_get_device(grp, index++))
+					entry |= 1 << j;
+			filter->entry[i] = cpu_to_be32(entry);
+		}
+	} else {
+		/* When no VLANs are configured we block all VLANs */
+		memset(filter, 0, sizeof(*filter));
+	}
+	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
+		       MLX4_CMD_TIME_CLASS_B);
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_SET_VLAN_FLTR);
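
mlx4_SET_VLAN_FLTR() above builds a 4096-bit VLAN bitmap, filling
filter->entry[] from the last slot downward while walking VLAN ids upward,
so entry[VLAN_FLTR_SIZE - 1] covers vids 0-31. A sketch of the resulting
id-to-bit mapping (assuming VLAN_FLTR_SIZE is 4096 / 32 as in the driver
headers; the helper itself is illustrative):

/* Map a VLAN id to its slot and bit in the filter mailbox,
 * mirroring the reverse fill order of mlx4_SET_VLAN_FLTR(). */
static void example_vlan_fltr_bit(u16 vid, int *slot, int *bit)
{
	*slot = (4096 / 32 - 1) - (vid / 32);
	*bit  = vid % 32;
}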
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 5958874..0b3a27a 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,7 +107,19 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_AUXC].num    = request->num_qp;
 	profile[MLX4_RES_SRQ].num     = request->num_srq;
 	profile[MLX4_RES_CQ].num      = request->num_cq;
-	profile[MLX4_RES_EQ].num      = MLX4_NUM_EQ + dev_cap->reserved_eqs;
+	if (mlx4_is_master(dev)) {
+		profile[MLX4_RES_EQ].num = dev_cap->reserved_eqs +
+					   MLX4_MFUNC_EQ_NUM *
+					   (dev->num_slaves + 1);
+		if (profile[MLX4_RES_EQ].num > dev_cap->max_eqs) {
+			mlx4_warn(dev, "Not enough eqs for:%ld slave functions\n", dev->num_slaves);
+			kfree(profile);
+			return -ENOMEM;
+		}
+	} else
+		profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs,
+						 dev_cap->reserved_eqs +
+						 num_possible_cpus() + 1);
 	profile[MLX4_RES_DMPT].num    = request->num_mpt;
 	profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
 	profile[MLX4_RES_MTT].num     = request->num_mtt;
@@ -196,7 +208,13 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 			init_hca->log_num_cqs = profile[i].log_num;
 			break;
 		case MLX4_RES_EQ:
-			dev->caps.num_eqs     = profile[i].num;
+			if (mlx4_is_master(dev)) {
+				dev->caps.num_eqs = dev_cap->reserved_eqs +
+						    min_t(unsigned,
+							  MLX4_MFUNC_EQ_NUM,
+							  num_possible_cpus() + 1);
+			} else
+				dev->caps.num_eqs     = profile[i].num;
 			init_hca->eqc_base    = profile[i].start;
 			init_hca->log_num_eqs = profile[i].log_num;
 			break;
@@ -232,6 +250,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	 * of the HCA profile anyway.
 	 */
 	dev->caps.num_pds = MLX4_NUM_PDS;
+	dev->caps.slave_pd_shift = MLX4_SLAVE_PD_SHIFT;
+	dev->caps.pd_base = 0;
 
 	kfree(profile);
 	return total_size;
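
The master's EQ budget above is reserved_eqs plus MLX4_MFUNC_EQ_NUM EQs per
function, counting the master itself. A sketch of the arithmetic (the
numbers in the comment are illustrative, not taken from the patch):

/* E.g. with reserved_eqs = 16, MLX4_MFUNC_EQ_NUM = 4 and 7 slaves,
 * the profile asks for 16 + 4 * (7 + 1) = 48 EQs, and bails out
 * with -ENOMEM when dev_cap->max_eqs is smaller. */
static int example_master_eq_budget(int reserved_eqs, int eq_per_func,
				    unsigned long num_slaves)
{
	return reserved_eqs + eq_per_func * (int) (num_slaves + 1);
}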
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 9c9f1a2..6cc8627 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -147,17 +147,76 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_modify);
 
+u32 mlx4_get_slave_sqp(struct mlx4_dev *dev, int slave)
+{
+	if (mlx4_is_master(dev) && slave < dev->num_slaves) {
+		return mlx4_priv(dev)->mfunc.master.slave_state[slave].sqp_start;
+	}
+	if (mlx4_is_slave(dev) && slave < dev->caps.sqp_demux) {
+		return mlx4_priv(dev)->mfunc.demux_sqp[slave];
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_slave_sqp);
+
+int mlx4_GET_SLAVE_SQP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+							  struct mlx4_cmd_mailbox *inbox,
+							  struct mlx4_cmd_mailbox *outbox)
+{
+	u32 *slave_sqp = outbox->buf;
+	int i;
+
+	/* CX1: special qp demultiplexing is done by slave0 */
+	if (slave) {
+		mlx4_warn(dev, "Denying slave_sqp request from slave:%d\n", slave);
+		return -EINVAL;
+	}
+	for (i = 0; i < 64; i++)
+		slave_sqp[i] = mlx4_get_slave_sqp(dev, i);
+	return 0;
+}
+
+int mlx4_GET_SLAVE_SQP(struct mlx4_dev *dev, u32 *sqp, int num)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_GET_SLAVE_SQP,
+			   MLX4_CMD_TIME_CLASS_A);
+	if (!err)
+		memcpy(sqp, mailbox->buf, sizeof (u32) * num);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_GET_SLAVE_SQP);
+
 int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
-	int qpn;
-
-	qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
-	if (qpn == -1)
-		return -ENOMEM;
+	u64 in_param;
+	u64 out_param;
+	int err;
 
-	*base = qpn;
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &in_param) = cnt;
+		*(((u32 *) &in_param) + 1) = align;
+		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_QP, ICM_RESERVE,
+							      MLX4_CMD_ALLOC_RES,
+							      MLX4_CMD_TIME_CLASS_A);
+		if (err)
+			return err;
+		*base = out_param;
+	} else {
+		*base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+		if (*base == -1)
+			return -ENOMEM;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
@@ -166,73 +225,133 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
-	if (base_qpn < dev->caps.sqp_start + 8)
-		return;
+	u64 in_param;
+	int err;
 
-	mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &in_param) = base_qpn;
+		*(((u32 *) &in_param) + 1) = cnt;
+		err = mlx4_cmd(dev, in_param, RES_QP, ICM_RESERVE,
+						      MLX4_CMD_FREE_RES,
+						      MLX4_CMD_TIME_CLASS_A);
+		if (err) {
+			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
+									base_qpn, cnt);
+		}
+	} else {
+		if (base_qpn < dev->caps.sqp_start + 8)
+			return;
+		mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+	}
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
 
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_qp_table *qp_table = &priv->qp_table;
+	u64 param;
 	int err;
 
-	if (!qpn)
-		return -EINVAL;
-
-	qp->qpn = qpn;
-
-	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &param) = qpn;
+		*(((u32 *) &param) + 1) = 0;
+		return mlx4_cmd_imm(dev, param, &param, RES_QP, ICM_ALLOC,
+								MLX4_CMD_ALLOC_RES,
+								MLX4_CMD_TIME_CLASS_A);
+	}
+	err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
 	if (err)
 		goto err_out;
 
-	err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
 	if (err)
 		goto err_put_qp;
 
-	err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+	err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
 	if (err)
 		goto err_put_auxc;
 
-	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
 	if (err)
 		goto err_put_altc;
 
-	err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
 	if (err)
 		goto err_put_rdmarc;
 
-	spin_lock_irq(&qp_table->lock);
-	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
-	spin_unlock_irq(&qp_table->lock);
-	if (err)
-		goto err_put_cmpt;
-
-	atomic_set(&qp->refcount, 1);
-	init_completion(&qp->free);
-
 	return 0;
 
-err_put_cmpt:
-	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
 err_put_rdmarc:
-	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
 
 err_put_altc:
-	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+	mlx4_table_put(dev, &qp_table->altc_table, qpn);
 
 err_put_auxc:
-	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
 
 err_put_qp:
-	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+	mlx4_table_put(dev, &qp_table->qp_table, qpn);
 
 err_out:
 	return err;
 }
+
+void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &in_param) = qpn;
+		*(((u32 *) &in_param) + 1) = 0;
+		err = mlx4_cmd(dev, in_param, RES_QP, ICM_ALLOC,
+						      MLX4_CMD_FREE_RES,
+						      MLX4_CMD_TIME_CLASS_A);
+		if (err)
+			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+	} else {
+		mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+		mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+		mlx4_table_put(dev, &qp_table->altc_table, qpn);
+		mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+		mlx4_table_put(dev, &qp_table->qp_table, qpn);
+	}
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_qp_table *qp_table = &priv->qp_table;
+	int err;
+
+	if (!qpn)
+		return -EINVAL;
+
+	qp->qpn = qpn;
+
+	err = mlx4_qp_alloc_icm(dev, qpn);
+	if (err)
+		return err;
+
+	spin_lock_irq(&qp_table->lock);
+	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
+	spin_unlock_irq(&qp_table->lock);
+	if (err)
+		goto err_icm;
+
+	atomic_set(&qp->refcount, 1);
+	init_completion(&qp->free);
+
+	return 0;
+
+err_icm:
+	mlx4_qp_free_icm(dev, qpn);
+	return err;
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,18 +367,11 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
 
 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
-	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
 	if (atomic_dec_and_test(&qp->refcount))
 		complete(&qp->free);
 	wait_for_completion(&qp->free);
 
-	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
-	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
-
+	mlx4_qp_free_icm(dev, qp->qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_free);
 
@@ -270,6 +382,25 @@ static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
 			MLX4_CMD_CONF_SPECIAL_QP, MLX4_CMD_TIME_CLASS_B);
 }
 
+int mlx4_CONF_SPECIAL_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+						       struct mlx4_cmd_mailbox *inbox,
+						       struct mlx4_cmd_mailbox *outbox)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int ret;
+
+	priv->mfunc.master.slave_state[slave].sqp_start = vhcr->in_modifier & 0xffffff;
+	if (!slave) {
+		/* CX1: slave0 owns real special QPs */
+		ret = mlx4_CONF_SPECIAL_QP(dev, priv->mfunc.master.slave_state[slave].sqp_start);
+		if (ret)
+			return ret;
+	}
+	/* Notify slave0 that an SQP change occurred */
+	mlx4_slave_event(dev, 0, MLX4_EVENT_TYPE_SQP_UPDATE, 0, 0);
+	return 0;
+}
+
 int mlx4_init_qp_table(struct mlx4_dev *dev)
 {
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
@@ -278,6 +409,23 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
 	spin_lock_init(&qp_table->lock);
 	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+	if (mlx4_is_slave(dev)) {
+		/* For each slave, just allocate a normal 8-byte aligned special-QP
+		 * range instead of the mlx4_init_qp_table() reservation */
+		err = mlx4_qp_reserve_range(dev, 8, 8, &dev->caps.sqp_start);
+		if (err) {
+			mlx4_err(dev, "Failed to allocate special QP range\n");
+			return err;
+		}
+
+		err = mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
+		if (err) {
+			mlx4_err(dev, "Failed to configure special QP range\n");
+			mlx4_qp_release_range(dev, dev->caps.sqp_start, 8);
+			return err;
+		}
+		return 0;
+	}
 
 	/*
 	 * We reserve 2 extra QPs per port for the special QPs.  The
@@ -287,6 +435,10 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 	dev->caps.sqp_start =
 		ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
 
+	/* If multi-function is enabled, we reserve an additional QP for qp0/1 tunneling.
+	 * CX1: slave0 manages tunnel QP */
+	dev->caps.tunnel_qpn = mlx4_is_master(dev) ? dev->caps.sqp_start + 8 : 0;
+
 	{
 		int sort[MLX4_QP_REGION_COUNT];
 		int i, j, tmp;
@@ -316,17 +468,25 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 	}
 
 	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
-			       (1 << 23) - 1, dev->caps.sqp_start + 8,
-			       reserved_from_top);
+			       (1 << 23) - 1, dev->caps.sqp_start + 8 +
+			       2 * !!dev->caps.tunnel_qpn, reserved_from_top);
 	if (err)
 		return err;
 
+	/* CX1: master has no QPs */
+	if (mlx4_is_master(dev))
+		return 0;
+
 	return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
 }
 
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
 {
 	mlx4_CONF_SPECIAL_QP(dev, 0);
+	if (mlx4_is_slave(dev)) {
+		mlx4_qp_release_range(dev, dev->caps.sqp_start, 8);
+		return;
+	}
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
 }
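
With the split above, a slave obtains a usable QP in two virtual commands:
mlx4_qp_reserve_range() issues RES_QP/ICM_RESERVE for the QPN range and
mlx4_qp_alloc() issues RES_QP/ICM_ALLOC for the per-QPN ICM; teardown
mirrors it. A sketch of the whole cycle using the exported entry points
(error handling trimmed to the essentials):

static int example_slave_qp_cycle(struct mlx4_dev *dev)
{
	struct mlx4_qp qp;
	int base, err;

	err = mlx4_qp_reserve_range(dev, 1, 1, &base);	/* ICM_RESERVE */
	if (err)
		return err;

	err = mlx4_qp_alloc(dev, base, &qp);		/* ICM_ALLOC */
	if (err) {
		mlx4_qp_release_range(dev, base, 1);
		return err;
	}

	/* ... bring the QP up and use it ... */

	mlx4_qp_remove(dev, &qp);		/* drop the radix entry */
	mlx4_qp_free(dev, &qp);			/* waits, then frees ICM */
	mlx4_qp_release_range(dev, base, 1);	/* return the QPN */
	return 0;
}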
 
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index fe9f218..381a1fb 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -110,32 +110,86 @@ static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 			    MLX4_CMD_TIME_CLASS_A);
 }
 
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
-		   u64 db_rec, struct mlx4_srq *srq)
+int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
 {
 	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
-	struct mlx4_cmd_mailbox *mailbox;
-	struct mlx4_srq_context *srq_context;
-	u64 mtt_addr;
+	u64 out_param;
 	int err;
 
-	srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
-	if (srq->srqn == -1)
+	if (mlx4_is_slave(dev)) {
+		err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+						       ICM_RESERVE_AND_ALLOC,
+						       MLX4_CMD_ALLOC_RES,
+						       MLX4_CMD_TIME_CLASS_A);
+		if (err) {
+			*srqn = -1;
+			return err;
+		} else {
+			*srqn = out_param;
+			return 0;
+		}
+	}
+
+	*srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+	if (*srqn == -1)
 		return -ENOMEM;
 
-	err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+	err = mlx4_table_get(dev, &srq_table->table, *srqn);
 	if (err)
 		goto err_out;
 
-	err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
 	if (err)
 		goto err_put;
+	return 0;
+
+err_put:
+	mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+	mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+	return err;
+}
+
+void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_slave(dev)) {
+		*((u32 *) &in_param) = srqn;
+		*(((u32 *) &in_param) + 1) = 0;
+		err = mlx4_cmd(dev, in_param, RES_SRQ, ICM_RESERVE_AND_ALLOC,
+						       MLX4_CMD_FREE_RES,
+						       MLX4_CMD_TIME_CLASS_A);
+		if (err)
+			mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
+	} else {
+		mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+		mlx4_table_put(dev, &srq_table->table, srqn);
+		mlx4_bitmap_free(&srq_table->bitmap, srqn);
+	}
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
+		   u64 db_rec, struct mlx4_srq *srq)
+{
+	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_srq_context *srq_context;
+	u64 mtt_addr;
+	int err;
+
+	err = mlx4_srq_alloc_icm(dev, &srq->srqn);
+	if (err)
+		return err;
 
 	spin_lock_irq(&srq_table->lock);
 	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
 	spin_unlock_irq(&srq_table->lock);
 	if (err)
-		goto err_cmpt_put;
+		goto err_icm;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
@@ -172,15 +226,8 @@ err_radix:
 	radix_tree_delete(&srq_table->tree, srq->srqn);
 	spin_unlock_irq(&srq_table->lock);
 
-err_cmpt_put:
-	mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
-	mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
-	mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+	mlx4_srq_free_icm(dev, srq->srqn);
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -202,8 +249,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
 		complete(&srq->free);
 	wait_for_completion(&srq->free);
 
-	mlx4_table_put(dev, &srq_table->table, srq->srqn);
-	mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+	mlx4_srq_free_icm(dev, srq->srqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_free);
 
@@ -243,6 +289,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
 
 	spin_lock_init(&srq_table->lock);
 	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+	if (mlx4_is_slave(dev))
+		return 0;
 
 	err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
 			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -254,5 +302,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
 {
+	if (mlx4_is_slave(dev))
+		return;
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
 }
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 0f82293..271fd4e 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -56,6 +56,7 @@ enum {
 	MLX4_CMD_QUERY_HCA	 = 0xb,
 	MLX4_CMD_QUERY_PORT	 = 0x43,
 	MLX4_CMD_SENSE_PORT	 = 0x4d,
+	MLX4_CMD_HW_HEALTH_CHECK = 0x50,
 	MLX4_CMD_SET_PORT	 = 0xc,
 	MLX4_CMD_ACCESS_DDR	 = 0x2e,
 	MLX4_CMD_MAP_ICM	 = 0xffa,
@@ -117,6 +118,21 @@ enum {
 	/* miscellaneous commands */
 	MLX4_CMD_DIAG_RPRT	 = 0x30,
 	MLX4_CMD_NOP		 = 0x31,
+	MLX4_CMD_ACCESS_MEM	 = 0x2e,
+
+	/* Ethernet specific commands */
+	MLX4_CMD_SET_VLAN_FLTR	 = 0x47,
+	MLX4_CMD_SET_MCAST_FLTR	 = 0x48,
+	MLX4_CMD_DUMP_ETH_STATS	 = 0x49,
+
+	/* virtual commands */
+	MLX4_CMD_ALLOC_RES	 = 0x50,
+	MLX4_CMD_FREE_RES	 = 0x51,
+	MLX4_CMD_REPLACE_RES	 = 0x52,
+	MLX4_CMD_GET_EVENT	 = 0x53,
+	MLX4_CMD_QUERY_SLAVE_CAP = 0x54,
+	MLX4_CMD_MCAST_ATTACH	 = 0x55,
+	MLX4_CMD_GET_SLAVE_SQP	 = 0x56,
 
 	/* debug commands */
 	MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
@@ -130,7 +146,8 @@ enum {
 };
 
 enum {
-	MLX4_MAILBOX_SIZE	=  4096
+	MLX4_MAILBOX_SIZE	= 4096,
+	MLX4_ACCESS_MEM_ALIGN	= 256,
 };
 
 enum {
@@ -140,6 +157,7 @@ enum {
 	MLX4_SET_PORT_MAC_TABLE = 0x2,
 	MLX4_SET_PORT_VLAN_TABLE = 0x3,
 	MLX4_SET_PORT_PRIO_MAP  = 0x4,
+	MLX4_SET_PORT_MODIFIERS
 };
 
 struct mlx4_dev;
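
From a slave, the 0x50-0x56 opcodes above do not go to the firmware; they
are handled by the master's command wrappers (mlx4_WRITE_MTT_wrapper and
friends earlier in this patch). A sketch of the calling convention on the
slave side, modeled on the call sites in mr.c (the helper name is
illustrative):

/* Ask the master to reserve an MPT entry; the allocated index
 * comes back in the immediate out parameter. */
static int example_reserve_mpt(struct mlx4_dev *dev, u32 *index)
{
	u64 out_param;
	int err;

	err = mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, ICM_RESERVE,
			   MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A);
	if (!err)
		*index = out_param;
	return err;
}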
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ad9c772..f0e82ae 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -42,7 +42,10 @@
 enum {
 	MLX4_FLAG_MSI_X		= 1 << 0,
 	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
-	MLX4_FLAG_NOT_PRIME	= 1 << 2,
+	MLX4_FLAG_MASTER	= 1 << 2,
+	MLX4_FLAG_SLAVE		= 1 << 3,
+	MLX4_FLAG_SRIOV		= 1 << 4,
+	MLX4_FLAG_NOT_PRIME	= 1 << 5,
 };
 
 enum {
@@ -97,7 +100,9 @@ enum mlx4_event {
 	MLX4_EVENT_TYPE_PORT_CHANGE	   = 0x09,
 	MLX4_EVENT_TYPE_EQ_OVERFLOW	   = 0x0f,
 	MLX4_EVENT_TYPE_ECC_DETECT	   = 0x0e,
-	MLX4_EVENT_TYPE_CMD		   = 0x0a
+	MLX4_EVENT_TYPE_CMD		   = 0x0a,
+	MLX4_EVENT_TYPE_SQP_UPDATE	   = 0xfe,
+	MLX4_EVENT_TYPE_NONE		   = 0xff,
 };
 
 enum {
@@ -144,6 +149,13 @@ enum {
 	MLX4_STAT_RATE_OFFSET	= 5
 };
 
+enum mlx4_protocol {
+	MLX4_PROT_IB_IPV6 = 0,
+	MLX4_PROT_ETH,
+	MLX4_PROT_IB_IPV4,
+	MLX4_PROT_FCOE
+};
+
 enum {
 	MLX4_MTT_FLAG_PRESENT		= 1
 };
@@ -190,6 +202,7 @@ struct mlx4_caps {
 	u64			trans_code[MLX4_MAX_PORTS + 1];
 	int			local_ca_ack_delay;
 	int			num_uars;
+	int			uar_page_size;
 	int			bf_reg_size;
 	int			bf_regs_per_page;
 	int			max_sq_sg;
@@ -201,6 +214,7 @@ struct mlx4_caps {
 	int			max_qp_init_rdma;
 	int			max_qp_dest_rdma;
 	int			sqp_start;
+	int			tunnel_qpn;
 	int			num_srqs;
 	int			max_srq_wqes;
 	int			max_srq_sge;
@@ -212,6 +226,7 @@ struct mlx4_caps {
 	int			eqc_entry_size;
 	int			reserved_eqs;
 	int			num_comp_vectors;
+	int			num_msix;
 	int			num_mpts;
 	int			num_mtt_segs;
 	int			mtts_per_seg;
@@ -225,6 +240,8 @@ struct mlx4_caps {
 	int			num_qp_per_mgm;
 	int			num_pds;
 	int			reserved_pds;
+	int			slave_pd_shift;
+	int			pd_base;
 	int			mtt_entry_sz;
 	u32			max_msg_sz;
 	u32			page_size_cap;
@@ -232,6 +249,7 @@ struct mlx4_caps {
 	u32			bmme_flags;
 	u32			reserved_lkey;
 	u16			stat_rate_support;
+	int			loopback_support;
 	u8			port_width_cap[MLX4_MAX_PORTS + 1];
 	int			max_gso_sz;
 	int                     reserved_qps_cnt[MLX4_QP_REGION_COUNT];
@@ -242,6 +260,7 @@ struct mlx4_caps {
 	int                     log_num_prios;
 	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
 	u8			supported_type[MLX4_MAX_PORTS + 1];
+	u8                      sqp_demux;
 	u32			port_mask;
 	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
 };
@@ -379,6 +398,7 @@ struct mlx4_av {
 struct mlx4_dev {
 	struct pci_dev	       *pdev;
 	unsigned long		flags;
+	unsigned long		num_slaves;
 	struct mlx4_caps	caps;
 	struct radix_tree_root	qp_table_tree;
 	u32			rev_id;
@@ -412,6 +432,21 @@ static inline void mlx4_query_steer_cap(struct mlx4_dev *dev, int *log_mac,
 		if ((type == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
 		     ~(dev)->caps.port_mask) & 1 << ((port)-1))
 
+static inline int mlx4_is_slave(struct mlx4_dev *dev)
+{
+	return dev->flags & MLX4_FLAG_SLAVE;
+}
+
+static inline int mlx4_is_master(struct mlx4_dev *dev)
+{
+	return dev->flags & MLX4_FLAG_MASTER;
+}
+
+static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
+{
+	return dev->flags & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE);
+}
+
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		   struct mlx4_buf *buf);
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -468,6 +503,7 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
 
 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
+u32 mlx4_get_slave_sqp(struct mlx4_dev *dev, int vf);
 
 int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
 		   u64 db_rec, struct mlx4_srq *srq);
@@ -479,11 +515,14 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback);
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
+			  int block_mcast_loopback, enum mlx4_protocol prot);
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol prot);
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn);
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
 
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
@@ -506,5 +545,6 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
 int mlx4_query_diag_counters(struct mlx4_dev *mlx4_dev, int array_length,
 			     u8 op_modifier, u32 in_offset[], u32 counter_out[]);
+int mlx4_test_interrupts(struct mlx4_dev *dev);
 
 #endif /* MLX4_DEVICE_H */
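
The multicast attach/detach entry points now take an enum mlx4_protocol:
the IB path in main.c at the top of this patch passes MLX4_PROT_IB_IPV6,
and an Ethernet caller would pass MLX4_PROT_ETH. A minimal usage sketch
under those assumptions:

/* Join and then leave a multicast group on an Ethernet QP; 'qp'
 * and 'gid' are assumed to be set up by the caller. */
static int example_mcast_join_leave(struct mlx4_dev *dev,
				    struct mlx4_qp *qp, u8 gid[16])
{
	int err;

	err = mlx4_multicast_attach(dev, qp, gid,
				    0 /* don't block loopback */,
				    MLX4_PROT_ETH);
	if (err)
		return err;

	return mlx4_multicast_detach(dev, qp, gid, MLX4_PROT_ETH);
}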