kernel-2.6.18-194.11.1.el5.src.rpm

From: mchristi@redhat.com <mchristi@redhat.com>
Date: Wed, 22 Apr 2009 02:10:19 -0500
Subject: [scsi] update libfc/fcoe for RHEL-5.4
Message-id: 1240384221-15379-1-git-send-email-mchristi@redhat.com
O-Subject: [PATCH 1/3] RHEL 5.4: update libfc/fcoe
Bugzilla: 484438
RH-Acked-by: David Miller <davem@redhat.com>

From: Mike Christie <mchristi@redhat.com>

This is for BZ 484438.

This part of the patchset updates libfc and fcoe. It syncs us with
the libfc/fcoe code in the scsi maintainer's tree, up to this
last commit:

[SCSI] fcoe, libfcoe: Add support for FIP. FCoE discovery and
keep-alive.
97c8389d54b9665c38105ea72a428a44b97ff2f6

It also adds some patches that were just sent to the lists:
http://marc.info/?l=linux-scsi&m=123879588506805&w=2
The scsi maintainer did not like the original patchset,
so I have taken the rebuilt patches from the fcoe maintainer's
tree, which he is going to resend per the scsi maintainer's
comments.

This also fixes a bug I found last night while testing:
http://www.open-fcoe.org/pipermail/devel/2009-April/002273.html
and partially reverts a patch that was causing a throughput regression:
http://www.open-fcoe.org/pipermail/devel/2009-April/002184.html

I have tested this here by doing disktest and bonnie++ while
disabling and enabling ports and pulling cables.
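
For reviewers: below is a minimal, illustrative sketch (not part of the
patch) of how the new fcoe.c derives the FC maximum frame size (MFS)
from the underlying netdev MTU, as done in fcoe_netdev_config() and in
the NETDEV_CHANGEMTU notifier path. Header and struct names are the
ones the patch uses; treat it as a reading aid, not code to apply.

/*
 * Illustrative only -- not part of this patch.  Every FCoE frame on
 * the wire carries an FCoE header and a CRC/EOF trailer, so the
 * usable FC frame size is the Ethernet MTU minus both.
 */
#include <linux/netdevice.h>
#include <scsi/fc/fc_fcoe.h>	/* struct fcoe_hdr, struct fcoe_crc_eof */

static u32 example_fcoe_mfs(const struct net_device *netdev)
{
	return netdev->mtu - (sizeof(struct fcoe_hdr) +
			      sizeof(struct fcoe_crc_eof));
}

fc_set_mfs() rejects the result if it falls below FC_MIN_MAX_FRAME,
which is why the NETDEV_CHANGEMTU handler checks the new value before
applying it.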

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 38abac7..de3be26 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -266,10 +266,17 @@ config LIBFC
    ---help---
           Fibre Channel library module
 
+config LIBFCOE
+	tristate "LibFCoE module"
+	select LIBFC
+	---help---
+	  Library for Fibre Channel over Ethernet module
+
 config FCOE
 	tristate "FCoE module"
 	depends on SCSI && SCSI_FC_ATTRS
 	select LIBFC
+	select LIBFCOE
 	---help---
 	  Fibre Channel over Ethernet module
 
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index b78da06..950f276 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,8 +1,2 @@
-# $Id: Makefile
-
 obj-$(CONFIG_FCOE) += fcoe.o
-
-fcoe-y := \
-	libfcoe.o \
-	fcoe_sw.o \
-	fc_transport_fcoe.o
+obj-$(CONFIG_LIBFCOE) += libfcoe.o
diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
deleted file mode 100644
index 7944c07..0000000
--- a/drivers/scsi/fcoe/fc_transport_fcoe.c
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Maintained at www.Open-FCoE.org
- */
-
-#include <linux/pci.h>
-#include <scsi/libfcoe.h>
-#include <scsi/fc_transport_fcoe.h>
-
-/* internal fcoe transport */
-struct fcoe_transport_internal {
-	struct fcoe_transport *t;
-	struct net_device *netdev;
-	struct list_head list;
-};
-
-/* fcoe transports list and its lock */
-static LIST_HEAD(fcoe_transports);
-static DEFINE_MUTEX(fcoe_transports_lock);
-
-/**
- * fcoe_transport_default - returns ptr to the default transport fcoe_sw
- **/
-struct fcoe_transport *fcoe_transport_default(void)
-{
-	return &fcoe_sw_transport;
-}
-
-/**
- * fcoe_transport_device_lookup - find out netdev is managed by the
- * transport
- * assign a transport to a device
- * @netdev: the netdev the transport to be attached to
- *
- * This will look for existing offload driver, if not found, it falls back to
- * the default sw hba (fcoe_sw) as its fcoe transport.
- *
- * Returns: 0 for success
- **/
-static struct fcoe_transport_internal *fcoe_transport_device_lookup(
-	struct fcoe_transport *t, struct net_device *netdev)
-{
-	struct fcoe_transport_internal *ti;
-
-	/* assign the transpor to this device */
-	mutex_lock(&t->devlock);
-	list_for_each_entry(ti, &t->devlist, list) {
-		if (ti->netdev == netdev) {
-			mutex_unlock(&t->devlock);
-			return ti;
-		}
-	}
-	mutex_unlock(&t->devlock);
-	return NULL;
-}
-/**
- * fcoe_transport_device_add - assign a transport to a device
- * @netdev: the netdev the transport to be attached to
- *
- * This will look for existing offload driver, if not found, it falls back to
- * the default sw hba (fcoe_sw) as its fcoe transport.
- *
- * Returns: 0 for success
- **/
-static int fcoe_transport_device_add(struct fcoe_transport *t,
-				     struct net_device *netdev)
-{
-	struct fcoe_transport_internal *ti;
-
-	ti = fcoe_transport_device_lookup(t, netdev);
-	if (ti) {
-		printk(KERN_DEBUG "fcoe_transport_device_add:"
-		       "device %s is already added to transport %s\n",
-		       netdev->name, t->name);
-		return -EEXIST;
-	}
-	/* allocate an internal struct to host the netdev and the list */
-	ti = kzalloc(sizeof(*ti), GFP_KERNEL);
-	if (!ti)
-		return -ENOMEM;
-
-	ti->t = t;
-	ti->netdev = netdev;
-	INIT_LIST_HEAD(&ti->list);
-	dev_hold(ti->netdev);
-
-	mutex_lock(&t->devlock);
-	list_add(&ti->list, &t->devlist);
-	mutex_unlock(&t->devlock);
-
-	printk(KERN_DEBUG "fcoe_transport_device_add:"
-		       "device %s added to transport %s\n",
-		       netdev->name, t->name);
-
-	return 0;
-}
-
-/**
- * fcoe_transport_device_remove - remove a device from its transport
- * @netdev: the netdev the transport to be attached to
- *
- * this removes the device from the transport so the given transport will
- * not manage this device any more
- *
- * Returns: 0 for success
- **/
-static int fcoe_transport_device_remove(struct fcoe_transport *t,
-					struct net_device *netdev)
-{
-	struct fcoe_transport_internal *ti;
-
-	ti = fcoe_transport_device_lookup(t, netdev);
-	if (!ti) {
-		printk(KERN_DEBUG "fcoe_transport_device_remove:"
-		       "device %s is not managed by transport %s\n",
-		       netdev->name, t->name);
-		return -ENODEV;
-	}
-	mutex_lock(&t->devlock);
-	list_del(&ti->list);
-	mutex_unlock(&t->devlock);
-	printk(KERN_DEBUG "fcoe_transport_device_remove:"
-	       "device %s removed from transport %s\n",
-	       netdev->name, t->name);
-	dev_put(ti->netdev);
-	kfree(ti);
-	return 0;
-}
-
-/**
- * fcoe_transport_device_remove_all - remove all from transport devlist
- *
- * this removes the device from the transport so the given transport will
- * not manage this device any more
- *
- * Returns: 0 for success
- **/
-static void fcoe_transport_device_remove_all(struct fcoe_transport *t)
-{
-	struct fcoe_transport_internal *ti, *tmp;
-
-	mutex_lock(&t->devlock);
-	list_for_each_entry_safe(ti, tmp, &t->devlist, list) {
-		list_del(&ti->list);
-		kfree(ti);
-	}
-	mutex_unlock(&t->devlock);
-}
-
-/**
- * fcoe_transport_register - adds a fcoe transport to the fcoe transports list
- * @t: ptr to the fcoe transport to be added
- *
- * Returns: 0 for success
- **/
-int fcoe_transport_register(struct fcoe_transport *t)
-{
-	struct fcoe_transport *tt;
-
-	/* TODO - add fcoe_transport specific initialization here */
-	mutex_lock(&fcoe_transports_lock);
-	list_for_each_entry(tt, &fcoe_transports, list) {
-		if (tt == t) {
-			mutex_unlock(&fcoe_transports_lock);
-			return -EEXIST;
-		}
-	}
-	list_add_tail(&t->list, &fcoe_transports);
-	mutex_unlock(&fcoe_transports_lock);
-
-	mutex_init(&t->devlock);
-	INIT_LIST_HEAD(&t->devlist);
-
-	printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(fcoe_transport_register);
-
-/**
- * fcoe_transport_unregister - remove the tranport fro the fcoe transports list
- * @t: ptr to the fcoe transport to be removed
- *
- * Returns: 0 for success
- **/
-int fcoe_transport_unregister(struct fcoe_transport *t)
-{
-	struct fcoe_transport *tt, *tmp;
-
-	mutex_lock(&fcoe_transports_lock);
-	list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) {
-		if (tt == t) {
-			list_del(&t->list);
-			mutex_unlock(&fcoe_transports_lock);
-			fcoe_transport_device_remove_all(t);
-			printk(KERN_DEBUG "fcoe_transport_unregister:%s\n",
-			       t->name);
-			return 0;
-		}
-	}
-	mutex_unlock(&fcoe_transports_lock);
-	return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(fcoe_transport_unregister);
-
-/**
- * fcoe_transport_attach - load transport to fcoe
- * @netdev: the netdev the transport to be attached to
- *
- * This will look for existing offload driver, if not found, it falls back to
- * the default sw hba (fcoe_sw) as its fcoe transport.
- *
- * Returns: 0 for success
- **/
-int fcoe_transport_attach(struct net_device *netdev)
-{
-	struct fcoe_transport *t;
-
-	/* find the corresponding transport */
-	t = fcoe_transport_default();
-	if (!t) {
-		printk(KERN_DEBUG "fcoe_transport_attach"
-		       ":no transport for %s:use %s\n",
-		       netdev->name, t->name);
-		return -ENODEV;
-	}
-	/* add to the transport */
-	if (fcoe_transport_device_add(t, netdev)) {
-		printk(KERN_DEBUG "fcoe_transport_attach"
-		       ":failed to add %s to tramsport %s\n",
-		       netdev->name, t->name);
-		return -EIO;
-	}
-	/* transport create function */
-	if (t->create)
-		t->create(netdev);
-
-	printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n",
-	       t->name, netdev->name);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(fcoe_transport_attach);
-
-/**
- * fcoe_transport_release - unload transport from fcoe
- * @netdev: the net device on which fcoe is to be released
- *
- * Returns: 0 for success
- **/
-int fcoe_transport_release(struct net_device *netdev)
-{
-	struct fcoe_transport *t;
-
-	/* find the corresponding transport */
-	t = fcoe_transport_default();
-	if (!t) {
-		printk(KERN_DEBUG "fcoe_transport_release:"
-		       "no transport for %s:use %s\n",
-		       netdev->name, t->name);
-		return -ENODEV;
-	}
-	/* remove the device from the transport */
-	if (fcoe_transport_device_remove(t, netdev)) {
-		printk(KERN_DEBUG "fcoe_transport_release:"
-		       "failed to add %s to tramsport %s\n",
-		       netdev->name, t->name);
-		return -EIO;
-	}
-	/* transport destroy function */
-	if (t->destroy)
-		t->destroy(netdev);
-
-	printk(KERN_DEBUG "fcoe_transport_release:"
-	       "device %s dettached from transport %s\n",
-	       netdev->name, t->name);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(fcoe_transport_release);
-
-/**
- * fcoe_transport_init - initializes fcoe transport layer
- *
- * This prepares for the fcoe transport layer
- *
- * Returns: none
- **/
-int __init fcoe_transport_init(void)
-{
-	INIT_LIST_HEAD(&fcoe_transports);
-	mutex_init(&fcoe_transports_lock);
-	return 0;
-}
-
-/**
- * fcoe_transport_exit - cleans up the fcoe transport layer
- * This cleans up the fcoe transport layer. removing any transport on the list,
- * note that the transport destroy func is not called here.
- *
- * Returns: none
- **/
-int __exit fcoe_transport_exit(void)
-{
-	struct fcoe_transport *t, *tmp;
-
-	mutex_lock(&fcoe_transports_lock);
-	list_for_each_entry_safe(t, tmp, &fcoe_transports, list) {
-		list_del(&t->list);
-		mutex_unlock(&fcoe_transports_lock);
-		fcoe_transport_device_remove_all(t);
-		mutex_lock(&fcoe_transports_lock);
-	}
-	mutex_unlock(&fcoe_transports_lock);
-	return 0;
-}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
new file mode 100644
index 0000000..faa9ddf
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -0,0 +1,1894 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <linux/rtnetlink.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fip.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include <scsi/libfcoe.h>
+
+#include "fcoe.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FCoE");
+MODULE_LICENSE("GPL v2");
+
+/* fcoe host list */
+LIST_HEAD(fcoe_hostlist);
+DEFINE_RWLOCK(fcoe_hostlist_lock);
+DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
+DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
+
+/* Function Prototypes */
+static int fcoe_reset(struct Scsi_Host *shost);
+static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
+static int fcoe_rcv(struct sk_buff *, struct net_device *,
+		    struct packet_type *, struct net_device *);
+static int fcoe_percpu_receive_thread(void *arg);
+static void fcoe_clean_pending_queue(struct fc_lport *lp);
+static void fcoe_percpu_clean(struct fc_lport *lp);
+static int fcoe_link_ok(struct fc_lport *lp);
+
+static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
+static int fcoe_hostlist_add(const struct fc_lport *);
+static int fcoe_hostlist_remove(const struct fc_lport *);
+
+static int fcoe_check_wait_queue(struct fc_lport *);
+static int fcoe_device_notification(struct notifier_block *, ulong, void *);
+static void fcoe_dev_setup(void);
+static void fcoe_dev_cleanup(void);
+
+/* notification function from net device */
+static struct notifier_block fcoe_notifier = {
+	.notifier_call = fcoe_device_notification,
+};
+
+static struct scsi_transport_template *scsi_transport_fcoe_sw;
+
+struct fc_function_template fcoe_transport_function = {
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_active_fc4s = 1,
+	.show_host_maxframe_size = 1,
+
+	.show_host_port_id = 1,
+	.show_host_supported_speeds = 1,
+	.get_host_speed = fc_get_host_speed,
+	.show_host_speed = 1,
+	.show_host_port_type = 1,
+	.get_host_port_state = fc_get_host_port_state,
+	.show_host_port_state = 1,
+	.show_host_symbolic_name = 1,
+
+	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.show_host_fabric_name = 1,
+	.show_starget_node_name = 1,
+	.show_starget_port_name = 1,
+	.show_starget_port_id = 1,
+	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+	.get_fc_host_stats = fc_get_host_stats,
+	.issue_fc_host_lip = fcoe_reset,
+
+	.terminate_rport_io = fc_rport_terminate_io,
+};
+
+static struct scsi_host_template fcoe_shost_template = {
+	.module = THIS_MODULE,
+	.name = "FCoE Driver",
+	.proc_name = FCOE_NAME,
+	.queuecommand = fc_queuecommand,
+	.eh_abort_handler = fc_eh_abort,
+	.eh_device_reset_handler = fc_eh_device_reset,
+	.eh_host_reset_handler = fc_eh_host_reset,
+	.slave_alloc = fc_slave_alloc,
+	.change_queue_depth = fc_change_queue_depth,
+	.change_queue_type = fc_change_queue_type,
+	.this_id = -1,
+	.cmd_per_lun = 32,
+	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
+	.use_clustering = ENABLE_CLUSTERING,
+	.sg_tablesize = SG_ALL,
+	.max_sectors = 0xffff,
+};
+
+/**
+ * fcoe_lport_config() - sets up the fc_lport
+ * @lp: ptr to the fc_lport
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_lport_config(struct fc_lport *lp)
+{
+	lp->link_up = 0;
+	lp->qfull = 0;
+	lp->max_retry_count = 3;
+	lp->max_rport_retry_count = 3;
+	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
+	lp->r_a_tov = 2 * 2 * 1000;
+	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+			      FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+
+	fc_lport_init_stats(lp);
+
+	/* lport fc_lport related configuration */
+	fc_lport_config(lp);
+
+	/* offload related configuration */
+	lp->crc_offload = 0;
+	lp->seq_offload = 0;
+	lp->lro_enabled = 0;
+	lp->lro_xid = 0;
+	lp->lso_max = 0;
+
+	return 0;
+}
+
+/**
+ * fcoe_netdev_config() - Set up netdev for SW FCoE
+ * @lp : ptr to the fc_lport
+ * @netdev : ptr to the associated netdevice struct
+ *
+ * Must be called after fcoe_lport_config() as it will use lport mutex
+ *
+ * Returns : 0 for success
+ */
+static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
+{
+	u32 mfs;
+	u64 wwnn, wwpn;
+	struct fcoe_softc *fc;
+	u8 flogi_maddr[ETH_ALEN];
+
+	/* Setup lport private data to point to fcoe softc */
+	fc = lport_priv(lp);
+	fc->ctlr.lp = lp;
+	fc->real_dev = netdev;
+	fc->phys_dev = netdev;
+
+	/* Require support for get_pauseparam ethtool op. */
+	if (netdev->priv_flags & IFF_802_1Q_VLAN)
+		fc->phys_dev = vlan_dev_real_dev(netdev);
+
+	/* Do not support for bonding device */
+	if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
+	    (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
+	    (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Determine max frame size based on underlying device and optional
+	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
+	 * will return 0, so do this first.
+	 */
+	mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
+				   sizeof(struct fcoe_crc_eof));
+	if (fc_set_mfs(lp, mfs))
+		return -EINVAL;
+
+	/* offload features support */
+	if (fc->real_dev->features & NETIF_F_SG)
+		lp->sg_supp = 1;
+
+#ifdef NETIF_F_FCOE_CRC
+	if (netdev->features & NETIF_F_FCOE_CRC) {
+		lp->crc_offload = 1;
+		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
+	}
+#endif
+
+#ifdef FCOE_NETDEV_BACKPORT
+#ifdef NETIF_F_FSO
+	if (netdev->features & NETIF_F_FSO) {
+		lp->seq_offload = 1;
+		lp->lso_max = netdev->gso_max_size;
+		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
+				lp->lso_max);
+	}
+#endif
+	if (netdev->fcoe_ddp_xid) {
+		lp->lro_enabled = 1;
+		lp->lro_xid = netdev->fcoe_ddp_xid;
+		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
+				lp->lro_xid);
+	}
+#endif
+	skb_queue_head_init(&fc->fcoe_pending_queue);
+	fc->fcoe_pending_queue_active = 0;
+
+	/* setup Source Mac Address */
+	memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
+	       fc->real_dev->addr_len);
+
+	wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
+	fc_set_wwnn(lp, wwnn);
+	/* XXX - 3rd arg needs to be vlan id */
+	wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
+	fc_set_wwpn(lp, wwpn);
+
+	/*
+	 * Add FCoE MAC address as second unicast MAC address
+	 * or enter promiscuous mode if not capable of listening
+	 * for multiple unicast MACs.
+	 */
+#ifdef FCOE_NETDEV_BACKPORT
+	rtnl_lock();
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
+	rtnl_unlock();
+#else
+	dev_set_promiscuity(fc->real_dev, 1);
+#endif
+
+	/*
+	 * setup the receive function from ethernet driver
+	 * on the ethertype for the given device
+	 */
+	fc->fcoe_packet_type.func = fcoe_rcv;
+	fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+	fc->fcoe_packet_type.dev = fc->real_dev;
+	dev_add_pack(&fc->fcoe_packet_type);
+
+	return 0;
+}
+
+/**
+ * fcoe_shost_config() - Sets up fc_lport->host
+ * @lp : ptr to the fc_lport
+ * @shost : ptr to the associated scsi host
+ * @dev : device associated to scsi host
+ *
+ * Must be called after fcoe_lport_config() and fcoe_netdev_config()
+ *
+ * Returns : 0 for success
+ */
+static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
+				struct device *dev)
+{
+	int rc = 0;
+
+	/* lport scsi host config */
+	lp->host = shost;
+
+	lp->host->max_lun = FCOE_MAX_LUN;
+	lp->host->max_id = FCOE_MAX_FCP_TARGET;
+	lp->host->max_channel = 0;
+	lp->host->transportt = scsi_transport_fcoe_sw;
+
+	/* add the new host to the SCSI-ml */
+	rc = scsi_add_host(lp->host, dev);
+	if (rc) {
+		FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
+				"error on scsi_add_host\n");
+		return rc;
+	}
+	sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
+		FCOE_NAME, FCOE_VERSION,
+		fcoe_netdev(lp)->name);
+
+	return 0;
+}
+
+/**
+ * fcoe_em_config() - allocates em for this lport
+ * @lp: the port that em is to allocated for
+ *
+ * Returns : 0 on success
+ */
+static inline int fcoe_em_config(struct fc_lport *lp)
+{
+	BUG_ON(lp->emp);
+
+	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
+				    FCOE_MIN_XID, FCOE_MAX_XID);
+	if (!lp->emp)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * fcoe_if_destroy() - FCoE software HBA tear-down function
+ * @netdev: ptr to the associated net_device
+ *
+ * Returns: 0 on success
+ */
+static int fcoe_if_destroy(struct net_device *netdev)
+{
+	struct fc_lport *lp = NULL;
+	struct fcoe_softc *fc;
+	u8 flogi_maddr[ETH_ALEN];
+
+	BUG_ON(!netdev);
+
+	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
+
+	lp = fcoe_hostlist_lookup(netdev);
+	if (!lp)
+		return -ENODEV;
+
+	fc = lport_priv(lp);
+
+	/* Logout of the fabric */
+	fc_fabric_logoff(lp);
+
+	/* Remove the instance from fcoe's list */
+	fcoe_hostlist_remove(lp);
+
+	/* Don't listen for Ethernet packets anymore */
+	dev_remove_pack(&fc->fcoe_packet_type);
+	dev_remove_pack(&fc->fip_packet_type);
+	fcoe_ctlr_destroy(&fc->ctlr);
+
+	/* Cleanup the fc_lport */
+	fc_lport_destroy(lp);
+	fc_fcp_destroy(lp);
+
+	/* Detach from the scsi-ml */
+	fc_remove_host(lp->host);
+	scsi_remove_host(lp->host);
+
+	/* There are no more rports or I/O, free the EM */
+	if (lp->emp)
+		fc_exch_mgr_free(lp->emp);
+
+#ifdef FCOE_NETDEV_BACKPORT
+	/* Delete secondary MAC addresses */
+	rtnl_lock();
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
+	if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
+		dev_unicast_delete(fc->real_dev,
+				   fc->ctlr.data_src_addr, ETH_ALEN);
+	dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+	rtnl_unlock();
+#else
+	dev_set_promiscuity(fc->real_dev, -1);
+#endif
+
+	/* Free the per-CPU receive threads */
+	fcoe_percpu_clean(lp);
+
+	/* Free existing skbs */
+	fcoe_clean_pending_queue(lp);
+
+	/* Free memory used by statistical counters */
+	fc_lport_free_stats(lp);
+
+	/* Release the net_device and Scsi_Host */
+	dev_put(fc->real_dev);
+	scsi_host_put(lp->host);
+
+	return 0;
+}
+
+/*
+ * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
+ * @lp:	the corresponding fc_lport
+ * @xid: the exchange id for this ddp transfer
+ * @sgl: the scatterlist describing this transfer
+ * @sgc: number of sg items
+ *
+ * Returns : 0 no ddp
+ */
+static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
+			     struct scatterlist *sgl, unsigned int sgc)
+{
+	struct net_device *n = fcoe_netdev(lp);
+
+#ifdef FCOE_NETDEV_BACKPORT 
+	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
+		return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
+#endif
+	return 0;
+}
+
+/*
+ * fcoe_ddp_done - calls LLD's ddp_done through net_device
+ * @lp:	the corresponding fc_lport
+ * @xid: the exchange id for this ddp transfer
+ *
+ * Returns : the length of data that has been completed by ddp
+ */
+static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
+{
+	struct net_device *n = fcoe_netdev(lp);
+
+#ifdef FCOE_NETDEV_BACKPORT 
+	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
+		return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
+#endif
+	return 0;
+}
+
+static struct libfc_function_template fcoe_libfc_fcn_templ = {
+	.frame_send = fcoe_xmit,
+	.ddp_setup = fcoe_ddp_setup,
+	.ddp_done = fcoe_ddp_done,
+};
+
+/**
+ * fcoe_fip_recv - handle a received FIP frame.
+ * @skb: the receive skb
+ * @dev: associated &net_device
+ * @ptype: the &packet_type structure which was used to register this handler.
+ * @orig_dev: original receive &net_device, in case @dev is a bond.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
+			 struct packet_type *ptype,
+			 struct net_device *orig_dev)
+{
+	struct fcoe_softc *fc;
+
+	fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
+	fcoe_ctlr_recv(&fc->ctlr, skb);
+	return 0;
+}
+
+/**
+ * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
+ * @fip: FCoE controller.
+ * @skb: FIP Packet.
+ */
+static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+	skb->dev = fcoe_from_ctlr(fip)->real_dev;
+	dev_queue_xmit(skb);
+}
+
+/**
+ * fcoe_update_src_mac() - Update Ethernet MAC filters.
+ * @fip: FCoE controller.
+ * @old: Unicast MAC address to delete if the MAC is non-zero.
+ * @new: Unicast MAC address to add.
+ *
+ * Remove any previously-set unicast MAC filter.
+ * Add secondary FCoE MAC address filter for our OUI.
+ */
+static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
+{
+	struct fcoe_softc *fc;
+
+	fc = fcoe_from_ctlr(fip);
+#ifdef FCOE_NETDEV_BACKPORT
+	rtnl_lock();
+	if (!is_zero_ether_addr(old))
+		dev_unicast_delete(fc->real_dev, old, ETH_ALEN);
+	dev_unicast_add(fc->real_dev, new, ETH_ALEN);
+	rtnl_unlock();
+#endif
+}
+
+/**
+ * fcoe_if_create() - this function creates the fcoe interface
+ * @netdev: pointer to the associated netdevice
+ *
+ * Creates fc_lport struct and scsi_host for lport, configures lport
+ * and starts fabric login.
+ *
+ * Returns : 0 on success
+ */
+static int fcoe_if_create(struct net_device *netdev)
+{
+	int rc;
+	struct fc_lport *lp = NULL;
+	struct fcoe_softc *fc;
+	struct Scsi_Host *shost;
+
+	BUG_ON(!netdev);
+
+	FCOE_NETDEV_DBG(netdev, "Create Interface\n");
+
+	lp = fcoe_hostlist_lookup(netdev);
+	if (lp)
+		return -EEXIST;
+
+	shost = libfc_host_alloc(&fcoe_shost_template,
+				 sizeof(struct fcoe_softc));
+	if (!shost) {
+		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
+		return -ENOMEM;
+	}
+	lp = shost_priv(shost);
+	fc = lport_priv(lp);
+
+	/* configure fc_lport, e.g., em */
+	rc = fcoe_lport_config(lp);
+	if (rc) {
+		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
+				"interface\n");
+		goto out_host_put;
+	}
+
+	/* configure lport network properties */
+	rc = fcoe_netdev_config(lp, netdev);
+	if (rc) {
+		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
+				"interface\n");
+		goto out_host_put;
+	}
+
+	/*
+	 * Initialize FIP.
+	 */
+	fcoe_ctlr_init(&fc->ctlr);
+	fc->ctlr.send = fcoe_fip_send;
+	fc->ctlr.update_mac = fcoe_update_src_mac;
+
+	fc->fip_packet_type.func = fcoe_fip_recv;
+	fc->fip_packet_type.type = htons(ETH_P_FIP);
+	fc->fip_packet_type.dev = fc->real_dev;
+	dev_add_pack(&fc->fip_packet_type);
+
+	/* configure lport scsi host properties */
+	rc = fcoe_shost_config(lp, shost, netdev->class_dev.dev->parent);
+	if (rc) {
+		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
+				"interface\n");
+		goto out_host_put;
+	}
+
+	/* lport exch manager allocation */
+	rc = fcoe_em_config(lp);
+	if (rc) {
+		FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
+				"interface\n");
+		goto out_host_put;
+	}
+
+	/* Initialize the library */
+	rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
+	if (rc) {
+		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
+				"interface\n");
+		goto out_lp_destroy;
+	}
+
+	/* add to lports list */
+	fcoe_hostlist_add(lp);
+
+	lp->boot_time = jiffies;
+
+	fc_fabric_login(lp);
+
+	if (!fcoe_link_ok(lp))
+		fcoe_ctlr_link_up(&fc->ctlr);
+
+	dev_hold(netdev);
+
+	return rc;
+
+out_lp_destroy:
+	fc_exch_mgr_free(lp->emp); /* Free the EM */
+out_host_put:
+	scsi_host_put(lp->host);
+	return rc;
+}
+
+/**
+ * fcoe_if_init() - attach to scsi transport
+ *
+ * Returns : 0 on success
+ */
+static int __init fcoe_if_init(void)
+{
+	/* attach to scsi transport */
+	scsi_transport_fcoe_sw =
+		fc_attach_transport(&fcoe_transport_function);
+
+	if (!scsi_transport_fcoe_sw) {
+		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * fcoe_if_exit() - detach from scsi transport
+ *
+ * Returns : 0 on success
+ */
+int __exit fcoe_if_exit(void)
+{
+	fc_release_transport(scsi_transport_fcoe_sw);
+	return 0;
+}
+
+/**
+ * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
+ * @cpu: cpu index for the online cpu
+ */
+static void fcoe_percpu_thread_create(unsigned int cpu)
+{
+	struct fcoe_percpu_s *p;
+	struct task_struct *thread;
+
+	p = &per_cpu(fcoe_percpu, cpu);
+
+	thread = kthread_create(fcoe_percpu_receive_thread,
+				(void *)p, "fcoethread/%d", cpu);
+
+	if (likely(!IS_ERR(thread))) {
+		kthread_bind(thread, cpu);
+		wake_up_process(thread);
+
+		spin_lock_bh(&p->fcoe_rx_list.lock);
+		p->thread = thread;
+		spin_unlock_bh(&p->fcoe_rx_list.lock);
+	}
+}
+
+/**
+ * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
+ * @cpu: cpu index the rx thread is to be removed
+ *
+ * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
+ * current CPU's Rx thread. If the thread being destroyed is bound to
+ * the CPU processing this context the skbs will be freed.
+ */
+static void fcoe_percpu_thread_destroy(unsigned int cpu)
+{
+	struct fcoe_percpu_s *p;
+	struct task_struct *thread;
+	struct page *crc_eof;
+	struct sk_buff *skb;
+#ifdef CONFIG_SMP
+	struct fcoe_percpu_s *p0;
+	unsigned targ_cpu = smp_processor_id();
+#endif /* CONFIG_SMP */
+
+	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
+
+	/* Prevent any new skbs from being queued for this CPU. */
+	p = &per_cpu(fcoe_percpu, cpu);
+	spin_lock_bh(&p->fcoe_rx_list.lock);
+	thread = p->thread;
+	p->thread = NULL;
+	crc_eof = p->crc_eof_page;
+	p->crc_eof_page = NULL;
+	p->crc_eof_offset = 0;
+	spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+#ifdef CONFIG_SMP
+	/*
+	 * Don't bother moving the skb's if this context is running
+	 * on the same CPU that is having its thread destroyed. This
+	 * can easily happen when the module is removed.
+	 */
+	if (cpu != targ_cpu) {
+		p0 = &per_cpu(fcoe_percpu, targ_cpu);
+		spin_lock_bh(&p0->fcoe_rx_list.lock);
+		if (p0->thread) {
+			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
+				 cpu, targ_cpu);
+
+			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+				__skb_queue_tail(&p0->fcoe_rx_list, skb);
+			spin_unlock_bh(&p0->fcoe_rx_list.lock);
+		} else {
+			/*
+			 * The targeted CPU is not initialized and cannot accept
+			 * new  skbs. Unlock the targeted CPU and drop the skbs
+			 * on the CPU that is going offline.
+			 */
+			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+				kfree_skb(skb);
+			spin_unlock_bh(&p0->fcoe_rx_list.lock);
+		}
+	} else {
+		/*
+		 * This scenario occurs when the module is being removed
+		 * and all threads are being destroyed. skbs will continue
+		 * to be shifted from the CPU thread that is being removed
+		 * to the CPU thread associated with the CPU that is processing
+		 * the module removal. Once there is only one CPU Rx thread it
+		 * will reach this case and we will drop all skbs and later
+		 * stop the thread.
+		 */
+		spin_lock_bh(&p->fcoe_rx_list.lock);
+		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+			kfree_skb(skb);
+		spin_unlock_bh(&p->fcoe_rx_list.lock);
+	}
+#else
+	/*
+	 * This is a non-SMP scenario where the singular Rx thread is
+	 * being removed. Free all skbs and stop the thread.
+	 */
+	spin_lock_bh(&p->fcoe_rx_list.lock);
+	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+		kfree_skb(skb);
+	spin_unlock_bh(&p->fcoe_rx_list.lock);
+#endif
+
+	if (thread)
+		kthread_stop(thread);
+
+	if (crc_eof)
+		put_page(crc_eof);
+}
+
+/**
+ * fcoe_cpu_callback() - fcoe cpu hotplug event callback
+ * @nfb: callback data block
+ * @action: event triggering the callback
+ * @hcpu: index for the cpu of this event
+ *
+ * This creates or destroys per cpu data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int fcoe_cpu_callback(struct notifier_block *nfb,
+			     unsigned long action, void *hcpu)
+{
+	unsigned cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
+		fcoe_percpu_thread_create(cpu);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
+		fcoe_percpu_thread_destroy(cpu);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block fcoe_cpu_notifier = {
+	.notifier_call = fcoe_cpu_callback,
+};
+
+/**
+ * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
+ * @skb: the receive skb
+ * @dev: associated net device
+ * @ptype: context
+ * @olddev: last device
+ *
+ * this function will receive the packet and build fc frame and pass it up
+ *
+ * Returns: 0 for success
+ */
+int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
+	     struct packet_type *ptype, struct net_device *olddev)
+{
+	struct fc_lport *lp;
+	struct fcoe_rcv_info *fr;
+	struct fcoe_softc *fc;
+	struct fc_frame_header *fh;
+	struct fcoe_percpu_s *fps;
+	unsigned short oxid;
+	unsigned int cpu = 0;
+
+	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
+	lp = fc->ctlr.lp;
+	if (unlikely(lp == NULL)) {
+		FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
+		goto err2;
+	}
+	if (!lp->link_up)
+		goto err2;
+
+	FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
+			"data:%p tail:%p end:%p sum:%d dev:%s",
+			skb->len, skb->data_len, skb->head, skb->data,
+			skb_tail_pointer(skb), skb_end_pointer(skb),
+			skb->csum, skb->dev ? skb->dev->name : "<NULL>");
+
+	/* check for FCOE packet type */
+	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+		FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
+		goto err;
+	}
+
+	/*
+	 * Check for minimum frame length, and make sure required FCoE
+	 * and FC headers are pulled into the linear data area.
+	 */
+	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+		goto err;
+
+	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+	fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+	oxid = ntohs(fh->fh_ox_id);
+
+	fr = fcoe_dev_from_skb(skb);
+	fr->fr_dev = lp;
+	fr->ptype = ptype;
+
+#ifdef CONFIG_SMP
+	/*
+	 * The incoming frame exchange id(oxid) is ANDed with num of online
+	 * cpu bits to get cpu and then this cpu is used for selecting
+	 * a per cpu kernel thread from fcoe_percpu.
+	 */
+	cpu = oxid & (num_online_cpus() - 1);
+#endif
+
+	fps = &per_cpu(fcoe_percpu, cpu);
+	spin_lock_bh(&fps->fcoe_rx_list.lock);
+	if (unlikely(!fps->thread)) {
+		/*
+		 * The targeted CPU is not ready, let's target
+		 * the first CPU now. For non-SMP systems this
+		 * will check the same CPU twice.
+		 */
+		FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
+				"ready for incoming skb- using first online "
+				"CPU.\n");
+
+		spin_unlock_bh(&fps->fcoe_rx_list.lock);
+		cpu = first_cpu(cpu_online_map);
+		fps = &per_cpu(fcoe_percpu, cpu);
+		spin_lock_bh(&fps->fcoe_rx_list.lock);
+		if (!fps->thread) {
+			spin_unlock_bh(&fps->fcoe_rx_list.lock);
+			goto err;
+		}
+	}
+
+	/*
+	 * We now have a valid CPU that we're targeting for
+	 * this skb. We also have this receive thread locked,
+	 * so we're free to queue skbs into its queue.
+	 */
+	__skb_queue_tail(&fps->fcoe_rx_list, skb);
+	if (fps->fcoe_rx_list.qlen == 1)
+		wake_up_process(fps->thread);
+
+	spin_unlock_bh(&fps->fcoe_rx_list.lock);
+
+	return 0;
+err:
+	fc_lport_get_stats(lp)->ErrorFrames++;
+
+err2:
+	kfree_skb(skb);
+	return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_rcv);
+
+/**
+ * fcoe_start_io() - pass to netdev to start xmit for fcoe
+ * @skb: the skb to be xmitted
+ *
+ * Returns: 0 for success
+ */
+static inline int fcoe_start_io(struct sk_buff *skb)
+{
+	int rc;
+
+	skb_get(skb);
+	rc = dev_queue_xmit(skb);
+	if (rc != 0)
+		return rc;
+	kfree_skb(skb);
+	return 0;
+}
+
+/**
+ * fcoe_get_paged_crc_eof() - in case we need alloc a page for crc_eof
+ * @skb: the skb to be xmitted
+ * @tlen: total len
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+	struct fcoe_percpu_s *fps;
+	struct page *page;
+
+	fps = &get_cpu_var(fcoe_percpu);
+	page = fps->crc_eof_page;
+	if (!page) {
+		page = alloc_page(GFP_ATOMIC);
+		if (!page) {
+			put_cpu_var(fcoe_percpu);
+			return -ENOMEM;
+		}
+		fps->crc_eof_page = page;
+		fps->crc_eof_offset = 0;
+	}
+
+	get_page(page);
+	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+			   fps->crc_eof_offset, tlen);
+	skb->len += tlen;
+	skb->data_len += tlen;
+	skb->truesize += tlen;
+	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+	if (fps->crc_eof_offset >= PAGE_SIZE) {
+		fps->crc_eof_page = NULL;
+		fps->crc_eof_offset = 0;
+		put_page(page);
+	}
+	put_cpu_var(fcoe_percpu);
+	return 0;
+}
+
+/**
+ * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
+ * @fp: the fc_frame containing data to be checksummed
+ *
+ * This uses crc32() to calculate the crc for fc frame
+ * Return   : 32 bit crc
+ */
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+	struct sk_buff *skb = fp_skb(fp);
+	struct skb_frag_struct *frag;
+	unsigned char *data;
+	unsigned long off, len, clen;
+	u32 crc;
+	unsigned i;
+
+	crc = crc32(~0, skb->data, skb_headlen(skb));
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		off = frag->page_offset;
+		len = frag->size;
+		while (len > 0) {
+			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+					   KM_SKB_DATA_SOFTIRQ);
+			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+			off += clen;
+			len -= clen;
+		}
+	}
+	return crc;
+}
+
+/**
+ * fcoe_xmit() - FCoE frame transmit function
+ * @lp:	the associated local port
+ * @fp: the fc_frame to be transmitted
+ *
+ * Return   : 0 for success
+ */
+int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
+{
+	int wlen, rc = 0;
+	u32 crc;
+	struct ethhdr *eh;
+	struct fcoe_crc_eof *cp;
+	struct sk_buff *skb;
+	struct fcoe_dev_stats *stats;
+	struct fc_frame_header *fh;
+	unsigned int hlen;		/* header length implies the version */
+	unsigned int tlen;		/* trailer length */
+	unsigned int elen;		/* eth header, may include vlan */
+	struct fcoe_softc *fc;
+	u8 sof, eof;
+	struct fcoe_hdr *hp;
+
+	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
+
+	fc = lport_priv(lp);
+	fh = fc_frame_header_get(fp);
+	skb = fp_skb(fp);
+	wlen = skb->len / FCOE_WORD_TO_BYTE;
+
+	if (!lp->link_up) {
+		kfree_skb(skb);
+		return 0;
+	}
+
+	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
+	    fcoe_ctlr_els_send(&fc->ctlr, skb))
+		return 0;
+
+	sof = fr_sof(fp);
+	eof = fr_eof(fp);
+
+	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
+		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
+	hlen = sizeof(struct fcoe_hdr);
+	tlen = sizeof(struct fcoe_crc_eof);
+	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+#ifdef FCOE_NETDEV_BACKPORT
+	/* crc offload */
+	if (likely(lp->crc_offload)) {
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		skb->csum_start = skb_headroom(skb);
+		skb->csum_offset = skb->len;
+		crc = 0;
+	} else {
+#else
+		skb->ip_summed = CHECKSUM_NONE;
+		crc = fcoe_fc_crc(fp);
+#endif
+	/* copy fc crc and eof to the skb buff */
+	if (skb_is_nonlinear(skb)) {
+		skb_frag_t *frag;
+		if (fcoe_get_paged_crc_eof(skb, tlen)) {
+			kfree_skb(skb);
+			return -ENOMEM;
+		}
+		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+			+ frag->page_offset;
+	} else {
+		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+	}
+
+	memset(cp, 0, sizeof(*cp));
+	cp->fcoe_eof = eof;
+	cp->fcoe_crc32 = cpu_to_le32(~crc);
+
+	if (skb_is_nonlinear(skb)) {
+		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+		cp = NULL;
+	}
+
+	/* adjust skb network/transport offsets to match mac/fcoe/fc */
+	skb_push(skb, elen + hlen);
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	skb->mac_len = elen;
+	skb->protocol = htons(ETH_P_FCOE);
+	skb->dev = fc->real_dev;
+
+	/* fill up mac and fcoe headers */
+	eh = eth_hdr(skb);
+	eh->h_proto = htons(ETH_P_FCOE);
+	if (fc->ctlr.map_dest)
+		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+	else
+		/* insert GW address */
+		memcpy(eh->h_dest, fc->ctlr.dest_addr, ETH_ALEN);
+
+	if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN))
+		memcpy(eh->h_source, fc->ctlr.ctl_src_addr, ETH_ALEN);
+	else
+		memcpy(eh->h_source, fc->ctlr.data_src_addr, ETH_ALEN);
+
+	hp = (struct fcoe_hdr *)(eh + 1);
+	memset(hp, 0, sizeof(*hp));
+	if (FC_FCOE_VER)
+		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+	hp->fcoe_sof = sof;
+
+#ifdef NETIF_F_FSO
+	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
+	if (lp->seq_offload && fr_max_payload(fp)) {
+		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
+		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
+	} else {
+		skb_shinfo(skb)->gso_type = 0;
+		skb_shinfo(skb)->gso_size = 0;
+	}
+#endif
+	/* update tx stats: regardless if LLD fails */
+	stats = fc_lport_get_stats(lp);
+	stats->TxFrames++;
+	stats->TxWords += wlen;
+
+	/* send down to lld */
+	fr_dev(fp) = lp;
+	if (fc->fcoe_pending_queue.qlen)
+		rc = fcoe_check_wait_queue(lp);
+
+	if (rc == 0)
+		rc = fcoe_start_io(skb);
+
+	if (rc) {
+		spin_lock_bh(&fc->fcoe_pending_queue.lock);
+		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
+		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+			lp->qfull = 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_xmit);
+
+/**
+ * fcoe_percpu_receive_thread() - recv thread per cpu
+ * @arg: ptr to the fcoe per cpu struct
+ *
+ * Return: 0 for success
+ */
+int fcoe_percpu_receive_thread(void *arg)
+{
+	struct fcoe_percpu_s *p = arg;
+	u32 fr_len;
+	struct fc_lport *lp;
+	struct fcoe_rcv_info *fr;
+	struct fcoe_dev_stats *stats;
+	struct fc_frame_header *fh;
+	struct sk_buff *skb;
+	struct fcoe_crc_eof crc_eof;
+	struct fc_frame *fp;
+	u8 *mac = NULL;
+	struct fcoe_softc *fc;
+	struct fcoe_hdr *hp;
+
+	set_user_nice(current, -20);
+
+	while (!kthread_should_stop()) {
+
+		spin_lock_bh(&p->fcoe_rx_list.lock);
+		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_bh(&p->fcoe_rx_list.lock);
+			schedule();
+			set_current_state(TASK_RUNNING);
+			if (kthread_should_stop())
+				return 0;
+			spin_lock_bh(&p->fcoe_rx_list.lock);
+		}
+		spin_unlock_bh(&p->fcoe_rx_list.lock);
+		fr = fcoe_dev_from_skb(skb);
+		lp = fr->fr_dev;
+		if (unlikely(lp == NULL)) {
+			FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure");
+			kfree_skb(skb);
+			continue;
+		}
+
+		FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
+				"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
+				skb->len, skb->data_len,
+				skb->head, skb->data, skb_tail_pointer(skb),
+				skb_end_pointer(skb), skb->csum,
+				skb->dev ? skb->dev->name : "<NULL>");
+
+		/*
+		 * Save source MAC address before discarding header.
+		 */
+		fc = lport_priv(lp);
+		if (skb_is_nonlinear(skb))
+			skb_linearize(skb);	/* not ideal */
+		mac = eth_hdr(skb)->h_source;
+
+		/*
+		 * Frame length checks and setting up the header pointers
+		 * was done in fcoe_rcv already.
+		 */
+		hp = (struct fcoe_hdr *) skb_network_header(skb);
+		fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+		stats = fc_lport_get_stats(lp);
+		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+			if (stats->ErrorFrames < 5)
+				printk(KERN_WARNING "fcoe: FCoE version "
+				       "mismatch: The frame has "
+				       "version %x, but the "
+				       "initiator supports version "
+				       "%x\n", FC_FCOE_DECAPS_VER(hp),
+				       FC_FCOE_VER);
+			stats->ErrorFrames++;
+			kfree_skb(skb);
+			continue;
+		}
+
+		skb_pull(skb, sizeof(struct fcoe_hdr));
+		fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+		stats->RxFrames++;
+		stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+		fp = (struct fc_frame *)skb;
+		fc_frame_init(fp);
+		fr_dev(fp) = lp;
+		fr_sof(fp) = hp->fcoe_sof;
+
+		/* Copy out the CRC and EOF trailer for access */
+		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+			kfree_skb(skb);
+			continue;
+		}
+		fr_eof(fp) = crc_eof.fcoe_eof;
+		fr_crc(fp) = crc_eof.fcoe_crc32;
+		if (pskb_trim(skb, fr_len)) {
+			kfree_skb(skb);
+			continue;
+		}
+
+		/*
+		 * We only check CRC if no offload is available and if it is
+		 * it's solicited data, in which case, the FCP layer would
+		 * check it during the copy.
+		 */
+		if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
+			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+		else
+			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+
+		fh = fc_frame_header_get(fp);
+		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+		    fh->fh_type == FC_TYPE_FCP) {
+			fc_exch_recv(lp, lp->emp, fp);
+			continue;
+		}
+		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+			if (le32_to_cpu(fr_crc(fp)) !=
+			    ~crc32(~0, skb->data, fr_len)) {
+				if (stats->InvalidCRCCount < 5)
+					printk(KERN_WARNING "fcoe: dropping "
+					       "frame with CRC error\n");
+				stats->InvalidCRCCount++;
+				stats->ErrorFrames++;
+				fc_frame_free(fp);
+				continue;
+			}
+			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+		}
+		if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
+		    fcoe_ctlr_recv_flogi(&fc->ctlr, fp, mac)) {
+			fc_frame_free(fp);
+			continue;
+		}
+		fc_exch_recv(lp, lp->emp, fp);
+	}
+	return 0;
+}
+
+/**
+ * fcoe_watchdog() - fcoe timer callback
+ * @vp:
+ *
+ * This checks the pending queue length for fcoe and set lport qfull
+ * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
+ * fcoe_hostlist.
+ *
+ * Returns: 0 for success
+ */
+void fcoe_watchdog(ulong vp)
+{
+	struct fcoe_softc *fc;
+
+	read_lock(&fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fcoe_hostlist, list) {
+		if (fc->ctlr.lp)
+			fcoe_check_wait_queue(fc->ctlr.lp);
+	}
+	read_unlock(&fcoe_hostlist_lock);
+
+	fcoe_timer.expires = jiffies + (1 * HZ);
+	add_timer(&fcoe_timer);
+}
+
+
+/**
+ * fcoe_check_wait_queue() - attempt to drain the fcoe pending xmit queue
+ * @lp: the fc_lport whose pending queue is to be drained
+ *
+ * This dequeues skbs from the head of the wait_queue and calls
+ * fcoe_start_io() for each one.  If a transmit fails, the skb is put
+ * back at the head of the wait_queue to be retried later.
+ *
+ * The wait_queue is used when the skb transmit fails. skb will go
+ * in the wait_queue which will be emptied by the time function OR
+ * by the next skb transmit.
+ *
+ * Returns: the remaining queue length, or -1 if another flush is in progress
+ */
+static int fcoe_check_wait_queue(struct fc_lport *lp)
+{
+	struct fcoe_softc *fc = lport_priv(lp);
+	struct sk_buff *skb;
+	int rc = -1;
+
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	if (fc->fcoe_pending_queue_active)
+		goto out;
+	fc->fcoe_pending_queue_active = 1;
+
+	while (fc->fcoe_pending_queue.qlen) {
+		/* keep qlen > 0 until fcoe_start_io succeeds */
+		fc->fcoe_pending_queue.qlen++;
+		skb = __skb_dequeue(&fc->fcoe_pending_queue);
+
+		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+		rc = fcoe_start_io(skb);
+		spin_lock_bh(&fc->fcoe_pending_queue.lock);
+
+		if (rc) {
+			__skb_queue_head(&fc->fcoe_pending_queue, skb);
+			/* undo temporary increment above */
+			fc->fcoe_pending_queue.qlen--;
+			break;
+		}
+		/* undo temporary increment above */
+		fc->fcoe_pending_queue.qlen--;
+	}
+
+	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
+		lp->qfull = 0;
+	fc->fcoe_pending_queue_active = 0;
+	rc = fc->fcoe_pending_queue.qlen;
+out:
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+	return rc;
+}
+
+/**
+ * fcoe_dev_setup() - setup link change notification interface
+ */
+static void fcoe_dev_setup()
+{
+	/*
+	 * here setup a interface specific wd time to
+	 * monitor the link state
+	 */
+	register_netdevice_notifier(&fcoe_notifier);
+}
+
+/**
+ * fcoe_dev_cleanup() - cleanup link change notification interface
+ */
+static void fcoe_dev_cleanup(void)
+{
+	unregister_netdevice_notifier(&fcoe_notifier);
+}
+
+/**
+ * fcoe_device_notification() - netdev event notification callback
+ * @notifier: context of the notification
+ * @event: type of event
+ * @ptr: ptr to the net_device the event is for
+ *
+ * This function is called by the ethernet driver in case of link change event
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_device_notification(struct notifier_block *notifier,
+				    ulong event, void *ptr)
+{
+	struct fc_lport *lp = NULL;
+	struct net_device *real_dev = ptr;
+	struct fcoe_softc *fc;
+	struct fcoe_dev_stats *stats;
+	u32 link_possible = 1;
+	u32 mfs;
+	int rc = NOTIFY_OK;
+
+	read_lock(&fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fcoe_hostlist, list) {
+		if (fc->real_dev == real_dev) {
+			lp = fc->ctlr.lp;
+			break;
+		}
+	}
+	read_unlock(&fcoe_hostlist_lock);
+	if (lp == NULL) {
+		rc = NOTIFY_DONE;
+		goto out;
+	}
+
+	switch (event) {
+	case NETDEV_DOWN:
+	case NETDEV_GOING_DOWN:
+		link_possible = 0;
+		break;
+	case NETDEV_UP:
+	case NETDEV_CHANGE:
+		break;
+	case NETDEV_CHANGEMTU:
+		mfs = fc->real_dev->mtu -
+			(sizeof(struct fcoe_hdr) +
+			 sizeof(struct fcoe_crc_eof));
+		if (mfs >= FC_MIN_MAX_FRAME)
+			fc_set_mfs(lp, mfs);
+		break;
+	case NETDEV_REGISTER:
+		break;
+	default:
+		FCOE_NETDEV_DBG(real_dev, "Unknown event %ld "
+				"from netdev netlink\n", event);
+	}
+	if (link_possible && !fcoe_link_ok(lp))
+		fcoe_ctlr_link_up(&fc->ctlr);
+	else if (fcoe_ctlr_link_down(&fc->ctlr)) {
+		stats = fc_lport_get_stats(lp);
+		stats->LinkFailureCount++;
+		fcoe_clean_pending_queue(lp);
+	}
+out:
+	return rc;
+}
+
+/**
+ * fcoe_if_to_netdev() - parse a name buffer to get netdev
+ * @buffer: incoming buffer holding the interface name
+ *
+ * Returns: NULL or ptr to the net_device
+ */
+static struct net_device *fcoe_if_to_netdev(const char *buffer)
+{
+	char *cp;
+	char ifname[IFNAMSIZ + 2];
+
+	if (buffer) {
+		strlcpy(ifname, buffer, IFNAMSIZ);
+		cp = ifname + strlen(ifname);
+		while (--cp >= ifname && *cp == '\n')
+			*cp = '\0';
+		return dev_get_by_name(&init_net, ifname);
+	}
+	return NULL;
+}
+
+/**
+ * fcoe_netdev_to_module_owner() - finds the NIC driver module of the netdev
+ * @netdev: the target netdev
+ *
+ * Returns: ptr to the struct module, NULL for failure
+ */
+static struct module *
+fcoe_netdev_to_module_owner(const struct net_device *netdev)
+{
+	struct device *dev;
+
+	if (!netdev)
+		return NULL;
+
+	dev = netdev->class_dev.dev->parent;
+	if (!dev)
+		return NULL;
+
+	if (!dev->driver)
+		return NULL;
+
+	return dev->driver->owner;
+}
+
+/**
+ * fcoe_ethdrv_get() - Hold the Ethernet driver
+ * @netdev: the target netdev
+ *
+ * Holds the Ethernet driver module by try_module_get() for
+ * the corresponding netdev.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_ethdrv_get(const struct net_device *netdev)
+{
+	struct module *owner;
+
+	owner = fcoe_netdev_to_module_owner(netdev);
+	if (owner) {
+		FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n",
+				module_name(owner));
+		return  try_module_get(owner);
+	}
+	return -ENODEV;
+}
+
+/**
+ * fcoe_ethdrv_put() - Release the Ethernet driver
+ * @netdev: the target netdev
+ *
+ * Releases the Ethernet driver module by module_put for
+ * the corresponding netdev.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_ethdrv_put(const struct net_device *netdev)
+{
+	struct module *owner;
+
+	owner = fcoe_netdev_to_module_owner(netdev);
+	if (owner) {
+		FCOE_NETDEV_DBG(netdev, "Release driver module %s\n",
+				module_name(owner));
+		module_put(owner);
+		return 0;
+	}
+	return -ENODEV;
+}
+
+/**
+ * fcoe_destroy() - handles the destroy from sysfs
+ * @buffer: expected to be an eth if name
+ * @kp: associated kernel param
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
+{
+	int rc;
+	struct net_device *netdev;
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+	/* look for existing lport */
+	if (!fcoe_hostlist_lookup(netdev)) {
+		rc = -ENODEV;
+		goto out_putdev;
+	}
+	rc = fcoe_if_destroy(netdev);
+	if (rc) {
+		printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n",
+		       netdev->name);
+		rc = -EIO;
+		goto out_putdev;
+	}
+	fcoe_ethdrv_put(netdev);
+	rc = 0;
+out_putdev:
+	dev_put(netdev);
+out_nodev:
+	return rc;
+}
+
+/**
+ * fcoe_create() - Handles the create call from sysfs
+ * @buffer: expected to be an eth if name
+ * @kp: associated kernel param
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_create(const char *buffer, struct kernel_param *kp)
+{
+	int rc;
+	struct net_device *netdev;
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+	/* look for existing lport */
+	if (fcoe_hostlist_lookup(netdev)) {
+		rc = -EEXIST;
+		goto out_putdev;
+	}
+	fcoe_ethdrv_get(netdev);
+
+	rc = fcoe_if_create(netdev);
+	if (rc) {
+		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
+		       netdev->name);
+		fcoe_ethdrv_put(netdev);
+		rc = -EIO;
+		goto out_putdev;
+	}
+	rc = 0;
+out_putdev:
+	dev_put(netdev);
+out_nodev:
+	return rc;
+}
+
+module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
+module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, "Destroy fcoe port");
+
+/**
+ * fcoe_link_ok() - Check if link is ok for the fc_lport
+ * @lp: ptr to the fc_lport
+ *
+ * Any permanently-disqualifying conditions have been previously checked.
+ * This also updates the speed setting, which may change with link for 100/1000.
+ *
+ * This function should probably be checking for PAUSE support at some point
+ * in the future. Currently Per-priority-pause is not determinable using
+ * ethtool, so we shouldn't be restrictive until that problem is resolved.
+ *
+ * Returns: 0 if link is OK for use by FCoE.
+ *
+ */
+int fcoe_link_ok(struct fc_lport *lp)
+{
+	struct fcoe_softc *fc = lport_priv(lp);
+	struct net_device *dev = fc->real_dev;
+	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+	int rc = 0;
+
+	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
+		dev = fc->phys_dev;
+		if (dev->ethtool_ops->get_settings) {
+			dev->ethtool_ops->get_settings(dev, &ecmd);
+			lp->link_supported_speeds &=
+				~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+			if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+					      SUPPORTED_1000baseT_Full))
+				lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+			if (ecmd.supported & SUPPORTED_10000baseT_Full)
+				lp->link_supported_speeds |=
+					FC_PORTSPEED_10GBIT;
+			if (ecmd.speed == SPEED_1000)
+				lp->link_speed = FC_PORTSPEED_1GBIT;
+			if (ecmd.speed == SPEED_10000)
+				lp->link_speed = FC_PORTSPEED_10GBIT;
+		}
+	} else
+		rc = -1;
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_ok);
+
+/**
+ * fcoe_percpu_clean() - Clear the pending skbs for an lport
+ * @lp: the fc_lport
+ */
+void fcoe_percpu_clean(struct fc_lport *lp)
+{
+	struct fcoe_percpu_s *pp;
+	struct fcoe_rcv_info *fr;
+	struct sk_buff_head *list;
+	struct sk_buff *skb, *next;
+	struct sk_buff *head;
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		pp = &per_cpu(fcoe_percpu, cpu);
+		spin_lock_bh(&pp->fcoe_rx_list.lock);
+		list = &pp->fcoe_rx_list;
+		head = list->next;
+		for (skb = head; skb != (struct sk_buff *)list;
+		     skb = next) {
+			next = skb->next;
+			fr = fcoe_dev_from_skb(skb);
+			if (fr->fr_dev == lp) {
+				__skb_unlink(skb, list);
+				kfree_skb(skb);
+			}
+		}
+		spin_unlock_bh(&pp->fcoe_rx_list.lock);
+	}
+}
+EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
+
+/**
+ * fcoe_clean_pending_queue() - Dequeue and free all skbs on the pending queue
+ * @lp: the corresponding fc_lport
+ *
+ * Returns: none
+ */
+void fcoe_clean_pending_queue(struct fc_lport *lp)
+{
+	struct fcoe_softc  *fc = lport_priv(lp);
+	struct sk_buff *skb;
+
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
+		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+		kfree_skb(skb);
+		spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	}
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
+
+/**
+ * fcoe_reset() - Reset the fcoe lport
+ * @shost: the Scsi_Host the reset was issued against
+ *
+ * Returns: always 0
+ */
+int fcoe_reset(struct Scsi_Host *shost)
+{
+	struct fc_lport *lport = shost_priv(shost);
+	fc_lport_reset(lport);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_reset);
+
+/**
+ * fcoe_hostlist_lookup_softc() - find the corresponding fcoe_softc for a given device
+ * @dev: ptr to the net_device
+ *
+ * Returns: NULL or the located fcoe_softc
+ */
+static struct fcoe_softc *
+fcoe_hostlist_lookup_softc(const struct net_device *dev)
+{
+	struct fcoe_softc *fc;
+
+	read_lock(&fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fcoe_hostlist, list) {
+		if (fc->real_dev == dev) {
+			read_unlock(&fcoe_hostlist_lock);
+			return fc;
+		}
+	}
+	read_unlock(&fcoe_hostlist_lock);
+	return NULL;
+}
+
+/**
+ * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
+ * @netdev: ptr to net_device
+ *
+ * Returns: the corresponding fc_lport, or NULL if not found
+ */
+struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
+{
+	struct fcoe_softc *fc;
+
+	fc = fcoe_hostlist_lookup_softc(netdev);
+
+	return (fc) ? fc->ctlr.lp : NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
+
+/**
+ * fcoe_hostlist_add() - Add a lport to lports list
+ * @lp: ptr to the fc_lport to be added
+ *
+ * Returns: 0 for success
+ */
+int fcoe_hostlist_add(const struct fc_lport *lp)
+{
+	struct fcoe_softc *fc;
+
+	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
+	if (!fc) {
+		fc = lport_priv(lp);
+		write_lock_bh(&fcoe_hostlist_lock);
+		list_add_tail(&fc->list, &fcoe_hostlist);
+		write_unlock_bh(&fcoe_hostlist_lock);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
+
+/**
+ * fcoe_hostlist_remove() - remove a lport from lports list
+ * @lp: ptr to the fc_lport to be removed
+ *
+ * Returns: 0 for success
+ */
+int fcoe_hostlist_remove(const struct fc_lport *lp)
+{
+	struct fcoe_softc *fc;
+
+	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
+	BUG_ON(!fc);
+	write_lock_bh(&fcoe_hostlist_lock);
+	list_del(&fc->list);
+	write_unlock_bh(&fcoe_hostlist_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
+
+/**
+ * fcoe_init() - fcoe module loading initialization
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int __init fcoe_init(void)
+{
+	unsigned int cpu;
+	int rc = 0;
+	struct fcoe_percpu_s *p;
+
+	INIT_LIST_HEAD(&fcoe_hostlist);
+	rwlock_init(&fcoe_hostlist_lock);
+
+	for_each_possible_cpu(cpu) {
+		p = &per_cpu(fcoe_percpu, cpu);
+		skb_queue_head_init(&p->fcoe_rx_list);
+	}
+
+	for_each_online_cpu(cpu)
+		fcoe_percpu_thread_create(cpu);
+
+	/* Initialize per CPU interrupt thread */
+	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
+	if (rc)
+		goto out_free;
+
+	/* Setup link change notification */
+	fcoe_dev_setup();
+
+	setup_timer(&fcoe_timer, fcoe_watchdog, 0);
+
+	mod_timer(&fcoe_timer, jiffies + (10 * HZ));
+
+	fcoe_if_init();
+
+	return 0;
+
+out_free:
+	for_each_online_cpu(cpu) {
+		fcoe_percpu_thread_destroy(cpu);
+	}
+
+	return rc;
+}
+module_init(fcoe_init);
+
+/**
+ * fcoe_exit() - fcoe module unloading cleanup
+ *
+ * Returns: none
+ */
+static void __exit fcoe_exit(void)
+{
+	unsigned int cpu;
+	struct fcoe_softc *fc, *tmp;
+
+	fcoe_dev_cleanup();
+
+	/* Stop the timer */
+	del_timer_sync(&fcoe_timer);
+
+	/* releases the associated fcoe hosts */
+	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
+		fcoe_if_destroy(fc->real_dev);
+
+	unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+
+	for_each_online_cpu(cpu) {
+		fcoe_percpu_thread_destroy(cpu);
+	}
+
+	/* detach from scsi transport */
+	fcoe_if_exit();
+}
+module_exit(fcoe_exit);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
new file mode 100644
index 0000000..a16a59d
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FCOE_H_
+#define _FCOE_H_
+
+#include <linux/skbuff.h>
+#include <linux/kthread.h>
+
+#define FCOE_MAX_QUEUE_DEPTH	256
+#define FCOE_LOW_QUEUE_DEPTH	32
+
+#define FCOE_WORD_TO_BYTE	4
+
+#define FCOE_VERSION	"0.1"
+#define FCOE_NAME	"fcoe"
+#define FCOE_VENDOR	"Open-FCoE.org"
+
+#define FCOE_MAX_LUN		255
+#define FCOE_MAX_FCP_TARGET	256
+
+#define FCOE_MAX_OUTSTANDING_COMMANDS	1024
+
+#define FCOE_MIN_XID		0x0001	/* the min xid supported by fcoe_sw */
+#define FCOE_MAX_XID		0x07ef	/* the max xid supported by fcoe_sw */
+
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+#define FCOE_LOGGING        0x01 /* General logging, not categorized */
+#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
+
+#define FCOE_CHECK_LOGGING(LEVEL, CMD)					\
+do {                                                            	\
+	if (unlikely(fcoe_debug_logging & LEVEL))			\
+		do {							\
+			CMD;						\
+		} while (0);						\
+} while (0);
+
+#define FCOE_DBG(fmt, args...)						\
+	FCOE_CHECK_LOGGING(FCOE_LOGGING,				\
+			   printk(KERN_INFO "fcoe: " fmt, ##args);)
+
+#define FCOE_NETDEV_DBG(netdev, fmt, args...)			\
+	FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING,			\
+			   printk(KERN_INFO "fcoe: %s: " fmt,	\
+				  netdev->name, ##args);)
+
+/*
+ * per-CPU private data for fcoe
+ */
+struct fcoe_percpu_s {
+	struct task_struct *thread;
+	struct sk_buff_head fcoe_rx_list;
+	struct page *crc_eof_page;
+	int crc_eof_offset;
+};
+
+/*
+ * the fcoe sw transport private data
+ */
+struct fcoe_softc {
+	struct list_head list;
+	struct net_device *real_dev;
+	struct net_device *phys_dev;		/* device with ethtool_ops */
+	struct packet_type  fcoe_packet_type;
+	struct packet_type  fip_packet_type;
+	struct sk_buff_head fcoe_pending_queue;
+	u8	fcoe_pending_queue_active;
+	struct fcoe_ctlr ctlr;
+};
+
+#define fcoe_from_ctlr(fc) container_of(fc, struct fcoe_softc, ctlr)
+
+static inline struct net_device *fcoe_netdev(
+	const struct fc_lport *lp)
+{
+	return ((struct fcoe_softc *)lport_priv(lp))->real_dev;
+}
+
+#endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
deleted file mode 100644
index aaf2326..0000000
--- a/drivers/scsi/fcoe/fcoe_sw.c
+++ /dev/null
@@ -1,474 +0,0 @@
-/*
- * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Maintained at www.Open-FCoE.org
- */
-
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-
-#include <scsi/fc/fc_els.h>
-#include <scsi/fc/fc_encaps.h>
-#include <scsi/fc/fc_fs.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_transport.h>
-#include <scsi/scsi_transport_fc.h>
-
-#include <scsi/libfc.h>
-#include <scsi/libfcoe.h>
-#include <scsi/fc_transport_fcoe.h>
-
-#define FCOE_SW_VERSION	"0.1"
-#define	FCOE_SW_NAME	"fcoesw"
-#define	FCOE_SW_VENDOR	"Open-FCoE.org"
-
-#define FCOE_MAX_LUN		255
-#define FCOE_MAX_FCP_TARGET	256
-
-#define FCOE_MAX_OUTSTANDING_COMMANDS	1024
-
-#define FCOE_MIN_XID		0x0001	/* the min xid supported by fcoe_sw */
-#define FCOE_MAX_XID		0x07ef	/* the max xid supported by fcoe_sw */
-
-static struct scsi_transport_template *scsi_transport_fcoe_sw;
-
-struct fc_function_template fcoe_sw_transport_function = {
-	.show_host_node_name = 1,
-	.show_host_port_name = 1,
-	.show_host_supported_classes = 1,
-	.show_host_supported_fc4s = 1,
-	.show_host_active_fc4s = 1,
-	.show_host_maxframe_size = 1,
-
-	.show_host_port_id = 1,
-	.show_host_supported_speeds = 1,
-	.get_host_speed = fc_get_host_speed,
-	.show_host_speed = 1,
-	.show_host_port_type = 1,
-	.get_host_port_state = fc_get_host_port_state,
-	.show_host_port_state = 1,
-	.show_host_symbolic_name = 1,
-
-	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
-	.show_rport_maxframe_size = 1,
-	.show_rport_supported_classes = 1,
-
-	.show_host_fabric_name = 1,
-	.show_starget_node_name = 1,
-	.show_starget_port_name = 1,
-	.show_starget_port_id = 1,
-	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
-	.show_rport_dev_loss_tmo = 1,
-	.get_fc_host_stats = fc_get_host_stats,
-	.issue_fc_host_lip = fcoe_reset,
-
-	.terminate_rport_io = fc_rport_terminate_io,
-};
-
-static struct scsi_host_template fcoe_sw_shost_template = {
-	.module = THIS_MODULE,
-	.name = "FCoE Driver",
-	.proc_name = FCOE_SW_NAME,
-	.queuecommand = fc_queuecommand,
-	.eh_abort_handler = fc_eh_abort,
-	.eh_device_reset_handler = fc_eh_device_reset,
-	.eh_host_reset_handler = fc_eh_host_reset,
-	.slave_alloc = fc_slave_alloc,
-	.change_queue_depth = fc_change_queue_depth,
-	.change_queue_type = fc_change_queue_type,
-	.this_id = -1,
-	.cmd_per_lun = 32,
-	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
-	.use_clustering = ENABLE_CLUSTERING,
-	.sg_tablesize = 4,
-	.max_sectors = 0xffff,
-};
-
-/*
- * fcoe_sw_lport_config - sets up the fc_lport
- * @lp: ptr to the fc_lport
- * @shost: ptr to the parent scsi host
- *
- * Returns: 0 for success
- *
- */
-static int fcoe_sw_lport_config(struct fc_lport *lp)
-{
-	int i = 0;
-
-	lp->link_status = 0;
-	lp->max_retry_count = 3;
-	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
-	lp->r_a_tov = 2 * 2 * 1000;
-	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
-			      FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
-
-	/*
-	 * allocate per cpu stats block
-	 */
-	for_each_online_cpu(i)
-		lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
-					   GFP_KERNEL);
-
-	/* lport fc_lport related configuration */
-	fc_lport_config(lp);
-
-	return 0;
-}
-
-/*
- * fcoe_sw_netdev_config - sets up fcoe_softc for lport and network
- * related properties
- * @lp : ptr to the fc_lport
- * @netdev : ptr to the associated netdevice struct
- *
- * Must be called after fcoe_sw_lport_config() as it will use lport mutex
- *
- * Returns : 0 for success
- *
- */
-static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
-{
-	u32 mfs;
-	u64 wwnn, wwpn;
-	struct fcoe_softc *fc;
-
-	/* Setup lport private data to point to fcoe softc */
-	fc = lport_priv(lp);
-	fc->lp = lp;
-	fc->real_dev = netdev;
-	fc->phys_dev = netdev;
-
-	/* Require support for get_pauseparam ethtool op. */
-	if (netdev->priv_flags & IFF_802_1Q_VLAN)
-		fc->phys_dev = VLAN_DEV_INFO(netdev)->real_dev;
-
-	/* Do not support for bonding device */
-	if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
-	    (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
-	    (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
-		return -EOPNOTSUPP;
-	}
-
-	/*
-	 * Determine max frame size based on underlying device and optional
-	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
-	 * will return 0, so do this first.
-	 */
-	mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
-				   sizeof(struct fcoe_crc_eof));
-	fc_set_mfs(lp, mfs);
-
-	lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
-	if (!fcoe_link_ok(lp))
-		lp->link_status |= FC_LINK_UP;
-
-	/* offload features support */
-	if (fc->real_dev->features & NETIF_F_SG)
-		lp->sg_supp = 1;
-
-
-	skb_queue_head_init(&fc->fcoe_pending_queue);
-
-	/* setup Source Mac Address */
-	memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
-	       fc->real_dev->addr_len);
-
-	wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
-	fc_set_wwnn(lp, wwnn);
-	/* XXX - 3rd arg needs to be vlan id */
-	wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
-	fc_set_wwpn(lp, wwpn);
-
-	dev_set_promiscuity(fc->real_dev, 1);
-
-	/*
-	 * setup the receive function from ethernet driver
-	 * on the ethertype for the given device
-	 */
-	fc->fcoe_packet_type.func = fcoe_rcv;
-	fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
-	fc->fcoe_packet_type.dev = fc->real_dev;
-	dev_add_pack(&fc->fcoe_packet_type);
-
-	return 0;
-}
-
-/*
- * fcoe_sw_shost_config - sets up fc_lport->host
- * @lp : ptr to the fc_lport
- * @shost : ptr to the associated scsi host
- *
- * Must be called after fcoe_sw_lport_config) and fcoe_sw_netdev_config()
- *
- * Returns : 0 for success
- *
- */
-static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost)
-{
-	int rc = 0;
-
-	/* lport scsi host config */
-	lp->host = shost;
-
-	lp->host->max_lun = FCOE_MAX_LUN;
-	lp->host->max_id = FCOE_MAX_FCP_TARGET;
-	lp->host->max_channel = 0;
-	lp->host->transportt = scsi_transport_fcoe_sw;
-
-	/* add the new host to the SCSI-ml */
-	rc = scsi_add_host(lp->host, NULL);
-	if (rc) {
-		FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
-		return rc;
-	}
-	sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
-		FCOE_SW_NAME, FCOE_SW_VERSION,
-		fcoe_netdev(lp)->name);
-
-	return 0;
-}
-
-/*
- * fcoe_sw_em_config - allocates em for this lport
- * @lp: the port that em is to allocated for
- *
- * Returns : 0 on success
- */
-static inline int fcoe_sw_em_config(struct fc_lport *lp)
-{
-	BUG_ON(lp->emp);
-
-	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
-				    FCOE_MIN_XID, FCOE_MAX_XID);
-	if (!lp->emp)
-		return -ENOMEM;
-
-	return 0;
-}
-
-/*
- * fcoe_sw_destroy - FCoE software HBA tear-down function
- * @netdev: ptr to the associated net_device
- *
- * Returns: 0 if link is OK for use by FCoE.
- */
-static int fcoe_sw_destroy(struct net_device *netdev)
-{
-	int cpu;
-	struct fc_lport *lp = NULL;
-	struct fcoe_softc *fc;
-
-	BUG_ON(!netdev);
-
-	printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n",
-	       netdev->name);
-
-	lp = fcoe_hostlist_lookup(netdev);
-	if (!lp)
-		return -ENODEV;
-
-	fc = fcoe_softc(lp);
-
-	/* Remove the instance from fcoe's list */
-	fcoe_hostlist_remove(lp);
-
-	/* Don't listen for Ethernet packets anymore */
-	dev_remove_pack(&fc->fcoe_packet_type);
-
-	/* Cleanup the fc_lport */
-	fc_lport_destroy(lp);
-	fc_fcp_destroy(lp);
-
-	/* Detach from the scsi-ml */
-	fc_remove_host(lp->host);
-	scsi_remove_host(lp->host);
-
-	/* There are no more rports or I/O, free the EM */
-	if (lp->emp)
-		fc_exch_mgr_free(lp->emp);
-
-	/* turn off promscuity */
-	dev_set_promiscuity(fc->real_dev, -1);
-
-	/* Free the per-CPU revieve threads */
-	fcoe_percpu_clean(lp);
-
-	/* Free existing skbs */
-	fcoe_clean_pending_queue(lp);
-
-	/* Free memory used by statistical counters */
-	for_each_online_cpu(cpu)
-		kfree(lp->dev_stats[cpu]);
-
-	/* Release the net_device and Scsi_Host */
-	dev_put(fc->real_dev);
-	scsi_host_put(lp->host);
-
-	return 0;
-}
-
-static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
-	.frame_send = fcoe_xmit,
-};
-
-/*
- * fcoe_sw_create - this function creates the fcoe interface
- * @netdev: pointer the associated netdevice
- *
- * Creates fc_lport struct and scsi_host for lport, configures lport
- * and starts fabric login.
- *
- * Returns : 0 on success
- */
-static int fcoe_sw_create(struct net_device *netdev)
-{
-	int rc;
-	struct fc_lport *lp = NULL;
-	struct fcoe_softc *fc;
-	struct Scsi_Host *shost;
-
-	BUG_ON(!netdev);
-
-	printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n",
-	       netdev->name);
-
-	lp = fcoe_hostlist_lookup(netdev);
-	if (lp)
-		return -EEXIST;
-
-	shost = fcoe_host_alloc(&fcoe_sw_shost_template,
-				sizeof(struct fcoe_softc));
-	if (!shost) {
-		FC_DBG("Could not allocate host structure\n");
-		return -ENOMEM;
-	}
-	lp = shost_priv(shost);
-	fc = lport_priv(lp);
-
-	/* configure fc_lport, e.g., em */
-	rc = fcoe_sw_lport_config(lp);
-	if (rc) {
-		FC_DBG("Could not configure lport\n");
-		goto out_host_put;
-	}
-
-	/* configure lport network properties */
-	rc = fcoe_sw_netdev_config(lp, netdev);
-	if (rc) {
-		FC_DBG("Could not configure netdev for lport\n");
-		goto out_host_put;
-	}
-
-	/* configure lport scsi host properties */
-	rc = fcoe_sw_shost_config(lp, shost);
-	if (rc) {
-		FC_DBG("Could not configure shost for lport\n");
-		goto out_host_put;
-	}
-
-	/* lport exch manager allocation */
-	rc = fcoe_sw_em_config(lp);
-	if (rc) {
-		FC_DBG("Could not configure em for lport\n");
-		goto out_host_put;
-	}
-
-	/* Initialize the library */
-	rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ);
-	if (rc) {
-		FC_DBG("Could not configure libfc for lport!\n");
-		goto out_lp_destroy;
-	}
-
-	/* add to lports list */
-	fcoe_hostlist_add(lp);
-
-	lp->boot_time = jiffies;
-
-	fc_fabric_login(lp);
-
-	dev_hold(netdev);
-
-	return rc;
-
-out_lp_destroy:
-	fc_exch_mgr_free(lp->emp); /* Free the EM */
-out_host_put:
-	scsi_host_put(lp->host);
-	return rc;
-}
-
-/*
- * fcoe_sw_match - the fcoe sw transport match function
- *
- * Returns : false always
- */
-static bool fcoe_sw_match(struct net_device *netdev)
-{
-	/* FIXME - for sw transport, always return false */
-	return false;
-}
-
-/* the sw hba fcoe transport */
-struct fcoe_transport fcoe_sw_transport = {
-	.name = "fcoesw",
-	.create = fcoe_sw_create,
-	.destroy = fcoe_sw_destroy,
-	.match = fcoe_sw_match,
-	.vendor = 0x0,
-	.device = 0xffff,
-};
-
-/*
- * fcoe_sw_init - registers fcoe_sw_transport
- *
- * Returns : 0 on success
- */
-int __init fcoe_sw_init(void)
-{
-	/* attach to scsi transport */
-	scsi_transport_fcoe_sw =
-		fc_attach_transport(&fcoe_sw_transport_function);
-	if (!scsi_transport_fcoe_sw) {
-		printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n");
-		return -ENODEV;
-	}
-	/* register sw transport */
-	fcoe_transport_register(&fcoe_sw_transport);
-	return 0;
-}
-
-/*
- * fcoe_sw_exit - unregisters fcoe_sw_transport
- *
- * Returns : 0 on success
- */
-int __exit fcoe_sw_exit(void)
-{
-	/* dettach the transport */
-	fc_release_transport(scsi_transport_fcoe_sw);
-	fcoe_transport_unregister(&fcoe_sw_transport);
-	return 0;
-}
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 08cd9a6..cc3f2de 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -1,5 +1,6 @@
 /*
- * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ * Copyright (c) 2008-2009 Cisco Systems, Inc.  All rights reserved.
+ * Copyright (c) 2009 Intel Corporation.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -17,1145 +18,1277 @@
  * Maintained at www.Open-FCoE.org
  */
 
+#include <linux/types.h>
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/spinlock.h>
-#include <linux/skbuff.h>
+#include <linux/timer.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
-#include <linux/kthread.h>
-#include <linux/crc32.h>
-#include <linux/cpu.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
-#include <linux/ctype.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsicam.h>
-#include <scsi/scsi_transport.h>
-#include <scsi/scsi_transport_fc.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/rtnetlink.h>
 
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fip.h>
 #include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fcoe.h>
 
 #include <scsi/libfc.h>
-#include <scsi/fc_frame.h>
 #include <scsi/libfcoe.h>
-#include <scsi/fc_transport_fcoe.h>
 
-static int debug_fcoe;
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs");
+MODULE_LICENSE("GPL v2");
+
+#define	FCOE_CTLR_MIN_FKA	500		/* min keep alive (ms) */
+#define	FCOE_CTLR_DEF_FKA	FIP_DEF_FKA	/* default keep alive (ms) */
 
-#define FCOE_MAX_QUEUE_DEPTH  256
+static void fcoe_ctlr_timeout(unsigned long);
+static void fcoe_ctlr_link_work(struct work_struct *);
+static void fcoe_ctlr_recv_work(struct work_struct *);
 
-/* destination address mode */
-#define FCOE_GW_ADDR_MODE	    0x00
-#define FCOE_FCOUI_ADDR_MODE	    0x01
+static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
 
-#define FCOE_WORD_TO_BYTE  4
+unsigned int libfcoe_debug_logging;
+module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
 
-MODULE_AUTHOR("Open-FCoE.org");
-MODULE_DESCRIPTION("FCoE");
-MODULE_LICENSE("GPL");
-
-/* fcoe host list */
-LIST_HEAD(fcoe_hostlist);
-DEFINE_RWLOCK(fcoe_hostlist_lock);
-DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
-struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
-
-
-/* Function Prototyes */
-static int fcoe_check_wait_queue(struct fc_lport *);
-static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
-static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
-static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
-#ifdef CONFIG_HOTPLUG_CPU
-static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
-#endif /* CONFIG_HOTPLUG_CPU */
-static int fcoe_device_notification(struct notifier_block *, ulong, void *);
-static void fcoe_dev_setup(void);
-static void fcoe_dev_cleanup(void);
-
-/* notification function from net device */
-static struct notifier_block fcoe_notifier = {
-	.notifier_call = fcoe_device_notification,
-};
-
-
-#ifdef CONFIG_HOTPLUG_CPU
-static struct notifier_block fcoe_cpu_notifier = {
-	.notifier_call = fcoe_cpu_callback,
-};
+#define LIBFCOE_LOGGING     0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD)				\
+do {                                                            	\
+	if (unlikely(libfcoe_debug_logging & LEVEL))			\
+		do {							\
+			CMD;						\
+		} while (0);						\
+} while (0);
+
+#define LIBFCOE_DBG(fmt, args...)					\
+	LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING,				\
+			      printk(KERN_INFO "libfcoe: " fmt, ##args);)
+
+#define LIBFCOE_FIP_DBG(fmt, args...)					\
+	LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING,			\
+			      printk(KERN_INFO "fip: " fmt, ##args);)
+
+/*
+ * Return non-zero if FCF fcoe_size has been validated.
+ */
+static inline int fcoe_ctlr_mtu_valid(const struct fcoe_fcf *fcf)
+{
+	return (fcf->flags & FIP_FL_SOL) != 0;
+}
+
+/*
+ * Return non-zero if the FCF is usable.
+ */
+static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf)
+{
+	u16 flags = FIP_FL_SOL | FIP_FL_AVAIL;
+
+	return (fcf->flags & flags) == flags;
+}
 
 /**
- * fcoe_create_percpu_data - creates the associated cpu data
- * @cpu: index for the cpu where fcoe cpu data will be created
- *
- * create percpu stats block, from cpu add notifier
- *
- * Returns: none
- **/
-static void fcoe_create_percpu_data(int cpu)
+ * fcoe_ctlr_init() - Initialize the FCoE Controller instance.
+ * @fip:	FCoE controller.
+ */
+void fcoe_ctlr_init(struct fcoe_ctlr *fip)
 {
-	struct fc_lport *lp;
-	struct fcoe_softc *fc;
-
-	write_lock_bh(&fcoe_hostlist_lock);
-	list_for_each_entry(fc, &fcoe_hostlist, list) {
-		lp = fc->lp;
-		if (lp->dev_stats[cpu] == NULL)
-			lp->dev_stats[cpu] =
-				kzalloc(sizeof(struct fcoe_dev_stats),
-					GFP_KERNEL);
-	}
-	write_unlock_bh(&fcoe_hostlist_lock);
+	fip->state = FIP_ST_LINK_WAIT;
+	INIT_LIST_HEAD(&fip->fcfs);
+	spin_lock_init(&fip->lock);
+	fip->flogi_oxid = FC_XID_UNKNOWN;
+	setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
+	INIT_WORK(&fip->link_work, fcoe_ctlr_link_work);
+	INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
+	skb_queue_head_init(&fip->fip_recv_list);
 }
+EXPORT_SYMBOL(fcoe_ctlr_init);
 
 /**
- * fcoe_destroy_percpu_data - destroys the associated cpu data
- * @cpu: index for the cpu where fcoe cpu data will destroyed
- *
- * destroy percpu stats block called by cpu add/remove notifier
+ * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller.
+ * @fip:	FCoE controller.
  *
- * Retuns: none
- **/
-static void fcoe_destroy_percpu_data(int cpu)
+ * Called with &fcoe_ctlr lock held.
+ */
+static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
 {
-	struct fc_lport *lp;
-	struct fcoe_softc *fc;
+	struct fcoe_fcf *fcf;
+	struct fcoe_fcf *next;
 
-	write_lock_bh(&fcoe_hostlist_lock);
-	list_for_each_entry(fc, &fcoe_hostlist, list) {
-		lp = fc->lp;
-		kfree(lp->dev_stats[cpu]);
-		lp->dev_stats[cpu] = NULL;
+	fip->sel_fcf = NULL;
+	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+		list_del(&fcf->list);
+		kfree(fcf);
 	}
-	write_unlock_bh(&fcoe_hostlist_lock);
+	fip->fcf_count = 0;
+	fip->sel_time = 0;
 }
 
 /**
- * fcoe_cpu_callback - fcoe cpu hotplug event callback
- * @nfb: callback data block
- * @action: event triggering the callback
- * @hcpu: index for the cpu of this event
+ * fcoe_ctlr_destroy() - Disable and tear-down the FCoE controller.
+ * @fip:	FCoE controller.
+ *
+ * This is called by FCoE drivers before freeing the &fcoe_ctlr.
  *
- * this creates or destroys per cpu data for fcoe
+ * The receive handler will have been deleted before this to guarantee
+ * that no more recv_work will be scheduled.
  *
- * Returns NOTIFY_OK always.
- **/
-static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
-			     void *hcpu)
+ * The timer routine will simply return once we set FIP_ST_DISABLED.
+ * This guarantees that no further timeouts or work will be scheduled.
+ */
+void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-		fcoe_create_percpu_data(cpu);
-		break;
-	case CPU_DEAD:
-		fcoe_destroy_percpu_data(cpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
+	flush_work(&fip->recv_work);
+	spin_lock_bh(&fip->lock);
+	fip->state = FIP_ST_DISABLED;
+	fcoe_ctlr_reset_fcfs(fip);
+	spin_unlock_bh(&fip->lock);
+	del_timer_sync(&fip->timer);
+	flush_work(&fip->link_work);
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+EXPORT_SYMBOL(fcoe_ctlr_destroy);
 
 /**
- * foce_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ
- * @skb: the receive skb
- * @dev: associated net device
- * @ptype: context
- * @odldev: last device
+ * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port.
+ * @fip:	FCoE controller.
  *
- * this function will receive the packet and build fc frame and pass it up
- *
- * Returns: 0 for success
- **/
-int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
-	     struct packet_type *ptype, struct net_device *olddev)
+ * Returns the maximum packet size including the FCoE header and trailer,
+ * but not including any Ethernet or VLAN headers.
+ */
+static inline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip)
 {
-	struct fc_lport *lp;
-	struct fcoe_rcv_info *fr;
-	struct fcoe_softc *fc;
-	struct fcoe_dev_stats *stats;
-	u8 *data;
-	struct fc_frame_header *fh;
-	unsigned short oxid;
-	int cpu_idx;
-	struct fcoe_percpu_s *fps;
-
-	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
-	lp = fc->lp;
-	if (unlikely(lp == NULL)) {
-		FC_DBG("cannot find hba structure");
-		goto err2;
-	}
+	/*
+	 * Determine the max FCoE frame size allowed, including
+	 * FCoE header and trailer.
+	 * Note:  lp->mfs is currently the payload size, not the frame size.
+	 */
+	return fip->lp->mfs + sizeof(struct fc_frame_header) +
+		sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof);
+}
 
-	if (unlikely(debug_fcoe)) {
-		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
-		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
-		       skb->head, skb->data, skb_tail_pointer(skb),
-		       skb_end_pointer(skb), skb->csum,
-		       skb->dev ? skb->dev->name : "<NULL>");
+/**
+ * fcoe_ctlr_solicit() - Send a solicitation.
+ * @fip:	FCoE controller.
+ * @fcf:	Destination FCF.  If NULL, a multicast solicitation is sent.
+ */
+static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
+{
+	struct sk_buff *skb;
+	struct fip_sol {
+		struct ethhdr eth;
+		struct fip_header fip;
+		struct {
+			struct fip_mac_desc mac;
+			struct fip_wwn_desc wwnn;
+			struct fip_size_desc size;
+		} __attribute__((packed)) desc;
+	}  __attribute__((packed)) *sol;
+	u32 fcoe_size;
+
+	skb = dev_alloc_skb(sizeof(*sol));
+	if (!skb)
+		return;
 
-	}
+	sol = (struct fip_sol *)skb->data;
 
-	/* check for FCOE packet type */
-	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
-		FC_DBG("wrong FC type frame");
-		goto err;
-	}
-	data = skb->data;
-	data += sizeof(struct fcoe_hdr);
-	fh = (struct fc_frame_header *)data;
-	oxid = ntohs(fh->fh_ox_id);
-
-	fr = fcoe_dev_from_skb(skb);
-	fr->fr_dev = lp;
-	fr->ptype = ptype;
-	cpu_idx = 0;
-#ifdef CONFIG_SMP
-	/*
-	 * The exchange ID are ANDed with num of online CPUs,
-	 * so that will have the least lock contention in
-	 * handling the exchange. if there is no thread
-	 * for a given idx then use first online cpu.
-	 */
-	cpu_idx = oxid & (num_online_cpus() >> 1);
-	if (fcoe_percpu[cpu_idx] == NULL)
-		cpu_idx = first_cpu(cpu_online_map);
-#endif
-	fps = fcoe_percpu[cpu_idx];
+	memset(sol, 0, sizeof(*sol));
+	memcpy(sol->eth.h_dest, fcf ? fcf->fcf_mac : fcoe_all_fcfs, ETH_ALEN);
+	memcpy(sol->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+	sol->eth.h_proto = htons(ETH_P_FIP);
 
-	spin_lock_bh(&fps->fcoe_rx_list.lock);
-	__skb_queue_tail(&fps->fcoe_rx_list, skb);
-	if (fps->fcoe_rx_list.qlen == 1)
-		wake_up_process(fps->thread);
+	sol->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+	sol->fip.fip_op = htons(FIP_OP_DISC);
+	sol->fip.fip_subcode = FIP_SC_SOL;
+	sol->fip.fip_dl_len = htons(sizeof(sol->desc) / FIP_BPW);
+	sol->fip.fip_flags = htons(FIP_FL_FPMA);
 
-	spin_unlock_bh(&fps->fcoe_rx_list.lock);
+	sol->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
+	sol->desc.mac.fd_desc.fip_dlen = sizeof(sol->desc.mac) / FIP_BPW;
+	memcpy(sol->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
 
-	return 0;
-err:
-#ifdef CONFIG_SMP
-	stats = lp->dev_stats[smp_processor_id()];
-#else
-	stats = lp->dev_stats[0];
-#endif
-	if (stats)
-		stats->ErrorFrames++;
-
-err2:
-	kfree_skb(skb);
-	return -1;
+	sol->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+	sol->desc.wwnn.fd_desc.fip_dlen = sizeof(sol->desc.wwnn) / FIP_BPW;
+	put_unaligned_be64(fip->lp->wwnn, &sol->desc.wwnn.fd_wwn);
+
+	fcoe_size = fcoe_ctlr_fcoe_size(fip);
+	sol->desc.size.fd_desc.fip_dtype = FIP_DT_FCOE_SIZE;
+	sol->desc.size.fd_desc.fip_dlen = sizeof(sol->desc.size) / FIP_BPW;
+	sol->desc.size.fd_size = htons(fcoe_size);
+
+	skb_put(skb, sizeof(*sol));
+	skb->protocol = htons(ETH_P_802_3);
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	fip->send(fip, skb);
+
+	if (!fcf)
+		fip->sol_time = jiffies;
 }
-EXPORT_SYMBOL_GPL(fcoe_rcv);
 
 /**
- * fcoe_start_io - pass to netdev to start xmit for fcoe
- * @skb: the skb to be xmitted
+ * fcoe_ctlr_link_up() - Start FCoE controller.
+ * @fip:	FCoE controller.
  *
- * Returns: 0 for success
- **/
-static inline int fcoe_start_io(struct sk_buff *skb)
+ * Called from the LLD when the network link is ready.
+ */
+void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
 {
-	int rc;
-
-	skb_get(skb);
-	rc = dev_queue_xmit(skb);
-	if (rc != 0)
-		return rc;
-	kfree_skb(skb);
-	return 0;
+	spin_lock_bh(&fip->lock);
+	if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) {
+		fip->last_link = 1;
+		fip->link = 1;
+		spin_unlock_bh(&fip->lock);
+		fc_linkup(fip->lp);
+	} else if (fip->state == FIP_ST_LINK_WAIT) {
+		fip->state = FIP_ST_AUTO;
+		fip->last_link = 1;
+		fip->link = 1;
+		spin_unlock_bh(&fip->lock);
+		LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n");
+		fc_linkup(fip->lp);
+		fcoe_ctlr_solicit(fip, NULL);
+	} else
+		spin_unlock_bh(&fip->lock);
 }
+EXPORT_SYMBOL(fcoe_ctlr_link_up);
 
 /**
- * fcoe_get_paged_crc_eof - in case we need alloc a page for crc_eof
- * @skb: the skb to be xmitted
- * @tlen: total len
+ * fcoe_ctlr_reset() - Reset FIP.
+ * @fip:	FCoE controller.
+ * @new_state:	FIP state to be entered.
  *
- * Returns: 0 for success
- **/
-static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+ * Returns non-zero if the link was up and now isn't.
+ */
+static int fcoe_ctlr_reset(struct fcoe_ctlr *fip, enum fip_state new_state)
 {
-	struct fcoe_percpu_s *fps;
-	struct page *page;
-	int cpu_idx;
-
-	cpu_idx = get_cpu();
-	fps = fcoe_percpu[cpu_idx];
-	page = fps->crc_eof_page;
-	if (!page) {
-		page = alloc_page(GFP_ATOMIC);
-		if (!page) {
-			put_cpu();
-			return -ENOMEM;
-		}
-		fps->crc_eof_page = page;
-		WARN_ON(fps->crc_eof_offset != 0);
-	}
-
-	get_page(page);
-	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
-			   fps->crc_eof_offset, tlen);
-	skb->len += tlen;
-	skb->data_len += tlen;
-	skb->truesize += tlen;
-	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
-
-	if (fps->crc_eof_offset >= PAGE_SIZE) {
-		fps->crc_eof_page = NULL;
-		fps->crc_eof_offset = 0;
-		put_page(page);
+	struct fc_lport *lp = fip->lp;
+	int link_dropped;
+
+	spin_lock_bh(&fip->lock);
+	fcoe_ctlr_reset_fcfs(fip);
+	del_timer(&fip->timer);
+	fip->state = new_state;
+	fip->ctlr_ka_time = 0;
+	fip->port_ka_time = 0;
+	fip->sol_time = 0;
+	fip->flogi_oxid = FC_XID_UNKNOWN;
+	fip->map_dest = 0;
+	fip->last_link = 0;
+	link_dropped = fip->link;
+	fip->link = 0;
+	spin_unlock_bh(&fip->lock);
+
+	if (link_dropped)
+		fc_linkdown(lp);
+
+	if (new_state == FIP_ST_ENABLED) {
+		fcoe_ctlr_solicit(fip, NULL);
+		fc_linkup(lp);
+		link_dropped = 0;
 	}
-	put_cpu();
-	return 0;
+	return link_dropped;
 }
 
 /**
- * fcoe_fc_crc - calculates FC CRC in this fcoe skb
- * @fp: the fc_frame containg data to be checksummed
+ * fcoe_ctlr_link_down() - Stop FCoE controller.
+ * @fip:	FCoE controller.
  *
- * This uses crc32() to calculate the crc for fc frame
- * Return   : 32 bit crc
+ * Returns non-zero if the link was up and now isn't.
  *
- **/
-u32 fcoe_fc_crc(struct fc_frame *fp)
+ * Called from the LLD when the network link is not ready.
+ * There may be multiple calls while the link is down.
+ */
+int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
 {
-	struct sk_buff *skb = fp_skb(fp);
-	struct skb_frag_struct *frag;
-	unsigned char *data;
-	unsigned long off, len, clen;
-	u32 crc;
-	unsigned i;
-
-	crc = crc32(~0, skb->data, skb_headlen(skb));
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		frag = &skb_shinfo(skb)->frags[i];
-		off = frag->page_offset;
-		len = frag->size;
-		while (len > 0) {
-			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
-			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
-					   KM_SKB_DATA_SOFTIRQ);
-			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
-			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
-			off += clen;
-			len -= clen;
-		}
-	}
-	return crc;
+	return fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT);
 }
-EXPORT_SYMBOL_GPL(fcoe_fc_crc);
+EXPORT_SYMBOL(fcoe_ctlr_link_down);
 
 /**
- * fcoe_xmit - FCoE frame transmit function
- * @lp:	the associated local port
- * @fp: the fc_frame to be transmitted
+ * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF.
+ * @fip:	FCoE controller.
+ * @ports:	0 for controller keep-alive, 1 for port keep-alive.
+ * @sa:		source MAC address.
  *
- * Return   : 0 for success
+ * A controller keep-alive is sent every fka_period (typically 8 seconds).
+ * The source MAC is the native MAC address.
  *
- **/
-int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
+ * A port keep-alive is sent every 90 seconds while logged in.
+ * The source MAC is the assigned mapped source address.
+ * The destination is the FCF's F-port.
+ */
+static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
 {
-	int wlen, rc = 0;
-	u32 crc;
-	struct ethhdr *eh;
-	struct fcoe_crc_eof *cp;
 	struct sk_buff *skb;
-	struct fcoe_dev_stats *stats;
-	struct fc_frame_header *fh;
-	unsigned int hlen;		/* header length implies the version */
-	unsigned int tlen;		/* trailer length */
-	unsigned int elen;		/* eth header, may include vlan */
-	int flogi_in_progress = 0;
-	struct fcoe_softc *fc;
-	u8 sof, eof;
-	struct fcoe_hdr *hp;
-
-	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
-
-	fc = fcoe_softc(lp);
-	/*
-	 * if it is a flogi then we need to learn gw-addr
-	 * and my own fcid
-	 */
-	fh = fc_frame_header_get(fp);
-	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
-		if (fc_frame_payload_op(fp) == ELS_FLOGI) {
-			fc->flogi_oxid = ntohs(fh->fh_ox_id);
-			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
-			fc->flogi_progress = 1;
-			flogi_in_progress = 1;
-		} else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
-			/*
-			 * Here we must've gotten an SID by accepting an FLOGI
-			 * from a point-to-point connection.  Switch to using
-			 * the source mac based on the SID.  The destination
-			 * MAC in this case would have been set by receving the
-			 * FLOGI.
-			 */
-			fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
-			fc->flogi_progress = 0;
-		}
-	}
+	struct fip_kal {
+		struct ethhdr eth;
+		struct fip_header fip;
+		struct fip_mac_desc mac;
+	} __attribute__((packed)) *kal;
+	struct fip_vn_desc *vn;
+	u32 len;
+	struct fc_lport *lp;
+	struct fcoe_fcf *fcf;
 
-	skb = fp_skb(fp);
-	sof = fr_sof(fp);
-	eof = fr_eof(fp);
-
-	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
-		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
-	hlen = sizeof(struct fcoe_hdr);
-	tlen = sizeof(struct fcoe_crc_eof);
-	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
-
-	/* crc offload */
-	skb->ip_summed = CHECKSUM_NONE;
-	crc = fcoe_fc_crc(fp);
-
-	/* copy fc crc and eof to the skb buff */
-	if (skb_is_nonlinear(skb)) {
-		skb_frag_t *frag;
-		if (fcoe_get_paged_crc_eof(skb, tlen)) {
-			kfree(skb);
-			return -ENOMEM;
-		}
-		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
-			+ frag->page_offset;
-	} else {
-		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
-	}
+	fcf = fip->sel_fcf;
+	lp = fip->lp;
+	if (!fcf || !fc_host_port_id(lp->host))
+		return;
 
-	memset(cp, 0, sizeof(*cp));
-	cp->fcoe_eof = eof;
-	cp->fcoe_crc32 = cpu_to_le32(~crc);
+	len = fcoe_ctlr_fcoe_size(fip) + sizeof(struct ethhdr);
+	BUG_ON(len < sizeof(*kal) + sizeof(*vn));
+	skb = dev_alloc_skb(len);
+	if (!skb)
+		return;
 
-	if (skb_is_nonlinear(skb)) {
-		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
-		cp = NULL;
+	kal = (struct fip_kal *)skb->data;
+	memset(kal, 0, len);
+	memcpy(kal->eth.h_dest, fcf->fcf_mac, ETH_ALEN);
+	memcpy(kal->eth.h_source, sa, ETH_ALEN);
+	kal->eth.h_proto = htons(ETH_P_FIP);
+
+	kal->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+	kal->fip.fip_op = htons(FIP_OP_CTRL);
+	kal->fip.fip_subcode = FIP_SC_KEEP_ALIVE;
+	kal->fip.fip_dl_len = htons((sizeof(kal->mac) +
+				    ports * sizeof(*vn)) / FIP_BPW);
+	kal->fip.fip_flags = htons(FIP_FL_FPMA);
+
+	kal->mac.fd_desc.fip_dtype = FIP_DT_MAC;
+	kal->mac.fd_desc.fip_dlen = sizeof(kal->mac) / FIP_BPW;
+	memcpy(kal->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+	if (ports) {
+		vn = (struct fip_vn_desc *)(kal + 1);
+		vn->fd_desc.fip_dtype = FIP_DT_VN_ID;
+		vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW;
+		memcpy(vn->fd_mac, fip->data_src_addr, ETH_ALEN);
+		hton24(vn->fd_fc_id, fc_host_port_id(lp->host));
+		put_unaligned_be64(lp->wwpn, &vn->fd_wwpn);
 	}
 
-	/* adjust skb netowrk/transport offsets to match mac/fcoe/fc */
-	skb_push(skb, elen + hlen);
+	skb_put(skb, len);
+	skb->protocol = htons(ETH_P_802_3);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
-	skb->mac_len = elen;
-	skb->protocol = htons(ETH_P_802_3);
-	skb->dev = fc->real_dev;
-
-	/* fill up mac and fcoe headers */
-	eh = eth_hdr(skb);
-	eh->h_proto = htons(ETH_P_FCOE);
-	if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
-		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
-	else
-		/* insert GW address */
-		memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
-
-	if (unlikely(flogi_in_progress))
-		memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
-	else
-		memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
-
-	hp = (struct fcoe_hdr *)(eh + 1);
-	memset(hp, 0, sizeof(*hp));
-	if (FC_FCOE_VER)
-		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
-	hp->fcoe_sof = sof;
-
-	/* update tx stats: regardless if LLD fails */
-	stats = lp->dev_stats[smp_processor_id()];
-	if (stats) {
-		stats->TxFrames++;
-		stats->TxWords += wlen;
-	}
-
-	/* send down to lld */
-	fr_dev(fp) = lp;
-	if (fc->fcoe_pending_queue.qlen)
-		rc = fcoe_check_wait_queue(lp);
-
-	if (rc == 0)
-		rc = fcoe_start_io(skb);
-
-	if (rc) {
-		fcoe_insert_wait_queue(lp, skb);
-		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-			fc_pause(lp);
-	}
-
-	return 0;
+	fip->send(fip, skb);
 }
-EXPORT_SYMBOL_GPL(fcoe_xmit);
 
-/*
- * fcoe_percpu_receive_thread - recv thread per cpu
- * @arg: ptr to the fcoe per cpu struct
+/**
+ * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it.
+ * @fip:	FCoE controller.
+ * @dtype:	FIP descriptor type for the frame.
+ * @skb:	FCoE ELS frame including FC header but no FCoE headers.
+ *
+ * Returns non-zero error code on failure.
  *
- * Return: 0 for success
+ * The caller must check that the length is a multiple of 4.
  *
+ * The @skb must have enough headroom (28 bytes) and tailroom (8 bytes).
+ * Headroom includes the FIP encapsulation description, FIP header, and
+ * Ethernet header.  The tailroom is for the FIP MAC descriptor.
  */
-int fcoe_percpu_receive_thread(void *arg)
+static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
+			    u8 dtype, struct sk_buff *skb)
 {
-	struct fcoe_percpu_s *p = arg;
-	u32 fr_len;
-	unsigned int hlen;
-	unsigned int tlen;
-	struct fc_lport *lp;
-	struct fcoe_rcv_info *fr;
-	struct fcoe_dev_stats *stats;
-	struct fc_frame_header *fh;
-	struct sk_buff *skb;
-	struct fcoe_crc_eof *cp;
-	struct fc_frame *fp;
-	u8 *mac = NULL;
-	struct fcoe_softc *fc;
-	struct fcoe_hdr *hp;
-
-	set_user_nice(current, 19);
-
-	while (!kthread_should_stop()) {
-
-		spin_lock_bh(&p->fcoe_rx_list.lock);
-		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_bh(&p->fcoe_rx_list.lock);
-			schedule();
-			set_current_state(TASK_RUNNING);
-			if (kthread_should_stop())
-				return 0;
-			spin_lock_bh(&p->fcoe_rx_list.lock);
-		}
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
-		fr = fcoe_dev_from_skb(skb);
-		lp = fr->fr_dev;
-		if (unlikely(lp == NULL)) {
-			FC_DBG("invalid HBA Structure");
-			kfree_skb(skb);
-			continue;
-		}
-
-		stats = lp->dev_stats[smp_processor_id()];
-
-		if (unlikely(debug_fcoe)) {
-			FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
-			       "tail:%p end:%p sum:%d dev:%s",
-			       skb->len, skb->data_len,
-			       skb->head, skb->data, skb_tail_pointer(skb),
-			       skb_end_pointer(skb), skb->csum,
-			       skb->dev ? skb->dev->name : "<NULL>");
-		}
-
-		/*
-		 * Save source MAC address before discarding header.
-		 */
-		fc = lport_priv(lp);
-		if (unlikely(fc->flogi_progress))
-			mac = eth_hdr(skb)->h_source;
+	struct fip_encaps_head {
+		struct ethhdr eth;
+		struct fip_header fip;
+		struct fip_encaps encaps;
+	} __attribute__((packed)) *cap;
+	struct fip_mac_desc *mac;
+	struct fcoe_fcf *fcf;
+	size_t dlen;
+
+	fcf = fip->sel_fcf;
+	if (!fcf)
+		return -ENODEV;
+	dlen = sizeof(struct fip_encaps) + skb->len;	/* len before push */
+	cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap));
+
+	memset(cap, 0, sizeof(*cap));
+	memcpy(cap->eth.h_dest, fcf->fcf_mac, ETH_ALEN);
+	memcpy(cap->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+	cap->eth.h_proto = htons(ETH_P_FIP);
+
+	cap->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+	cap->fip.fip_op = htons(FIP_OP_LS);
+	cap->fip.fip_subcode = FIP_SC_REQ;
+	cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW);
+	cap->fip.fip_flags = htons(FIP_FL_FPMA);
+
+	cap->encaps.fd_desc.fip_dtype = dtype;
+	cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
+
+	mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac));
+	memset(mac, 0, sizeof(*mac));	/* clear the whole descriptor, not just the pointer */
+	mac->fd_desc.fip_dtype = FIP_DT_MAC;
+	mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
+	if (dtype != ELS_FLOGI)
+		memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
 
-		if (skb_is_nonlinear(skb))
-			skb_linearize(skb);	/* not ideal */
-
-		/*
-		 * Check the header and pull it off.
-		 */
-		hlen = sizeof(struct fcoe_hdr);
-		hp = (struct fcoe_hdr *)skb->data;
-		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
-			if (stats) {
-				if (stats->ErrorFrames < 5)
-					FC_DBG("unknown FCoE version %x",
-					       FC_FCOE_DECAPS_VER(hp));
-				stats->ErrorFrames++;
-			}
-			kfree_skb(skb);
-			continue;
-		}
-		skb_pull(skb, sizeof(struct fcoe_hdr));
-		tlen = sizeof(struct fcoe_crc_eof);
-		fr_len = skb->len - tlen;
-		skb_trim(skb, fr_len);
-
-		if (unlikely(fr_len > skb->len)) {
-			if (stats) {
-				if (stats->ErrorFrames < 5)
-					FC_DBG("length error fr_len 0x%x "
-					       "skb->len 0x%x", fr_len,
-					       skb->len);
-				stats->ErrorFrames++;
-			}
-			kfree_skb(skb);
-			continue;
-		}
-		if (stats) {
-			stats->RxFrames++;
-			stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
-		}
-
-		fp = (struct fc_frame *)skb;
-		cp = (struct fcoe_crc_eof *)(skb->data + fr_len);
-		fc_frame_init(fp);
-		fr_eof(fp) = cp->fcoe_eof;
-		fr_sof(fp) = hp->fcoe_sof;
-		fr_dev(fp) = lp;
-
-		/*
-		 * We only check CRC if no offload is available and if it is
-		 * it's solicited data, in which case, the FCP layer would
-		 * check it during the copy.
-		 */
-		if (lp->crc_offload)
-			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
-		else
-			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
-
-		fh = fc_frame_header_get(fp);
-		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
-		    fh->fh_type == FC_TYPE_FCP) {
-			fc_exch_recv(lp, lp->emp, fp);
-			continue;
-		}
-		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
-			if (le32_to_cpu(cp->fcoe_crc32) !=
-			    ~crc32(~0, skb->data, fr_len)) {
-				if (debug_fcoe || stats->InvalidCRCCount < 5)
-					printk(KERN_WARNING "fcoe: dropping "
-					       "frame with CRC error\n");
-				stats->InvalidCRCCount++;
-				stats->ErrorFrames++;
-				fc_frame_free(fp);
-				continue;
-			}
-			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
-		}
-		/* non flogi and non data exchanges are handled here */
-		if (unlikely(fc->flogi_progress))
-			fcoe_recv_flogi(fc, fp, mac);
-		fc_exch_recv(lp, lp->emp, fp);
-	}
+	skb->protocol = htons(ETH_P_802_3);
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
 	return 0;
 }
 
 /**
- * fcoe_recv_flogi - flogi receive function
- * @fc: associated fcoe_softc
- * @fp: the recieved frame
- * @sa: the source address of this flogi
+ * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate.
+ * @fip:	FCoE controller.
+ * @skb:	FCoE ELS frame including FC header but no FCoE headers.
  *
- * This is responsible to parse the flogi response and sets the corresponding
- * mac address for the initiator, eitehr OUI based or GW based.
+ * Returns a non-zero error code if the frame should not be sent.
+ * Returns zero if the caller should send the frame with FCoE encapsulation.
  *
- * Returns: none
- **/
-static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
+ * The caller must check that the length is a multiple of 4.
+ * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes).
+ */
+int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
 	struct fc_frame_header *fh;
+	u16 old_xid;
 	u8 op;
 
-	fh = fc_frame_header_get(fp);
-	if (fh->fh_type != FC_TYPE_ELS)
-		return;
-	op = fc_frame_payload_op(fp);
-	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
-	    fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
-		/*
-		 * FLOGI accepted.
-		 * If the src mac addr is FC_OUI-based, then we mark the
-		 * address_mode flag to use FC_OUI-based Ethernet DA.
-		 * Otherwise we use the FCoE gateway addr
-		 */
-		if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
-			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
-		} else {
-			memcpy(fc->dest_addr, sa, ETH_ALEN);
-			fc->address_mode = FCOE_GW_ADDR_MODE;
+	if (fip->state == FIP_ST_NON_FIP)
+		return 0;
+
+	fh = (struct fc_frame_header *)skb->data;
+	op = *(u8 *)(fh + 1);
+
+	switch (op) {
+	case ELS_FLOGI:
+		old_xid = fip->flogi_oxid;
+		fip->flogi_oxid = ntohs(fh->fh_ox_id);
+		if (fip->state == FIP_ST_AUTO) {
+			if (old_xid == FC_XID_UNKNOWN)
+				fip->flogi_count = 0;
+			fip->flogi_count++;
+			if (fip->flogi_count < 3)
+				goto drop;
+			fip->map_dest = 1;
+			return 0;
 		}
-
-		fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
-		fc->flogi_progress = 0;
-	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
+		op = FIP_DT_FLOGI;
+		break;
+	case ELS_FDISC:
+		if (ntoh24(fh->fh_s_id))
+			return 0;
+		op = FIP_DT_FDISC;
+		break;
+	case ELS_LOGO:
+		if (fip->state != FIP_ST_ENABLED)
+			return 0;
+		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+			return 0;
+		op = FIP_DT_LOGO;
+		break;
+	case ELS_LS_ACC:
+		if (fip->flogi_oxid == FC_XID_UNKNOWN)
+			return 0;
+		if (!ntoh24(fh->fh_s_id))
+			return 0;
+		if (fip->state == FIP_ST_AUTO)
+			return 0;
 		/*
-		 * Save source MAC for point-to-point responses.
+		 * Here we must've gotten an SID by accepting an FLOGI
+		 * from a point-to-point connection.  Switch to using
+		 * the source mac based on the SID.  The destination
+		 * MAC in this case would have been set by receiving the
+		 * FLOGI.
 		 */
-		memcpy(fc->dest_addr, sa, ETH_ALEN);
-		fc->address_mode = FCOE_GW_ADDR_MODE;
+		fip->flogi_oxid = FC_XID_UNKNOWN;
+		fc_fcoe_set_mac(fip->data_src_addr, fh->fh_s_id);
+		return 0;
+	default:
+		if (fip->state != FIP_ST_ENABLED)
+			goto drop;
+		return 0;
 	}
+	if (fcoe_ctlr_encaps(fip, op, skb))
+		goto drop;
+	fip->send(fip, skb);
+	return -EINPROGRESS;
+drop:
+	kfree_skb(skb);
+	return -EINVAL;
 }
+EXPORT_SYMBOL(fcoe_ctlr_els_send);
 
-/**
- * fcoe_watchdog - fcoe timer callback
- * @vp:
+/**
+ * fcoe_ctlr_age_fcfs() - Remove and free any FCFs for a controller that have timed out.
+ * @fip:	FCoE controller.
+ *
+ * Called with lock held.
  *
- * This checks the pending queue length for fcoe and put fcoe to be paused state
- * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
- * fcoe_hostlist.
+ * An FCF is considered old if we have missed three advertisements.
+ * That is, there has been no valid advertisement from it for three
+ * times its keep-alive period including fuzz.
  *
- * Returns: 0 for success
- **/
-void fcoe_watchdog(ulong vp)
+ * In addition, determine the time when an FCF selection can occur.
+ */
+static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
 {
-	struct fc_lport *lp;
-	struct fcoe_softc *fc;
-	int paused = 0;
-
-	read_lock(&fcoe_hostlist_lock);
-	list_for_each_entry(fc, &fcoe_hostlist, list) {
-		lp = fc->lp;
-		if (lp) {
-			if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-				paused = 1;
-			if (fcoe_check_wait_queue(lp) <	 FCOE_MAX_QUEUE_DEPTH) {
-				if (paused)
-					fc_unpause(lp);
-			}
+	struct fcoe_fcf *fcf;
+	struct fcoe_fcf *next;
+	unsigned long sel_time = 0;
+
+	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+		if (time_after(jiffies, fcf->time + fcf->fka_period * 3 +
+			       msecs_to_jiffies(FIP_FCF_FUZZ * 3))) {
+			if (fip->sel_fcf == fcf)
+				fip->sel_fcf = NULL;
+			list_del(&fcf->list);
+			WARN_ON(!fip->fcf_count);
+			fip->fcf_count--;
+			kfree(fcf);
+		} else if (fcoe_ctlr_mtu_valid(fcf) &&
+			   (!sel_time || time_before(sel_time, fcf->time))) {
+			sel_time = fcf->time;
 		}
 	}
-	read_unlock(&fcoe_hostlist_lock);
-
-	fcoe_timer.expires = jiffies + (1 * HZ);
-	add_timer(&fcoe_timer);
+	if (sel_time) {
+		sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+		fip->sel_time = sel_time;
+		if (time_before(sel_time, fip->timer.expires))
+			mod_timer(&fip->timer, sel_time);
+	} else {
+		fip->sel_time = 0;
+	}
 }
 
-
 /**
- * fcoe_check_wait_queue - put the skb into fcoe pending xmit queue
- * @lp: the fc_port for this skb
- * @skb: the associated skb to be xmitted
- *
- * This empties the wait_queue, dequeue the head of the wait_queue queue
- * and calls fcoe_start_io() for each packet, if all skb have been
- * transmitted, return 0 if a error occurs, then restore wait_queue and
- * try again later.
+ * fcoe_ctlr_parse_adv() - Decode a FIP advertisement into a new FCF entry.
+ * @skb:	received FIP advertisement frame
+ * @fcf:	resulting FCF entry.
  *
- * The wait_queue is used when the skb transmit fails. skb will go
- * in the wait_queue which will be emptied by the time function OR
- * by the next skb transmit.
- *
- * Returns: 0 for success
- **/
-static int fcoe_check_wait_queue(struct fc_lport *lp)
+ * Returns zero if the advertisement was parsed as valid,
+ * otherwise returns a non-zero value.
+ */
+static int fcoe_ctlr_parse_adv(struct sk_buff *skb, struct fcoe_fcf *fcf)
 {
-	int rc, unpause = 0;
-	int paused = 0;
-	struct sk_buff *skb;
-	struct fcoe_softc *fc;
-
-	fc = fcoe_softc(lp);
-	spin_lock_bh(&fc->fcoe_pending_queue.lock);
-
-	/*
-	 * is this interface paused?
-	 */
-	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-		paused = 1;
-	if (fc->fcoe_pending_queue.qlen) {
-		while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
-			spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-			rc = fcoe_start_io(skb);
-			if (rc) {
-				fcoe_insert_wait_queue_head(lp, skb);
-				return rc;
+	struct fip_header *fiph;
+	struct fip_desc *desc = NULL;
+	struct fip_wwn_desc *wwn;
+	struct fip_fab_desc *fab;
+	struct fip_fka_desc *fka;
+	unsigned long t;
+	size_t rlen;
+	size_t dlen;
+
+	memset(fcf, 0, sizeof(*fcf));
+	fcf->fka_period = msecs_to_jiffies(FCOE_CTLR_DEF_FKA);
+
+	fiph = (struct fip_header *)skb->data;
+	fcf->flags = ntohs(fiph->fip_flags);
+
+	rlen = ntohs(fiph->fip_dl_len) * 4;
+	if (rlen + sizeof(*fiph) > skb->len)
+		return -EINVAL;
+
+	desc = (struct fip_desc *)(fiph + 1);
+	while (rlen > 0) {
+		dlen = desc->fip_dlen * FIP_BPW;
+		if (dlen < sizeof(*desc) || dlen > rlen)
+			return -EINVAL;
+		switch (desc->fip_dtype) {
+		case FIP_DT_PRI:
+			if (dlen != sizeof(struct fip_pri_desc))
+				goto len_err;
+			fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri;
+			break;
+		case FIP_DT_MAC:
+			if (dlen != sizeof(struct fip_mac_desc))
+				goto len_err;
+			memcpy(fcf->fcf_mac,
+			       ((struct fip_mac_desc *)desc)->fd_mac,
+			       ETH_ALEN);
+			if (!is_valid_ether_addr(fcf->fcf_mac)) {
+				LIBFCOE_FIP_DBG("Invalid MAC address "
+						"in FIP adv\n");
+				return -EINVAL;
 			}
-			spin_lock_bh(&fc->fcoe_pending_queue.lock);
+			break;
+		case FIP_DT_NAME:
+			if (dlen != sizeof(struct fip_wwn_desc))
+				goto len_err;
+			wwn = (struct fip_wwn_desc *)desc;
+			fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn);
+			break;
+		case FIP_DT_FAB:
+			if (dlen != sizeof(struct fip_fab_desc))
+				goto len_err;
+			fab = (struct fip_fab_desc *)desc;
+			fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn);
+			fcf->vfid = ntohs(fab->fd_vfid);
+			fcf->fc_map = ntoh24(fab->fd_map);
+			break;
+		case FIP_DT_FKA:
+			if (dlen != sizeof(struct fip_fka_desc))
+				goto len_err;
+			fka = (struct fip_fka_desc *)desc;
+			t = ntohl(fka->fd_fka_period);
+			if (t >= FCOE_CTLR_MIN_FKA)
+				fcf->fka_period = msecs_to_jiffies(t);
+			break;
+		case FIP_DT_MAP_OUI:
+		case FIP_DT_FCOE_SIZE:
+		case FIP_DT_FLOGI:
+		case FIP_DT_FDISC:
+		case FIP_DT_LOGO:
+		case FIP_DT_ELP:
+		default:
+			LIBFCOE_FIP_DBG("unexpected descriptor type %x "
+					"in FIP adv\n", desc->fip_dtype);
+			/* standard says ignore unknown descriptors >= 128 */
+			if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
+				return -EINVAL;
+			break;
 		}
-		if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
-			unpause = 1;
+		desc = (struct fip_desc *)((char *)desc + dlen);
+		rlen -= dlen;
 	}
-	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-	if ((unpause) && (paused))
-		fc_unpause(lp);
-	return fc->fcoe_pending_queue.qlen;
+	if (!fcf->fc_map || (fcf->fc_map & 0x10000))
+		return -EINVAL;
+	if (!fcf->switch_name || !fcf->fabric_name)
+		return -EINVAL;
+	return 0;
+
+len_err:
+	LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
+			desc->fip_dtype, dlen);
+	return -EINVAL;
 }
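
Both fcoe_ctlr_parse_adv() and fcoe_ctlr_recv_els() walk the same FIP
descriptor layout: a one-byte type, a one-byte length counted in 4-byte
words (FIP_BPW), then the payload. The following is only a minimal sketch of
that walk in isolation, with a hypothetical fip_tlv_hdr type and buf/rlen
parameters; it is not code from this patch:

	#include <stddef.h>	/* size_t */

	/* Sketch only: generic walk over FIP TLV descriptors. */
	struct fip_tlv_hdr {
		unsigned char type;	/* descriptor type (FIP_DT_*) */
		unsigned char len;	/* length in 4-byte words, header included */
	};

	static int walk_fip_descriptors(const unsigned char *buf, size_t rlen)
	{
		const struct fip_tlv_hdr *d = (const void *)buf;
		size_t dlen;

		while (rlen > 0) {
			dlen = d->len * 4;		/* FIP_BPW */
			if (dlen < sizeof(*d) || dlen > rlen)
				return -1;		/* malformed descriptor */
			/* dispatch on d->type here; unknown types >= 128 are skipped */
			d = (const void *)((const char *)d + dlen);
			rlen -= dlen;
		}
		return 0;
	}
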
 
 /**
- * fcoe_insert_wait_queue_head - puts skb to fcoe pending queue head
- * @lp: the fc_port for this skb
- * @skb: the associated skb to be xmitted
- *
- * Returns: none
- **/
-static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
-					struct sk_buff *skb)
+ * fcoe_ctlr_recv_adv() - Handle an incoming advertisement.
+ * @fip:	FCoE controller.
+ * @skb:	Received FIP packet.
+ */
+static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	struct fcoe_softc *fc;
+	struct fcoe_fcf *fcf;
+	struct fcoe_fcf new;
+	struct fcoe_fcf *found;
+	unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
+	int first = 0;
+	int mtu_valid;
+
+	if (fcoe_ctlr_parse_adv(skb, &new))
+		return;
 
-	fc = fcoe_softc(lp);
-	spin_lock_bh(&fc->fcoe_pending_queue.lock);
-	__skb_queue_head(&fc->fcoe_pending_queue, skb);
-	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-}
+	spin_lock_bh(&fip->lock);
+	first = list_empty(&fip->fcfs);
+	found = NULL;
+	list_for_each_entry(fcf, &fip->fcfs, list) {
+		if (fcf->switch_name == new.switch_name &&
+		    fcf->fabric_name == new.fabric_name &&
+		    fcf->fc_map == new.fc_map &&
+		    compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
+			found = fcf;
+			break;
+		}
+	}
+	if (!found) {
+		if (fip->fcf_count >= FCOE_CTLR_FCF_LIMIT)
+			goto out;
 
-/**
- * fcoe_insert_wait_queue - put the skb into fcoe pending queue tail
- * @lp: the fc_port for this skb
- * @skb: the associated skb to be xmitted
- *
- * Returns: none
- **/
-static void fcoe_insert_wait_queue(struct fc_lport *lp,
-				   struct sk_buff *skb)
-{
-	struct fcoe_softc *fc;
+		fcf = kmalloc(sizeof(*fcf), GFP_ATOMIC);
+		if (!fcf)
+			goto out;
 
-	fc = fcoe_softc(lp);
-	spin_lock_bh(&fc->fcoe_pending_queue.lock);
-	__skb_queue_tail(&fc->fcoe_pending_queue, skb);
-	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-}
+		fip->fcf_count++;
+		memcpy(fcf, &new, sizeof(new));
+		list_add(&fcf->list, &fip->fcfs);
+	} else {
+		/*
+		 * Flags in advertisements are ignored once the FCF is
+		 * selected.  Flags in unsolicited advertisements are
+		 * ignored after a usable solicited advertisement
+		 * has been received.
+		 */
+		if (fcf == fip->sel_fcf) {
+			fip->ctlr_ka_time -= fcf->fka_period;
+			fip->ctlr_ka_time += new.fka_period;
+			if (time_before(fip->ctlr_ka_time, fip->timer.expires))
+				mod_timer(&fip->timer, fip->ctlr_ka_time);
+		} else if (!fcoe_ctlr_fcf_usable(fcf))
+			fcf->flags = new.flags;
+		fcf->fka_period = new.fka_period;
+		memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
+	}
+	mtu_valid = fcoe_ctlr_mtu_valid(fcf);
+	fcf->time = jiffies;
+	if (!found) {
+		LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n",
+				fcf->fabric_name, fcf->fc_map, mtu_valid);
+	}
 
-/**
- * fcoe_dev_setup - setup link change notification interface
- *
- **/
-static void fcoe_dev_setup(void)
-{
 	/*
-	 * here setup a interface specific wd time to
-	 * monitor the link state
+	 * If our max receive size hasn't been verified by a solicited
+	 * advertisement yet, solicit this FCF so that it sends one.
 	 */
-	register_netdevice_notifier(&fcoe_notifier);
-}
+	if (!mtu_valid)
+		fcoe_ctlr_solicit(fip, fcf);
 
-/**
- * fcoe_dev_setup - cleanup link change notification interface
- **/
-static void fcoe_dev_cleanup(void)
-{
-	unregister_netdevice_notifier(&fcoe_notifier);
+	/*
+	 * If it's been a while since we did a solicit, and this is
+	 * the first advertisement we've received, do a multicast
+	 * solicitation to gather as many advertisements as we can
+	 * before selection occurs.
+	 */
+	if (first && time_after(jiffies, fip->sol_time + sol_tov))
+		fcoe_ctlr_solicit(fip, NULL);
+
+	/*
+	 * If this is the first validated FCF, note the time and
+	 * set a timer to trigger selection.
+	 */
+	if (mtu_valid && !fip->sel_time && fcoe_ctlr_fcf_usable(fcf)) {
+		fip->sel_time = jiffies +
+				msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+		if (!timer_pending(&fip->timer) ||
+		    time_before(fip->sel_time, fip->timer.expires))
+			mod_timer(&fip->timer, fip->sel_time);
+	}
+out:
+	spin_unlock_bh(&fip->lock);
 }
 
 /**
- * fcoe_device_notification - netdev event notification callback
- * @notifier: context of the notification
- * @event: type of event
- * @ptr: fixed array for output parsed ifname
- *
- * This function is called by the ethernet driver in case of link change event
- *
- * Returns: 0 for success
- **/
-static int fcoe_device_notification(struct notifier_block *notifier,
-				    ulong event, void *ptr)
+ * fcoe_ctlr_recv_els() - Handle an incoming FIP-encapsulated ELS frame.
+ * @fip:	FCoE controller.
+ * @skb:	Received FIP packet.
+ */
+static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	struct fc_lport *lp = NULL;
-	struct net_device *real_dev = ptr;
-	struct fcoe_softc *fc;
+	struct fc_lport *lp = fip->lp;
+	struct fip_header *fiph;
+	struct fc_frame *fp;
+	struct fc_frame_header *fh = NULL;
+	struct fip_desc *desc;
+	struct fip_encaps *els;
 	struct fcoe_dev_stats *stats;
-	u16 new_status;
-	u32 mfs;
-	int rc = NOTIFY_OK;
-
-	read_lock(&fcoe_hostlist_lock);
-	list_for_each_entry(fc, &fcoe_hostlist, list) {
-		if (fc->real_dev == real_dev) {
-			lp = fc->lp;
+	enum fip_desc_type els_dtype = 0;
+	u8 els_op;
+	u8 sub;
+	u8 granted_mac[ETH_ALEN] = { 0 };
+	size_t els_len = 0;
+	size_t rlen;
+	size_t dlen;
+
+	fiph = (struct fip_header *)skb->data;
+	sub = fiph->fip_subcode;
+	if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
+		goto drop;
+
+	rlen = ntohs(fiph->fip_dl_len) * 4;
+	if (rlen + sizeof(*fiph) > skb->len)
+		goto drop;
+
+	desc = (struct fip_desc *)(fiph + 1);
+	while (rlen > 0) {
+		dlen = desc->fip_dlen * FIP_BPW;
+		if (dlen < sizeof(*desc) || dlen > rlen)
+			goto drop;
+		switch (desc->fip_dtype) {
+		case FIP_DT_MAC:
+			if (dlen != sizeof(struct fip_mac_desc))
+				goto len_err;
+			memcpy(granted_mac,
+			       ((struct fip_mac_desc *)desc)->fd_mac,
+			       ETH_ALEN);
+			if (!is_valid_ether_addr(granted_mac)) {
+				LIBFCOE_FIP_DBG("Invalid MAC address "
+						"in FIP ELS\n");
+				goto drop;
+			}
 			break;
+		case FIP_DT_FLOGI:
+		case FIP_DT_FDISC:
+		case FIP_DT_LOGO:
+		case FIP_DT_ELP:
+			if (fh)
+				goto drop;
+			if (dlen < sizeof(*els) + sizeof(*fh) + 1)
+				goto len_err;
+			els_len = dlen - sizeof(*els);
+			els = (struct fip_encaps *)desc;
+			fh = (struct fc_frame_header *)(els + 1);
+			els_dtype = desc->fip_dtype;
+			break;
+		default:
+			LIBFCOE_FIP_DBG("unexpected descriptor type %x "
+					"in FIP ELS\n", desc->fip_dtype);
+			/* standard says ignore unknown descriptors >= 128 */
+			if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
+				goto drop;
+			break;
 		}
-	}
-	read_unlock(&fcoe_hostlist_lock);
-	if (lp == NULL) {
-		rc = NOTIFY_DONE;
-		goto out;
+		desc = (struct fip_desc *)((char *)desc + dlen);
+		rlen -= dlen;
 	}
 
-	new_status = lp->link_status;
-	switch (event) {
-	case NETDEV_DOWN:
-	case NETDEV_GOING_DOWN:
-		new_status &= ~FC_LINK_UP;
-		break;
-	case NETDEV_UP:
-	case NETDEV_CHANGE:
-		new_status &= ~FC_LINK_UP;
-		if (!fcoe_link_ok(lp))
-			new_status |= FC_LINK_UP;
-		break;
-	case NETDEV_CHANGEMTU:
-		mfs = fc->real_dev->mtu -
-			(sizeof(struct fcoe_hdr) +
-			 sizeof(struct fcoe_crc_eof));
-		if (fc->user_mfs && fc->user_mfs < mfs)
-			mfs = fc->user_mfs;
-		if (mfs >= FC_MIN_MAX_FRAME)
-			fc_set_mfs(lp, mfs);
-		new_status &= ~FC_LINK_UP;
-		if (!fcoe_link_ok(lp))
-			new_status |= FC_LINK_UP;
-		break;
-	case NETDEV_REGISTER:
-		break;
-	default:
-		FC_DBG("unknown event %ld call", event);
-	}
-	if (lp->link_status != new_status) {
-		if ((new_status & FC_LINK_UP) == FC_LINK_UP)
-			fc_linkup(lp);
-		else {
-			stats = lp->dev_stats[smp_processor_id()];
-			if (stats)
-				stats->LinkFailureCount++;
-			fc_linkdown(lp);
-			fcoe_clean_pending_queue(lp);
-		}
+	if (!fh)
+		goto drop;
+	els_op = *(u8 *)(fh + 1);
+
+	if (els_dtype == FIP_DT_FLOGI && sub == FIP_SC_REP &&
+	    fip->flogi_oxid == ntohs(fh->fh_ox_id) &&
+	    els_op == ELS_LS_ACC && is_valid_ether_addr(granted_mac)) {
+		fip->flogi_oxid = FC_XID_UNKNOWN;
+		fip->update_mac(fip, fip->data_src_addr, granted_mac);
+		memcpy(fip->data_src_addr, granted_mac, ETH_ALEN);
 	}
-out:
-	return rc;
+
+	/*
+	 * Convert skb into an fc_frame containing only the ELS.
+	 */
+	skb_pull(skb, (u8 *)fh - skb->data);
+	skb_trim(skb, els_len);
+	fp = (struct fc_frame *)skb;
+	fc_frame_init(fp);
+	fr_sof(fp) = FC_SOF_I3;
+	fr_eof(fp) = FC_EOF_T;
+	fr_dev(fp) = lp;
+
+	stats = fc_lport_get_stats(lp);
+	stats->RxFrames++;
+	stats->RxWords += skb->len / FIP_BPW;
+
+	fc_exch_recv(lp, lp->emp, fp);
+	return;
+
+len_err:
+	LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
+			desc->fip_dtype, dlen);
+drop:
+	kfree_skb(skb);
 }
 
 /**
- * fcoe_if_to_netdev - parse a name buffer to get netdev
- * @ifname: fixed array for output parsed ifname
- * @buffer: incoming buffer to be copied
+ * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame.
+ * @fip:	FCoE controller.
+ * @fh:		Received FIP header.
  *
- * Returns: NULL or ptr to netdeive
- **/
-static struct net_device *fcoe_if_to_netdev(const char *buffer)
+ * There may be multiple VN_Port descriptors.
+ * The overall length has already been checked.
+ */
+static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
+				      struct fip_header *fh)
 {
-	char *cp;
-	char ifname[IFNAMSIZ + 2];
-
-	if (buffer) {
-		strlcpy(ifname, buffer, IFNAMSIZ);
-		cp = ifname + strlen(ifname);
-		while (--cp >= ifname && *cp == '\n')
-			*cp = '\0';
-		return dev_get_by_name(ifname);
+	struct fip_desc *desc;
+	struct fip_mac_desc *mp;
+	struct fip_wwn_desc *wp;
+	struct fip_vn_desc *vp;
+	size_t rlen;
+	size_t dlen;
+	struct fcoe_fcf *fcf = fip->sel_fcf;
+	struct fc_lport *lp = fip->lp;
+	u32	desc_mask;
+
+	LIBFCOE_FIP_DBG("Clear Virtual Link received\n");
+	if (!fcf || !fc_host_port_id(lp->host))
+		return;
+
+	/*
+	 * mask of required descriptors.  Validating each one clears its bit.
+	 */
+	desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME) | BIT(FIP_DT_VN_ID);
+
+	rlen = ntohs(fh->fip_dl_len) * FIP_BPW;
+	desc = (struct fip_desc *)(fh + 1);
+	while (rlen >= sizeof(*desc)) {
+		dlen = desc->fip_dlen * FIP_BPW;
+		if (dlen > rlen)
+			return;
+		switch (desc->fip_dtype) {
+		case FIP_DT_MAC:
+			mp = (struct fip_mac_desc *)desc;
+			if (dlen < sizeof(*mp))
+				return;
+			if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
+				return;
+			desc_mask &= ~BIT(FIP_DT_MAC);
+			break;
+		case FIP_DT_NAME:
+			wp = (struct fip_wwn_desc *)desc;
+			if (dlen < sizeof(*wp))
+				return;
+			if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name)
+				return;
+			desc_mask &= ~BIT(FIP_DT_NAME);
+			break;
+		case FIP_DT_VN_ID:
+			vp = (struct fip_vn_desc *)desc;
+			if (dlen < sizeof(*vp))
+				return;
+			if (compare_ether_addr(vp->fd_mac,
+			    fip->data_src_addr) == 0 &&
+			    get_unaligned_be64(&vp->fd_wwpn) == lp->wwpn &&
+			    ntoh24(vp->fd_fc_id) == fc_host_port_id(lp->host))
+				desc_mask &= ~BIT(FIP_DT_VN_ID);
+			break;
+		default:
+			/* standard says ignore unknown descriptors >= 128 */
+			if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
+				return;
+			break;
+		}
+		desc = (struct fip_desc *)((char *)desc + dlen);
+		rlen -= dlen;
+	}
+
+	/*
+	 * reset only if all required descriptors were present and valid.
+	 */
+	if (desc_mask) {
+		LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask);
+	} else {
+		LIBFCOE_FIP_DBG("performing Clear Virtual Link\n");
+		fcoe_ctlr_reset(fip, FIP_ST_ENABLED);
 	}
-	return NULL;
 }
 
 /**
- * fcoe_destroy- handles the destroy from sysfs
- * @buffer: expcted to be a eth if name
- * @kp: associated kernel param
+ * fcoe_ctlr_recv() - Receive a FIP frame.
+ * @fip:	FCoE controller.
+ * @skb:	Received FIP packet.
  *
- * Returns: 0 for success
- **/
-static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
+ * This is called from NET_RX_SOFTIRQ.
+ */
+void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	int rc;
-	struct net_device *netdev;
-
-	netdev = fcoe_if_to_netdev(buffer);
-	if (!netdev) {
-		rc = -ENODEV;
-		goto out_nodev;
-	}
-	/* look for existing lport */
-	if (!fcoe_hostlist_lookup(netdev)) {
-		rc = -ENODEV;
-		goto out_putdev;
-	}
-	/* pass to transport */
-	rc = fcoe_transport_release(netdev);
-	if (rc) {
-		printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n",
-		       netdev->name);
-		rc = -EIO;
-		goto out_putdev;
-	}
-	rc = 0;
-out_putdev:
-	dev_put(netdev);
-out_nodev:
-	return rc;
+	spin_lock_bh(&fip->fip_recv_list.lock);
+	__skb_queue_tail(&fip->fip_recv_list, skb);
+	spin_unlock_bh(&fip->fip_recv_list.lock);
+	schedule_work(&fip->recv_work);
 }
+EXPORT_SYMBOL(fcoe_ctlr_recv);
 
 /**
- * fcoe_create - handles the create call from sysfs
- * @buffer: expcted to be a eth if name
- * @kp: associated kernel param
+ * fcoe_ctlr_recv_handler() - Receive a FIP frame.
+ * @fip:	FCoE controller.
+ * @skb:	Received FIP packet.
  *
- * Returns: 0 for success
- **/
-static int fcoe_create(const char *buffer, struct kernel_param *kp)
+ * Returns non-zero if the frame is dropped.
+ */
+static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	int rc;
-	struct net_device *netdev;
-
-	netdev = fcoe_if_to_netdev(buffer);
-	if (!netdev) {
-		rc = -ENODEV;
-		goto out_nodev;
-	}
-	/* look for existing lport */
-	if (fcoe_hostlist_lookup(netdev)) {
-		rc = -EEXIST;
-		goto out_putdev;
+	struct fip_header *fiph;
+	struct ethhdr *eh;
+	enum fip_state state;
+	u16 op;
+	u8 sub;
+
+	if (skb_linearize(skb))
+		goto drop;
+	if (skb->len < sizeof(*fiph))
+		goto drop;
+	eh = eth_hdr(skb);
+	if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
+	    compare_ether_addr(eh->h_dest, FIP_ALL_ENODE_MACS))
+		goto drop;
+	fiph = (struct fip_header *)skb->data;
+	op = ntohs(fiph->fip_op);
+	sub = fiph->fip_subcode;
+
+	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
+		goto drop;
+	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
+		goto drop;
+
+	spin_lock_bh(&fip->lock);
+	state = fip->state;
+	if (state == FIP_ST_AUTO) {
+		fip->map_dest = 0;
+		fip->state = FIP_ST_ENABLED;
+		state = FIP_ST_ENABLED;
+		LIBFCOE_FIP_DBG("Using FIP mode\n");
 	}
+	spin_unlock_bh(&fip->lock);
+	if (state != FIP_ST_ENABLED)
+		goto drop;
 
-	/* pass to transport */
-	rc = fcoe_transport_attach(netdev);
-	if (rc) {
-		printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n",
-		       netdev->name);
-		rc = -EIO;
-		goto out_putdev;
+	if (op == FIP_OP_LS) {
+		fcoe_ctlr_recv_els(fip, skb);	/* consumes skb */
+		return 0;
 	}
-	rc = 0;
-out_putdev:
-	dev_put(netdev);
-out_nodev:
-	return rc;
+	if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
+		fcoe_ctlr_recv_adv(fip, skb);
+	else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
+		fcoe_ctlr_recv_clr_vlink(fip, fiph);
+	kfree_skb(skb);
+	return 0;
+drop:
+	kfree_skb(skb);
+	return -1;
 }
 
-module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
-module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, "Destroy fcoe port");
-
-/*
- * fcoe_link_ok - check if link is ok for the fc_lport
- * @lp: ptr to the fc_lport
- *
- * Any permanently-disqualifying conditions have been previously checked.
- * This also updates the speed setting, which may change with link for 100/1000.
- *
- * This function should probably be checking for PAUSE support at some point
- * in the future. Currently Per-priority-pause is not determinable using
- * ethtool, so we shouldn't be restrictive until that problem is resolved.
+/**
+ * fcoe_ctlr_select() - Select the best FCF, if possible.
+ * @fip:	FCoE controller.
  *
- * Returns: 0 if link is OK for use by FCoE.
+ * If there are conflicting advertisements, no FCF can be chosen.
  *
+ * Called with lock held.
  */
-int fcoe_link_ok(struct fc_lport *lp)
+static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
 {
-	struct fcoe_softc *fc = fcoe_softc(lp);
-	struct net_device *dev = fc->real_dev;
-	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
-	int rc = 0;
-
-	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
-		dev = fc->phys_dev;
-		if (dev->ethtool_ops->get_settings) {
-			dev->ethtool_ops->get_settings(dev, &ecmd);
-			lp->link_supported_speeds &=
-				~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
-			if (ecmd.supported & (SUPPORTED_1000baseT_Half |
-					      SUPPORTED_1000baseT_Full))
-				lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
-			if (ecmd.supported & SUPPORTED_10000baseT_Full)
-				lp->link_supported_speeds |=
-					FC_PORTSPEED_10GBIT;
-			if (ecmd.speed == SPEED_1000)
-				lp->link_speed = FC_PORTSPEED_1GBIT;
-			if (ecmd.speed == SPEED_10000)
-				lp->link_speed = FC_PORTSPEED_10GBIT;
+	struct fcoe_fcf *fcf;
+	struct fcoe_fcf *best = NULL;
+
+	list_for_each_entry(fcf, &fip->fcfs, list) {
+		LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x "
+				"val %d\n", fcf->fabric_name, fcf->vfid,
+				fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
+		if (!fcoe_ctlr_fcf_usable(fcf)) {
+			LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid "
+					"%savailable\n", fcf->fabric_name,
+					fcf->fc_map, (fcf->flags & FIP_FL_SOL)
+					? "" : "in", (fcf->flags & FIP_FL_AVAIL)
+					? "" : "un");
+			continue;
 		}
-	} else
-		rc = -1;
-
-	return rc;
-}
-EXPORT_SYMBOL_GPL(fcoe_link_ok);
-
-/*
- * fcoe_percpu_clean - frees skb of the corresponding lport from the per
- * cpu queue.
- * @lp: the fc_lport
- */
-void fcoe_percpu_clean(struct fc_lport *lp)
-{
-	int idx;
-	struct fcoe_percpu_s *pp;
-	struct fcoe_rcv_info *fr;
-	struct sk_buff_head *list;
-	struct sk_buff *skb, *next;
-	struct sk_buff *head;
-
-	for (idx = 0; idx < NR_CPUS; idx++) {
-		if (fcoe_percpu[idx]) {
-			pp = fcoe_percpu[idx];
-			spin_lock_bh(&pp->fcoe_rx_list.lock);
-			list = &pp->fcoe_rx_list;
-			head = list->next;
-			for (skb = head; skb != (struct sk_buff *)list;
-			     skb = next) {
-				next = skb->next;
-				fr = fcoe_dev_from_skb(skb);
-				if (fr->fr_dev == lp) {
-					__skb_unlink(skb, list);
-					kfree_skb(skb);
-				}
-			}
-			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+		if (!best) {
+			best = fcf;
+			continue;
+		}
+		if (fcf->fabric_name != best->fabric_name ||
+		    fcf->vfid != best->vfid ||
+		    fcf->fc_map != best->fc_map) {
+			LIBFCOE_FIP_DBG("Conflicting fabric, VFID, "
+					"or FC-MAP\n");
+			return;
 		}
+		if (fcf->pri < best->pri)
+			best = fcf;
 	}
+	fip->sel_fcf = best;
 }
-EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
 
 /**
- * fcoe_clean_pending_queue - dequeue skb and free it
- * @lp: the corresponding fc_lport
+ * fcoe_ctlr_timeout() - FIP timer function.
+ * @arg:	&fcoe_ctlr pointer.
  *
- * Returns: none
- **/
-void fcoe_clean_pending_queue(struct fc_lport *lp)
+ * Ages FCFs.  Triggers FCF selection if possible.  Sends keep-alives.
+ */
+static void fcoe_ctlr_timeout(unsigned long arg)
 {
-	struct fcoe_softc  *fc = lport_priv(lp);
-	struct sk_buff *skb;
+	struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
+	struct fcoe_fcf *sel;
+	struct fcoe_fcf *fcf;
+	unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
+	DECLARE_MAC_BUF(buf);
+	u8 send_ctlr_ka;
+	u8 send_port_ka;
+
+	spin_lock_bh(&fip->lock);
+	if (fip->state == FIP_ST_DISABLED) {
+		spin_unlock_bh(&fip->lock);
+		return;
+	}
+
+	fcf = fip->sel_fcf;
+	fcoe_ctlr_age_fcfs(fip);
+
+	sel = fip->sel_fcf;
+	if (!sel && fip->sel_time && time_after_eq(jiffies, fip->sel_time)) {
+		fcoe_ctlr_select(fip);
+		sel = fip->sel_fcf;
+		fip->sel_time = 0;
+	}
+
+	if (sel != fcf) {
+		fcf = sel;		/* the old FCF may have been freed */
+		if (sel) {
+			printk(KERN_INFO "libfcoe: host%d: FIP selected "
+			       "Fibre-Channel Forwarder MAC %s\n",
+			       fip->lp->host->host_no,
+			       print_mac(buf, sel->fcf_mac));
+			memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
+			fip->port_ka_time = jiffies +
+					    msecs_to_jiffies(FIP_VN_KA_PERIOD);
+			fip->ctlr_ka_time = jiffies + sel->fka_period;
+			fip->link = 1;
+		} else {
+			printk(KERN_NOTICE "libfcoe: host%d: "
+			       "FIP Fibre-Channel Forwarder timed out.  "
+			       "Starting FCF discovery.\n",
+			       fip->lp->host->host_no);
+			fip->link = 0;
+		}
+		schedule_work(&fip->link_work);
+	}
 
-	spin_lock_bh(&fc->fcoe_pending_queue.lock);
-	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
-		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-		kfree_skb(skb);
-		spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	send_ctlr_ka = 0;
+	send_port_ka = 0;
+	if (sel) {
+		if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
+			fip->ctlr_ka_time = jiffies + sel->fka_period;
+			send_ctlr_ka = 1;
+		}
+		if (time_after(next_timer, fip->ctlr_ka_time))
+			next_timer = fip->ctlr_ka_time;
+
+		if (time_after_eq(jiffies, fip->port_ka_time)) {
+			fip->port_ka_time = jiffies +
+					msecs_to_jiffies(FIP_VN_KA_PERIOD);
+			send_port_ka = 1;
+		}
+		if (time_after(next_timer, fip->port_ka_time))
+			next_timer = fip->port_ka_time;
+		mod_timer(&fip->timer, next_timer);
+	} else if (fip->sel_time) {
+		next_timer = fip->sel_time +
+				msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+		mod_timer(&fip->timer, next_timer);
 	}
-	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+	spin_unlock_bh(&fip->lock);
+
+	if (send_ctlr_ka)
+		fcoe_ctlr_send_keep_alive(fip, 0, fip->ctl_src_addr);
+	if (send_port_ka)
+		fcoe_ctlr_send_keep_alive(fip, 1, fip->data_src_addr);
 }
-EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
 
 /**
- * libfc_host_alloc - allocate a Scsi_Host with room for the fc_lport
- * @sht: ptr to the scsi host templ
- * @priv_size: size of private data after fc_lport
+ * fcoe_ctlr_link_work() - worker thread function for link changes.
+ * @work:	pointer to link_work member inside &fcoe_ctlr.
  *
- * Returns: ptr to Scsi_Host
- * TODO - to libfc?
+ * See if the link status has changed and if so, report it.
+ *
+ * This is here because fc_linkup() and fc_linkdown() must not
+ * be called from the timer directly, since they use a mutex.
  */
-static inline struct Scsi_Host *libfc_host_alloc(
-	struct scsi_host_template *sht, int priv_size)
+static void fcoe_ctlr_link_work(struct work_struct *work)
 {
-	return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
+	struct fcoe_ctlr *fip;
+	int link;
+	int last_link;
+
+	fip = container_of(work, struct fcoe_ctlr, link_work);
+	spin_lock_bh(&fip->lock);
+	last_link = fip->last_link;
+	link = fip->link;
+	fip->last_link = link;
+	spin_unlock_bh(&fip->lock);
+
+	if (last_link != link) {
+		if (link)
+			fc_linkup(fip->lp);
+		else
+			fcoe_ctlr_reset(fip, FIP_ST_LINK_WAIT);
+	}
 }
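
fcoe_ctlr_link_work() above illustrates a standard deferral pattern: the FIP
timer runs in atomic context, so anything that needs a mutex, such as
fc_linkup()/fc_linkdown(), is pushed to a work item. A stripped-down,
hypothetical version of that split follows; the names are illustrative and
not part of libfcoe, and timer/work setup (setup_timer()/INIT_WORK()) is
omitted:

	#include <linux/kernel.h>
	#include <linux/timer.h>
	#include <linux/workqueue.h>

	struct link_ctx {
		struct timer_list timer;
		struct work_struct work;
	};

	static void link_timer(unsigned long arg)	/* timer (atomic) context */
	{
		struct link_ctx *ctx = (struct link_ctx *)arg;

		/* no sleeping, no mutexes; hand the event to process context */
		schedule_work(&ctx->work);
	}

	static void link_work(struct work_struct *work)	/* process context */
	{
		struct link_ctx *ctx = container_of(work, struct link_ctx, work);

		/* mutexes are allowed here, e.g. fc_linkup()/fc_linkdown() */
		(void)ctx;
	}
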
 
 /**
- * fcoe_host_alloc - allocate a Scsi_Host with room for the fcoe_softc
- * @sht: ptr to the scsi host templ
- * @priv_size: size of private data after fc_lport
- *
- * Returns: ptr to Scsi_Host
+ * fcoe_ctlr_recv_work() - Worker thread function for receiving FIP frames.
+ * @recv_work:	pointer to recv_work member inside &fcoe_ctlr.
  */
-struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
+static void fcoe_ctlr_recv_work(struct work_struct *recv_work)
 {
-	return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
+	struct fcoe_ctlr *fip;
+	struct sk_buff *skb;
+
+	fip = container_of(recv_work, struct fcoe_ctlr, recv_work);
+	spin_lock_bh(&fip->fip_recv_list.lock);
+	while ((skb = __skb_dequeue(&fip->fip_recv_list))) {
+		spin_unlock_bh(&fip->fip_recv_list.lock);
+		fcoe_ctlr_recv_handler(fip, skb);
+		spin_lock_bh(&fip->fip_recv_list.lock);
+	}
+	spin_unlock_bh(&fip->fip_recv_list.lock);
 }
-EXPORT_SYMBOL_GPL(fcoe_host_alloc);
 
-/*
- * fcoe_reset - resets the fcoe
- * @shost: shost the reset is from
+/**
+ * fcoe_ctlr_recv_flogi() - snoop Pre-FIP receipt of FLOGI response or request.
+ * @fip:	FCoE controller.
+ * @fp:		FC frame.
+ * @sa:		Ethernet source MAC address from received FCoE frame.
  *
- * Returns: always 0
+ * Snoop potential response to FLOGI or even incoming FLOGI.
+ *
+ * The caller has checked that we are waiting for login as indicated
+ * by fip->flogi_oxid != FC_XID_UNKNOWN.
+ *
+ * The caller is responsible for freeing the frame.
+ *
+ * Return non-zero if the frame should not be delivered to libfc.
  */
-int fcoe_reset(struct Scsi_Host *shost)
+int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_frame *fp, u8 *sa)
 {
-	struct fc_lport *lport = shost_priv(shost);
-	fc_lport_reset(lport);
+	struct fc_frame_header *fh;
+	u8 op;
+	u8 mac[ETH_ALEN];
+
+	fh = fc_frame_header_get(fp);
+	if (fh->fh_type != FC_TYPE_ELS)
+		return 0;
+
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
+	    fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
+
+		spin_lock_bh(&fip->lock);
+		if (fip->state != FIP_ST_AUTO && fip->state != FIP_ST_NON_FIP) {
+			spin_unlock_bh(&fip->lock);
+			return -EINVAL;
+		}
+		fip->state = FIP_ST_NON_FIP;
+		LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n");
+
+		/*
+		 * FLOGI accepted.
+		 * If the src mac addr is FC_OUI-based, then we mark the
+		 * address_mode flag to use FC_OUI-based Ethernet DA.
+		 * Otherwise we use the FCoE gateway addr
+		 */
+		if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
+			fip->map_dest = 1;
+		} else {
+			memcpy(fip->dest_addr, sa, ETH_ALEN);
+			fip->map_dest = 0;
+		}
+		fip->flogi_oxid = FC_XID_UNKNOWN;
+		memcpy(mac, fip->data_src_addr, ETH_ALEN);
+		fc_fcoe_set_mac(fip->data_src_addr, fh->fh_d_id);
+		spin_unlock_bh(&fip->lock);
+
+		fip->update_mac(fip, mac, fip->data_src_addr);
+	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
+		/*
+		 * Save source MAC for point-to-point responses.
+		 */
+		spin_lock_bh(&fip->lock);
+		if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
+			memcpy(fip->dest_addr, sa, ETH_ALEN);
+			fip->map_dest = 0;
+			if (fip->state == FIP_ST_NON_FIP)
+				LIBFCOE_FIP_DBG("received FLOGI REQ, "
+						"using non-FIP mode\n");
+			fip->state = FIP_ST_NON_FIP;
+		}
+		spin_unlock_bh(&fip->lock);
+	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(fcoe_reset);
+EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
 
-/*
- * fcoe_wwn_from_mac - converts 48-bit IEEE MAC address to 64-bit FC WWN.
+/**
+ * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN.
  * @mac: mac address
  * @scheme: check port
  * @port: port indicator for converting
@@ -1194,92 +1327,14 @@ u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
 	return wwn;
 }
 EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
-/*
- * fcoe_hostlist_lookup_softc - find the corresponding lport by a given device
- * @device: this is currently ptr to net_device
- *
- * Returns: NULL or the located fcoe_softc
- */
-static struct fcoe_softc *fcoe_hostlist_lookup_softc(
-	const struct net_device *dev)
-{
-	struct fcoe_softc *fc;
-
-	read_lock(&fcoe_hostlist_lock);
-	list_for_each_entry(fc, &fcoe_hostlist, list) {
-		if (fc->real_dev == dev) {
-			read_unlock(&fcoe_hostlist_lock);
-			return fc;
-		}
-	}
-	read_unlock(&fcoe_hostlist_lock);
-	return NULL;
-}
-
-/*
- * fcoe_hostlist_lookup - find the corresponding lport by netdev
- * @netdev: ptr to net_device
- *
- * Returns: 0 for success
- */
-struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
-{
-	struct fcoe_softc *fc;
-
-	fc = fcoe_hostlist_lookup_softc(netdev);
-
-	return (fc) ? fc->lp : NULL;
-}
-EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
-
-/*
- * fcoe_hostlist_add - add a lport to lports list
- * @lp: ptr to the fc_lport to badded
- *
- * Returns: 0 for success
- */
-int fcoe_hostlist_add(const struct fc_lport *lp)
-{
-	struct fcoe_softc *fc;
-
-	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
-	if (!fc) {
-		fc = fcoe_softc(lp);
-		write_lock_bh(&fcoe_hostlist_lock);
-		list_add_tail(&fc->list, &fcoe_hostlist);
-		write_unlock_bh(&fcoe_hostlist_lock);
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
-
-/*
- * fcoe_hostlist_remove - remove a lport from lports list
- * @lp: ptr to the fc_lport to badded
- *
- * Returns: 0 for success
- */
-int fcoe_hostlist_remove(const struct fc_lport *lp)
-{
-	struct fcoe_softc *fc;
-
-	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
-	BUG_ON(!fc);
-	write_lock_bh(&fcoe_hostlist_lock);
-	list_del(&fc->list);
-	write_unlock_bh(&fcoe_hostlist_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
 
 /**
- * fcoe_libfc_config - sets up libfc related properties for lport
+ * fcoe_libfc_config() - sets up libfc related properties for lport
  * @lp: ptr to the fc_lport
  * @tt: libfc function template
  *
  * Returns : 0 for success
- **/
+ */
 int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
 {
 	/* Set the function pointers set by the LLDD */
@@ -1295,124 +1350,3 @@ int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(fcoe_libfc_config);
-
-/**
- * fcoe_init - fcoe module loading initialization
- *
- * Initialization routine
- * 1. Will create fc transport software structure
- * 2. initialize the link list of port information structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int __init fcoe_init(void)
-{
-	int cpu;
-	struct fcoe_percpu_s *p;
-
-
-	INIT_LIST_HEAD(&fcoe_hostlist);
-	rwlock_init(&fcoe_hostlist_lock);
-
-#ifdef CONFIG_HOTPLUG_CPU
-	register_cpu_notifier(&fcoe_cpu_notifier);
-#endif /* CONFIG_HOTPLUG_CPU */
-
-	/*
-	 * initialize per CPU interrupt thread
-	 */
-	for_each_online_cpu(cpu) {
-		p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
-		if (p) {
-			p->thread = kthread_create(fcoe_percpu_receive_thread,
-						   (void *)p,
-						   "fcoethread/%d", cpu);
-
-			/*
-			 * if there is no error then bind the thread to the cpu
-			 * initialize the semaphore and skb queue head
-			 */
-			if (likely(!IS_ERR(p->thread))) {
-				p->cpu = cpu;
-				fcoe_percpu[cpu] = p;
-				skb_queue_head_init(&p->fcoe_rx_list);
-				kthread_bind(p->thread, cpu);
-				wake_up_process(p->thread);
-			} else {
-				fcoe_percpu[cpu] = NULL;
-				kfree(p);
-
-			}
-		}
-	}
-
-	/*
-	 * setup link change notification
-	 */
-	fcoe_dev_setup();
-
-	init_timer(&fcoe_timer);
-	fcoe_timer.data = 0;
-	fcoe_timer.function = fcoe_watchdog;
-	fcoe_timer.expires = (jiffies + (10 * HZ));
-	add_timer(&fcoe_timer);
-
-	/* initiatlize the fcoe transport */
-	fcoe_transport_init();
-
-	fcoe_sw_init();
-
-	return 0;
-}
-module_init(fcoe_init);
-
-/**
- * fcoe_exit - fcoe module unloading cleanup
- *
- * Returns 0 on success, negative on failure
- **/
-static void __exit fcoe_exit(void)
-{
-	u32 idx;
-	struct fcoe_softc *fc, *tmp;
-	struct fcoe_percpu_s *p;
-	struct sk_buff *skb;
-
-	/*
-	 * Stop all call back interfaces
-	 */
-#ifdef CONFIG_HOTPLUG_CPU
-	unregister_cpu_notifier(&fcoe_cpu_notifier);
-#endif /* CONFIG_HOTPLUG_CPU */
-	fcoe_dev_cleanup();
-
-	/*
-	 * stop timer
-	 */
-	del_timer_sync(&fcoe_timer);
-
-	/* releases the assocaited fcoe transport for each lport */
-	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
-		fcoe_transport_release(fc->real_dev);
-
-	for (idx = 0; idx < NR_CPUS; idx++) {
-		if (fcoe_percpu[idx]) {
-			kthread_stop(fcoe_percpu[idx]->thread);
-			p = fcoe_percpu[idx];
-			spin_lock_bh(&p->fcoe_rx_list.lock);
-			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
-				kfree_skb(skb);
-			spin_unlock_bh(&p->fcoe_rx_list.lock);
-			if (fcoe_percpu[idx]->crc_eof_page)
-				put_page(fcoe_percpu[idx]->crc_eof_page);
-			kfree(fcoe_percpu[idx]);
-		}
-	}
-
-	/* remove sw trasnport */
-	fcoe_sw_exit();
-
-	/* detach the transport */
-	fcoe_transport_exit();
-}
-module_exit(fcoe_exit);
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index cfe4828..6e2f6f5 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -24,6 +24,14 @@
  * also handles RSCN events and re-discovery if necessary.
  */
 
+/*
+ * DISC LOCKING
+ *
+ * The disc mutex can be locked when acquiring rport locks, but may not
+ * be held when acquiring the lport lock. Refer to fc_lport.c for more
+ * details.
+ */
+
 #include <linux/timer.h>
 #include <linux/err.h>
 #include <asm/unaligned.h>
@@ -37,98 +45,78 @@
 
 #define	FC_DISC_DELAY		3
 
-static int fc_disc_debug;
-
-#define FC_DEBUG_DISC(fmt...)			\
-	do {					\
-		if (fc_disc_debug)		\
-			FC_DBG(fmt);		\
-	} while (0)
-
-struct fc_disc {
-	unsigned char		retry_count;
-	unsigned char		delay;
-	unsigned char		pending;
-	unsigned char		requested;
-	unsigned short		seq_count;
-	unsigned char		buf_len;
-	enum fc_disc_event	event;
-
-	void (*disc_callback)(struct fc_lport *,
-			      enum fc_disc_event);
-
-	struct list_head	 rports;
-	struct fc_lport		*lport;
-	struct mutex		disc_mutex;
-	struct fc_gpn_ft_resp	partial_buf;	/* partial name buffer */
-	struct work_struct	disc_work;
-};
-
 static void fc_disc_gpn_ft_req(struct fc_disc *);
 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
 static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
 			      struct fc_rport_identifiers *);
 static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
 static void fc_disc_done(struct fc_disc *);
-static void fc_disc_timeout(void *);
+static void fc_disc_timeout(struct work_struct *);
 static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
 static void fc_disc_restart(struct fc_disc *);
 
 /**
- * fc_disc_lookup_rport - lookup a remote port by port_id
+ * fc_disc_lookup_all_rports() - lookup both real and rogue ports by id
  * @lport: Fibre Channel host port instance
  * @port_id: remote port port_id to match
  */
-struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
-				      u32 port_id)
+struct fc_rport *fc_disc_lookup_all_rports(const struct fc_lport *lport,
+					   u32 port_id)
 {
-	struct fc_disc *disc = lport->disc;
+	const struct fc_disc *disc = &lport->disc;
 	struct fc_rport *rport, *found = NULL;
 	struct fc_rport_libfc_priv *rdata;
-	int disc_found = 0;
-
-	if (!disc)
-		return NULL;
 
 	list_for_each_entry(rdata, &disc->rports, peers) {
 		rport = PRIV_TO_RPORT(rdata);
 		if (rport->port_id == port_id) {
-			disc_found = 1;
 			found = rport;
-			get_device(&found->dev);
-			break;
+			goto out;
 		}
 	}
 
-	if (!disc_found)
-		found = NULL;
+	list_for_each_entry(rdata, &disc->rogue_rports, peers) {
+		rport = PRIV_TO_RPORT(rdata);
+		if (rport->port_id == port_id) {
+			found = rport;
+			goto out;
+		}
+	}
 
+out:
 	return found;
 }
 
 /**
- * fc_disc_alloc - Allocate a discovery work object
- * @lport: The FC lport associated with the discovery job
+ * fc_disc_lookup_rport() - lookup a remote port by port_id
+ * @lport: Fibre Channel host port instance
+ * @port_id: remote port port_id to match
  */
-static inline struct fc_disc *fc_disc_alloc(struct fc_lport *lport)
+struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
+				      u32 port_id)
 {
-	struct fc_disc *disc;
+	const struct fc_disc *disc = &lport->disc;
+	struct fc_rport *rport, *found = NULL;
+	struct fc_rport_libfc_priv *rdata;
+	int disc_found = 0;
 
-	disc = kzalloc(sizeof(struct fc_disc), GFP_KERNEL);
-	INIT_WORK(&disc->disc_work, fc_disc_timeout, disc);
-	mutex_init(&disc->disc_mutex);
-	INIT_LIST_HEAD(&disc->rports);
+	list_for_each_entry(rdata, &disc->rports, peers) {
+		rport = PRIV_TO_RPORT(rdata);
+		if (rport->port_id == port_id) {
+			disc_found = 1;
+			found = rport;
+			break;
+		}
+	}
 
-	disc->lport = lport;
-	lport->disc = disc;
-	disc->delay = FC_DISC_DELAY;
-	disc->event = DISC_EV_NONE;
+	if (!disc_found)
+		found = NULL;
 
-	return disc;
+	return found;
 }
 
 /**
- * fc_disc_stop_rports - delete all the remote ports associated with the lport
+ * fc_disc_stop_rports() - delete all the remote ports associated with the lport
  * @disc: The discovery job to stop rports on
  *
  * Locking Note: This function expects that the lport mutex is locked before
@@ -149,11 +137,16 @@ void fc_disc_stop_rports(struct fc_disc *disc)
 		lport->tt.rport_logoff(rport);
 	}
 
+	list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
+		rport = PRIV_TO_RPORT(rdata);
+		lport->tt.rport_logoff(rport);
+	}
+
 	mutex_unlock(&disc->disc_mutex);
 }
 
 /**
- * fc_disc_rport_event - Event handler for rport events
+ * fc_disc_rport_callback() - Event handler for rport events
  * @lport: The lport which is receiving the event
  * @rport: The rport which the event has occured on
  * @event: The event that occured
@@ -161,33 +154,42 @@ void fc_disc_stop_rports(struct fc_disc *disc)
  * Locking Note: The rport lock should not be held when calling
  *		 this function.
  */
-static void fc_disc_rport_event(struct fc_lport *lport,
-				struct fc_rport *rport,
-				enum fc_lport_event event)
+static void fc_disc_rport_callback(struct fc_lport *lport,
+				   struct fc_rport *rport,
+				   enum fc_rport_event event)
 {
 	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_disc *disc = lport->disc;
-	int found = 0;
+	struct fc_disc *disc = &lport->disc;
 
-	FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
-		      rport->port_id);
+	FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
+		    rport->port_id);
 
-	if (event == RPORT_EV_CREATED) {
+	switch (event) {
+	case RPORT_EV_CREATED:
 		if (disc) {
-			found = 1;
 			mutex_lock(&disc->disc_mutex);
 			list_add_tail(&rdata->peers, &disc->rports);
 			mutex_unlock(&disc->disc_mutex);
 		}
+		break;
+	case RPORT_EV_LOGO:
+	case RPORT_EV_FAILED:
+	case RPORT_EV_STOP:
+		mutex_lock(&disc->disc_mutex);
+		mutex_lock(&rdata->rp_mutex);
+		if (rdata->trans_state == FC_PORTSTATE_ROGUE)
+			list_del(&rdata->peers);
+		mutex_unlock(&rdata->rp_mutex);
+		mutex_unlock(&disc->disc_mutex);
+		break;
+	default:
+		break;
 	}
 
-	if (!found)
-		FC_DEBUG_DISC("The rport (%6x) is not maintained "
-			      "by the discovery layer\n", rport->port_id);
 }
 
 /**
- * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
+ * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
  * @sp: Current sequence of the RSCN exchange
  * @fp: RSCN Frame
  * @lport: Fibre Channel host port instance
@@ -213,20 +215,29 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
 
 	lport = disc->lport;
 
-	FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
-		      fc_host_port_id(lport->host));
+	FC_DISC_DBG(disc, "Received an RSCN event\n");
 
+	/* make sure the frame contains an RSCN message */
 	rp = fc_frame_payload_get(fp, sizeof(*rp));
-
-	if (!rp || rp->rscn_page_len != sizeof(*pp))
+	if (!rp)
 		goto reject;
-
+	/* make sure the page length is as expected (4 bytes) */
+	if (rp->rscn_page_len != sizeof(*pp))
+		goto reject;
+	/* get the RSCN payload length */
 	len = ntohs(rp->rscn_plen);
 	if (len < sizeof(*rp))
 		goto reject;
+	/* make sure the frame contains the expected payload */
+	rp = fc_frame_payload_get(fp, len);
+	if (!rp)
+		goto reject;
+	/* payload must be a multiple of the RSCN page size */
 	len -= sizeof(*rp);
+	if (len % sizeof(*pp))
+		goto reject;
 
-	for (pp = (void *)(rp + 1); len; len -= sizeof(*pp), pp++) {
+	for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
 		ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
 		ev_qual &= ELS_RSCN_EV_QUAL_MASK;
 		fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
@@ -237,8 +248,8 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
 		 */
 		switch (fmt) {
 		case ELS_ADDR_FMT_PORT:
-			FC_DEBUG_DISC("Port address format for port (%6x)\n",
-				      ntoh24(pp->rscn_fid));
+			FC_DISC_DBG(disc, "Port address format for port "
+				    "(%6x)\n", ntoh24(pp->rscn_fid));
 			dp = kzalloc(sizeof(*dp), GFP_KERNEL);
 			if (!dp) {
 				redisc = 1;
@@ -255,25 +266,27 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
 		case ELS_ADDR_FMT_DOM:
 		case ELS_ADDR_FMT_FAB:
 		default:
-			FC_DEBUG_DISC("Address format is (%d)\n", fmt);
+			FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
 			redisc = 1;
 			break;
 		}
 	}
 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
 	if (redisc) {
-		FC_DEBUG_DISC("RSCN received: rediscovering\n");
+		FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
 		fc_disc_restart(disc);
 	} else {
-		FC_DEBUG_DISC("RSCN received: not rediscovering. "
-			      "redisc %d state %d in_prog %d\n",
-			      redisc, lport->state, disc->pending);
+		FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
+			    "redisc %d state %d in_prog %d\n",
+			    redisc, lport->state, disc->pending);
 		list_for_each_entry_safe(dp, next, &disc_ports, peers) {
 			list_del(&dp->peers);
-			rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
+			rport = fc_disc_lookup_all_rports(lport,
+							  dp->ids.port_id);
 			if (rport) {
-				rdata = RPORT_TO_PRIV(rport);
-				list_del(&rdata->peers);
+				rdata = rport->dd_data;
+				if (rdata->trans_state != FC_PORTSTATE_ROGUE)
+					list_del(&rdata->peers);
 				lport->tt.rport_logoff(rport);
 			}
 			fc_disc_single(disc, dp);
@@ -282,6 +295,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
 	fc_frame_free(fp);
 	return;
 reject:
+	FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
 	rjt_data.fp = NULL;
 	rjt_data.reason = ELS_RJT_LOGIC;
 	rjt_data.explan = ELS_EXPL_NONE;
@@ -290,7 +304,7 @@ reject:
 }
 
 /**
- * fc_disc_recv_req - Handle incoming requests
+ * fc_disc_recv_req() - Handle incoming requests
  * @sp: Current sequence of the request exchange
  * @fp: The frame
  * @lport: The FC local port
@@ -303,13 +317,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
 			     struct fc_lport *lport)
 {
 	u8 op;
-	struct fc_disc *disc = lport->disc;
-
-	if (!disc) {
-		FC_DBG("Received a request for an lport not managed "
-		       "by the discovery engine\n");
-		return;
-	}
+	struct fc_disc *disc = &lport->disc;
 
 	op = fc_frame_payload_op(fp);
 	switch (op) {
@@ -319,13 +327,14 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
 		mutex_unlock(&disc->disc_mutex);
 		break;
 	default:
-		FC_DBG("Received an unsupported request. opcode (%x)\n", op);
+		FC_DISC_DBG(disc, "Received an unsupported request, "
+			    "the opcode is (%x)\n", op);
 		break;
 	}
 }
 
 /**
- * fc_disc_restart - Restart discovery
+ * fc_disc_restart() - Restart discovery
  * @lport: FC discovery context
  *
  * Locking Note: This function expects that the disc mutex
@@ -337,23 +346,26 @@ static void fc_disc_restart(struct fc_disc *disc)
 	struct fc_rport_libfc_priv *rdata, *next;
 	struct fc_lport *lport = disc->lport;
 
-	FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
-		      fc_host_port_id(lport->host));
+	FC_DISC_DBG(disc, "Restarting discovery\n");
 
 	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
 		rport = PRIV_TO_RPORT(rdata);
-		FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
 		list_del(&rdata->peers);
 		lport->tt.rport_logoff(rport);
 	}
 
+	list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
+		rport = PRIV_TO_RPORT(rdata);
+		lport->tt.rport_logoff(rport);
+	}
+
 	disc->requested = 1;
 	if (!disc->pending)
 		fc_disc_gpn_ft_req(disc);
 }
 
 /**
- * fc_disc_start - Fibre Channel Target discovery
+ * fc_disc_start() - Fibre Channel Target discovery
  * @lport: FC local port
  *
  * Returns non-zero if discovery cannot be started.
@@ -364,17 +376,7 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
 {
 	struct fc_rport *rport;
 	struct fc_rport_identifiers ids;
-	struct fc_disc *disc = lport->disc;
-
-	if (!disc) {
-		FC_DEBUG_DISC("No existing discovery job, "
-			      "creating one for lport (%6x)\n",
-			      fc_host_port_id(lport->host));
-		disc = fc_disc_alloc(lport);
-	} else
-		FC_DEBUG_DISC("Found an existing discovery job "
-			      "for lport (%6x)\n",
-			      fc_host_port_id(lport->host));
+	struct fc_disc *disc = &lport->disc;
 
 	/*
 	 * At this point we may have a new disc job or an existing
@@ -419,8 +421,12 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
 	mutex_unlock(&disc->disc_mutex);
 }
 
+static struct fc_rport_operations fc_disc_rport_ops = {
+	.event_callback = fc_disc_rport_callback,
+};
+
 /**
- * fc_disc_new_target - Handle new target found by discovery
+ * fc_disc_new_target() - Handle new target found by discovery
  * @lport: FC local port
  * @rport: The previous FC remote port (NULL if new remote port)
  * @ids: Identifiers for the new FC remote port
@@ -433,7 +439,7 @@ static int fc_disc_new_target(struct fc_disc *disc,
 			      struct fc_rport_identifiers *ids)
 {
 	struct fc_lport *lport = disc->lport;
-	struct fc_rport_libfc_priv *rp;
+	struct fc_rport_libfc_priv *rdata;
 	int error = 0;
 
 	if (rport && ids->port_name) {
@@ -467,15 +473,16 @@ static int fc_disc_new_target(struct fc_disc *disc,
 				dp.ids.port_name = ids->port_name;
 				dp.ids.node_name = ids->node_name;
 				dp.ids.roles = ids->roles;
-				rport = fc_rport_rogue_create(&dp);
+				rport = lport->tt.rport_create(&dp);
 			}
 			if (!rport)
 				error = -ENOMEM;
 		}
 		if (rport) {
-			rp = rport->dd_data;
-			rp->event_callback = fc_disc_rport_event;
-			rp->rp_state = RPORT_ST_INIT;
+			rdata = rport->dd_data;
+			rdata->ops = &fc_disc_rport_ops;
+			rdata->rp_state = RPORT_ST_INIT;
+			list_add_tail(&rdata->peers, &disc->rogue_rports);
 			lport->tt.rport_login(rport);
 		}
 	}
@@ -483,40 +490,47 @@ static int fc_disc_new_target(struct fc_disc *disc,
 }
 
 /**
- * fc_disc_del_target - Delete a target
+ * fc_disc_del_target() - Delete a target
  * @disc: FC discovery context
  * @rport: The remote port to be removed
  */
 static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
 {
 	struct fc_lport *lport = disc->lport;
-	struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	list_del(&rdata->peers);
 	lport->tt.rport_logoff(rport);
 }
 
 /**
- * fc_disc_done - Discovery has been completed
+ * fc_disc_done() - Discovery has been completed
  * @disc: FC discovery context
+ * Locking Note: This function expects that the disc mutex is locked before
+ * it is called. The discovery callback is then made with the lock released,
+ * and the lock is re-taken before returning from this function.
  */
 static void fc_disc_done(struct fc_disc *disc)
 {
 	struct fc_lport *lport = disc->lport;
+	enum fc_disc_event event;
 
-	FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
-		      fc_host_port_id(lport->host));
+	FC_DISC_DBG(disc, "Discovery complete\n");
 
-	disc->disc_callback(lport, disc->event);
+	event = disc->event;
 	disc->event = DISC_EV_NONE;
 
 	if (disc->requested)
 		fc_disc_gpn_ft_req(disc);
 	else
 		disc->pending = 0;
+
+	mutex_unlock(&disc->disc_mutex);
+	disc->disc_callback(lport, event);
+	mutex_lock(&disc->disc_mutex);
 }
 
 /**
- * fc_disc_error - Handle error on dNS request
+ * fc_disc_error() - Handle error on dNS request
  * @disc: FC discovery context
  * @fp: The frame pointer
  */
@@ -524,10 +538,10 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
 {
 	struct fc_lport *lport = disc->lport;
 	unsigned long delay = 0;
-	if (fc_disc_debug)
-		FC_DBG("Error %ld, retries %d/%d\n",
-		       PTR_ERR(fp), disc->retry_count,
-		       FC_DISC_RETRY_LIMIT);
+
+	FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
+		    PTR_ERR(fp), disc->retry_count,
+		    FC_DISC_RETRY_LIMIT);
 
 	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
 		/*
@@ -556,7 +570,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
 }
 
 /**
- * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
+ * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
  * @lport: FC discovery context
  *
  * Locking Note: This function expects that the disc_mutex is locked
@@ -590,7 +604,7 @@ err:
 }
 
 /**
- * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
+ * fc_disc_gpn_ft_parse() - Parse the list of IDs and names resulting from a request
  * @lport: Fibre Channel host port instance
  * @buf: GPN_FT response buffer
  * @len: size of response buffer
@@ -654,16 +668,18 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
 
 		if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
 		    (dp.ids.port_name != lport->wwpn)) {
-			rport = fc_rport_rogue_create(&dp);
+			rport = lport->tt.rport_create(&dp);
 			if (rport) {
 				rdata = rport->dd_data;
-				rdata->event_callback = fc_disc_rport_event;
+				rdata->ops = &fc_disc_rport_ops;
 				rdata->local_port = lport;
+				list_add_tail(&rdata->peers,
+					      &disc->rogue_rports);
 				lport->tt.rport_login(rport);
 			} else
-				FC_DBG("Failed to allocate memory for "
-				       "the newly discovered port (%6x)\n",
-				       dp.ids.port_id);
+				printk(KERN_WARNING "libfc: Failed to allocate "
+				       "memory for the newly discovered port "
+				       "(%6x)\n", dp.ids.port_id);
 		}
 
 		if (np->fp_flags & FC_NS_FID_LAST) {
@@ -683,9 +699,8 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
 	 */
 	if (error == 0 && len > 0 && len < sizeof(*np)) {
 		if (np != &disc->partial_buf) {
-			FC_DEBUG_DISC("Partial buffer remains "
-				      "for discovery by (%6x)\n",
-				      fc_host_port_id(lport->host));
+			FC_DISC_DBG(disc, "Partial buffer remains "
+				    "for discovery\n");
 			memcpy(&disc->partial_buf, np, len);
 		}
 		disc->buf_len = (unsigned char) len;
@@ -695,13 +710,17 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
 	return error;
 }
 
-/*
+/**
+ * fc_disc_timeout() - Retry handler for the disc component
+ * @work: Structure holding disc obj that needs retry discovery
+ *
  * Handle retry of memory allocation for remote ports.
  */
-static void fc_disc_timeout(void *data)
+static void fc_disc_timeout(struct work_struct *work)
 {
-	struct fc_disc *disc = data;
-
+	struct fc_disc *disc = container_of(work,
+					    struct fc_disc,
+					    disc_work.work);
 	mutex_lock(&disc->disc_mutex);
 	if (disc->requested && !disc->pending)
 		fc_disc_gpn_ft_req(disc);
@@ -709,13 +728,13 @@ static void fc_disc_timeout(void *data)
 }
 
 /**
- * fc_disc_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT)
+ * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
  * @sp: Current sequence of GPN_FT exchange
  * @fp: response frame
  * @lp_arg: Fibre Channel host port instance
  *
- * Locking Note: This function expects that the disc_mutex is locked
- *		 before it is called.
+ * Locking Note: This function is called without disc mutex held, and
+ *		 should do all its processing with the mutex held
  */
 static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
 				void *disc_arg)
@@ -728,11 +747,12 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
 	unsigned int len;
 	int error;
 
-	FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
-		      fc_host_port_id(disc->lport->host));
+	mutex_lock(&disc->disc_mutex);
+	FC_DISC_DBG(disc, "Received a GPN_FT response\n");
 
 	if (IS_ERR(fp)) {
 		fc_disc_error(disc, fp);
+		mutex_unlock(&disc->disc_mutex);
 		return;
 	}
 
@@ -744,32 +764,30 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
 	    disc->seq_count == 0) {
 		cp = fc_frame_payload_get(fp, sizeof(*cp));
 		if (!cp) {
-			FC_DBG("GPN_FT response too short, len %d\n",
-			       fr_len(fp));
+			FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
+				    fr_len(fp));
 		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
 
-			/*
-			 * Accepted.  Parse response.
-			 */
+			/* Accepted, parse the response. */
 			buf = cp + 1;
 			len -= sizeof(*cp);
 		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
-			FC_DBG("GPN_FT rejected reason %x exp %x "
-			       "(check zoning)\n", cp->ct_reason,
-			       cp->ct_explan);
+			FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
+				    "(check zoning)\n", cp->ct_reason,
+				    cp->ct_explan);
 			disc->event = DISC_EV_FAILED;
 			fc_disc_done(disc);
 		} else {
-			FC_DBG("GPN_FT unexpected response code %x\n",
-			       ntohs(cp->ct_cmd));
+			FC_DISC_DBG(disc, "GPN_FT unexpected response code "
+				    "%x\n", ntohs(cp->ct_cmd));
 		}
 	} else if (fr_sof(fp) == FC_SOF_N3 &&
 		   seq_cnt == disc->seq_count) {
 		buf = fh + 1;
 	} else {
-		FC_DBG("GPN_FT unexpected frame - out of sequence? "
-		       "seq_cnt %x expected %x sof %x eof %x\n",
-		       seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
+		FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
+			    "seq_cnt %x expected %x sof %x eof %x\n",
+			    seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
 	}
 	if (buf) {
 		error = fc_disc_gpn_ft_parse(disc, buf, len);
@@ -779,10 +797,12 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
 			disc->seq_count++;
 	}
 	fc_frame_free(fp);
+
+	mutex_unlock(&disc->disc_mutex);
 }
 
 /**
- * fc_disc_single - Discover the directory information for a single target
+ * fc_disc_single() - Discover the directory information for a single target
  * @lport: FC local port
  * @dp: The port to rediscover
  *
@@ -792,7 +812,6 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
 static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
 {
 	struct fc_lport *lport;
-	struct fc_rport *rport;
 	struct fc_rport *new_rport;
 	struct fc_rport_libfc_priv *rdata;
 
@@ -801,17 +820,12 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
 	if (dp->ids.port_id == fc_host_port_id(lport->host))
 		goto out;
 
-	rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
-	if (rport) {
-		fc_disc_del_target(disc, rport);
-		put_device(&rport->dev); /* hold from lookup */
-	}
-
-	new_rport = fc_rport_rogue_create(dp);
+	new_rport = lport->tt.rport_create(dp);
 	if (new_rport) {
 		rdata = new_rport->dd_data;
-		rdata->event_callback = fc_disc_rport_event;
+		rdata->ops = &fc_disc_rport_ops;
 		kfree(dp);
+		list_add_tail(&rdata->peers, &disc->rogue_rports);
 		lport->tt.rport_login(new_rport);
 	}
 	return;
@@ -820,22 +834,21 @@ out:
 }
 
 /**
- * fc_disc_stop - Stop discovery for a given lport
+ * fc_disc_stop() - Stop discovery for a given lport
  * @lport: The lport that discovery should stop for
  */
 void fc_disc_stop(struct fc_lport *lport)
 {
-	struct fc_disc *disc = lport->disc;
+	struct fc_disc *disc = &lport->disc;
 
 	if (disc) {
-		if (!cancel_delayed_work(&disc->disc_work))
-			flush_scheduled_work();
+		cancel_delayed_work_sync(&disc->disc_work);
 		fc_disc_stop_rports(disc);
 	}
 }
 
 /**
- * fc_disc_stop_final - Stop discovery for a given lport
+ * fc_disc_stop_final() - Stop discovery for a given lport
  * @lport: The lport that discovery should stop for
  *
  * This function will block until discovery has been
@@ -848,11 +861,12 @@ void fc_disc_stop_final(struct fc_lport *lport)
 }
 
 /**
- * fc_disc_init - Initialize the discovery block
+ * fc_disc_init() - Initialize the discovery block
  * @lport: FC local port
  */
 int fc_disc_init(struct fc_lport *lport)
 {
+	struct fc_disc *disc;
 
 	if (!lport->tt.disc_start)
 		lport->tt.disc_start = fc_disc_start;
@@ -869,6 +883,16 @@ int fc_disc_init(struct fc_lport *lport)
 	if (!lport->tt.rport_lookup)
 		lport->tt.rport_lookup = fc_disc_lookup_rport;
 
+	disc = &lport->disc;
+	INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
+	mutex_init(&disc->disc_mutex);
+	INIT_LIST_HEAD(&disc->rports);
+	INIT_LIST_HEAD(&disc->rogue_rports);
+
+	disc->lport = lport;
+	disc->delay = FC_DISC_DELAY;
+	disc->event = DISC_EV_NONE;
+
 	return 0;
 }
 EXPORT_SYMBOL(fc_disc_init);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index fb6000f..5296f5a 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -33,20 +33,7 @@
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
 
-#define	  FC_DEF_R_A_TOV      (10 * 1000) /* resource allocation timeout */
-
-/*
- * fc_exch_debug can be set in debugger or at compile time to get more logs.
- */
-static int fc_exch_debug;
-
-#define FC_DEBUG_EXCH(fmt...)			\
-	do {					\
-		if (fc_exch_debug)		\
-			FC_DBG(fmt);		\
-	} while (0)
-
-static struct kmem_cache *fc_em_cachep;	/* cache for exchanges */
+static struct kmem_cache *fc_em_cachep;        /* cache for exchanges */
 
 /*
  * Structure and function definitions for managing Fibre Channel Exchanges
@@ -69,7 +56,8 @@ static struct kmem_cache *fc_em_cachep;	/* cache for exchanges */
  */
 struct fc_exch_mgr {
 	enum fc_class	class;		/* default class for sequences */
-	spinlock_t	em_lock;	/* exchange manager lock */
+	spinlock_t	em_lock;	/* exchange manager lock,
+					   must be taken before ex_lock */
 	u16		last_xid;	/* last allocated exchange ID */
 	u16		min_xid;	/* min exchange ID */
 	u16		max_xid;	/* max exchange ID */
@@ -180,6 +168,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
  * sequence allocation and deallocation must be locked.
  *  - exchange refcnt can be done atomicly without locks.
  *  - sequence allocation must be locked by exch lock.
+ *  - If the em_lock and ex_lock must be taken at the same time, then the
+ *    em_lock must be taken before the ex_lock.
  */
 
 /*
@@ -281,7 +271,7 @@ static void fc_exch_release(struct fc_exch *ep)
 			ep->destructor(&ep->seq, ep->arg);
 		if (ep->lp->tt.exch_put)
 			ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
-		WARN_ON(!ep->esb_stat & ESB_ST_COMPLETE);
+		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
 		mempool_free(ep, mp->ep_pool);
 	}
 }
@@ -333,8 +323,8 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
 		return;
 
-	FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
-		      ep->xid);
+	FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n");
+
 	if (schedule_delayed_work(&ep->timeout_work,
 				  msecs_to_jiffies(timer_msec)))
 		fc_exch_hold(ep);		/* hold for timer */
@@ -409,9 +399,10 @@ EXPORT_SYMBOL(fc_seq_exch_abort);
  * Exchange timeout - handle exchange timer expiration.
  * The timer will have been cancelled before this is called.
  */
-static void fc_exch_timeout(void *data)
+static void fc_exch_timeout(struct work_struct *work)
 {
-	struct fc_exch *ep = data;
+	struct fc_exch *ep = container_of(work, struct fc_exch,
+					  timeout_work.work);
 	struct fc_seq *sp = &ep->seq;
 	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
 	void *arg;
@@ -488,7 +479,7 @@ static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
 	struct fc_exch *ep = NULL;
 
 	if (mp->max_read) {
-		if (fc_frame_is_read(fp)) {
+		if (fc_fcp_is_read(fr_fsp(fp))) {
 			min = mp->min_xid;
 			max = mp->max_read;
 			plast = &mp->last_read;
@@ -544,7 +535,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
 		/* alloc a new xid */
 		xid = fc_em_alloc_xid(mp, fp);
 		if (!xid) {
-			printk(KERN_ERR "fc_em_alloc_xid() failed\n");
+			printk(KERN_WARNING "libfc: Failed to allocate an exchange\n");
 			goto err;
 		}
 	}
@@ -573,7 +564,7 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
 	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
 	ep->rxid = FC_XID_UNKNOWN;
 	ep->class = mp->class;
-	INIT_WORK(&ep->timeout_work, fc_exch_timeout, ep);
+	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
 out:
 	return ep;
 err:
@@ -624,7 +615,6 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 {
 	struct fc_exch *ep;
 	struct fc_frame_header *fh;
-	u16 rxid;
 
 	ep = mp->lp->tt.exch_get(mp->lp, fp);
 	if (ep) {
@@ -651,18 +641,6 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
 			ep->esb_stat &= ~ESB_ST_SEQ_INIT;
 
-		/*
-		 * Set the responder ID in the frame header.
-		 * The old one should've been 0xffff.
-		 * If it isn't, don't assign one.
-		 * Incoming basic link service frames may specify
-		 * a referenced RX_ID.
-		 */
-		if (fh->fh_type != FC_TYPE_BLS) {
-			rxid = ntohs(fh->fh_rx_id);
-			WARN_ON(rxid != FC_XID_UNKNOWN);
-			fh->fh_rx_id = htons(ep->rxid);
-		}
 		fc_exch_hold(ep);	/* hold for caller */
 		spin_unlock_bh(&ep->ex_lock);	/* lock from exch_get */
 	}
@@ -674,8 +652,8 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
  * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
  * on the ep that should be released by the caller.
  */
-static enum fc_pf_rjt_reason
-fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
+static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp,
+						 struct fc_frame *fp)
 {
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 	struct fc_exch *ep = NULL;
@@ -832,8 +810,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
 	struct fc_exch *ep = fc_seq_exch(sp);
 
 	sp = fc_seq_alloc(ep, ep->seq_id++);
-	FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n",
-		      ep->xid, ep->f_ctl, sp->id);
+	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
+		    ep->f_ctl, sp->id);
 	return sp;
 }
 /*
@@ -913,7 +891,7 @@ void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
 		fc_exch_els_rec(sp, els_data->fp);
 		break;
 	default:
-		FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
+		FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
 	}
 }
 EXPORT_SYMBOL(fc_seq_els_rsp_send);
@@ -993,9 +971,9 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
  * Send BLS Reject.
  * This is for rejecting BA_ABTS only.
  */
-static void
-fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
-		    enum fc_ba_rjt_explan explan)
+static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
+				enum fc_ba_rjt_reason reason,
+				enum fc_ba_rjt_explan explan)
 {
 	struct fc_frame *fp;
 	struct fc_frame_header *rx_fh;
@@ -1093,7 +1071,7 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
 		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
 		ap->ba_low_seq_cnt = htons(sp->cnt);
 	}
-	sp = fc_seq_start_next(sp);
+	sp = fc_seq_start_next_locked(sp);
 	spin_unlock_bh(&ep->ex_lock);
 	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
 	fc_frame_free(rx_fp);
@@ -1146,7 +1124,7 @@ static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
 			lp->tt.lport_recv(lp, sp, fp);
 		fc_exch_release(ep);	/* release from lookup */
 	} else {
-		FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
+		FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject);
 		fc_frame_free(fp);
 	}
 }
@@ -1250,10 +1228,10 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
 	if (!sp) {
 		atomic_inc(&mp->stats.xid_not_found);
-		FC_DEBUG_EXCH("seq lookup failed\n");
+		FC_EM_DBG(mp, "seq lookup failed\n");
 	} else {
 		atomic_inc(&mp->stats.non_bls_resp);
-		FC_DEBUG_EXCH("non-BLS response to sequence");
+		FC_EM_DBG(mp, "non-BLS response to sequence");
 	}
 	fc_frame_free(fp);
 }
@@ -1274,13 +1252,11 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 	int rc = 1, has_rec = 0;
 
 	fh = fc_frame_header_get(fp);
-	FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
-		      fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
+	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
+		    fc_exch_rctl_name(fh->fh_r_ctl));
 
-	if (cancel_delayed_work(&ep->timeout_work))
+	if (cancel_delayed_work_sync(&ep->timeout_work))
 		fc_exch_release(ep);	/* release from pending timer hold */
-	else
-		flush_scheduled_work();
 
 	spin_lock_bh(&ep->ex_lock);
 	switch (fh->fh_r_ctl) {
@@ -1369,9 +1345,9 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
 		case FC_RCTL_ACK_0:
 			break;
 		default:
-			FC_DEBUG_EXCH("BLS rctl %x - %s received",
-				      fh->fh_r_ctl,
-				      fc_exch_rctl_name(fh->fh_r_ctl));
+			FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
+				    fh->fh_r_ctl,
+				    fc_exch_rctl_name(fh->fh_r_ctl));
 			break;
 		}
 		fc_frame_free(fp);
@@ -1479,10 +1455,11 @@ static void fc_exch_reset(struct fc_exch *ep)
  * If sid is non-zero, reset only exchanges we source from that FID.
  * If did is non-zero, reset only exchanges destined to that FID.
  */
-void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
+void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
 {
 	struct fc_exch *ep;
 	struct fc_exch *next;
+	struct fc_exch_mgr *mp = lp->emp;
 
 	spin_lock_bh(&mp->em_lock);
 restart:
@@ -1606,9 +1583,10 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
 	if (IS_ERR(fp)) {
 		int err = PTR_ERR(fp);
 
-		if (err == -FC_EX_CLOSED)
+		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
 			goto cleanup;
-		FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
+		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
+			    "frame error %d\n", err);
 		return;
 	}
 
@@ -1617,12 +1595,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
 
 	switch (op) {
 	case ELS_LS_RJT:
-		FC_DBG("LS_RJT for RRQ");
+		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
 		/* fall through */
 	case ELS_LS_ACC:
 		goto cleanup;
 	default:
-		FC_DBG("unexpected response op %x for RRQ", op);
+		FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
+			    "for RRQ", op);
 		return;
 	}
 
@@ -1749,8 +1728,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
 	size_t len;
 
 	if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
-		FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n",
-		       min_xid, max_xid);
+		FC_LPORT_DBG(lp, "Invalid min_xid 0x%x and max_xid 0x%x\n",
+			     min_xid, max_xid);
 		return NULL;
 	}
 
@@ -1854,6 +1833,8 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
 	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
 	sp->cnt++;
 
+	fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
+
 	if (unlikely(lp->tt.frame_send(lp, fp)))
 		goto err;
 
@@ -1885,7 +1866,8 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
 
 	/* lport lock ? */
 	if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
-		FC_DBG("fc_lport or EM is not allocated and configured");
+		FC_LPORT_DBG(lp, "Receiving frames for an lport that "
+			     "has not been initialized correctly\n");
 		fc_frame_free(fp);
 		return;
 	}
@@ -1911,7 +1893,7 @@ void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
 			fc_exch_recv_req(lp, mp, fp);
 		break;
 	default:
-		FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
+		FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp));
 		fc_frame_free(fp);
 		break;
 	}
@@ -1957,7 +1939,7 @@ EXPORT_SYMBOL(fc_exch_init);
 int fc_setup_exch_mgr(void)
 {
 	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
-					 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+					 0, SLAB_HWCACHE_ALIGN, NULL);
 	if (!fc_em_cachep)
 		return -ENOMEM;
 	return 0;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 319c4d7..ac5682c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -20,13 +20,13 @@
  */
 
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/scatterlist.h>
 #include <linux/err.h>
 #include <linux/crc32.h>
-#include <linux/delay.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi.h>
@@ -41,15 +41,11 @@
 
 MODULE_AUTHOR("Open-FCoE.org");
 MODULE_DESCRIPTION("libfc");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
 
-static int fc_fcp_debug;
-
-#define FC_DEBUG_FCP(fmt...)			\
-	do {					\
-		if (fc_fcp_debug)		\
-			FC_DBG(fmt);		\
-	} while (0)
+unsigned int fc_debug_logging;
+module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
 
 static struct kmem_cache *scsi_pkt_cachep;
 
@@ -161,7 +157,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
 }
 
 /**
- * fc_fcp_pkt_release - release hold on scsi_pkt packet
+ * fc_fcp_pkt_release() - release hold on scsi_pkt packet
  * @fsp:	fcp packet struct
  *
  * This is used by upper layer scsi driver.
@@ -183,8 +179,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
 }
 
 /**
- * fc_fcp_pkt_destory - release hold on scsi_pkt packet
- *
+ * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet
  * @seq:		exchange sequence
  * @fsp:	fcp packet struct
  *
@@ -199,7 +194,7 @@ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
 }
 
 /**
- * fc_fcp_lock_pkt - lock a packet and get a ref to it.
+ * fc_fcp_lock_pkt() - lock a packet and get a ref to it.
  * @fsp:	fcp packet
  *
  * We should only return error if we return a command to scsi-ml before
@@ -260,26 +255,61 @@ static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
 	}
 
 	fsp->state &= ~FC_SRB_ABORT_PENDING;
-	fsp->io_status = SUGGEST_RETRY << 24;
+	fsp->io_status = 0;
 	fsp->status_code = FC_ERROR;
 	fc_fcp_complete_locked(fsp);
 }
 
-#define for_each_sg(sglist, __sg, nr, __i)        \
-        for (__i = 0, __sg = (sglist); __i < (nr); __i++, __sg = sg_next(__sg))
-
-static inline struct scatterlist *sg_next(struct scatterlist *sg)
+/*
+ * fc_fcp_ddp_setup - calls to LLD's ddp_setup to set up DDP
+ * transfer for a read I/O indicated by the fc_fcp_pkt.
+ * @fsp: ptr to the fc_fcp_pkt
+ *
+ * This is called in exch_seq_send() when we have a newly allocated
+ * exchange with a valid exchange id to setup ddp.
+ *
+ * returns: none
+ */
+void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
 {
-	if (!sg)
-		return NULL;
-	return sg + 1;
+	struct fc_lport *lp;
+
+	if (!fsp)
+		return;
+
+	lp = fsp->lp;
+	if ((fsp->req_flags & FC_SRB_READ) &&
+	    (lp->lro_enabled) && (lp->tt.ddp_setup)) {
+		if (lp->tt.ddp_setup(lp, xid, scsi_sglist(fsp->cmd),
+				     scsi_sg_count(fsp->cmd)))
+			fsp->xfer_ddp = xid;
+	}
 }
+EXPORT_SYMBOL(fc_fcp_ddp_setup);
 
-static inline struct page *sg_page(struct scatterlist *sg)
+/*
+ * fc_fcp_ddp_done - calls to LLD's ddp_done to release any
+ * DDP related resources for this I/O if it is initialized
+ * as a ddp transfer
+ * @fsp: ptr to the fc_fcp_pkt
+ *
+ * returns: none
+ */
+static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
 {
-	return sg->page;
+	struct fc_lport *lp;
+
+	if (!fsp)
+		return;
+
+	lp = fsp->lp;
+	if (fsp->xfer_ddp && lp->tt.ddp_done) {
+		fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp);
+		fsp->xfer_ddp = 0;
+	}
 }
 
+
 /*
  * Receive SCSI data from target.
  * Called after receiving solicited data.
@@ -306,15 +336,16 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	len = fr_len(fp) - sizeof(*fh);
 	buf = fc_frame_payload_get(fp, 0);
 
+	/* if this I/O is ddped, update xfer len */
+	fc_fcp_ddp_done(fsp);
+
 	if (offset + len > fsp->data_len) {
-		/*
-		 * this should never happen
-		 */
+		/* this should never happen */
 		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
 		    fc_frame_crc_check(fp))
 			goto crc_err;
-		FC_DEBUG_FCP("data received past end. len %zx offset %zx "
-			     "data_len %x\n", len, offset, fsp->data_len);
+		FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
+			   "data_len %x\n", len, offset, fsp->data_len);
 		fc_fcp_retry_cmd(fsp);
 		return;
 	}
@@ -372,14 +403,15 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 			len += 4 - (len % 4);
 		}
 
-		if (~crc != le32_to_cpu(*(__le32 *)(buf + len))) {
+		if (~crc != le32_to_cpu(fr_crc(fp))) {
 crc_err:
-			stats = lp->dev_stats[smp_processor_id()];
-			if (stats) {
-				stats->ErrorFrames++;
-				if (stats->InvalidCRCCount++ < 5)
-					FC_DBG("CRC error on data frame\n");
-			}
+			stats = fc_lport_get_stats(lp);
+			stats->ErrorFrames++;
+			/* FIXME - per cpu count, not total count! */
+			if (stats->InvalidCRCCount++ < 5)
+				printk(KERN_WARNING "libfc: CRC error on data "
+				       "frame for port (%6x)\n",
+				       fc_host_port_id(lp->host));
 			/*
 			 * Assume the frame is total garbage.
 			 * We may have copied it over the good part
@@ -406,8 +438,8 @@ crc_err:
 		fc_fcp_complete_locked(fsp);
 }
 
-/*
- * fc_fcp_send_data -  Send SCSI data to target.
+/**
+ * fc_fcp_send_data() -  Send SCSI data to target.
  * @fsp: ptr to fc_fcp_pkt
  * @sp: ptr to this sequence
  * @offset: starting offset for this data request
@@ -442,14 +474,14 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 	WARN_ON(seq_blen <= 0);
 	if (unlikely(offset + seq_blen > fsp->data_len)) {
 		/* this should never happen */
-		FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
-			     seq_blen, offset);
+		FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
+			   "offset %zx\n", seq_blen, offset);
 		fc_fcp_send_abort(fsp);
 		return 0;
 	} else if (offset != fsp->xfer_len) {
 		/* Out of Order Data Request - no problem, but unexpected. */
-		FC_DEBUG_FCP("xfer-ready non-contiguous. "
-			     "seq_blen %zx offset %zx\n", seq_blen, offset);
+		FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
+			   "seq_blen %zx offset %zx\n", seq_blen, offset);
 	}
 
 	/*
@@ -457,7 +489,13 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 	 * burst length (t_blen) to seq_blen, otherwise set t_blen
 	 * to max FC frame payload previously set in fsp->max_payload.
 	 */
-	t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
+	t_blen = fsp->max_payload;
+	if (lp->seq_offload) {
+		t_blen = min(seq_blen, (size_t)lp->lso_max);
+		FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
+			   fsp, seq_blen, lp->lso_max, t_blen);
+	}
+
 	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
 	if (t_blen > 512)
 		t_blen &= ~(512 - 1);	/* round down to block size */
@@ -631,8 +669,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	}
 }
 
-/*
- * fc_fcp_reduce_can_queue - drop can_queue
+/**
+ * fc_fcp_reduce_can_queue() - drop can_queue
  * @lp: lport to drop queueing for
  *
  * If we are getting memory allocation failures, then we may
@@ -657,15 +695,17 @@ static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
 	if (!can_queue)
 		can_queue = 1;
 	lp->host->can_queue = can_queue;
-	shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
+	shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n"
 		     "Reducing can_queue to %d.\n", can_queue);
 done:
 	spin_unlock_irqrestore(lp->host->host_lock, flags);
 }
 
-/*
- * exch mgr calls this routine to process scsi
- * exchanges.
+/**
+ * fc_fcp_recv() - Receive FCP frames
+ * @seq: The sequence the frame is on
+ * @fp: The FC frame
+ * @arg: The related FCP packet
  *
  * Return   : None
  * Context  : called from Soft IRQ context
@@ -730,7 +770,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 
 		fc_fcp_resp(fsp, fp);
 	} else {
-		FC_DBG("unexpected frame.  r_ctl %x\n", r_ctl);
+		FC_FCP_DBG(fsp, "unexpected frame.  r_ctl %x\n", r_ctl);
 	}
 unlock:
 	fc_fcp_unlock_pkt(fsp);
@@ -766,6 +806,9 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	fsp->scsi_comp_flags = flags;
 	expected_len = fsp->data_len;
 
+	/* if ddp, update xfer len */
+	fc_fcp_ddp_done(fsp);
+
 	if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
 		rp_ex = (void *)(fc_rp + 1);
 		if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
@@ -836,24 +879,24 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 			return;
 		}
 		fsp->status_code = FC_DATA_OVRRUN;
-		FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
-		       "data len %x\n",
-		       fsp->rport->port_id,
-		       fsp->xfer_len, expected_len, fsp->data_len);
+		FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, "
+			   "len %x, data len %x\n",
+			   fsp->rport->port_id,
+			   fsp->xfer_len, expected_len, fsp->data_len);
 	}
 	fc_fcp_complete_locked(fsp);
 	return;
 
 len_err:
-	FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
-	       flags, fr_len(fp), respl, snsl);
+	FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
+		   "snsl %u\n", flags, fr_len(fp), respl, snsl);
 err:
 	fsp->status_code = FC_ERROR;
 	fc_fcp_complete_locked(fsp);
 }
 
 /**
- * fc_fcp_complete_locked - complete processing of a fcp packet
+ * fc_fcp_complete_locked() - complete processing of a fcp packet
  * @fsp:	fcp packet
  *
  * This function may sleep if a timer is pending. The packet lock must be
@@ -881,7 +924,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
 		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
 			fsp->status_code = FC_DATA_UNDRUN;
-			fsp->io_status = SUGGEST_RETRY << 24;
+			fsp->io_status = 0;
 		}
 	}
 
@@ -921,7 +964,7 @@ static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
 }
 
 /**
- * fc_fcp_cleanup_each_cmd - run fn on each active command
+ * fc_fcp_cleanup_each_cmd() - Clean up active commands
  * @lp:		logical port
  * @id:		target id
  * @lun:	lun
@@ -973,7 +1016,7 @@ static void fc_fcp_abort_io(struct fc_lport *lp)
 }
 
 /**
- * fc_fcp_pkt_send - send a fcp packet to the lower level.
+ * fc_fcp_pkt_send() - send a fcp packet to the lower level.
  * @lp:		fc lport
  * @fsp:	fc packet.
  *
@@ -1028,7 +1071,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
 	}
 
 	memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
-	fr_cmd(fp) = fsp->cmd;
+	fr_fsp(fp) = fsp;
 	rport = fsp->rport;
 	fsp->max_payload = rport->maxframe_size;
 	rp = rport->dd_data;
@@ -1066,13 +1109,11 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	if (fc_fcp_lock_pkt(fsp))
 		return;
 
-	switch (error) {
-	case -FC_EX_CLOSED:
+	if (error == -FC_EX_CLOSED) {
 		fc_fcp_retry_cmd(fsp);
 		goto unlock;
-	default:
-		FC_DBG("unknown error %ld\n", PTR_ERR(fp));
 	}
+
 	/*
 	 * clear abort pending, because the lower layer
 	 * decided to force completion.
@@ -1104,10 +1145,10 @@ static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
 	fsp->wait_for_comp = 0;
 
 	if (!rc) {
-		FC_DBG("target abort cmd  failed\n");
+		FC_FCP_DBG(fsp, "target abort cmd  failed\n");
 		rc = FAILED;
 	} else if (fsp->state & FC_SRB_ABORTED) {
-		FC_DBG("target abort cmd  passed\n");
+		FC_FCP_DBG(fsp, "target abort cmd  passed\n");
 		rc = SUCCESS;
 		fc_fcp_complete_locked(fsp);
 	}
@@ -1172,7 +1213,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
 	spin_unlock_bh(&fsp->scsi_pkt_lock);
 
 	if (!rc) {
-		FC_DBG("lun reset failed\n");
+		FC_SCSI_DBG(lp, "lun reset failed\n");
 		return FAILED;
 	}
 
@@ -1180,7 +1221,7 @@ static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
 	if (fsp->cdb_status != FCP_TMF_CMPL)
 		return FAILED;
 
-	FC_DBG("lun reset to lun %u completed\n", lun);
+	FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun);
 	fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
 	return SUCCESS;
 }
@@ -1289,7 +1330,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
 	rp = rport->dd_data;
 	if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
 		fsp->status_code = FC_HRD_ERROR;
-		fsp->io_status = SUGGEST_RETRY << 24;
+		fsp->io_status = 0;
 		fc_fcp_complete_locked(fsp);
 		return;
 	}
@@ -1347,13 +1388,13 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
 		switch (rjt->er_reason) {
 		default:
-			FC_DEBUG_FCP("device %x unexpected REC reject "
-				     "reason %d expl %d\n",
-				     fsp->rport->port_id, rjt->er_reason,
-				     rjt->er_explan);
+			FC_FCP_DBG(fsp, "device %x unexpected REC reject "
+				   "reason %d expl %d\n",
+				   fsp->rport->port_id, rjt->er_reason,
+				   rjt->er_explan);
 			/* fall through */
 		case ELS_RJT_UNSUP:
-			FC_DEBUG_FCP("device does not support REC\n");
+			FC_FCP_DBG(fsp, "device does not support REC\n");
 			rp = fsp->rport->dd_data;
 			/*
 			 * if we do not spport RECs or got some bogus
@@ -1473,8 +1514,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		break;
 
 	default:
-		FC_DBG("REC %p fid %x error unexpected error %d\n",
-		       fsp, fsp->rport->port_id, error);
+		FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n",
+			   fsp, fsp->rport->port_id, error);
 		fsp->status_code = FC_CMD_PLOGO;
 		/* fall through */
 
@@ -1483,9 +1524,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		 * Assume REC or LS_ACC was lost.
 		 * The exchange manager will have aborted REC, so retry.
 		 */
-		FC_DBG("REC fid %x error error %d retry %d/%d\n",
-		       fsp->rport->port_id, error, fsp->recov_retry,
-		       FC_MAX_RECOV_RETRY);
+		FC_FCP_DBG(fsp, "REC fid %x error %d retry %d/%d\n",
+			   fsp->rport->port_id, error, fsp->recov_retry,
+			   FC_MAX_RECOV_RETRY);
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
 		else
@@ -1642,7 +1683,7 @@ out:
 static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
 {
 	/* lock ? */
-	return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
+	return (lp->state == LPORT_ST_READY) && lp->link_up;
 }
 
 /**
@@ -1712,23 +1753,18 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 	/*
 	 * setup the data direction
 	 */
-	stats = lp->dev_stats[smp_processor_id()];
+	stats = fc_lport_get_stats(lp);
 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		fsp->req_flags = FC_SRB_READ;
-		if (stats) {
-			stats->InputRequests++;
-			stats->InputMegabytes = fsp->data_len;
-		}
+		stats->InputRequests++;
+		stats->InputMegabytes = fsp->data_len;
 	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
 		fsp->req_flags = FC_SRB_WRITE;
-		if (stats) {
-			stats->OutputRequests++;
-			stats->OutputMegabytes = fsp->data_len;
-		}
+		stats->OutputRequests++;
+		stats->OutputMegabytes = fsp->data_len;
 	} else {
 		fsp->req_flags = 0;
-		if (stats)
-			stats->ControlRequests++;
+		stats->ControlRequests++;
 	}
 
 	fsp->tgt_flags = rp->flags;
@@ -1753,7 +1789,7 @@ out:
 EXPORT_SYMBOL(fc_queuecommand);
 
 /**
- * fc_io_compl -  Handle responses for completed commands
+ * fc_io_compl() -  Handle responses for completed commands
  * @fsp:	scsi packet
  *
  * Translates a error to a Linux SCSI error.
@@ -1767,6 +1803,9 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 	struct fc_lport *lp;
 	unsigned long flags;
 
+	/* release outstanding ddp context */
+	fc_fcp_ddp_done(fsp);
+
 	fsp->state |= FC_SRB_COMPL;
 	if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
 		spin_unlock_bh(&fsp->scsi_pkt_lock);
@@ -1836,12 +1875,12 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 		sc_cmd->result = DID_ERROR << 16;
 		break;
 	case FC_DATA_UNDRUN:
-		if (fsp->cdb_status == 0) {
+		if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
 			/*
 			 * scsi status is good but transport level
-			 * underrun. for read it should be an error??
+			 * underrun.
 			 */
-			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
+			sc_cmd->result = DID_OK << 16;
 		} else {
 			/*
 			 * scsi got underrun, this is an error
@@ -1857,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
 		break;
 	case FC_CMD_ABORTED:
-		sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
+		sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
 		break;
 	case FC_CMD_TIME_OUT:
 		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
@@ -1883,7 +1922,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 }
 
 /**
- * fc_fcp_complete - complete processing of a fcp packet
+ * fc_fcp_complete() - complete processing of a fcp packet
  * @fsp:	fcp packet
  *
  * This function may sleep if a fsp timer is pending.
@@ -1900,9 +1939,10 @@ void fc_fcp_complete(struct fc_fcp_pkt *fsp)
 EXPORT_SYMBOL(fc_fcp_complete);
 
 /**
- * fc_eh_abort - Abort a command...from scsi host template
+ * fc_eh_abort() - Abort a command
  * @sc_cmd:	scsi command to abort
  *
+ * From scsi host template.
  * send ABTS to the target device  and wait for the response
  * sc_cmd is the pointer to the command to be aborted.
  */
@@ -1916,7 +1956,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	lp = shost_priv(sc_cmd->device->host);
 	if (lp->state != LPORT_ST_READY)
 		return rc;
-	else if (!(lp->link_status & FC_LINK_UP))
+	else if (!lp->link_up)
 		return rc;
 
 	spin_lock_irqsave(lp->host->host_lock, flags);
@@ -1946,7 +1986,7 @@ release_pkt:
 EXPORT_SYMBOL(fc_eh_abort);
 
 /**
- * fc_eh_device_reset: Reset a single LUN
+ * fc_eh_device_reset() - Reset a single LUN
  * @sc_cmd:	scsi command
  *
  * Set from scsi host template to send tm cmd to the target and wait for the
@@ -1971,9 +2011,11 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
 	if (lp->state != LPORT_ST_READY)
 		return rc;
 
+	FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id);
+
 	fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
 	if (fsp == NULL) {
-		FC_DBG("could not allocate scsi_pkt\n");
+		printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
 		sc_cmd->result = DID_NO_CONNECT << 16;
 		goto out;
 	}
@@ -1999,7 +2041,7 @@ out:
 EXPORT_SYMBOL(fc_eh_device_reset);
 
 /**
- * fc_eh_host_reset - The reset function will reset the ports on the host.
+ * fc_eh_host_reset() - The reset function will reset the ports on the host.
  * @sc_cmd:	scsi command
  */
 int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
@@ -2008,24 +2050,28 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
 	struct fc_lport *lp = shost_priv(shost);
 	unsigned long wait_tmo;
 
+	FC_SCSI_DBG(lp, "Resetting host\n");
+
 	lp->tt.lport_reset(lp);
 	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
 	while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
 		msleep(1000);
 
 	if (fc_fcp_lport_queue_ready(lp)) {
-		shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
+		shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
+			     "on port (%6x)\n", fc_host_port_id(lp->host));
 		return SUCCESS;
 	} else {
-		shost_printk(KERN_INFO, shost, "Host reset failed. "
-			     "lport not ready.\n");
+		shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
+			     "port (%6x) is not ready.\n",
+			     fc_host_port_id(lp->host));
 		return FAILED;
 	}
 }
 EXPORT_SYMBOL(fc_eh_host_reset);
 
 /**
- * fc_slave_alloc - configure queue depth
+ * fc_slave_alloc() - configure queue depth
  * @sdev:	scsi device
  *
  * Configures queue depth based on host's cmd_per_len. If not set
@@ -2077,7 +2123,8 @@ void fc_fcp_destroy(struct fc_lport *lp)
 	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
 
 	if (!list_empty(&si->scsi_pkt_queue))
-		printk(KERN_ERR "Leaked scsi packets.\n");
+		printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
+		       "port (%6x)\n", fc_host_port_id(lp->host));
 
 	mempool_destroy(si->scsi_pkt_pool);
 	kfree(si);
@@ -2124,9 +2171,10 @@ static int __init libfc_init(void)
 
 	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
 					    sizeof(struct fc_fcp_pkt),
-					    0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+					    0, SLAB_HWCACHE_ALIGN, NULL);
 	if (scsi_pkt_cachep == NULL) {
-		FC_DBG("Unable to allocate SRB cache...module load failed!");
+		printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
+		       "module load failed!");
 		return -ENOMEM;
 	}
 
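
Illustrative note: the per-file FC_DEBUG_* macros are replaced by a single
fc_debug_logging bitmask exposed as the libfc debug_logging module parameter.
The real FC_FCP_DBG/FC_LPORT_DBG definitions live in a libfc header that is
not part of this hunk; a hypothetical, reduced version of that style of macro
might look like:

#include <linux/kernel.h>
#include <scsi/libfc.h>

/* fc_debug_logging is the bitmask defined above in fc_fcp.c */
extern unsigned int fc_debug_logging;

#define EX_FCP_LOGGING	0x01	/* hypothetical bit for FCP messages */

#define EX_FCP_DBG(fsp, fmt, args...)					\
	do {								\
		if (unlikely(fc_debug_logging & EX_FCP_LOGGING))	\
			printk(KERN_INFO "host%u: fcp: " fmt,		\
			       (fsp)->lp->host->host_no, ##args);	\
	} while (0)

Since the parameter is declared S_IRUGO|S_IWUSR it can be set at load time
(e.g. modprobe libfc debug_logging=<mask>) or changed at runtime through
/sys/module/libfc/parameters/debug_logging.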
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
index 0bbeff2..63fe00c 100644
--- a/drivers/scsi/libfc/fc_frame.c
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -42,7 +42,7 @@ u32 fc_frame_crc_check(struct fc_frame *fp)
 	len = (fr_len(fp) + 3) & ~3;	/* round up length to include fill */
 	bp = (const u8 *) fr_hdr(fp);
 	crc = ~crc32(~0, bp, len);
-	error = crc ^ *(u32 *) (bp + len);
+	error = crc ^ fr_crc(fp);
 	return error;
 }
 EXPORT_SYMBOL(fc_frame_crc_check);
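
Illustrative note: the check now compares against fr_crc(fp), the received CRC
that the lower layer stored with the frame, instead of dereferencing past the
frame data. A sketch of the shape of that check, with hypothetical names:

#include <linux/types.h>
#include <linux/crc32.h>

static bool example_fc_crc_ok(const u8 *hdr, size_t frame_len, u32 rx_crc)
{
	size_t len = (frame_len + 3) & ~3;	/* include fill bytes */
	u32 crc = ~crc32(~0, hdr, len);

	return crc == rx_crc;
}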
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 4a9f54a..2db9f78 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -18,34 +18,51 @@
  */
 
 /*
- * General locking notes:
+ * PORT LOCKING NOTES
  *
- * The lport and rport blocks both have mutexes that are used to protect
- * the port objects states. The main motivation for this protection is that
- * we don't want to be preparing a request/response in one context while
- * another thread "resets" the port in question. For example, if the lport
- * block is sending a SCR request to the directory server we don't want
- * the lport to be reset before we fill out the frame header's port_id. The
- * problem is that a reset would cause the lport's port_id to reset to 0.
- * If we don't protect the lport we'd spew incorrect frames.
+ * These comments only apply to the 'port code' which consists of the lport,
+ * disc and rport blocks.
  *
- * At the time of this writing there are two primary mutexes, one for the
- * lport and one for the rport. Since the lport uses the rport and makes
- * calls into that block the rport should never make calls that would cause
- * the lport's mutex to be locked. In other words, the lport's mutex is
- * considered the outer lock and the rport's lock is considered the inner
- * lock. The bottom line is that you can hold a lport's mutex and then
- * hold the rport's mutex, but not the other way around.
+ * MOTIVATION
  *
- * The only complication to this rule is the callbacks from the rport to
- * the lport's event_callback function. When rports become READY they make
- * a callback to the lport so that it can track them. In the case of the
- * directory server that callback might cause the lport to change its
- * state, implying that the lport mutex would need to be held. This problem
- * was solved by serializing the rport notifications to the lport and the
- * callback is made without holding the rport's lock.
+ * The lport, disc and rport blocks all have mutexes that are used to protect
+ * those objects. The main motivation for these locks is to prevent from
+ * having an lport reset just before we send a frame. In that scenario the
+ * lport's FID would get set to zero and then we'd send a frame with an
+ * invalid SID. We also need to ensure that states don't change unexpectedly
+ * while processing another state.
  *
- * lport locking notes:
+ * HIERARCHY
+ *
+ * The following hierarchy defines the locking rules. A greater lock
+ * may be held before acquiring a lesser lock, but a lesser lock should never
+ * be held while attempting to acquire a greater lock. Here is the hierarchy:
+ *
+ * lport > disc, lport > rport, disc > rport
+ *
+ * CALLBACKS
+ *
+ * The callbacks cause complications with this scheme. There is a callback
+ * from the rport (to either lport or disc) and a callback from disc
+ * (to the lport).
+ *
+ * As rports exit the rport state machine a callback is made to the owner of
+ * the rport to notify success or failure. Since the callback is likely to
+ * cause the lport or disc to grab its lock we cannot hold the rport lock
+ * while making the callback. To ensure that the rport is not free'd while
+ * processing the callback the rport callbacks are serialized through a
+ * single-threaded workqueue. An rport would never be free'd while in a
+ * callback handler because no other rport work in this queue can be executed
+ * at the same time.
+ *
+ * When discovery succeeds or fails a callback is made to the lport as
+ * notification. Currently, successful discovery causes the lport to take no
+ * action. A failure will cause the lport to reset. There is likely a circular
+ * locking problem with this implementation.
+ */
+
+/*
+ * LPORT LOCKING
  *
  * The critical sections protected by the lport's mutex are quite broad and
  * may be improved upon in the future. The lport code and its locking doesn't
@@ -54,9 +71,9 @@
  *
  * The strategy is to lock whenever processing a request or response. Note
  * that every _enter_* function corresponds to a state change. They generally
- * change the lports state and then sends a request out on the wire. We lock
+ * change the lports state and then send a request out on the wire. We lock
  * before calling any of these functions to protect that state change. This
- * means that the entry points into the lport block to manage the locks while
+ * means that the entry points into the lport block manage the locks while
  * the state machine can transition between states (i.e. _enter_* functions)
  * while always staying protected.
  *
@@ -68,9 +85,6 @@
  * Retries also have to consider the locking. The retries occur from a work
  * context and the work function will lock the lport and then retry the state
  * (i.e. _enter_* function).
- *
- * The implication to all of this is that each lport can only process one
- * state at a time.
  */
 
 #include <linux/timer.h>
@@ -87,14 +101,6 @@
 
 #define	DNS_DELAY	      3 /* Discovery delay after RSCN (in seconds)*/
 
-static int fc_lport_debug;
-
-#define FC_DEBUG_LPORT(fmt...)			\
-	do {					\
-		if (fc_lport_debug)		\
-			FC_DBG(fmt);		\
-	} while (0)
-
 static void fc_lport_error(struct fc_lport *, struct fc_frame *);
 
 static void fc_lport_enter_reset(struct fc_lport *);
@@ -125,7 +131,7 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
 }
 
 /**
- * fc_lport_rport_event - Event handler for rport events
+ * fc_lport_rport_callback() - Event handler for rport events
  * @lport: The lport which is receiving the event
  * @rport: The rport which the event has occured on
  * @event: The event that occured
@@ -133,12 +139,12 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
  * Locking Note: The rport lock should not be held when calling
  *		 this function.
  */
-static void fc_lport_rport_event(struct fc_lport *lport,
-				 struct fc_rport *rport,
-				 enum fc_lport_event event)
+static void fc_lport_rport_callback(struct fc_lport *lport,
+				    struct fc_rport *rport,
+				    enum fc_rport_event event)
 {
-	FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
-		       rport->port_id);
+	FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
+		     rport->port_id);
 
 	switch (event) {
 	case RPORT_EV_CREATED:
@@ -148,19 +154,19 @@ static void fc_lport_rport_event(struct fc_lport *lport,
 				lport->dns_rp = rport;
 				fc_lport_enter_rpn_id(lport);
 			} else {
-				FC_DEBUG_LPORT("Received an CREATED event on "
-					       "port (%6x) for the directory "
-					       "server, but the lport is not "
-					       "in the DNS state, it's in the "
-					       "%d state", rport->port_id,
-					       lport->state);
+				FC_LPORT_DBG(lport, "Received a CREATED event "
+					     "on port (%6x) for the directory "
+					     "server, but the lport is not "
+					     "in the DNS state, it's in the "
+					     "%d state", rport->port_id,
+					     lport->state);
 				lport->tt.rport_logoff(rport);
 			}
 			mutex_unlock(&lport->lp_mutex);
 		} else
-			FC_DEBUG_LPORT("Received an event for port (%6x) "
-				       "which is not the directory server\n",
-				       rport->port_id);
+			FC_LPORT_DBG(lport, "Received an event for port (%6x) "
+				     "which is not the directory server\n",
+				     rport->port_id);
 		break;
 	case RPORT_EV_LOGO:
 	case RPORT_EV_FAILED:
@@ -171,9 +177,9 @@ static void fc_lport_rport_event(struct fc_lport *lport,
 			mutex_unlock(&lport->lp_mutex);
 
 		} else
-			FC_DEBUG_LPORT("Received an event for port (%6x) "
-				       "which is not the directory server\n",
-				       rport->port_id);
+			FC_LPORT_DBG(lport, "Received an event for port (%6x) "
+				     "which is not the directory server\n",
+				     rport->port_id);
 		break;
 	case RPORT_EV_NONE:
 		break;
@@ -181,7 +187,7 @@ static void fc_lport_rport_event(struct fc_lport *lport,
 }
 
 /**
- * fc_lport_state - Return a string which represents the lport's state
+ * fc_lport_state() - Return a string which represents the lport's state
  * @lport: The lport whose state is to converted to a string
  */
 static const char *fc_lport_state(struct fc_lport *lport)
@@ -195,7 +201,7 @@ static const char *fc_lport_state(struct fc_lport *lport)
 }
 
 /**
- * fc_lport_ptp_setup - Create an rport for point-to-point mode
+ * fc_lport_ptp_setup() - Create an rport for point-to-point mode
  * @lport: The lport to attach the ptp rport to
  * @fid: The FID of the ptp rport
  * @remote_wwpn: The WWPN of the ptp rport
@@ -218,7 +224,7 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
 		lport->ptp_rp = NULL;
 	}
 
-	lport->ptp_rp = fc_rport_rogue_create(&dp);
+	lport->ptp_rp = lport->tt.rport_create(&dp);
 
 	lport->tt.rport_login(lport->ptp_rp);
 
@@ -236,7 +242,7 @@ void fc_get_host_port_state(struct Scsi_Host *shost)
 {
 	struct fc_lport *lp = shost_priv(shost);
 
-	if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
+	if (lp->link_up)
 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
 	else
 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
@@ -253,10 +259,10 @@ EXPORT_SYMBOL(fc_get_host_speed);
 
 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
 {
-	int i;
 	struct fc_host_statistics *fcoe_stats;
 	struct fc_lport *lp = shost_priv(shost);
 	struct timespec v0, v1;
+	unsigned int cpu;
 
 	fcoe_stats = &lp->host_stats;
 	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
@@ -265,10 +271,11 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
 	jiffies_to_timespec(lp->boot_time, &v1);
 	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
 
-	for_each_online_cpu(i) {
-		struct fcoe_dev_stats *stats = lp->dev_stats[i];
-		if (stats == NULL)
-			continue;
+	for_each_possible_cpu(cpu) {
+		struct fcoe_dev_stats *stats;
+
+		stats = per_cpu_ptr(lp->dev_stats, cpu);
+
 		fcoe_stats->tx_frames += stats->TxFrames;
 		fcoe_stats->tx_words += stats->TxWords;
 		fcoe_stats->rx_frames += stats->RxFrames;
@@ -304,9 +311,8 @@ fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
 
 	memset(flogi, 0, sizeof(*flogi));
 	flogi->fl_cmd = (u8) op;
-	put_unaligned(htonll(lport->wwpn), &flogi->fl_wwpn);
-	put_unaligned(htonll(lport->wwnn), &flogi->fl_wwnn);
-
+	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
 	sp = &flogi->fl_csp;
 	sp->sp_hi_ver = 0x20;
 	sp->sp_lo_ver = 0x20;
@@ -338,7 +344,7 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
 }
 
 /**
- * fc_lport_recv_rlir_req - Handle received Registered Link Incident Report.
+ * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
  * @lport: Fibre Channel local port recieving the RLIR
  * @sp: current sequence in the RLIR exchange
  * @fp: RLIR request frame
@@ -349,15 +355,15 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
 static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
 				   struct fc_lport *lport)
 {
-	FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
-		       fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
+		     fc_lport_state(lport));
 
 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
 	fc_frame_free(fp);
 }
 
 /**
- * fc_lport_recv_echo_req - Handle received ECHO request
+ * fc_lport_recv_echo_req() - Handle received ECHO request
  * @lport: Fibre Channel local port recieving the ECHO
  * @sp: current sequence in the ECHO exchange
  * @fp: ECHO request frame
@@ -375,8 +381,8 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
 	void *dp;
 	u32 f_ctl;
 
-	FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
-		       fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
+		     fc_lport_state(lport));
 
 	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
 	pp = fc_frame_payload_get(in_fp, len);
@@ -399,7 +405,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
 }
 
 /**
- * fc_lport_recv_echo_req - Handle received Request Node ID data request
+ * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
  * @lport: Fibre Channel local port recieving the RNID
  * @sp: current sequence in the RNID exchange
  * @fp: RNID request frame
@@ -423,8 +429,8 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
 	size_t len;
 	u32 f_ctl;
 
-	FC_DEBUG_LPORT("Received RNID request while in state %s\n",
-		       fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
+		     fc_lport_state(lport));
 
 	req = fc_frame_payload_get(in_fp, sizeof(*req));
 	if (!req) {
@@ -466,16 +472,16 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
 }
 
 /**
- * fc_lport_recv_adisc_req - Handle received Address Discovery Request
+ * fc_lport_recv_adisc_req() - Handle received Address Discovery Request
  * @lport: Fibre Channel local port recieving the ADISC
  * @sp: current sequence in the ADISC exchange
  * @fp: ADISC request frame
  *
- * Locking Note: The lport lock is exected to be held before calling
+ * Locking Note: The lport lock is expected to be held before calling
  * this function.
  */
 static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
-				   struct fc_lport *lport)
+				    struct fc_lport *lport)
 {
 	struct fc_frame *fp;
 	struct fc_exch *ep = fc_seq_exch(sp);
@@ -484,8 +490,8 @@ static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
 	size_t len;
 	u32 f_ctl;
 
-	FC_DEBUG_LPORT("Received ADISC request while in state %s\n",
-		       fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n",
+		     fc_lport_state(lport));
 
 	req = fc_frame_payload_get(in_fp, sizeof(*req));
 	if (!req) {
@@ -516,7 +522,7 @@ static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
 }
 
 /**
- * fc_lport_recv_logo_req - Handle received fabric LOGO request
+ * fc_lport_recv_logo_req() - Handle received fabric LOGO request
  * @lport: Fibre Channel local port recieving the LOGO
  * @sp: current sequence in the LOGO exchange
  * @fp: LOGO request frame
@@ -533,7 +539,7 @@ static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
 }
 
 /**
- * fc_fabric_login - Start the lport state machine
+ * fc_fabric_login() - Start the lport state machine
  * @lport: The lport that should log into the fabric
  *
  * Locking Note: This function should not be called
@@ -555,17 +561,17 @@ int fc_fabric_login(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_fabric_login);
 
 /**
- * fc_linkup - Handler for transport linkup events
+ * fc_linkup() - Handler for transport linkup events
  * @lport: The lport whose link is up
  */
 void fc_linkup(struct fc_lport *lport)
 {
-	FC_DEBUG_LPORT("Link is up for port (%6x)\n",
-		       fc_host_port_id(lport->host));
+	printk(KERN_INFO "libfc: Link up on port (%6x)\n",
+	       fc_host_port_id(lport->host));
 
 	mutex_lock(&lport->lp_mutex);
-	if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
-		lport->link_status |= FC_LINK_UP;
+	if (!lport->link_up) {
+		lport->link_up = 1;
 
 		if (lport->state == LPORT_ST_RESET)
 			fc_lport_enter_flogi(lport);
@@ -575,17 +581,17 @@ void fc_linkup(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_linkup);
 
 /**
- * fc_linkdown - Handler for transport linkdown events
+ * fc_linkdown() - Handler for transport linkdown events
  * @lport: The lport whose link is down
  */
 void fc_linkdown(struct fc_lport *lport)
 {
 	mutex_lock(&lport->lp_mutex);
-	FC_DEBUG_LPORT("Link is down for port (%6x)\n",
-		       fc_host_port_id(lport->host));
+	printk(KERN_INFO "libfc: Link down on port (%6x)\n",
+	       fc_host_port_id(lport->host));
 
-	if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
-		lport->link_status &= ~(FC_LINK_UP);
+	if (lport->link_up) {
+		lport->link_up = 0;
 		fc_lport_enter_reset(lport);
 		lport->tt.fcp_cleanup(lport);
 	}
@@ -594,50 +600,30 @@ void fc_linkdown(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_linkdown);
 
 /**
- * fc_pause - Pause the flow of frames
- * @lport: The lport to be paused
- */
-void fc_pause(struct fc_lport *lport)
-{
-	mutex_lock(&lport->lp_mutex);
-	lport->link_status |= FC_PAUSE;
-	mutex_unlock(&lport->lp_mutex);
-}
-EXPORT_SYMBOL(fc_pause);
-
-/**
- * fc_unpause - Unpause the flow of frames
- * @lport: The lport to be unpaused
- */
-void fc_unpause(struct fc_lport *lport)
-{
-	mutex_lock(&lport->lp_mutex);
-	lport->link_status &= ~(FC_PAUSE);
-	mutex_unlock(&lport->lp_mutex);
-}
-EXPORT_SYMBOL(fc_unpause);
-
-/**
- * fc_fabric_logoff - Logout of the fabric
+ * fc_fabric_logoff() - Logout of the fabric
  * @lport:	      fc_lport pointer to logoff the fabric
  *
  * Return value:
  *	0 for success, -1 for failure
- **/
+ */
 int fc_fabric_logoff(struct fc_lport *lport)
 {
 	lport->tt.disc_stop_final(lport);
 	mutex_lock(&lport->lp_mutex);
-	kfree(lport->disc);
-	lport->disc = NULL;
+	if (lport->dns_rp)
+		lport->tt.rport_logoff(lport->dns_rp);
+	mutex_unlock(&lport->lp_mutex);
+	lport->tt.rport_flush_queue();
+	mutex_lock(&lport->lp_mutex);
 	fc_lport_enter_logo(lport);
 	mutex_unlock(&lport->lp_mutex);
+	cancel_delayed_work_sync(&lport->retry_work);
 	return 0;
 }
 EXPORT_SYMBOL(fc_fabric_logoff);
 
 /**
- * fc_lport_destroy - unregister a fc_lport
+ * fc_lport_destroy() - unregister a fc_lport
  * @lport:	      fc_lport pointer to unregister
  *
  * Return value:
@@ -647,20 +633,34 @@ EXPORT_SYMBOL(fc_fabric_logoff);
  * clean-up all the allocated memory
  * and free up other system resources.
  *
- **/
+ */
 int fc_lport_destroy(struct fc_lport *lport)
 {
+	mutex_lock(&lport->lp_mutex);
+	lport->state = LPORT_ST_NONE;
+	lport->link_up = 0;
 	lport->tt.frame_send = fc_frame_drop;
+	mutex_unlock(&lport->lp_mutex);
+
 	lport->tt.fcp_abort_io(lport);
-	lport->tt.exch_mgr_reset(lport->emp, 0, 0);
+	lport->tt.exch_mgr_reset(lport, 0, 0);
 	return 0;
 }
 EXPORT_SYMBOL(fc_lport_destroy);
 
+/**
+ * fc_set_mfs() - sets up the mfs for the corresponding fc_lport
+ * @lport: fc_lport pointer to update
+ * @mfs: the new mfs for fc_lport
+ *
+ * Set mfs for the given fc_lport to the new mfs.
+ *
+ * Return: 0 for success
+ */
 int fc_set_mfs(struct fc_lport *lport, u32 mfs)
 {
 	unsigned int old_mfs;
-	int rc = -1;
+	int rc = -EINVAL;
 
 	mutex_lock(&lport->lp_mutex);
 
@@ -668,7 +668,6 @@ int fc_set_mfs(struct fc_lport *lport, u32 mfs)
 
 	if (mfs >= FC_MIN_MAX_FRAME) {
 		mfs &= ~3;
-		WARN_ON((size_t) mfs < FC_MIN_MAX_FRAME);
 		if (mfs > FC_MAX_FRAME)
 			mfs = FC_MAX_FRAME;
 		mfs -= sizeof(struct fc_frame_header);
@@ -686,7 +685,7 @@ int fc_set_mfs(struct fc_lport *lport, u32 mfs)
 EXPORT_SYMBOL(fc_set_mfs);
 
 /**
- * fc_lport_disc_callback - Callback for discovery events
+ * fc_lport_disc_callback() - Callback for discovery events
  * @lport: FC local port
  * @event: The discovery event
  */
@@ -694,12 +693,11 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
 {
 	switch (event) {
 	case DISC_EV_SUCCESS:
-		FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
-			       fc_host_port_id(lport->host));
+		FC_LPORT_DBG(lport, "Discovery succeeded\n");
 		break;
 	case DISC_EV_FAILED:
-		FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
-			       fc_host_port_id(lport->host));
+		printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
+		       fc_host_port_id(lport->host));
 		mutex_lock(&lport->lp_mutex);
 		fc_lport_enter_reset(lport);
 		mutex_unlock(&lport->lp_mutex);
@@ -711,7 +709,7 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
 }
 
 /**
- * fc_rport_enter_ready - Enter the ready state and start discovery
+ * fc_rport_enter_ready() - Enter the ready state and start discovery
  * @lport: Fibre Channel local port that is ready
  *
  * Locking Note: The lport lock is expected to be held before calling
@@ -719,8 +717,8 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
  */
 static void fc_lport_enter_ready(struct fc_lport *lport)
 {
-	FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
-		       fc_host_port_id(lport->host), fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
+		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_READY);
 
@@ -728,7 +726,7 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
 }
 
 /**
- * fc_lport_recv_flogi_req - Receive a FLOGI request
+ * fc_lport_recv_flogi_req() - Receive a FLOGI request
  * @sp_in: The sequence the FLOGI is on
  * @rx_fp: The frame the FLOGI is in
  * @lport: The lport that recieved the request
@@ -755,21 +753,21 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
 	u32 local_fid;
 	u32 f_ctl;
 
-	FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
-		       fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
+		     fc_lport_state(lport));
 
 	fh = fc_frame_header_get(rx_fp);
 	remote_fid = ntoh24(fh->fh_s_id);
 	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
 	if (!flp)
 		goto out;
-	remote_wwpn = ntohll(get_unaligned(&flp->fl_wwpn));
+	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
 	if (remote_wwpn == lport->wwpn) {
-		FC_DBG("FLOGI from port with same WWPN %llx "
-		       "possible configuration error\n", remote_wwpn);
+		printk(KERN_WARNING "libfc: Received FLOGI from port "
+		       "with same WWPN %llx\n", remote_wwpn);
 		goto out;
 	}
-	FC_DBG("FLOGI from port WWPN %llx\n", remote_wwpn);
+	FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
 
 	/*
 	 * XXX what is the right thing to do for FIDs?
@@ -808,7 +806,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
 		fc_lport_error(lport, fp);
 	}
 	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
-			   ntohll(get_unaligned(&flp->fl_wwnn)));
+			   get_unaligned_be64(&flp->fl_wwnn));
 
 	lport->tt.disc_start(fc_lport_disc_callback, lport);
 
@@ -818,7 +816,7 @@ out:
 }
 
 /**
- * fc_lport_recv_req - The generic lport request handler
+ * fc_lport_recv_req() - The generic lport request handler
  * @lport: The lport that received the request
  * @sp: The sequence the request is on
  * @fp: The frame the request is in
@@ -888,10 +886,9 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
 			d_id = ntoh24(fh->fh_d_id);
 
 			rport = lport->tt.rport_lookup(lport, s_id);
-			if (rport) {
+			if (rport)
 				lport->tt.rport_recv_req(sp, fp, rport);
-				put_device(&rport->dev); /* hold from lookup */
-			} else {
+			else {
 				rjt_data.fp = NULL;
 				rjt_data.reason = ELS_RJT_UNAB;
 				rjt_data.explan = ELS_EXPL_NONE;
@@ -902,7 +899,8 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
 			}
 		}
 	} else {
-		FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
+		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
+			     fr_eof(fp));
 		fc_frame_free(fp);
 	}
 	mutex_unlock(&lport->lp_mutex);
@@ -915,7 +913,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
 }
 
 /**
- * fc_lport_reset - Reset an lport
+ * fc_lport_reset() - Reset an lport
  * @lport: The lport which should be reset
  *
  * Locking Note: This functions should not be called with the
@@ -923,6 +921,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
  */
 int fc_lport_reset(struct fc_lport *lport)
 {
+	cancel_delayed_work_sync(&lport->retry_work);
 	mutex_lock(&lport->lp_mutex);
 	fc_lport_enter_reset(lport);
 	mutex_unlock(&lport->lp_mutex);
@@ -931,7 +930,7 @@ int fc_lport_reset(struct fc_lport *lport)
 EXPORT_SYMBOL(fc_lport_reset);
 
 /**
- * fc_rport_enter_reset - Reset the local port
+ * fc_rport_enter_reset() - Reset the local port
  * @lport: Fibre Channel local port to be reset
  *
  * Locking Note: The lport lock is expected to be held before calling
@@ -939,8 +938,8 @@ EXPORT_SYMBOL(fc_lport_reset);
  */
 static void fc_lport_enter_reset(struct fc_lport *lport)
 {
-	FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
-		       fc_host_port_id(lport->host), fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
+		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_RESET);
 
@@ -954,16 +953,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
 
 	lport->tt.disc_stop(lport);
 
-	lport->tt.exch_mgr_reset(lport->emp, 0, 0);
+	lport->tt.exch_mgr_reset(lport, 0, 0);
 	fc_host_fabric_name(lport->host) = 0;
 	fc_host_port_id(lport->host) = 0;
 
-	if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
+	if (lport->link_up)
 		fc_lport_enter_flogi(lport);
 }
 
 /**
- * fc_lport_error - Handler for any errors
+ * fc_lport_error() - Handler for any errors
  * @lport: The fc_lport object
  * @fp: The frame pointer
  *
@@ -974,44 +973,41 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
 {
 	unsigned long delay = 0;
-	FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
-		       PTR_ERR(fp), fc_lport_state(lport),
-		       lport->retry_count);
+	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
+		     PTR_ERR(fp), fc_lport_state(lport),
+		     lport->retry_count);
 
-	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+	if (lport->retry_count < lport->max_retry_count) {
+		lport->retry_count++;
 		/*
-		 * Memory allocation failure, or the exchange timed out.
-		 *  Retry after delay
+		 * for frame alloc failures, different delay than e_d_tov
 		 */
-		if (lport->retry_count < lport->max_retry_count) {
-			lport->retry_count++;
-			if (!fp)
-				delay = msecs_to_jiffies(500);
-			else
-				delay =	msecs_to_jiffies(lport->e_d_tov);
-
-			schedule_delayed_work(&lport->retry_work, delay);
-		} else {
-			switch (lport->state) {
-			case LPORT_ST_NONE:
-			case LPORT_ST_READY:
-			case LPORT_ST_RESET:
-			case LPORT_ST_RPN_ID:
-			case LPORT_ST_RFT_ID:
-			case LPORT_ST_SCR:
-			case LPORT_ST_DNS:
-			case LPORT_ST_FLOGI:
-			case LPORT_ST_LOGO:
-				fc_lport_enter_reset(lport);
-				break;
-			}
+		if (!fp)
+			delay = msecs_to_jiffies(500);
+		else
+			delay =	msecs_to_jiffies(lport->e_d_tov);
+
+		schedule_delayed_work(&lport->retry_work, delay);
+	} else {
+		switch (lport->state) {
+		case LPORT_ST_NONE:
+		case LPORT_ST_READY:
+		case LPORT_ST_RESET:
+		case LPORT_ST_RPN_ID:
+		case LPORT_ST_RFT_ID:
+		case LPORT_ST_SCR:
+		case LPORT_ST_DNS:
+		case LPORT_ST_FLOGI:
+		case LPORT_ST_LOGO:
+			fc_lport_enter_reset(lport);
+			break;
 		}
 	}
 }
 
 /**
- * fc_lport_rft_id_resp - Handle response to Register Fibre
- *			  Channel Types by ID (RPN_ID) request
+ * fc_lport_rft_id_resp() - Handle response to Register Fibre
+ *			    Channel Types by ID (RFT_ID) request
  * @sp: current sequence in RPN_ID exchange
  * @fp: response frame
  * @lp_arg: Fibre Channel host port instance
@@ -1032,11 +1028,13 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_DEBUG_LPORT("Received a RFT_ID response\n");
+	FC_LPORT_DBG(lport, "Received a RFT_ID response\n");
 
 	if (lport->state != LPORT_ST_RFT_ID) {
-		FC_DBG("Received a RFT_ID response, but in state %s\n",
-		       fc_lport_state(lport));
+		FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
+			     "%s\n", fc_lport_state(lport));
+		if (IS_ERR(fp))
+			goto err;
 		goto out;
 	}
 
@@ -1062,8 +1060,8 @@ err:
 }
 
 /**
- * fc_lport_rpn_id_resp - Handle response to Register Port
- *			  Name by ID (RPN_ID) request
+ * fc_lport_rpn_id_resp() - Handle response to Register Port
+ *			    Name by ID (RPN_ID) request
  * @sp: current sequence in RPN_ID exchange
  * @fp: response frame
  * @lp_arg: Fibre Channel host port instance
@@ -1084,11 +1082,13 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_DEBUG_LPORT("Received a RPN_ID response\n");
+	FC_LPORT_DBG(lport, "Received a RPN_ID response\n");
 
 	if (lport->state != LPORT_ST_RPN_ID) {
-		FC_DBG("Received a RPN_ID response, but in state %s\n",
-		       fc_lport_state(lport));
+		FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
+			     "%s\n", fc_lport_state(lport));
+		if (IS_ERR(fp))
+			goto err;
 		goto out;
 	}
 
@@ -1114,7 +1114,7 @@ err:
 }
 
 /**
- * fc_lport_scr_resp - Handle response to State Change Register (SCR) request
+ * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
  * @sp: current sequence in SCR exchange
  * @fp: response frame
  * @lp_arg: Fibre Channel lport port instance that sent the registration request
@@ -1134,11 +1134,13 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_DEBUG_LPORT("Received a SCR response\n");
+	FC_LPORT_DBG(lport, "Received a SCR response\n");
 
 	if (lport->state != LPORT_ST_SCR) {
-		FC_DBG("Received a SCR response, but in state %s\n",
-		       fc_lport_state(lport));
+		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
+			     "%s\n", fc_lport_state(lport));
+		if (IS_ERR(fp))
+			goto err;
 		goto out;
 	}
 
@@ -1160,7 +1162,7 @@ err:
 }
 
 /**
- * fc_lport_enter_scr - Send a State Change Register (SCR) request
+ * fc_lport_enter_scr() - Send a State Change Register (SCR) request
  * @lport: Fibre Channel local port to register for state changes
  *
  * Locking Note: The lport lock is expected to be held before calling
@@ -1170,8 +1172,8 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
 {
 	struct fc_frame *fp;
 
-	FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
-		       fc_host_port_id(lport->host), fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
+		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_SCR);
 
@@ -1187,7 +1189,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
 }
 
 /**
- * fc_lport_enter_rft_id - Register FC4-types with the name server
+ * fc_lport_enter_rft_id() - Register FC4-types with the name server
  * @lport: Fibre Channel local port to register
  *
  * Locking Note: The lport lock is expected to be held before calling
@@ -1199,8 +1201,8 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
 	struct fc_ns_fts *lps;
 	int i;
 
-	FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
-		       fc_host_port_id(lport->host), fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
+		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
 
@@ -1229,7 +1231,7 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
 }
 
 /**
- * fc_rport_enter_rft_id - Register port name with the name server
+ * fc_lport_enter_rpn_id() - Register port name with the name server
  * @lport: Fibre Channel local port to register
  *
  * Locking Note: The lport lock is expected to be held before calling
@@ -1239,8 +1241,8 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
 {
 	struct fc_frame *fp;
 
-	FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
-		       fc_host_port_id(lport->host), fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
+		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
 
@@ -1257,8 +1259,12 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport)
 		fc_lport_error(lport, fp);
 }
 
+static struct fc_rport_operations fc_lport_rport_ops = {
+	.event_callback = fc_lport_rport_callback,
+};
+
 /**
- * fc_rport_enter_dns - Create a rport to the name server
+ * fc_lport_enter_dns() - Create a rport to the name server
  * @lport: Fibre Channel local port requesting a rport for the name server
  *
  * Locking Note: The lport lock is expected to be held before calling
@@ -1276,17 +1282,17 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
 	dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
 	dp.lp = lport;
 
-	FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
-		       fc_host_port_id(lport->host), fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
+		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_DNS);
 
-	rport = fc_rport_rogue_create(&dp);
+	rport = lport->tt.rport_create(&dp);
 	if (!rport)
 		goto err;
 
 	rdata = rport->dd_data;
-	rdata->event_callback = fc_lport_rport_event;
+	rdata->ops = &fc_lport_rport_ops;
 	lport->tt.rport_login(rport);
 	return;
 
@@ -1295,12 +1301,14 @@ err:
 }
 
 /**
- * fc_lport_timeout - Handler for the retry_work timer.
+ * fc_lport_timeout() - Handler for the retry_work timer.
  * @work: The work struct of the fc_lport
  */
-static void fc_lport_timeout(void *data)
+static void fc_lport_timeout(struct work_struct *work)
 {
-	struct fc_lport *lport = data;
+	struct fc_lport *lport =
+		container_of(work, struct fc_lport,
+			     retry_work.work);
 
 	mutex_lock(&lport->lp_mutex);
 
@@ -1334,7 +1342,7 @@ static void fc_lport_timeout(void *data)
 }
 
 /**
- * fc_lport_logo_resp - Handle response to LOGO request
+ * fc_lport_logo_resp() - Handle response to LOGO request
  * @sp: current sequence in LOGO exchange
  * @fp: response frame
  * @lp_arg: Fibre Channel lport port instance that sent the LOGO request
@@ -1354,11 +1362,13 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_DEBUG_LPORT("Received a LOGO response\n");
+	FC_LPORT_DBG(lport, "Received a LOGO response\n");
 
 	if (lport->state != LPORT_ST_LOGO) {
-		FC_DBG("Received a LOGO response, but in state %s\n",
-		       fc_lport_state(lport));
+		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
+			     "%s\n", fc_lport_state(lport));
+		if (IS_ERR(fp))
+			goto err;
 		goto out;
 	}
 
@@ -1380,7 +1390,7 @@ err:
 }
 
 /**
- * fc_rport_enter_logo - Logout of the fabric
+ * fc_lport_enter_logo() - Logout of the fabric
  * @lport: Fibre Channel local port to be logged out
  *
  * Locking Note: The lport lock is expected to be held before calling
@@ -1391,15 +1401,11 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
 	struct fc_frame *fp;
 	struct fc_els_logo *logo;
 
-	FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
-		       fc_host_port_id(lport->host), fc_lport_state(lport));
+	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
+		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_LOGO);
 
-	/* DNS session should be closed so we can release it here */
-	if (lport->dns_rp)
-		lport->tt.rport_logoff(lport->dns_rp);
-
 	fp = fc_frame_alloc(lport, sizeof(*logo));
 	if (!fp) {
 		fc_lport_error(lport, fp);
@@ -1412,7 +1418,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
 }
 
 /**
- * fc_lport_flogi_resp - Handle response to FLOGI request
+ * fc_lport_flogi_resp() - Handle response to FLOGI request
  * @sp: current sequence in FLOGI exchange
  * @fp: response frame
  * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request
@@ -1438,11 +1444,13 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_DEBUG_LPORT("Received a FLOGI response\n");
+	FC_LPORT_DBG(lport, "Received a FLOGI response\n");
 
 	if (lport->state != LPORT_ST_FLOGI) {
-		FC_DBG("Received a FLOGI response, but in state %s\n",
-		       fc_lport_state(lport));
+		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
+			     "%s\n", fc_lport_state(lport));
+		if (IS_ERR(fp))
+			goto err;
 		goto out;
 	}
 
@@ -1455,7 +1463,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 	did = ntoh24(fh->fh_d_id);
 	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
 
-		FC_DEBUG_LPORT("Assigned fid %x\n", did);
+		printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
+		       did);
 		fc_host_port_id(lport->host) = did;
 
 		flp = fc_frame_payload_get(fp, sizeof(*flp));
@@ -1474,15 +1483,18 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 				if (e_d_tov > lport->e_d_tov)
 					lport->e_d_tov = e_d_tov;
 				lport->r_a_tov = 2 * e_d_tov;
-				FC_DBG("Point-to-Point mode\n");
+				printk(KERN_INFO "libfc: Port (%6x) entered "
+				       "point to point mode\n", did);
 				fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
-					ntohll(get_unaligned(&flp->fl_wwpn)),
-					ntohll(get_unaligned(&flp->fl_wwnn)));
+						   get_unaligned_be64(
+							   &flp->fl_wwpn),
+						   get_unaligned_be64(
+							   &flp->fl_wwnn));
 			} else {
 				lport->e_d_tov = e_d_tov;
 				lport->r_a_tov = r_a_tov;
 				fc_host_fabric_name(lport->host) =
-					ntohll(get_unaligned(&flp->fl_wwnn));
+					get_unaligned_be64(&flp->fl_wwnn);
 				fc_lport_enter_dns(lport);
 			}
 		}
@@ -1495,7 +1507,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 			}
 		}
 	} else {
-		FC_DBG("bad FLOGI response\n");
+		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
+		fc_lport_error(lport, fp);
 	}
 
 out:
@@ -1505,7 +1518,7 @@ err:
 }
 
 /**
- * fc_rport_enter_flogi - Send a FLOGI request to the fabric manager
+ * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
  * @lport: Fibre Channel local port to be logged in to the fabric
  *
  * Locking Note: The lport lock is expected to be held before calling
@@ -1515,7 +1528,8 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
 {
 	struct fc_frame *fp;
 
-	FC_DEBUG_LPORT("Processing FLOGI state\n");
+	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
+		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_FLOGI);
 
@@ -1531,7 +1545,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
 /* Configure a fc_lport */
 int fc_lport_config(struct fc_lport *lport)
 {
-	INIT_WORK(&lport->retry_work, fc_lport_timeout, lport);
+	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
 	mutex_init(&lport->lp_mutex);
 
 	fc_lport_state_enter(lport, LPORT_ST_NONE);
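
For reference, the reworked fc_lport_error() above now retries every error type
up to max_retry_count, waiting 500 ms when fp is NULL (a frame allocation
failure) and e_d_tov otherwise, and falls back to fc_lport_enter_reset() once
the retries are used up. A minimal standalone C sketch of just that decision
(made-up names, not kernel code):

#include <stdio.h>

/* Illustration only: mirrors the branch structure of fc_lport_error(), which
 * really calls schedule_delayed_work(&lport->retry_work, delay) and enters
 * LPORT_ST_RESET via fc_lport_enter_reset() when retries are exhausted. */
static long lport_error_delay_ms(int have_frame, unsigned int retries,
				 unsigned int max_retries,
				 unsigned long e_d_tov_ms)
{
	if (retries >= max_retries)
		return -1;				/* caller resets the lport */
	return have_frame ? (long)e_d_tov_ms : 500;	/* !fp means alloc failure */
}

int main(void)
{
	printf("alloc failure, first retry: %ld ms\n",
	       lport_error_delay_ms(0, 0, 3, 2000));
	printf("exchange error, last retry: %ld ms\n",
	       lport_error_delay_ms(1, 2, 3, 2000));
	printf("retries exhausted: %ld (reset the lport)\n",
	       lport_error_delay_ms(1, 3, 3, 2000));
	return 0;
}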
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index db867dd..2eb89cd 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -18,20 +18,23 @@
  */
 
 /*
+ * RPORT GENERAL INFO
+ *
  * This file contains all processing regarding fc_rports. It contains the
  * rport state machine and does all rport interaction with the transport class.
  * There should be no other places in libfc that interact directly with the
  * transport class in regards to adding and deleting rports.
  *
  * fc_rport's represent N_Port's within the fabric.
+ */
+
+/*
+ * RPORT LOCKING
  *
- * rport locking notes:
- *
- * The rport should never hold the rport mutex and then lock the lport
- * mutex. The rport's mutex is considered lesser than the lport's mutex, so
- * the lport mutex can be held before locking the rport mutex, but not the
- * other way around. See the comment block at the top of fc_lport.c for more
- * details.
+ * The rport should never hold the rport mutex and then attempt to acquire
+ * either the lport or disc mutexes. The rport's mutex is considered lesser
+ * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
+ * more comments on the hierarchy.
  *
  * The locking strategy is similar to the lport's strategy. The lock protects
  * the rport's states and is held and released by the entry points to the rport
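
The hierarchy above boils down to a fixed acquisition order: a thread may take
the disc or lport mutex and then an rport mutex, never the reverse. A small
standalone pthread sketch of the legal ordering (placeholder types, not the
libfc structures):

#include <pthread.h>

/* Greater lock (lport/disc) first, lesser lock (rport) second. */
struct demo_lport { pthread_mutex_t lp_mutex; };
struct demo_rport { pthread_mutex_t rp_mutex; };

static void touch_both(struct demo_lport *lp, struct demo_rport *rp)
{
	pthread_mutex_lock(&lp->lp_mutex);	/* allowed: greater first */
	pthread_mutex_lock(&rp->rp_mutex);
	/* ... update lport and rport state ... */
	pthread_mutex_unlock(&rp->rp_mutex);
	pthread_mutex_unlock(&lp->lp_mutex);
}

/* Taking rp_mutex and then lp_mutex inverts the hierarchy and can deadlock
 * against a thread running touch_both(). */
int main(void)
{
	struct demo_lport lp = { PTHREAD_MUTEX_INITIALIZER };
	struct demo_rport rp = { PTHREAD_MUTEX_INITIALIZER };

	touch_both(&lp, &rp);
	return 0;
}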
@@ -52,14 +55,6 @@
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
 
-static int fc_rport_debug;
-
-#define FC_DEBUG_RPORT(fmt...)			\
-	do {					\
-		if (fc_rport_debug)		\
-			FC_DBG(fmt);		\
-	} while (0)
-
 struct workqueue_struct *rport_event_queue;
 
 static void fc_rport_enter_plogi(struct fc_rport *);
@@ -76,9 +71,10 @@ static void fc_rport_recv_prlo_req(struct fc_rport *,
 				   struct fc_seq *, struct fc_frame *);
 static void fc_rport_recv_logo_req(struct fc_rport *,
 				   struct fc_seq *, struct fc_frame *);
-static void fc_rport_timeout(void *);
+static void fc_rport_timeout(struct work_struct *);
 static void fc_rport_error(struct fc_rport *, struct fc_frame *);
-static void fc_rport_work(void *);
+static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *);
+static void fc_rport_work(struct work_struct *);
 
 static const char *fc_rport_state_names[] = {
 	[RPORT_ST_NONE] = "None",
@@ -90,6 +86,13 @@ static const char *fc_rport_state_names[] = {
 	[RPORT_ST_LOGO] = "LOGO",
 };
 
+static void fc_rport_rogue_destroy(struct device *dev)
+{
+	struct fc_rport *rport = dev_to_rport(dev);
+	FC_RPORT_DBG(rport, "Destroying rogue rport\n");
+	kfree(rport);
+}
+
 struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
 {
 	struct fc_rport *rport;
@@ -108,16 +111,11 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
 	rport->roles = dp->ids.roles;
 	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
 	/*
-	 * init the device, so other code can manipulate the rport as if
-	 * it came from the fc class. We also do an extra get because
-	 * libfc will free this rport instead of relying on the normal
-	 * refcounting.
-	 *
 	 * Note: all this libfc rogue rport code will be removed for
 	 * upstream so it fine that this is really ugly and hacky right now.
 	 */
 	device_initialize(&rport->dev);
-	get_device(&rport->dev);
+	rport->dev.release = fc_rport_rogue_destroy;
 
 	mutex_init(&rdata->rp_mutex);
 	rdata->local_port = dp->lp;
@@ -125,11 +123,11 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
 	rdata->rp_state = RPORT_ST_INIT;
 	rdata->event = RPORT_EV_NONE;
 	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
-	rdata->event_callback = NULL;
+	rdata->ops = NULL;
 	rdata->e_d_tov = dp->lp->e_d_tov;
 	rdata->r_a_tov = dp->lp->r_a_tov;
-	INIT_WORK(&rdata->retry_work, fc_rport_timeout, rdata);
-	INIT_WORK(&rdata->event_work, fc_rport_work, rdata);
+	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
+	INIT_WORK(&rdata->event_work, fc_rport_work);
 	/*
 	 * For good measure, but not necessary as we should only
 	 * add REAL rport to the lport list.
@@ -139,13 +137,8 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
 	return rport;
 }
 
-void fc_rport_rogue_destroy(struct fc_rport *rport)
-{
-	kfree(rport);
-}
-
 /**
- * fc_rport_state - return a string for the state the rport is in
+ * fc_rport_state() - return a string for the state the rport is in
  * @rport: The rport whose state we want to get a string for
  */
 static const char *fc_rport_state(struct fc_rport *rport)
@@ -160,7 +153,7 @@ static const char *fc_rport_state(struct fc_rport *rport)
 }
 
 /**
- * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds.
+ * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
  * @rport: Pointer to Fibre Channel remote port structure
  * @timeout: timeout in seconds
  */
@@ -174,12 +167,12 @@ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
 
 /**
- * fc_plogi_get_maxframe - Get max payload from the common service parameters
+ * fc_plogi_get_maxframe() - Get max payload from the common service parameters
  * @flp: FLOGI payload structure
  * @maxval: upper limit, may be less than what is in the service parameters
  */
-static unsigned int
-fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
+static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
+					  unsigned int maxval)
 {
 	unsigned int mfs;
 
@@ -197,7 +190,7 @@ fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
 }
 
 /**
- * fc_rport_state_enter - Change the rport's state
+ * fc_rport_state_enter() - Change the rport's state
  * @rport: The rport whose state should change
  * @new: The new state of the rport
  *
@@ -212,19 +205,20 @@ static void fc_rport_state_enter(struct fc_rport *rport,
 	rdata->rp_state = new;
 }
 
-static void fc_rport_work(void *data)
+static void fc_rport_work(struct work_struct *work)
 {
-	struct fc_rport_libfc_priv *rdata = data;
-	enum fc_lport_event event;
+	u32 port_id;
+	struct fc_rport_libfc_priv *rdata =
+		container_of(work, struct fc_rport_libfc_priv, event_work);
+	enum fc_rport_event event;
 	enum fc_rport_trans_state trans_state;
 	struct fc_lport *lport = rdata->local_port;
-	void (*event_callback)(struct fc_lport *, struct fc_rport *,
-			       enum fc_lport_event);
+	struct fc_rport_operations *rport_ops;
 	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
 
 	mutex_lock(&rdata->rp_mutex);
 	event = rdata->event;
-	event_callback = rdata->event_callback;
+	rport_ops = rdata->ops;
 
 	if (event == RPORT_EV_CREATED) {
 		struct fc_rport *new_rport;
@@ -249,45 +243,52 @@ static void fc_rport_work(void *data)
 			new_rdata = new_rport->dd_data;
 			new_rdata->e_d_tov = rdata->e_d_tov;
 			new_rdata->r_a_tov = rdata->r_a_tov;
-			new_rdata->event_callback = rdata->event_callback;
+			new_rdata->ops = rdata->ops;
 			new_rdata->local_port = rdata->local_port;
 			new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
 			new_rdata->trans_state = FC_PORTSTATE_REAL;
 			mutex_init(&new_rdata->rp_mutex);
-			INIT_WORK(&new_rdata->retry_work, fc_rport_timeout,
-				  new_rdata);
-			INIT_WORK(&new_rdata->event_work, fc_rport_work,
-				  new_rdata);
+			INIT_DELAYED_WORK(&new_rdata->retry_work,
+					  fc_rport_timeout);
 			INIT_LIST_HEAD(&new_rdata->peers);
+			INIT_WORK(&new_rdata->event_work, fc_rport_work);
 
 			fc_rport_state_enter(new_rport, RPORT_ST_READY);
 		} else {
-			FC_DBG("Failed to create the rport for port "
-			       "(%6x).\n", ids.port_id);
+			printk(KERN_WARNING "libfc: Failed to allocate "
+			       "memory for rport (%6x)\n", ids.port_id);
 			event = RPORT_EV_FAILED;
 		}
-		fc_rport_rogue_destroy(rport);
+		if (rport->port_id != FC_FID_DIR_SERV)
+			if (rport_ops->event_callback)
+				rport_ops->event_callback(lport, rport,
+							  RPORT_EV_FAILED);
+		put_device(&rport->dev);
 		rport = new_rport;
 		rdata = new_rport->dd_data;
-		if (event_callback)
-			event_callback(lport, rport, event);
+		if (rport_ops->event_callback)
+			rport_ops->event_callback(lport, rport, event);
 	} else if ((event == RPORT_EV_FAILED) ||
 		   (event == RPORT_EV_LOGO) ||
 		   (event == RPORT_EV_STOP)) {
 		trans_state = rdata->trans_state;
 		mutex_unlock(&rdata->rp_mutex);
-		if (event_callback)
-			event_callback(lport, rport, event);
+		if (rport_ops->event_callback)
+			rport_ops->event_callback(lport, rport, event);
 		if (trans_state == FC_PORTSTATE_ROGUE)
-			fc_rport_rogue_destroy(rport);
-		else
+			put_device(&rport->dev);
+		else {
+			port_id = rport->port_id;
 			fc_remote_port_delete(rport);
+			lport->tt.exch_mgr_reset(lport, 0, port_id);
+			lport->tt.exch_mgr_reset(lport, port_id, 0);
+		}
 	} else
 		mutex_unlock(&rdata->rp_mutex);
 }
 
 /**
- * fc_rport_login - Start the remote port login state machine
+ * fc_rport_login() - Start the remote port login state machine
  * @rport: Fibre Channel remote port
  *
  * Locking Note: Called without the rport lock held. This
@@ -300,7 +301,7 @@ int fc_rport_login(struct fc_rport *rport)
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
+	FC_RPORT_DBG(rport, "Login to port\n");
 
 	fc_rport_enter_plogi(rport);
 
@@ -310,7 +311,7 @@ int fc_rport_login(struct fc_rport *rport)
 }
 
 /**
- * fc_rport_logoff - Logoff and remove an rport
+ * fc_rport_logoff() - Logoff and remove an rport
  * @rport: Fibre Channel remote port to be removed
  *
  * Locking Note: Called without the rport lock held. This
@@ -323,7 +324,13 @@ int fc_rport_logoff(struct fc_rport *rport)
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
+	FC_RPORT_DBG(rport, "Remove port\n");
+
+	if (rdata->rp_state == RPORT_ST_NONE) {
+		FC_RPORT_DBG(rport, "Port in NONE state, not removing\n");
+		mutex_unlock(&rdata->rp_mutex);
+		goto out;
+	}
 
 	fc_rport_enter_logo(rport);
 
@@ -335,8 +342,7 @@ int fc_rport_logoff(struct fc_rport *rport)
 
 	mutex_unlock(&rdata->rp_mutex);
 
-	if (!cancel_delayed_work(&rdata->retry_work))
-		flush_scheduled_work();
+	cancel_delayed_work_sync(&rdata->retry_work);
 
 	mutex_lock(&rdata->rp_mutex);
 
@@ -345,11 +351,12 @@ int fc_rport_logoff(struct fc_rport *rport)
 
 	mutex_unlock(&rdata->rp_mutex);
 
+out:
 	return 0;
 }
 
 /**
- * fc_rport_enter_ready - The rport is ready
+ * fc_rport_enter_ready() - The rport is ready
  * @rport: Fibre Channel remote port that is ready
  *
  * Locking Note: The rport lock is expected to be held before calling
@@ -361,23 +368,24 @@ static void fc_rport_enter_ready(struct fc_rport *rport)
 
 	fc_rport_state_enter(rport, RPORT_ST_READY);
 
-	FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
+	FC_RPORT_DBG(rport, "Port is Ready\n");
 
 	rdata->event = RPORT_EV_CREATED;
 	queue_work(rport_event_queue, &rdata->event_work);
 }
 
 /**
- * fc_rport_timeout - Handler for the retry_work timer.
+ * fc_rport_timeout() - Handler for the retry_work timer.
  * @work: The work struct of the fc_rport_libfc_priv
  *
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
  */
-static void fc_rport_timeout(void *data)
+static void fc_rport_timeout(struct work_struct *work)
 {
-	struct fc_rport_libfc_priv *rdata = data;
+	struct fc_rport_libfc_priv *rdata =
+		container_of(work, struct fc_rport_libfc_priv, retry_work.work);
 	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
 
 	mutex_lock(&rdata->rp_mutex);
@@ -400,65 +408,82 @@ static void fc_rport_timeout(void *data)
 	case RPORT_ST_NONE:
 		break;
 	}
-	put_device(&rport->dev);
 
 	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
 }
 
 /**
- * fc_rport_error - Handler for any errors
+ * fc_rport_error() - Error handler, called once retries have been exhausted
  * @rport: The fc_rport object
  * @fp: The frame pointer
  *
- * If the error was caused by a resource allocation failure
- * then wait for half a second and retry, otherwise retry
- * immediately.
- *
  * Locking Note: The rport lock is expected to be held before
  * calling this routine
  */
 static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
 {
 	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	unsigned long delay = 0;
 
-	FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
-		       PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
+	FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n",
+		     PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
 
-	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
-		/*
-		 * Memory allocation failure, or the exchange timed out.
-		 *  Retry after delay
-		 */
-		if (rdata->retries < rdata->local_port->max_retry_count) {
-			rdata->retries++;
-			if (!fp)
-				delay = msecs_to_jiffies(500);
-			get_device(&rport->dev);
-			schedule_delayed_work(&rdata->retry_work, delay);
-		} else {
-			switch (rdata->rp_state) {
-			case RPORT_ST_PLOGI:
-			case RPORT_ST_PRLI:
-			case RPORT_ST_LOGO:
-				rdata->event = RPORT_EV_FAILED;
-				queue_work(rport_event_queue,
-					   &rdata->event_work);
-				break;
-			case RPORT_ST_RTV:
-				fc_rport_enter_ready(rport);
-				break;
-			case RPORT_ST_NONE:
-			case RPORT_ST_READY:
-			case RPORT_ST_INIT:
-				break;
-			}
-		}
+	switch (rdata->rp_state) {
+	case RPORT_ST_PLOGI:
+	case RPORT_ST_PRLI:
+	case RPORT_ST_LOGO:
+		rdata->event = RPORT_EV_FAILED;
+		fc_rport_state_enter(rport, RPORT_ST_NONE);
+		queue_work(rport_event_queue,
+			   &rdata->event_work);
+		break;
+	case RPORT_ST_RTV:
+		fc_rport_enter_ready(rport);
+		break;
+	case RPORT_ST_NONE:
+	case RPORT_ST_READY:
+	case RPORT_ST_INIT:
+		break;
 	}
 }
 
 /**
- * fc_rport_plogi_recv_resp - Handle incoming ELS PLOGI response
+ * fc_rport_error_retry() - Error handler when retries are desired
+ * @rport: The fc_rport object
+ * @fp: The frame pointer
+ *
+ * If the error was an exchange timeout retry immediately,
+ * otherwise wait for E_D_TOV.
+ *
+ * Locking Note: The rport lock is expected to be held before
+ * calling this routine
+ */
+static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
+{
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	unsigned long delay = FC_DEF_E_D_TOV;
+
+	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
+	if (PTR_ERR(fp) == -FC_EX_CLOSED)
+		return fc_rport_error(rport, fp);
+
+	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
+		FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n",
+			     PTR_ERR(fp), fc_rport_state(rport));
+		rdata->retries++;
+		/* no additional delay on exchange timeouts */
+		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
+			delay = 0;
+		get_device(&rport->dev);
+		schedule_delayed_work(&rdata->retry_work, delay);
+		return;
+	}
+
+	return fc_rport_error(rport, fp);
+}
+
+/**
+ * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
  * @sp: current sequence in the PLOGI exchange
  * @fp: response frame
  * @rp_arg: Fibre Channel remote port
@@ -481,25 +506,26 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
-		       rport->port_id);
+	FC_RPORT_DBG(rport, "Received a PLOGI response\n");
 
 	if (rdata->rp_state != RPORT_ST_PLOGI) {
-		FC_DBG("Received a PLOGI response, but in state %s\n",
-		       fc_rport_state(rport));
+		FC_RPORT_DBG(rport, "Received a PLOGI response, but in state "
+			     "%s\n", fc_rport_state(rport));
+		if (IS_ERR(fp))
+			goto err;
 		goto out;
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
 		goto err;
 	}
 
 	op = fc_frame_payload_op(fp);
 	if (op == ELS_LS_ACC &&
 	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
-		rport->port_name = ntohll(get_unaligned(&plp->fl_wwpn));
-		rport->node_name = ntohll(get_unaligned(&plp->fl_wwnn));
+		rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
+		rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
 
 		tov = ntohl(plp->fl_csp.sp_e_d_tov);
 		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
@@ -523,16 +549,17 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 		else
 			fc_rport_enter_prli(rport);
 	} else
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
 
 out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
 }
 
 /**
- * fc_rport_enter_plogi - Send Port Login (PLOGI) request to peer
+ * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
  * @rport: Fibre Channel remote port to send PLOGI to
  *
  * Locking Note: The rport lock is expected to be held before calling
@@ -544,26 +571,28 @@ static void fc_rport_enter_plogi(struct fc_rport *rport)
 	struct fc_lport *lport = rdata->local_port;
 	struct fc_frame *fp;
 
-	FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
-		       rport->port_id, fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n",
+		     fc_rport_state(rport));
 
 	fc_rport_state_enter(rport, RPORT_ST_PLOGI);
 
 	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 	if (!fp) {
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
 		return;
 	}
 	rdata->e_d_tov = lport->e_d_tov;
 
 	if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
 				  fc_rport_plogi_resp, rport, lport->e_d_tov))
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
+	else
+		get_device(&rport->dev);
 }
 
 /**
- * fc_rport_prli_resp - Process Login (PRLI) response handler
+ * fc_rport_prli_resp() - Process Login (PRLI) response handler
  * @sp: current sequence in the PRLI exchange
  * @fp: response frame
  * @rp_arg: Fibre Channel remote port
@@ -587,17 +616,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
-		       rport->port_id);
+	FC_RPORT_DBG(rport, "Received a PRLI response\n");
 
 	if (rdata->rp_state != RPORT_ST_PRLI) {
-		FC_DBG("Received a PRLI response, but in state %s\n",
-		       fc_rport_state(rport));
+		FC_RPORT_DBG(rport, "Received a PRLI response, but in state "
+			     "%s\n", fc_rport_state(rport));
+		if (IS_ERR(fp))
+			goto err;
 		goto out;
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
 		goto err;
 	}
 
@@ -620,8 +650,9 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 		fc_rport_enter_rtv(rport);
 
 	} else {
-		FC_DBG("Bad ELS response\n");
+		FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n");
 		rdata->event = RPORT_EV_FAILED;
+		fc_rport_state_enter(rport, RPORT_ST_NONE);
 		queue_work(rport_event_queue, &rdata->event_work);
 	}
 
@@ -629,10 +660,11 @@ out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
 }
 
 /**
- * fc_rport_logo_resp - Logout (LOGO) response handler
+ * fc_rport_logo_resp() - Logout (LOGO) response handler
  * @sp: current sequence in the LOGO exchange
  * @fp: response frame
  * @rp_arg: Fibre Channel remote port
@@ -650,26 +682,28 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
-		       rport->port_id);
-
-	if (IS_ERR(fp)) {
-		fc_rport_error(rport, fp);
-		goto err;
-	}
+	FC_RPORT_DBG(rport, "Received a LOGO response\n");
 
 	if (rdata->rp_state != RPORT_ST_LOGO) {
-		FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
-			       fc_rport_state(rport));
+		FC_RPORT_DBG(rport, "Received a LOGO response, but in state "
+			     "%s\n", fc_rport_state(rport));
+		if (IS_ERR(fp))
+			goto err;
 		goto out;
 	}
 
+	if (IS_ERR(fp)) {
+		fc_rport_error_retry(rport, fp);
+		goto err;
+	}
+
 	op = fc_frame_payload_op(fp);
 	if (op == ELS_LS_ACC) {
 		fc_rport_enter_rtv(rport);
 	} else {
-		FC_DBG("Bad ELS response\n");
+		FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n");
 		rdata->event = RPORT_EV_LOGO;
+		fc_rport_state_enter(rport, RPORT_ST_NONE);
 		queue_work(rport_event_queue, &rdata->event_work);
 	}
 
@@ -677,10 +711,11 @@ out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
 }
 
 /**
- * fc_rport_enter_prli - Send Process Login (PRLI) request to peer
+ * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
  * @rport: Fibre Channel remote port to send PRLI to
  *
  * Locking Note: The rport lock is expected to be held before calling
@@ -696,24 +731,26 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
 	} *pp;
 	struct fc_frame *fp;
 
-	FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
-		       rport->port_id, fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n",
+		     fc_rport_state(rport));
 
 	fc_rport_state_enter(rport, RPORT_ST_PRLI);
 
 	fp = fc_frame_alloc(lport, sizeof(*pp));
 	if (!fp) {
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
 		return;
 	}
 
 	if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
 				  fc_rport_prli_resp, rport, lport->e_d_tov))
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
+	else
+		get_device(&rport->dev);
 }
 
 /**
- * fc_rport_els_rtv_resp - Request Timeout Value response handler
+ * fc_rport_rtv_resp() - Request Timeout Value response handler
  * @sp: current sequence in the RTV exchange
  * @fp: response frame
  * @rp_arg: Fibre Channel remote port
@@ -733,12 +770,13 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
-		       rport->port_id);
+	FC_RPORT_DBG(rport, "Received a RTV response\n");
 
 	if (rdata->rp_state != RPORT_ST_RTV) {
-		FC_DBG("Received a RTV response, but in state %s\n",
-		       fc_rport_state(rport));
+		FC_RPORT_DBG(rport, "Received a RTV response, but in state "
+			     "%s\n", fc_rport_state(rport));
+		if (IS_ERR(fp))
+			goto err;
 		goto out;
 	}
 
@@ -775,10 +813,11 @@ out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
+	put_device(&rport->dev);
 }
 
 /**
- * fc_rport_enter_rtv - Send Request Timeout Value (RTV) request to peer
+ * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
  * @rport: Fibre Channel remote port to send RTV to
  *
  * Locking Note: The rport lock is expected to be held before calling
@@ -790,24 +829,26 @@ static void fc_rport_enter_rtv(struct fc_rport *rport)
 	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	struct fc_lport *lport = rdata->local_port;
 
-	FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
-		       rport->port_id, fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n",
+		     fc_rport_state(rport));
 
 	fc_rport_state_enter(rport, RPORT_ST_RTV);
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
 	if (!fp) {
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
 		return;
 	}
 
 	if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
 				     fc_rport_rtv_resp, rport, lport->e_d_tov))
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
+	else
+		get_device(&rport->dev);
 }
 
 /**
- * fc_rport_enter_logo - Send Logout (LOGO) request to peer
+ * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
  * @rport: Fibre Channel remote port to send LOGO to
  *
  * Locking Note: The rport lock is expected to be held before calling
@@ -819,25 +860,27 @@ static void fc_rport_enter_logo(struct fc_rport *rport)
 	struct fc_lport *lport = rdata->local_port;
 	struct fc_frame *fp;
 
-	FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
-		       rport->port_id, fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n",
+		     fc_rport_state(rport));
 
 	fc_rport_state_enter(rport, RPORT_ST_LOGO);
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
 	if (!fp) {
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
 		return;
 	}
 
 	if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
 				  fc_rport_logo_resp, rport, lport->e_d_tov))
-		fc_rport_error(rport, fp);
+		fc_rport_error_retry(rport, fp);
+	else
+		get_device(&rport->dev);
 }
 
 
 /**
- * fc_rport_recv_req - Receive a request from a rport
+ * fc_rport_recv_req() - Receive a request from a rport
  * @sp: current sequence in the PLOGI exchange
  * @fp: response frame
  * @rp_arg: Fibre Channel remote port
@@ -898,7 +941,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
 }
 
 /**
- * fc_rport_recv_plogi_req - Handle incoming Port Login (PLOGI) request
+ * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
  * @rport: Fibre Channel remote port that initiated PLOGI
  * @sp: current sequence in the PLOGI exchange
  * @fp: PLOGI request frame
@@ -925,21 +968,20 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
 
 	fh = fc_frame_header_get(fp);
 
-	FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
-		       "while in state %s\n", ntoh24(fh->fh_s_id),
-		       fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n",
+		     fc_rport_state(rport));
 
 	sid = ntoh24(fh->fh_s_id);
 	pl = fc_frame_payload_get(fp, sizeof(*pl));
 	if (!pl) {
-		FC_DBG("incoming PLOGI from %x too short\n", sid);
+		FC_RPORT_DBG(rport, "Received PLOGI too short\n");
 		WARN_ON(1);
 		/* XXX TBD: send reject? */
 		fc_frame_free(fp);
 		return;
 	}
-	wwpn = ntohll(get_unaligned(&pl->fl_wwpn));
-	wwnn = ntohll(get_unaligned(&pl->fl_wwnn));
+	wwpn = get_unaligned_be64(&pl->fl_wwpn);
+	wwnn = get_unaligned_be64(&pl->fl_wwnn);
 
 	/*
 	 * If the session was just created, possibly due to the incoming PLOGI,
@@ -954,26 +996,28 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
 	 */
 	switch (rdata->rp_state) {
 	case RPORT_ST_INIT:
-		FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
-			       "- reject\n", sid, wwpn);
+		FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT "
+			     "- reject\n", (unsigned long long)wwpn);
 		reject = ELS_RJT_UNSUP;
 		break;
 	case RPORT_ST_PLOGI:
-		FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
-			       sid, rdata->rp_state);
+		FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n",
+			     rdata->rp_state);
 		if (wwpn < lport->wwpn)
 			reject = ELS_RJT_INPROG;
 		break;
 	case RPORT_ST_PRLI:
 	case RPORT_ST_READY:
-		FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
-			       "- ignored for now\n", sid, rdata->rp_state);
+		FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d "
+			     "- ignored for now\n", rdata->rp_state);
 		/* XXX TBD - should reset */
 		break;
 	case RPORT_ST_NONE:
 	default:
-		FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
-			       "state %d\n", sid, rdata->rp_state);
+		FC_RPORT_DBG(rport, "Received PLOGI in unexpected "
+			     "state %d\n", rdata->rp_state);
+		fc_frame_free(fp);
+		return;
 		break;
 	}
 
@@ -1020,7 +1064,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
 }
 
 /**
- * fc_rport_recv_prli_req - Handle incoming Process Login (PRLI) request
+ * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
  * @rport: Fibre Channel remote port that initiated PRLI
  * @sp: current sequence in the PRLI exchange
  * @fp: PRLI request frame
@@ -1055,9 +1099,8 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
 
 	fh = fc_frame_header_get(rx_fp);
 
-	FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
-		       "while in state %s\n", ntoh24(fh->fh_s_id),
-		       fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n",
+		     fc_rport_state(rport));
 
 	switch (rdata->rp_state) {
 	case RPORT_ST_PRLI:
@@ -1065,6 +1108,8 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
 		reason = ELS_RJT_NONE;
 		break;
 	default:
+		fc_frame_free(rx_fp);
+		return;
 		break;
 	}
 	len = fr_len(rx_fp) - sizeof(*fh);
@@ -1171,7 +1216,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
 }
 
 /**
- * fc_rport_recv_prlo_req - Handle incoming Process Logout (PRLO) request
+ * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
  * @rport: Fibre Channel remote port that initiated PRLO
  * @sp: current sequence in the PRLO exchange
  * @fp: PRLO request frame
@@ -1190,9 +1235,13 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
 
 	fh = fc_frame_header_get(fp);
 
-	FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
-		       "while in state %s\n", ntoh24(fh->fh_s_id),
-		       fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n",
+		     fc_rport_state(rport));
+
+	if (rdata->rp_state == RPORT_ST_NONE) {
+		fc_frame_free(fp);
+		return;
+	}
 
 	rjt_data.fp = NULL;
 	rjt_data.reason = ELS_RJT_UNAB;
@@ -1202,7 +1251,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
 }
 
 /**
- * fc_rport_recv_logo_req - Handle incoming Logout (LOGO) request
+ * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
  * @rport: Fibre Channel remote port that initiated LOGO
  * @sp: current sequence in the LOGO exchange
  * @fp: LOGO request frame
@@ -1219,11 +1268,16 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
 
 	fh = fc_frame_header_get(fp);
 
-	FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
-		       "while in state %s\n", ntoh24(fh->fh_s_id),
-		       fc_rport_state(rport));
+	FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n",
+		     fc_rport_state(rport));
+
+	if (rdata->rp_state == RPORT_ST_NONE) {
+		fc_frame_free(fp);
+		return;
+	}
 
 	rdata->event = RPORT_EV_LOGO;
+	fc_rport_state_enter(rport, RPORT_ST_NONE);
 	queue_work(rport_event_queue, &rdata->event_work);
 
 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
@@ -1235,9 +1289,11 @@ static void fc_rport_flush_queue(void)
 	flush_workqueue(rport_event_queue);
 }
 
-
 int fc_rport_init(struct fc_lport *lport)
 {
+	if (!lport->tt.rport_create)
+		lport->tt.rport_create = fc_rport_rogue_create;
+
 	if (!lport->tt.rport_login)
 		lport->tt.rport_login = fc_rport_login;
 
@@ -1274,7 +1330,7 @@ void fc_rport_terminate_io(struct fc_rport *rport)
 	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	struct fc_lport *lport = rdata->local_port;
 
-	lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id);
-	lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0);
+	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
+	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
 }
 EXPORT_SYMBOL(fc_rport_terminate_io);
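
A pattern that repeats through the fc_rport.c changes above: get_device(&rport->dev)
is taken whenever an ELS request or retry work is queued against the rport, the
matching put_device() runs at the end of the response handler or timeout, and
rogue rports are now freed from the dev.release callback rather than an explicit
destroy call. A toy sketch of that get/put pairing, using a plain counter in
place of the struct device refcount:

#include <assert.h>
#include <stdio.h>

/* Illustration only: an int stands in for get_device()/put_device() on
 * rport->dev, and fake_release() models fc_rport_rogue_destroy(). */
struct fake_rport {
	int refs;
	void (*release)(struct fake_rport *rport);
};

static void fake_get(struct fake_rport *rport) { rport->refs++; }

static void fake_put(struct fake_rport *rport)
{
	if (--rport->refs == 0)
		rport->release(rport);	/* last reference frees the object */
}

static void fake_release(struct fake_rport *rport)
{
	printf("rport %p released\n", (void *)rport);
}

int main(void)
{
	struct fake_rport r = { .refs = 1, .release = fake_release };

	fake_get(&r);	/* taken when the ELS request is queued       */
	fake_put(&r);	/* dropped at the end of the response handler */
	fake_put(&r);	/* creation reference dropped, e.g. on logoff */
	assert(r.refs == 0);
	return 0;
}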
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index ab08f35..95fcd8c 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -74,6 +74,7 @@
 					 */
 #define ETH_P_AOE	0x88A2		/* ATA over Ethernet		*/
 #define ETH_P_TIPC	0x88CA		/* TIPC 			*/
+#define ETH_P_FCOE	0x8906		/* Fibre Channel over Ethernet	*/
 
 /*
  *	Non DIX types. Won't clash for 1500 types.
diff --git a/include/scsi/fc/fc_fcoe.h b/include/scsi/fc/fc_fcoe.h
index a6118a2..ccb3dbe 100644
--- a/include/scsi/fc/fc_fcoe.h
+++ b/include/scsi/fc/fc_fcoe.h
@@ -25,17 +25,6 @@
  */
 
 /*
- * The FCoE ethertype eventually goes in net/if_ether.h.
- */
-#ifndef ETH_P_FCOE
-#define	ETH_P_FCOE	0x8906		/* FCOE ether type */
-#endif
-
-#ifndef ETH_P_8021Q
-#define	ETH_P_8021Q	0x8100
-#endif
-
-/*
  * FC_FCOE_OUI hasn't been standardized yet.   XXX TBD.
  */
 #ifndef FC_FCOE_OUI
@@ -85,6 +74,18 @@ struct fcoe_crc_eof {
 } __attribute__((packed));
 
 /*
+ * Minimum FCoE + FC header length
+ * 14 bytes FCoE header + 24 byte FC header = 38 bytes
+ */
+#define FCOE_HEADER_LEN 38
+
+/*
+ * Minimum FCoE frame size
+ * 14 bytes FCoE header + 24 byte FC header + 8 byte FCoE trailer = 46 bytes
+ */
+#define FCOE_MIN_FRAME 46
+
+/*
  * fc_fcoe_set_mac - Store OUI + DID into MAC address field.
  * @mac: mac address to be set
  * @did: fc dest id to use
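
The two constants added above are just the sums from their comments: a 14-byte
FCoE header plus a 24-byte FC header gives FCOE_HEADER_LEN (38), and adding the
8-byte fcoe_crc_eof trailer gives FCOE_MIN_FRAME (46). A trivial standalone
check of that arithmetic, with local names:

#include <stdio.h>

enum {
	FCOE_HDR_BYTES   = 14,	/* FCoE encapsulation header        */
	FC_HDR_BYTES     = 24,	/* FC frame header                  */
	FCOE_TRAIL_BYTES = 8,	/* fcoe_crc_eof: CRC, EOF, reserved */
};

int main(void)
{
	printf("FCOE_HEADER_LEN = %d\n", FCOE_HDR_BYTES + FC_HDR_BYTES);
	printf("FCOE_MIN_FRAME  = %d\n",
	       FCOE_HDR_BYTES + FC_HDR_BYTES + FCOE_TRAIL_BYTES);
	return 0;
}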
diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h
new file mode 100644
index 0000000..0627a9a
--- /dev/null
+++ b/include/scsi/fc/fc_fip.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FC_FIP_H_
+#define _FC_FIP_H_
+
+/*
+ * This version is based on:
+ * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
+ */
+
+/*
+ * The FIP ethertype eventually goes in net/if_ether.h.
+ */
+#ifndef ETH_P_FIP
+#define ETH_P_FIP	0x8914	/* FIP Ethertype */
+#endif
+
+#define FIP_DEF_PRI	128	/* default selection priority */
+#define FIP_DEF_FC_MAP	0x0efc00 /* default FCoE MAP (MAC OUI) value */
+#define FIP_DEF_FKA	8000	/* default FCF keep-alive/advert period (mS) */
+#define FIP_VN_KA_PERIOD 90000	/* required VN_port keep-alive period (mS) */
+#define FIP_FCF_FUZZ	100	/* random time added by FCF (mS) */
+
+/*
+ * Multicast MAC addresses.  T11-adopted.
+ */
+#define FIP_ALL_FCOE_MACS	((u8[6]) { 1, 0x10, 0x18, 1, 0, 0 })
+#define FIP_ALL_ENODE_MACS	((u8[6]) { 1, 0x10, 0x18, 1, 0, 1 })
+#define FIP_ALL_FCF_MACS	((u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
+
+#define FIP_VER		1		/* version for fip_header */
+
+struct fip_header {
+	__u8	fip_ver;		/* upper 4 bits are the version */
+	__u8	fip_resv1;		/* reserved */
+	__be16	fip_op;			/* operation code */
+	__u8	fip_resv2;		/* reserved */
+	__u8	fip_subcode;		/* lower 4 bits are sub-code */
+	__be16	fip_dl_len;		/* length of descriptors in words */
+	__be16	fip_flags;		/* header flags */
+} __attribute__((packed));
+
+#define FIP_VER_SHIFT	4
+#define FIP_VER_ENCAPS(v) ((v) << FIP_VER_SHIFT)
+#define FIP_VER_DECAPS(v) ((v) >> FIP_VER_SHIFT)
+#define FIP_BPW		4		/* bytes per word for lengths */
+
+/*
+ * fip_op.
+ */
+enum fip_opcode {
+	FIP_OP_DISC =	1,		/* discovery, advertisement, etc. */
+	FIP_OP_LS =	2,		/* Link Service request or reply */
+	FIP_OP_CTRL =	3,		/* Keep Alive / Link Reset */
+	FIP_OP_VLAN =	4,		/* VLAN discovery */
+	FIP_OP_VENDOR_MIN = 0xfff8,	/* min vendor-specific opcode */
+	FIP_OP_VENDOR_MAX = 0xfffe,	/* max vendor-specific opcode */
+};
+
+/*
+ * Subcodes for FIP_OP_DISC.
+ */
+enum fip_disc_subcode {
+	FIP_SC_SOL =	1,		/* solicitation */
+	FIP_SC_ADV =	2,		/* advertisement */
+};
+
+/*
+ * Subcodes for FIP_OP_LS.
+ */
+enum fip_trans_subcode {
+	FIP_SC_REQ =	1,		/* request */
+	FIP_SC_REP =	2,		/* reply */
+};
+
+/*
+ * Subcodes for FIP_OP_CTRL.
+ */
+enum fip_reset_subcode {
+	FIP_SC_KEEP_ALIVE = 1,		/* keep-alive from VN_Port */
+	FIP_SC_CLR_VLINK = 2,		/* clear virtual link from VF_Port */
+};
+
+/*
+ * Subcodes for FIP_OP_VLAN.
+ */
+enum fip_vlan_subcode {
+	FIP_SC_VL_REQ =	1,		/* request */
+	FIP_SC_VL_REP =	2,		/* reply */
+};
+
+/*
+ * flags in header fip_flags.
+ */
+enum fip_flag {
+	FIP_FL_FPMA =	0x8000,		/* supports FPMA fabric-provided MACs */
+	FIP_FL_SPMA =	0x4000,		/* supports SPMA server-provided MACs */
+	FIP_FL_AVAIL =	0x0004,		/* available for FLOGI/ELP */
+	FIP_FL_SOL =	0x0002,		/* this is a solicited message */
+	FIP_FL_FPORT =	0x0001,		/* sent from an F port */
+};
+
+/*
+ * Common descriptor header format.
+ */
+struct fip_desc {
+	__u8	fip_dtype;		/* type - see below */
+	__u8	fip_dlen;		/* length - in 32-bit words */
+};
+
+enum fip_desc_type {
+	FIP_DT_PRI =	1,		/* priority for forwarder selection */
+	FIP_DT_MAC =	2,		/* MAC address */
+	FIP_DT_MAP_OUI = 3,		/* FC-MAP OUI */
+	FIP_DT_NAME =	4,		/* switch name or node name */
+	FIP_DT_FAB =	5,		/* fabric descriptor */
+	FIP_DT_FCOE_SIZE = 6,		/* max FCoE frame size */
+	FIP_DT_FLOGI =	7,		/* FLOGI request or response */
+	FIP_DT_FDISC =	8,		/* FDISC request or response */
+	FIP_DT_LOGO =	9,		/* LOGO request or response */
+	FIP_DT_ELP =	10,		/* ELP request or response */
+	FIP_DT_VN_ID =	11,		/* VN_Node Identifier */
+	FIP_DT_FKA =	12,		/* advertisement keep-alive period */
+	FIP_DT_VENDOR =	13,		/* vendor ID */
+	FIP_DT_VLAN =	14,		/* vlan number */
+	FIP_DT_LIMIT,			/* max defined desc_type + 1 */
+	FIP_DT_VENDOR_BASE = 128,	/* first vendor-specific desc_type */
+};
+
+/*
+ * FIP_DT_PRI - priority descriptor.
+ */
+struct fip_pri_desc {
+	struct fip_desc fd_desc;
+	__u8		fd_resvd;
+	__u8		fd_pri;		/* FCF priority:  higher is better */
+} __attribute__((packed));
+
+/*
+ * FIP_DT_MAC - MAC address descriptor.
+ */
+struct fip_mac_desc {
+	struct fip_desc fd_desc;
+	__u8		fd_mac[ETH_ALEN];
+} __attribute__((packed));
+
+/*
+ * FIP_DT_MAP - descriptor.
+ */
+struct fip_map_desc {
+	struct fip_desc fd_desc;
+	__u8		fd_resvd[3];
+	__u8		fd_map[3];
+} __attribute__((packed));
+
+/*
+ * FIP_DT_NAME descriptor.
+ */
+struct fip_wwn_desc {
+	struct fip_desc fd_desc;
+	__u8		fd_resvd[2];
+	__be64		fd_wwn;		/* 64-bit WWN, unaligned */
+} __attribute__((packed));
+
+/*
+ * FIP_DT_FAB descriptor.
+ */
+struct fip_fab_desc {
+	struct fip_desc fd_desc;
+	__be16		fd_vfid;	/* virtual fabric ID */
+	__u8		fd_resvd;
+	__u8		fd_map[3];	/* FC-MAP value */
+	__be64		fd_wwn;		/* fabric name, unaligned */
+} __attribute__((packed));
+
+/*
+ * FIP_DT_FCOE_SIZE descriptor.
+ */
+struct fip_size_desc {
+	struct fip_desc fd_desc;
+	__be16		fd_size;
+} __attribute__((packed));
+
+/*
+ * Descriptor that encapsulates an ELS or ILS frame.
+ * The encapsulated frame immediately follows this header, without
+ * SOF, EOF, or CRC.
+ */
+struct fip_encaps {
+	struct fip_desc fd_desc;
+	__u8		fd_resvd[2];
+} __attribute__((packed));
+
+/*
+ * FIP_DT_VN_ID - VN_Node Identifier descriptor.
+ */
+struct fip_vn_desc {
+	struct fip_desc fd_desc;
+	__u8		fd_mac[ETH_ALEN];
+	__u8		fd_resvd;
+	__u8		fd_fc_id[3];
+	__be64		fd_wwpn;	/* port name, unaligned */
+} __attribute__((packed));
+
+/*
+ * FIP_DT_FKA - Advertisement keep-alive period.
+ */
+struct fip_fka_desc {
+	struct fip_desc fd_desc;
+	__u8		fd_resvd[2];
+	__be32		fd_fka_period;	/* adv./keep-alive period in mS */
+} __attribute__((packed));
+
+/*
+ * FIP_DT_VENDOR descriptor.
+ */
+struct fip_vendor_desc {
+	struct fip_desc fd_desc;
+	__u8		fd_resvd[2];
+	__u8		fd_vendor_id[8];
+} __attribute__((packed));
+
+#endif /* _FC_FIP_H_ */
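
Both fip_dl_len in the header and fip_dlen in each descriptor count 4-byte
words (FIP_BPW), so a receiver steps through the descriptor list fip_dlen * 4
bytes at a time until fip_dl_len * 4 bytes have been consumed. A standalone
sketch of that walk; the two-byte descriptor header is re-declared locally so
the example builds outside the kernel and is only an approximation of
struct fip_desc:

#include <stdint.h>
#include <stdio.h>

struct demo_desc {
	uint8_t dtype;		/* descriptor type                     */
	uint8_t dlen;		/* descriptor length in 4-byte words   */
};

#define DEMO_BPW 4		/* mirrors FIP_BPW */

static void walk_descriptors(const uint8_t *buf, size_t dl_len_words)
{
	size_t off = 0, total = dl_len_words * DEMO_BPW;

	while (off + sizeof(struct demo_desc) <= total) {
		const struct demo_desc *d =
			(const struct demo_desc *)(buf + off);

		if (d->dlen == 0)	/* malformed: would loop forever */
			break;
		printf("descriptor type %u, %u bytes\n",
		       (unsigned int)d->dtype,
		       (unsigned int)(d->dlen * DEMO_BPW));
		off += (size_t)d->dlen * DEMO_BPW;
	}
}

int main(void)
{
	/* One MAC-style descriptor: type 2, 2 words (8 bytes) long. */
	uint8_t buf[8] = { 2, 2, 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	walk_descriptors(buf, 2);
	return 0;
}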
diff --git a/include/scsi/fc/fc_fs.h b/include/scsi/fc/fc_fs.h
index 3e4801d..1b7af3a 100644
--- a/include/scsi/fc/fc_fs.h
+++ b/include/scsi/fc/fc_fs.h
@@ -337,4 +337,9 @@ enum fc_pf_rjt_reason {
 	FC_RJT_VENDOR =		0xff,	/* vendor specific reject */
 };
 
+/* default timeout values */
+
+#define FC_DEF_E_D_TOV	2000UL
+#define FC_DEF_R_A_TOV	10000UL
+
 #endif /* _FC_FS_H_ */
diff --git a/include/scsi/fc_compat.h b/include/scsi/fc_compat.h
new file mode 100644
index 0000000..9f9f14f
--- /dev/null
+++ b/include/scsi/fc_compat.h
@@ -0,0 +1,104 @@
+/*
+ * Compat helpers for libfc
+ */
+#ifndef _FC_COMPAT_H_
+#define _FC_COMPAT_H_
+
+#include <asm/unaligned.h>
+#include <linux/workqueue.h>
+#include <linux/scatterlist.h>
+#include <linux/if_ether.h>
+
+#ifndef for_each_sg
+#define for_each_sg(sglist, __sg, nr, __i)        \
+	for (__i = 0, __sg = (sglist); __i < (nr); __i++, __sg = sg_next(__sg))
+
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+	if (!sg)
+		return NULL;
+	return sg + 1;
+}
+
+static inline struct page *sg_page(struct scatterlist *sg)
+{
+	return sg->page;
+}
+#endif
+
+#define BIT(nr) (1UL << (nr))
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_BUF_SIZE 18
+#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE]
+
+static inline size_t _format_mac_addr(char *buf, int buflen,
+                                const unsigned char *addr, int len)
+{
+	int i;
+	char *cp = buf;
+
+	for (i = 0; i < len; i++) {
+		cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
+		if (i == len - 1)
+			break;
+		cp += strlcpy(cp, ":", buflen - (cp - buf));
+	}
+	return cp - buf;
+}
+
+static inline char *print_mac(char *buf, const unsigned char *addr)
+{
+	_format_mac_addr(buf, MAC_BUF_SIZE, addr, ETH_ALEN);
+	return buf;
+}
+
+#define dev_get_by_name(_inet, _name)  dev_get_by_name(_name)
+
+#define vlan_dev_real_dev(_ndev) VLAN_DEV_INFO(_ndev)->real_dev
+
+#define put_unaligned_be64(_val, _ptr) put_unaligned(cpu_to_be64(_val), _ptr)
+#define get_unaligned_be64(_ptr) be64_to_cpu(get_unaligned(_ptr))
+
+#define kmem_cache_create(_name, _size, _align, _flags, _ctor) \
+	kmem_cache_create(_name, _size, _align, _flags, _ctor, NULL)
+
+#define flush_work(_wk) flush_scheduled_work()
+
+struct delayed_work {
+        struct work_struct work;
+};
+
+static inline int cancel_delayed_work_sync(struct delayed_work *dwork)
+{
+	int ret;
+
+	ret = cancel_delayed_work(&dwork->work);
+	if (!ret)
+		flush_scheduled_work();
+	return ret;
+}
+
+#define cancel_delayed_work(_dwork) cancel_delayed_work(&(_dwork)->work)
+
+static inline int schedule_delayed_work_compat(struct delayed_work *work,
+					       unsigned long delay)
+{
+	if (likely(!delay))
+		return schedule_work(&work->work);
+	else
+		return schedule_delayed_work(&work->work, delay);
+}
+
+static inline void INIT_WORK_compat(struct work_struct *work, void *func)
+{
+        INIT_WORK(work, func, work);
+}
+
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) INIT_WORK_compat(_work, _func)
+#define INIT_DELAYED_WORK(_work,_func) INIT_WORK(&(_work)->work, _func)
+
+#define schedule_delayed_work schedule_delayed_work_compat
+
+#endif
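
The get_unaligned_be64() compat macro above is what lets the rest of the patch
drop the older ntohll(get_unaligned(...)) pattern for WWPN/WWNN fields; on this
kernel it simply expands to be64_to_cpu(get_unaligned(_ptr)). A standalone
sketch of the same operation, reading a big-endian 64-bit name from a possibly
unaligned buffer (local helper name, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Byte-at-a-time read: safe regardless of the pointer's alignment. */
static uint64_t demo_get_unaligned_be64(const void *p)
{
	const uint8_t *b = p;
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return v;
}

int main(void)
{
	/* A WWPN stored big-endian at an odd (unaligned) offset. */
	uint8_t buf[9] = { 0xff, 0x20, 0x00, 0x00, 0x00,
			   0xc9, 0xab, 0xcd, 0xef };

	printf("wwpn = 0x%llx\n",
	       (unsigned long long)demo_get_unaligned_be64(buf + 1));
	return 0;
}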
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
index a09df7a..7cc2aa7 100644
--- a/include/scsi/fc_encode.h
+++ b/include/scsi/fc_encode.h
@@ -20,6 +20,7 @@
 #ifndef _FC_ENCODE_H_
 #define _FC_ENCODE_H_
 #include <asm/unaligned.h>
+#include <scsi/fc_compat.h>
 
 struct fc_ns_rft {
 	struct fc_ns_fid fid;	/* port ID object */
@@ -103,11 +104,10 @@ static inline int fc_ct_fill(struct fc_lport *lport, struct fc_frame *fp,
 		hton24(ct->payload.rn.fr_fid.fp_fid,
 		       fc_host_port_id(lport->host));
 		ct->payload.rft.fts = lport->fcts;
-		put_unaligned(htonll(lport->wwpn), &ct->payload.rn.fr_wwn);
+		put_unaligned_be64(lport->wwpn, &ct->payload.rn.fr_wwn);
 		break;
 
 	default:
-		FC_DBG("Invalid op code %x \n", op);
 		return -EINVAL;
 	}
 	*r_ctl = FC_RCTL_DD_UNSOL_CTL;
@@ -129,8 +129,9 @@ static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
 	plogi = fc_frame_payload_get(fp, sizeof(*plogi));
 	memset(plogi, 0, sizeof(*plogi));
 	plogi->fl_cmd = (u8) op;
-	put_unaligned(htonll(lport->wwpn), &plogi->fl_wwpn);
-	put_unaligned(htonll(lport->wwnn), &plogi->fl_wwnn);
+	put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
+	put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
+
 	csp = &plogi->fl_csp;
 	csp->sp_hi_ver = 0x20;
 	csp->sp_lo_ver = 0x20;
@@ -160,8 +161,8 @@ static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp)
 	flogi = fc_frame_payload_get(fp, sizeof(*flogi));
 	memset(flogi, 0, sizeof(*flogi));
 	flogi->fl_cmd = (u8) ELS_FLOGI;
-	put_unaligned(htonll(lport->wwpn), &flogi->fl_wwpn);
-	put_unaligned(htonll(lport->wwnn), &flogi->fl_wwnn);
+	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
 	sp = &flogi->fl_csp;
 	sp->sp_hi_ver = 0x20;
 	sp->sp_lo_ver = 0x20;
@@ -297,7 +298,6 @@ static inline int fc_els_fill(struct fc_lport *lport, struct fc_rport *rport,
 		break;
 
 	default:
-		FC_DBG("Invalid op code %x \n", op);
 		return -EINVAL;
 	}
 
diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
index dc5f734..5951105 100644
--- a/include/scsi/fc_frame.h
+++ b/include/scsi/fc_frame.h
@@ -54,8 +54,8 @@
 #define fr_eof(fp)	(fr_cb(fp)->fr_eof)
 #define fr_flags(fp)	(fr_cb(fp)->fr_flags)
 #define fr_max_payload(fp)	(fr_cb(fp)->fr_max_payload)
-#define fr_cmd(fp)	(fr_cb(fp)->fr_cmd)
-#define fr_dir(fp)	(fr_cmd(fp)->sc_data_direction)
+#define fr_fsp(fp)	(fr_cb(fp)->fr_fsp)
+#define fr_crc(fp)	(fr_cb(fp)->fr_crc)
 
 struct fc_frame {
 	struct sk_buff skb;
@@ -65,13 +65,15 @@ struct fcoe_rcv_info {
 	struct packet_type  *ptype;
 	struct fc_lport	*fr_dev;	/* transport layer private pointer */
 	struct fc_seq	*fr_seq;	/* for use with exchange manager */
-	struct scsi_cmnd *fr_cmd;	/* for use of scsi command */
+	struct fc_fcp_pkt *fr_fsp;	/* for the corresponding fcp I/O */
+	u32		fr_crc;
+	u16		fr_max_payload;	/* max FC payload */
 	enum fc_sof	fr_sof;		/* start of frame delimiter */
 	enum fc_eof	fr_eof;		/* end of frame delimiter */
 	u8		fr_flags;	/* flags - see below */
-	u16		fr_max_payload;	/* max FC payload */
 };
 
+
 /*
  * Get fc_frame pointer for an skb that's already been imported.
  */
@@ -215,20 +217,6 @@ static inline bool fc_frame_is_cmd(const struct fc_frame *fp)
 	return fc_frame_rctl(fp) == FC_RCTL_DD_UNSOL_CMD;
 }
 
-static inline bool fc_frame_is_read(const struct fc_frame *fp)
-{
-	if (fc_frame_is_cmd(fp) && fr_cmd(fp))
-		return fr_dir(fp) == DMA_FROM_DEVICE;
-	return false;
-}
-
-static inline bool fc_frame_is_write(const struct fc_frame *fp)
-{
-	if (fc_frame_is_cmd(fp) && fr_cmd(fp))
-		return fr_dir(fp) == DMA_TO_DEVICE;
-	return false;
-}
-
 /*
  * Check for leaks.
  * Print the frame header of any currently allocated frame, assuming there
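
The fcoe_rcv_info changes above replace the per-frame scsi_cmnd pointer with the owning fc_fcp_pkt and add a CRC field; a minimal sketch (function name hypothetical, not part of the patch) of how the new accessors are used:

/* Illustrative only: record the owning fc_fcp_pkt and a computed CRC in
 * the frame's control block.  fr_fsp()/fr_crc() expand to members of the
 * fcoe_rcv_info stored in skb->cb; direction checks now go through the
 * fcp packet (fc_fcp_is_read() in libfc.h) rather than the frame. */
static bool example_tag_frame(struct fc_frame *fp, struct fc_fcp_pkt *fsp,
			      u32 crc)
{
	fr_fsp(fp) = fsp;
	fr_crc(fp) = crc;

	return fc_fcp_is_read(fsp);
}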
diff --git a/include/scsi/fc_transport_fcoe.h b/include/scsi/fc_transport_fcoe.h
deleted file mode 100644
index 2816462..0000000
--- a/include/scsi/fc_transport_fcoe.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef FC_TRANSPORT_FCOE_H
-#define FC_TRANSPORT_FCOE_H
-
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <scsi/scsi_host.h>
-#include <scsi/libfc.h>
-
-/**
- * struct fcoe_transport - FCoE transport struct for generic transport
- * for Ethernet devices as well as pure HBAs
- *
- * @name: name for thsi transport
- * @bus: physical bus type (pci_bus_type)
- * @driver: physical bus driver for network device
- * @create: entry create function
- * @destroy: exit destroy function
- * @list: list of transports
- */
-struct fcoe_transport {
-	char *name;
-	unsigned short vendor;
-	unsigned short device;
-	struct bus_type *bus;
-	struct device_driver *driver;
-	int (*create)(struct net_device *device);
-	int (*destroy)(struct net_device *device);
-	bool (*match)(struct net_device *device);
-	struct list_head list;
-	struct list_head devlist;
-	struct mutex devlock;
-};
-
-/**
- * MODULE_ALIAS_FCOE_PCI
- *
- * some care must be taken with this, vendor and device MUST be a hex value
- * preceded with 0x and with letters in lower case (0x12ab, not 0x12AB or 12AB)
- */
-#define MODULE_ALIAS_FCOE_PCI(vendor, device) \
-	MODULE_ALIAS("fcoe-pci-" __stringify(vendor) "-" __stringify(device))
-
-/* exported funcs */
-int fcoe_transport_attach(struct net_device *netdev);
-int fcoe_transport_release(struct net_device *netdev);
-int fcoe_transport_register(struct fcoe_transport *t);
-int fcoe_transport_unregister(struct fcoe_transport *t);
-int __init fcoe_transport_init(void);
-int __exit fcoe_transport_exit(void);
-
-/* fcow_sw is the default transport */
-extern struct fcoe_transport fcoe_sw_transport;
-#endif /* FC_TRANSPORT_FCOE_H */
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 13b9592..2493661 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -22,6 +22,7 @@
 
 #include <linux/timer.h>
 #include <linux/if.h>
+#include <linux/percpu.h>
 
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
@@ -35,17 +36,74 @@
 
 #include <scsi/fc_frame.h>
 
-#define LIBFC_DEBUG
-
-#ifdef LIBFC_DEBUG
-/* Log messages */
-#define FC_DBG(fmt, args...)						\
-	do {								\
-		printk(KERN_INFO "%s " fmt, __func__, ##args);		\
-	} while (0)
-#else
-#define FC_DBG(fmt, args...)
-#endif
+#include <scsi/fc_compat.h>
+
+#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */
+#define FC_LPORT_LOGGING 0x02 /* lport layer logging */
+#define FC_DISC_LOGGING  0x04 /* discovery layer logging */
+#define FC_RPORT_LOGGING 0x08 /* rport layer logging */
+#define FC_FCP_LOGGING   0x10 /* I/O path logging */
+#define FC_EM_LOGGING    0x20 /* Exchange Manager logging */
+#define FC_EXCH_LOGGING  0x40 /* Exchange/Sequence logging */
+#define FC_SCSI_LOGGING  0x80 /* SCSI logging (mostly error handling) */
+
+extern unsigned int fc_debug_logging;
+
+#define FC_CHECK_LOGGING(LEVEL, CMD)				\
+do {								\
+	if (unlikely(fc_debug_logging & LEVEL))			\
+		do {						\
+			CMD;					\
+		} while (0);					\
+} while (0);
+
+#define FC_LIBFC_DBG(fmt, args...)					\
+	FC_CHECK_LOGGING(FC_LIBFC_LOGGING,				\
+			 printk(KERN_INFO "libfc: " fmt, ##args);)
+
+#define FC_LPORT_DBG(lport, fmt, args...)				\
+	FC_CHECK_LOGGING(FC_LPORT_LOGGING,				\
+			 printk(KERN_INFO "lport: %6x: " fmt,		\
+				fc_host_port_id(lport->host), ##args);)
+
+#define FC_DISC_DBG(disc, fmt, args...)					\
+	FC_CHECK_LOGGING(FC_DISC_LOGGING,				\
+			 printk(KERN_INFO "disc: %6x: " fmt,		\
+				fc_host_port_id(disc->lport->host),	\
+				##args);)
+
+#define FC_RPORT_DBG(rport, fmt, args...)				\
+do {									\
+	struct fc_rport_libfc_priv *rdata = rport->dd_data;		\
+	struct fc_lport *lport = rdata->local_port;			\
+	FC_CHECK_LOGGING(FC_RPORT_LOGGING,				\
+			 printk(KERN_INFO "rport: %6x: %6x: " fmt,	\
+				fc_host_port_id(lport->host),		\
+				rport->port_id, ##args);)		\
+} while (0);
+
+#define FC_FCP_DBG(pkt, fmt, args...)					\
+	FC_CHECK_LOGGING(FC_FCP_LOGGING,				\
+			 printk(KERN_INFO "fcp: %6x: %6x: " fmt,	\
+				fc_host_port_id(pkt->lp->host),		\
+				pkt->rport->port_id, ##args);)
+
+#define FC_EM_DBG(em, fmt, args...)					\
+	FC_CHECK_LOGGING(FC_EM_LOGGING,					\
+			 printk(KERN_INFO "em: %6x: " fmt,		\
+				fc_host_port_id(em->lp->host),		\
+				##args);)
+
+#define FC_EXCH_DBG(exch, fmt, args...)					\
+	FC_CHECK_LOGGING(FC_EXCH_LOGGING,				\
+			 printk(KERN_INFO "exch: %6x: %4x: " fmt,	\
+				fc_host_port_id(exch->lp->host),	\
+				exch->xid, ##args);)
+
+#define FC_SCSI_DBG(lport, fmt, args...)				\
+	FC_CHECK_LOGGING(FC_SCSI_LOGGING,                               \
+			 printk(KERN_INFO "scsi: %6x: " fmt,		\
+				fc_host_port_id(lport->host), ##args);)
 
 /*
  * libfc error codes
@@ -70,9 +128,6 @@
 /*
  * FC HBA status
  */
-#define FC_PAUSE		    (1 << 1)
-#define FC_LINK_UP		    (1 << 0)
-
 enum fc_lport_state {
 	LPORT_ST_NONE = 0,
 	LPORT_ST_FLOGI,
@@ -91,14 +146,6 @@ enum fc_disc_event {
 	DISC_EV_FAILED
 };
 
-enum fc_lport_event {
-	RPORT_EV_NONE = 0,
-	RPORT_EV_CREATED,
-	RPORT_EV_FAILED,
-	RPORT_EV_STOP,
-	RPORT_EV_LOGO
-};
-
 enum fc_rport_state {
 	RPORT_ST_NONE = 0,
 	RPORT_ST_INIT,		/* initialized */
@@ -128,6 +175,19 @@ struct fc_disc_port {
 	struct work_struct	    rport_work;
 };
 
+enum fc_rport_event {
+	RPORT_EV_NONE = 0,
+	RPORT_EV_CREATED,
+	RPORT_EV_FAILED,
+	RPORT_EV_STOP,
+	RPORT_EV_LOGO
+};
+
+struct fc_rport_operations {
+	void (*event_callback)(struct fc_lport *, struct fc_rport *,
+			       enum fc_rport_event);
+};
+
 /**
  * struct fc_rport_libfc_priv - libfc internal information about a remote port
  * @local_port: Fibre Channel host port instance
@@ -142,24 +202,22 @@ struct fc_disc_port {
  * @event_callback: Callback for rport READY, FAILED or LOGO
  */
 struct fc_rport_libfc_priv {
-	struct fc_lport		*local_port;
-	enum fc_rport_state rp_state;
-	u16			flags;
+	struct fc_lport		   *local_port;
+	enum fc_rport_state        rp_state;
+	u16			   flags;
 	#define FC_RP_FLAGS_REC_SUPPORTED	(1 << 0)
 	#define FC_RP_FLAGS_RETRY		(1 << 1)
-	u16		max_seq;
-	unsigned int	retries;
-	unsigned int	e_d_tov;
-	unsigned int	r_a_tov;
-	enum fc_rport_trans_state trans_state;
-	struct mutex    rp_mutex;
-	struct work_struct	retry_work;
-	enum fc_lport_event     event;
-	void (*event_callback)(struct fc_lport *,
-			       struct fc_rport *,
-			       enum fc_lport_event);
-	struct list_head         peers;
-	struct work_struct       event_work;
+	u16		           max_seq;
+	unsigned int	           retries;
+	unsigned int	           e_d_tov;
+	unsigned int	           r_a_tov;
+	enum fc_rport_trans_state  trans_state;
+	struct mutex               rp_mutex;
+	struct delayed_work	   retry_work;
+	enum fc_rport_event        event;
+	struct fc_rport_operations *ops;
+	struct list_head           peers;
+	struct work_struct         event_work;
 };
 
 #define PRIV_TO_RPORT(x)						\
@@ -168,7 +226,6 @@ struct fc_rport_libfc_priv {
 	(struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport));
 
 struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
-void fc_rport_rogue_destroy(struct fc_rport *);
 
 static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
 {
@@ -248,6 +305,7 @@ struct fc_fcp_pkt {
 	 */
 	struct fcp_cmnd cdb_cmd;
 	size_t		xfer_len;
+	u16		xfer_ddp;	/* this xfer is ddped */
 	u32		xfer_contig_end; /* offset of end of contiguous xfer */
 	u16		max_payload;	/* max payload size in bytes */
 
@@ -270,6 +328,15 @@ struct fc_fcp_pkt {
 	u8		recov_retry;	/* count of recovery retries */
 	struct fc_seq	*recov_seq;	/* sequence for REC or SRR */
 };
+/*
+ * FC_FCP HELPER FUNCTIONS
+ *****************************/
+static inline bool fc_fcp_is_read(const struct fc_fcp_pkt *fsp)
+{
+	if (fsp && fsp->cmd)
+		return fsp->cmd->sc_data_direction == DMA_FROM_DEVICE;
+	return false;
+}
 
 /*
  * Structure and function definitions for managing Fibre Channel Exchanges
@@ -298,11 +365,10 @@ struct fc_seq {
 /*
  * Exchange.
  *
- * Locking notes: The ex_lock protects changes to the following fields:
- *	esb_stat, f_ctl, seq.ssb_stat, seq.f_ctl.
+ * Locking notes: The ex_lock protects the following items:
+ *	state, esb_stat, f_ctl, seq.ssb_stat
  *	seq_id
  *	sequence allocation
- *
  */
 struct fc_exch {
 	struct fc_exch_mgr *em;		/* exchange manager */
@@ -311,7 +377,7 @@ struct fc_exch {
 	struct list_head	ex_list;	/* free or busy list linkage */
 	spinlock_t	ex_lock;	/* lock covering exchange state */
 	atomic_t	ex_refcnt;	/* reference counter */
-	struct work_struct timeout_work; /* timer for upper level protocols */
+	struct delayed_work timeout_work; /* timer for upper level protocols */
 	struct fc_lport	*lp;		/* fc device instance */
 	u16		oxid;		/* originator's exchange ID */
 	u16		rxid;		/* responder's exchange ID */
@@ -340,31 +406,17 @@ struct fc_exch {
 
 struct libfc_function_template {
 
-	/**
-	 * Mandatory Fields
-	 *
-	 * These handlers must be implemented by the LLD.
-	 */
-
 	/*
 	 * Interface to send a FC frame
-	 */
-	int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp);
-
-	/**
-	 * Optional Fields
 	 *
-	 * The LLD may choose to implement any of the following handlers.
-	 * If LLD doesn't specify hander and leaves its pointer NULL then
-	 * the default libfc function will be used for that handler.
-	 */
-
-	/**
-	 * ELS/CT interfaces
+	 * STATUS: REQUIRED
 	 */
+	int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp);
 
 	/*
-	 * elsct_send - sends ELS/CT frame
+	 * Interface to send ELS/CT frames
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	struct fc_seq *(*elsct_send)(struct fc_lport *lport,
 				     struct fc_rport *rport,
@@ -374,9 +426,6 @@ struct libfc_function_template {
 					     struct fc_frame *fp,
 					     void *arg),
 				     void *arg, u32 timer_msec);
-	/**
-	 * Exhance Manager interfaces
-	 */
 
 	/*
 	 * Send the FC frame payload using a new exchange and sequence.
@@ -408,6 +457,8 @@ struct libfc_function_template {
 	 * timer_msec argument is specified. The timer is canceled when
 	 * it fires or when the exchange is done. The exchange timeout handler
 	 * is registered by EM layer.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	struct fc_seq *(*exch_seq_send)(struct fc_lport *lp,
 					struct fc_frame *fp,
@@ -419,14 +470,33 @@ struct libfc_function_template {
 					void *arg, unsigned int timer_msec);
 
 	/*
-	 * send a frame using existing sequence and exchange.
+	 * Sets up the DDP context for a given exchange id on the given
+	 * scatterlist if LLD supports DDP for large receive.
+	 *
+	 * STATUS: OPTIONAL
+	 */
+	int (*ddp_setup)(struct fc_lport *lp, u16 xid,
+			 struct scatterlist *sgl, unsigned int sgc);
+	/*
+	 * Completes the DDP transfer and returns the length of data DDPed
+	 * for the given exchange id.
+	 *
+	 * STATUS: OPTIONAL
+	 */
+	int (*ddp_done)(struct fc_lport *lp, u16 xid);
+	/*
+	 * Send a frame using an existing sequence and exchange.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp,
 			struct fc_frame *fp);
 
 	/*
-	 * Send ELS response using mainly infomation
-	 * in exchange and sequence in EM layer.
+	 * Send an ELS response using information from a previous
+	 * exchange and sequence.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd,
 				 struct fc_seq_els_data *els_data);
@@ -438,6 +508,8 @@ struct libfc_function_template {
 	 * A timer_msec can be specified for abort timeout, if non-zero
 	 * timer_msec value is specified then exchange resp handler
 	 * will be called with timeout error if no response to abort.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	int (*seq_exch_abort)(const struct fc_seq *req_sp,
 			      unsigned int timer_msec);
@@ -445,6 +517,8 @@ struct libfc_function_template {
 	/*
 	 * Indicate that an exchange/sequence tuple is complete and the memory
 	 * allocated for the related objects may be freed.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*exch_done)(struct fc_seq *sp);
 
@@ -452,6 +526,8 @@ struct libfc_function_template {
 	 * Assigns a EM and a free XID for an new exchange and then
 	 * allocates a new exchange and sequence pair.
 	 * The fp can be used to determine free XID.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp);
 
@@ -459,12 +535,16 @@ struct libfc_function_template {
 	 * Release previously assigned XID by exch_get API.
 	 * The LLD may implement this if XID is assigned by LLD
 	 * in exch_get().
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp,
 			 u16 ex_id);
 
 	/*
 	 * Start a new sequence on the same exchange/sequence tuple.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	struct fc_seq *(*seq_start_next)(struct fc_seq *sp);
 
@@ -472,26 +552,38 @@ struct libfc_function_template {
 	 * Reset an exchange manager, completing all sequences and exchanges.
 	 * If s_id is non-zero, reset only exchanges originating from that FID.
 	 * If d_id is non-zero, reset only exchanges sending to that FID.
+	 *
+	 * STATUS: OPTIONAL
 	 */
-	void (*exch_mgr_reset)(struct fc_exch_mgr *,
+	void (*exch_mgr_reset)(struct fc_lport *,
 			       u32 s_id, u32 d_id);
 
-	void (*rport_flush_queue)(void);
-	/**
-	 * Local Port interfaces
+	/*
+	 * Flush the rport work queue. Generally used before shutdown.
+	 *
+	 * STATUS: OPTIONAL
 	 */
+	void (*rport_flush_queue)(void);
 
 	/*
-	 * Receive a frame to a local port.
+	 * Receive a frame for a local port.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp,
 			   struct fc_frame *fp);
 
+	/*
+	 * Reset the local port.
+	 *
+	 * STATUS: OPTIONAL
+	 */
 	int (*lport_reset)(struct fc_lport *);
 
-	/**
-	 * Remote Port interfaces
+	/*
+	 * Create a remote port
 	 */
+	struct fc_rport *(*rport_create)(struct fc_disc_port *);
 
 	/*
 	 * Initiates the RP state machine. It is called from the LP module.
@@ -501,26 +593,33 @@ struct libfc_function_template {
 	 * - PLOGI
 	 * - PRLI
 	 * - RTV
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	int (*rport_login)(struct fc_rport *rport);
 
 	/*
 	 * Logoff, and remove the rport from the transport if
 	 * it had been added. This will send a LOGO to the target.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	int (*rport_logoff)(struct fc_rport *rport);
 
 	/*
 	 * Recieve a request from a remote port.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*rport_recv_req)(struct fc_seq *, struct fc_frame *,
 			       struct fc_rport *);
 
-	struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
-
-	/**
-	 * FCP interfaces
+	/*
+	 * Look up an rport by its port ID.
+	 *
+	 * STATUS: OPTIONAL
 	 */
+	struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
 
 	/*
 	 * Send a fcp cmd from fsp pkt.
@@ -528,30 +627,38 @@ struct libfc_function_template {
 	 *
 	 * The resp handler is called when FCP_RSP received.
 	 *
+	 * STATUS: OPTIONAL
 	 */
 	int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
 			    void (*resp)(struct fc_seq *, struct fc_frame *fp,
 					 void *arg));
 
 	/*
-	 * Used at least durring linkdown and reset
+	 * Clean up the FCP layer, used during link down and reset
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*fcp_cleanup)(struct fc_lport *lp);
 
 	/*
 	 * Abort all I/O on a local port
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*fcp_abort_io)(struct fc_lport *lp);
 
-	/**
-	 * Discovery interfaces
+	/*
+	 * Receive a request for the discovery layer.
+	 *
+	 * STATUS: OPTIONAL
 	 */
-
 	void (*disc_recv_req)(struct fc_seq *,
 			      struct fc_frame *, struct fc_lport *);
 
 	/*
 	 * Start discovery for a local port.
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*disc_start)(void (*disc_callback)(struct fc_lport *,
 						 enum fc_disc_event),
@@ -560,6 +667,8 @@ struct libfc_function_template {
 	/*
 	 * Stop discovery for a given lport. This will remove
 	 * all discovered rports
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*disc_stop) (struct fc_lport *);
 
@@ -567,11 +676,32 @@ struct libfc_function_template {
 	 * Stop discovery for a given lport. This will block
 	 * until all discovered rports are deleted from the
 	 * FC transport class
+	 *
+	 * STATUS: OPTIONAL
 	 */
 	void (*disc_stop_final) (struct fc_lport *);
 };
 
-struct fc_disc;
+/* information used by the discovery layer */
+struct fc_disc {
+	unsigned char		retry_count;
+	unsigned char		delay;
+	unsigned char		pending;
+	unsigned char		requested;
+	unsigned short		seq_count;
+	unsigned char		buf_len;
+	enum fc_disc_event	event;
+
+	void (*disc_callback)(struct fc_lport *,
+			      enum fc_disc_event);
+
+	struct list_head	 rports;
+	struct list_head	 rogue_rports;
+	struct fc_lport		*lport;
+	struct mutex		disc_mutex;
+	struct fc_gpn_ft_resp	partial_buf;	/* partial name buffer */
+	struct delayed_work	disc_work;
+};
 
 struct fc_lport {
 	struct list_head list;
@@ -581,17 +711,19 @@ struct fc_lport {
 	struct fc_exch_mgr	*emp;
 	struct fc_rport		*dns_rp;
 	struct fc_rport		*ptp_rp;
-	struct fc_disc          *disc;
 	void			*scsi_priv;
+	struct fc_disc          disc;
 
 	/* Operational Information */
 	struct libfc_function_template tt;
-	u16			link_status;
+	u8			link_up;
+	u8			qfull;
 	enum fc_lport_state	state;
 	unsigned long		boot_time;
 
 	struct fc_host_statistics host_stats;
-	struct fcoe_dev_stats	*dev_stats[NR_CPUS];
+	struct fcoe_dev_stats	*dev_stats;
+
 	u64			wwpn;
 	u64			wwnn;
 	u8			retry_count;
@@ -606,9 +738,11 @@ struct fc_lport {
 	unsigned int		e_d_tov;
 	unsigned int		r_a_tov;
 	u8			max_retry_count;
+	u8			max_rport_retry_count;
 	u16			link_speed;
 	u16			link_supported_speeds;
 	u16			lro_xid;	/* max xid for fcoe lro */
+	unsigned int		lso_max;	/* max large send size */
 	struct fc_ns_fts	fcts;	        /* FC-4 type masks */
 	struct fc_els_rnid_gen	rnid_gen;	/* RNID information */
 
@@ -616,18 +750,13 @@ struct fc_lport {
 	struct mutex lp_mutex;
 
 	/* Miscellaneous */
-	struct work_struct	retry_work;
-	struct work_struct	disc_work;
+	struct delayed_work	retry_work;
+	struct delayed_work	disc_work;
 };
 
-/**
+/*
  * FC_LPORT HELPER FUNCTIONS
  *****************************/
-static inline void *lport_priv(const struct fc_lport *lp)
-{
-	return (void *)(lp + 1);
-}
-
 static inline int fc_lport_test_ready(struct fc_lport *lp)
 {
 	return lp->state == LPORT_ST_READY;
@@ -651,8 +780,44 @@ static inline void fc_lport_state_enter(struct fc_lport *lp,
 	lp->state = state;
 }
 
+static inline int fc_lport_init_stats(struct fc_lport *lp)
+{
+	/* allocate per cpu stats block */
+	lp->dev_stats = alloc_percpu(struct fcoe_dev_stats);
+	if (!lp->dev_stats)
+		return -ENOMEM;
+	return 0;
+}
+
+static inline void fc_lport_free_stats(struct fc_lport *lp)
+{
+	free_percpu(lp->dev_stats);
+}
+
+static inline struct fcoe_dev_stats *fc_lport_get_stats(struct fc_lport *lp)
+{
+	return per_cpu_ptr(lp->dev_stats, smp_processor_id());
+}
+
+static inline void *lport_priv(const struct fc_lport *lp)
+{
+	return (void *)(lp + 1);
+}
 
 /**
+ * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport
+ * @sht: ptr to the scsi host template
+ * @priv_size: size of private data after fc_lport
+ *
+ * Returns: ptr to Scsi_Host
+ */
+static inline struct Scsi_Host *
+libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
+{
+	return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
+}
+
+/*
  * LOCAL PORT LAYER
  *****************************/
 int fc_lport_init(struct fc_lport *lp);
@@ -687,12 +852,6 @@ void fc_linkup(struct fc_lport *);
 void fc_linkdown(struct fc_lport *);
 
 /*
- * Pause and unpause traffic.
- */
-void fc_pause(struct fc_lport *);
-void fc_unpause(struct fc_lport *);
-
-/*
  * Configure the local port.
  */
 int fc_lport_config(struct fc_lport *);
@@ -708,19 +867,19 @@ int fc_lport_reset(struct fc_lport *);
 int fc_set_mfs(struct fc_lport *lp, u32 mfs);
 
 
-/**
+/*
  * REMOTE PORT LAYER
  *****************************/
 int fc_rport_init(struct fc_lport *lp);
 void fc_rport_terminate_io(struct fc_rport *rp);
 
-/**
+/*
  * DISCOVERY LAYER
  *****************************/
 int fc_disc_init(struct fc_lport *lp);
 
 
-/**
+/*
  * SCSI LAYER
  *****************************/
 /*
@@ -781,7 +940,12 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type);
  */
 void fc_fcp_destroy(struct fc_lport *);
 
-/**
+/*
+ * Set up direct-data placement for this I/O request
+ */
+void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+
+/*
  * ELS/CT interface
  *****************************/
 /*
@@ -790,7 +954,7 @@ void fc_fcp_destroy(struct fc_lport *);
 int fc_elsct_init(struct fc_lport *lp);
 
 
-/**
+/*
  * EXCHANGE MANAGER LAYER
  *****************************/
 /*
@@ -899,7 +1063,7 @@ struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
  * If s_id is non-zero, reset only exchanges originating from that FID.
  * If d_id is non-zero, reset only exchanges sending to that FID.
  */
-void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id);
+void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id);
 
 /*
  * Functions for fc_functions_template
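
A minimal sketch (not part of the patch) of how an LLD might use the new logging masks and per-CPU statistics helpers declared above; the function names are hypothetical and the TxFrames field is assumed from struct fcoe_dev_stats.

/* Illustrative only: allocate the per-CPU stats block that replaces the
 * old NR_CPUS array and emit a message gated by fc_debug_logging. */
static int example_lport_setup(struct fc_lport *lport)
{
	if (fc_lport_init_stats(lport))
		return -ENOMEM;

	/* prints only when FC_LPORT_LOGGING is set in fc_debug_logging */
	FC_LPORT_DBG(lport, "per-cpu stats allocated\n");
	return 0;
}

/* Illustrative only: bump a counter from the transmit path on this CPU */
static void example_count_tx_frame(struct fc_lport *lport)
{
	struct fcoe_dev_stats *stats = fc_lport_get_stats(lport);

	stats->TxFrames++;
}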
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 4ca5902..666cc13 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -1,5 +1,6 @@
 /*
- * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ * Copyright (c) 2008-2009 Cisco Systems, Inc.  All rights reserved.
+ * Copyright (c) 2007-2008 Intel Corporation.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -20,158 +21,144 @@
 #ifndef _LIBFCOE_H
 #define _LIBFCOE_H
 
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
+#include <linux/workqueue.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <scsi/libfc.h>
 
 /*
- * this percpu struct for fcoe
+ * FIP tunable parameters.
  */
-struct fcoe_percpu_s {
-	int		cpu;
-	struct task_struct *thread;
-	struct sk_buff_head fcoe_rx_list;
-	struct page *crc_eof_page;
-	int crc_eof_offset;
+#define FCOE_CTLR_START_DELAY	2000	/* ms after first adv. to choose FCF */
+#define FCOE_CTRL_SOL_TOV	2000	/* min. solicitation interval (ms) */
+#define FCOE_CTLR_FCF_LIMIT	20	/* max. number of FCF entries */
+
+/**
+ * enum fip_state - internal state of FCoE controller.
+ * @FIP_ST_DISABLED: 	controller has been disabled or not yet enabled.
+ * @FIP_ST_LINK_WAIT:	the physical link is down or unusable.
+ * @FIP_ST_AUTO:	determining whether to use FIP or non-FIP mode.
+ * @FIP_ST_NON_FIP:	non-FIP mode selected.
+ * @FIP_ST_ENABLED:	FIP mode selected.
+ */
+enum fip_state {
+	FIP_ST_DISABLED,
+	FIP_ST_LINK_WAIT,
+	FIP_ST_AUTO,
+	FIP_ST_NON_FIP,
+	FIP_ST_ENABLED,
 };
 
-/*
- * the fcoe sw transport private data
+/**
+ * struct fcoe_ctlr - FCoE Controller and FIP state.
+ * @state:	internal FIP state for network link and FIP or non-FIP mode.
+ * @lp:		&fc_lport: libfc local port.
+ * @sel_fcf:	currently selected FCF, or NULL.
+ * @fcfs:	list of discovered FCFs.
+ * @fcf_count:	number of discovered FCF entries.
+ * @sol_time:	time when a multicast solicitation was last sent.
+ * @sel_time:	time after which to select an FCF.
+ * @port_ka_time: time of next port keep-alive.
+ * @ctlr_ka_time: time of next controller keep-alive.
+ * @timer:	timer struct used for all delayed events.
+ * @link_work:	&work_struct for doing FCF selection.
+ * @recv_work:	&work_struct for receiving FIP frames.
+ * @fip_recv_list: list of received FIP frames.
+ * @user_mfs:	configured maximum FC frame size, including FC header.
+ * @flogi_oxid: exchange ID of most recent fabric login.
+ * @flogi_count: number of FLOGI attempts in AUTO mode.
+ * @link:	current link status for libfc.
+ * @last_link:	last link state reported to libfc.
+ * @map_dest:	use the FC_MAP mode for destination MAC addresses.
+ * @dest_addr:	MAC address of the selected FC forwarder.
+ * @ctl_src_addr: the native MAC address of our local port.
+ * @data_src_addr: the assigned MAC address for the local port after FLOGI.
+ * @send:	LLD-supplied function to handle sending of FIP Ethernet frames.
+ * @update_mac: LLD-supplied function to handle changes to MAC addresses.
+ * @lock:	lock protecting this structure.
+ *
+ * This structure is used by all FCoE drivers.  It contains information
+ * needed by all FCoE low-level drivers (LLDs) as well as internal state
+ * for FIP, and fields shared with the LLDs.
  */
-struct fcoe_softc {
-	struct list_head list;
+struct fcoe_ctlr {
+	enum fip_state state;
 	struct fc_lport *lp;
-	struct net_device *real_dev;
-	struct net_device *phys_dev;		/* device with ethtool_ops */
-	struct packet_type  fcoe_packet_type;
-	struct sk_buff_head fcoe_pending_queue;
-	u16 user_mfs;			/* configured max frame size */
-
+	struct fcoe_fcf *sel_fcf;
+	struct list_head fcfs;
+	u16 fcf_count;
+	unsigned long sol_time;
+	unsigned long sel_time;
+	unsigned long port_ka_time;
+	unsigned long ctlr_ka_time;
+	struct timer_list timer;
+	struct work_struct link_work;
+	struct work_struct recv_work;
+	struct sk_buff_head fip_recv_list;
+	u16 user_mfs;
+	u16 flogi_oxid;
+	u8 flogi_count;
+	u8 link;
+	u8 last_link;
+	u8 map_dest;
 	u8 dest_addr[ETH_ALEN];
 	u8 ctl_src_addr[ETH_ALEN];
 	u8 data_src_addr[ETH_ALEN];
-	/*
-	 * fcoe protocol address learning related stuff
-	 */
-	u16 flogi_oxid;
-	u8 flogi_progress;
-	u8 address_mode;
-};
-
-static inline struct fcoe_softc *fcoe_softc(
-	const struct fc_lport *lp)
-{
-	return (struct fcoe_softc *)lport_priv(lp);
-}
-
-static inline struct net_device *fcoe_netdev(
-	const struct fc_lport *lp)
-{
-	return fcoe_softc(lp)->real_dev;
-}
-
-static inline struct fcoe_hdr *skb_fcoe_header(const struct sk_buff *skb)
-{
-	return (struct fcoe_hdr *)skb_network_header(skb);
-}
-
-static inline int skb_fcoe_offset(const struct sk_buff *skb)
-{
-	return skb_network_offset(skb);
-}
-
-static inline struct fc_frame_header *skb_fc_header(const struct sk_buff *skb)
-{
-	return (struct fc_frame_header *)skb_transport_header(skb);
-}
-
-static inline int skb_fc_offset(const struct sk_buff *skb)
-{
-	return skb_transport_offset(skb);
-}
-
-static inline void skb_reset_fc_header(struct sk_buff *skb)
-{
-	skb_reset_network_header(skb);
-	skb_set_transport_header(skb, skb_network_offset(skb) +
-				 sizeof(struct fcoe_hdr));
-}
 
-static inline bool skb_fc_is_data(const struct sk_buff *skb)
-{
-	return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_SOL_DATA;
-}
-
-static inline bool skb_fc_is_cmd(const struct sk_buff *skb)
-{
-	return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD;
-}
-
-static inline bool skb_fc_has_exthdr(const struct sk_buff *skb)
-{
-	return (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_VFTH) ||
-	    (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_IFRH) ||
-	    (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_ENCH);
-}
-
-static inline bool skb_fc_is_roff(const struct sk_buff *skb)
-{
-	return skb_fc_header(skb)->fh_f_ctl[2] & FC_FC_REL_OFF;
-}
+	void (*send)(struct fcoe_ctlr *, struct sk_buff *);
+	void (*update_mac)(struct fcoe_ctlr *, u8 *old, u8 *new);
+	spinlock_t lock;
+};
 
-static inline u16 skb_fc_oxid(const struct sk_buff *skb)
-{
-	return be16_to_cpu(skb_fc_header(skb)->fh_ox_id);
-}
+/*
+ * struct fcoe_fcf - Fibre-Channel Forwarder.
+ * @list:	list linkage.
+ * @time:	system time (jiffies) when an advertisement was last received.
+ * @switch_name: WWN of switch from advertisement.
+ * @fabric_name: WWN of fabric from advertisement.
+ * @fc_map:	FC_MAP value from advertisement.
+ * @fcf_mac:	Ethernet address of the FCF.
+ * @vfid:	virtual fabric ID.
+ * @pri:	selection priority, smaller values are better.
+ * @flags:	flags received from advertisement.
+ * @fka_period:	keep-alive period, in jiffies.
+ *
+ * A Fibre-Channel Forwarder (FCF) is the entity on the Ethernet that
+ * passes FCoE frames on to an FC fabric.  This structure represents
+ * one FCF from which advertisements have been received.
+ *
+ * When looking up an FCF, @switch_name, @fabric_name, @fc_map, @vfid, and
+ * @fcf_mac together form the lookup key.
+ */
+struct fcoe_fcf {
+	struct list_head list;
+	unsigned long time;
 
-static inline u16 skb_fc_rxid(const struct sk_buff *skb)
-{
-	return be16_to_cpu(skb_fc_header(skb)->fh_rx_id);
-}
+	u64 switch_name;
+	u64 fabric_name;
+	u32 fc_map;
+	u16 vfid;
+	u8 fcf_mac[ETH_ALEN];
 
-/* FIXME - DMA_BIDIRECTIONAL ? */
-#define skb_cb(skb)	((struct fcoe_rcv_info *)&((skb)->cb[0]))
-#define skb_cmd(skb)	(skb_cb(skb)->fr_cmd)
-#define skb_dir(skb)	(skb_cmd(skb)->sc_data_direction)
-static inline bool skb_fc_is_read(const struct sk_buff *skb)
-{
-	if (skb_fc_is_cmd(skb) && skb_cmd(skb))
-		return skb_dir(skb) == DMA_FROM_DEVICE;
-	return false;
-}
+	u8 pri;
+	u16 flags;
+	u32 fka_period;
+};
 
-static inline bool skb_fc_is_write(const struct sk_buff *skb)
-{
-	if (skb_fc_is_cmd(skb) && skb_cmd(skb))
-		return skb_dir(skb) == DMA_TO_DEVICE;
-	return false;
-}
+/* FIP API functions */
+void fcoe_ctlr_init(struct fcoe_ctlr *);
+void fcoe_ctlr_destroy(struct fcoe_ctlr *);
+void fcoe_ctlr_link_up(struct fcoe_ctlr *);
+int fcoe_ctlr_link_down(struct fcoe_ctlr *);
+int fcoe_ctlr_els_send(struct fcoe_ctlr *, struct sk_buff *);
+void fcoe_ctlr_recv(struct fcoe_ctlr *, struct sk_buff *);
+int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_frame *fp, u8 *sa);
 
 /* libfcoe funcs */
-int fcoe_reset(struct Scsi_Host *shost);
-u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
-		      unsigned int scheme, unsigned int port);
-
-u32 fcoe_fc_crc(struct fc_frame *fp);
-int fcoe_xmit(struct fc_lport *, struct fc_frame *);
-int fcoe_rcv(struct sk_buff *, struct net_device *,
-	     struct packet_type *, struct net_device *);
-
-int fcoe_percpu_receive_thread(void *arg);
-void fcoe_clean_pending_queue(struct fc_lport *lp);
-void fcoe_percpu_clean(struct fc_lport *lp);
-void fcoe_watchdog(ulong vp);
-int fcoe_link_ok(struct fc_lport *lp);
-
-struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
-int fcoe_hostlist_add(const struct fc_lport *);
-int fcoe_hostlist_remove(const struct fc_lport *);
-
-struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *, int);
+u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
 int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *);
 
-/* fcoe sw hba */
-int __init fcoe_sw_init(void);
-int __exit fcoe_sw_exit(void);
 #endif /* _LIBFCOE_H */
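
To show how an LLD is expected to wire up the new fcoe_ctlr/FIP entry points, a minimal sketch (not part of the patch) follows; the function names and the direct dev_queue_xmit() call are illustrative only, and error handling, MAC setup and update_mac() are omitted.

/* Illustrative only: the LLD owns a struct fcoe_ctlr, supplies a send
 * routine for FIP Ethernet frames, and reports link state to it.  A real
 * driver also fills ctl_src_addr and provides an update_mac() handler. */
static struct fcoe_ctlr example_fip;

static void example_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	dev_queue_xmit(skb);	/* hypothetical transmit path */
}

static void example_fip_start(struct fc_lport *lport)
{
	fcoe_ctlr_init(&example_fip);
	example_fip.lp = lport;
	example_fip.send = example_fip_send;

	/* once the carrier is up, FLOGI goes through fcoe_ctlr_els_send()
	 * and received FIP frames are passed to fcoe_ctlr_recv() */
	fcoe_ctlr_link_up(&example_fip);
}

static void example_fip_stop(void)
{
	fcoe_ctlr_link_down(&example_fip);
	fcoe_ctlr_destroy(&example_fip);
}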