Sophie

Sophie

distrib > Scientific%20Linux > 5x > x86_64 > by-pkgid > 89877e42827f16fa5f86b1df0c2860b1 > files > 2169

kernel-2.6.18-128.1.10.el5.src.rpm

From: Tom Coughlan <coughlan@redhat.com>
Date: Mon, 25 Aug 2008 15:50:44 -0400
Subject: [scsi] lpfc: update to version 8.2.0.30
Message-id: 1219693845.31197.47.camel@p670.boston.redhat.com
O-Subject: [RHEL5.3 PATCH] Update Emulex lpfc to version 8.2.0.30
Bugzilla: 441746

Resolves bz 441746.

Problem description:

Various bug fixes, new hardware support, and minor enhancements to lpfc
driver. Requested by IBM and Emulex.

Changelog for this patch:

Changed version number to 8.2.0.30
* Added logic to control I/O queue depth based on I/O completion time.
* Update driver to use new Host byte error code DID_TRANSPORT_DISRUPTED, if defined.
  (This is to fix dm-multipath BZ 447586, which may or may not make the 5.3 kernel.)
* Fixed slow vport deletes seen on certain switches.
* Fixed PCI-EEH permanent-disabling device not handled cleanly.
* Change usage of __FUNCTION__ to __func__ to match upstream usage
* Fix Hornet CEE fw upgrade doesn't work after first time.
* Fix diagnostic dump takes too long to complete.
* Fixed write IEEE problem when board in the stopped state.
* Add support for Proteus PCI IDs
* Fix for vport delete crash.
* Added support for MBX_READ_EVENT_LOG, (0x38).
* Fix numerous duplicate log message numbers
* Fixed conditions when invalid fru messages would be displayed.
* Fix the wrong variable name used for checking node active usage status
* Fix rejection followed by acceptance in handling RPL and RPS unsolicited event
* Remove repeated code statement in lpfc_els_timeout_handler
* Fix Unable to set the lpfc_sg_seg_cnt parameter w/out NPIV enabled.
* Fix vport name is not shown after hbacmd vportcreate.
* Fix Devloss timeout while running FC swap test with I/O and 20 vport and 25 LUNs
* Fix applications unable to send mailbox commands during discovery.
* Fixed Menlo/FCoE debug messages
* Fix two lpfc_sli_mode module parameters.
* Fix missing put of ndlp structure during driver unload.
* Added support of 2KB mailbox commands.
* Fix memory leak during driver init.
* Fix crash due to reused mailbox structure.
* Fix Max VPORTs for Helios and Zephyr should be 100
* Improve worker thread sleep management
* Fixed menlo panic command error.
* Increase max vport count to whatever the hardware can support.
* Fix abort IOCB Panic (CR 28988)
* Fix FAN processing. (CR 73991)
* Fix two return without unlock cases in sli_issue_mbox
* Fix PCI EEH handling crash
* Fix applications unable to send mailboxes if discovery fails.
* Fix ADISC timeout causes devloss timeout on target.
* Change device reset to abort all outstanding I/O for the target.
* Fixed handling of enable_npiv=1 and topology=4 combination
* Fixed a big endian issue in the fa bit handling.
* Added write access to the control registers when HBA is online.
* Fixed for completing the menlo maintenance mode on Link up events.
* Added support for LP21000 HBAs (FCoE support) and HBAnyware 4.0 functionality.

Upstream status:

Most of the changes are upstream. The first four items at the top of
the changelog were just posted for upstream review:

http://marc.info/?l=linux-scsi&m=121962893631252&w=2

We will keep an eye on the upstream review, and back out those changes
during beta if necessary.

Build status:

Built in Brew.
https://brewweb.devel.redhat.com/taskinfo?taskID=1435089

Testing:

Emulex has tested this on 5.2.
IBM and Emulex have agreed to test this during the 5.3 beta.

Please review and ACK.

Tom

diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 7e03178..6162b37 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -24,11 +24,11 @@ ifneq ($(GCOV),)
   EXTRA_CFLAGS += -O0
 endif
 
-EXTRA_CFLAGS += -DNETLINK_FCTRANSPORT=19
+EXTRA_CFLAGS += -DNETLINK_FCTRANSPORT=25
 
 obj-$(CONFIG_SCSI_LPFC) := lpfc.o
 
 lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o	\
 	lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
 	lpfc_vport.o lpfc_debugfs.o lpfc_security.o lpfc_auth_access.o \
-	lpfc_auth.o lpfc_ioctl.o
+	lpfc_auth.o lpfc_ioctl.o lpfc_menlo.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 398b7cd..39262b8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -23,7 +23,7 @@
 
 struct lpfc_sli2_slim;
 
-#define LPFC_MAX_TARGET		256	/* max number of targets supported */
+#define LPFC_MAX_TARGET		4096	/* max number of targets supported */
 #define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
 					   requests */
 #define LPFC_MAX_NS_RETRY	3	/* Number of retry attempts to contact
@@ -33,6 +33,12 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_SG_SEG_CNT	256	/* sg element count per scsi cmnd */
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
+#define LPFC_VNAME_LEN		100	/* vport symbolic name length */
+#define LPFC_TGTQ_INTERVAL	40000	/* Min amount of time between tgt
+					   queue depth change in millisecs */
+#define LPFC_TGTQ_RAMPUP_PCENT	5	/* Target queue rampup in percentage */
+#define LPFC_MIN_TGT_QDEPTH	100
+#define LPFC_MAX_TGT_QDEPTH	0xFFFF
 
 /*
  * Following time intervals are used of adjusting SCSI device
@@ -59,6 +65,9 @@ struct lpfc_sli2_slim;
 
 #define MAX_HBAEVT	32
 
+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY		(1<<0)
+
 enum lpfc_polling_flags {
 	ENABLE_FCP_RING_POLLING = 0x1,
 	DISABLE_FCP_RING_INT    = 0x2
@@ -200,6 +209,22 @@ struct lpfc_stats {
 	uint32_t fcpLocalErr;
 };
 
+struct lpfc_timedout_iocb_ctxt {
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_dmabufext *outdmp;
+	struct lpfc_dmabufext *indmp;
+};
+
+struct lpfc_dmabufext {
+	struct lpfc_dmabuf dma;
+	uint32_t size;
+	uint32_t flag;
+};
+
+
 enum sysfs_mbox_state {
 	SMBOX_IDLE,
 	SMBOX_WRITING,
@@ -230,6 +255,44 @@ struct lpfc_sysfs_mbox {
 	struct lpfc_dmabuf *  txmit_buff;
 	struct lpfc_dmabuf *  rcv_buff;
 };
+#define MENLO_DID 0x0000FC0E
+
+enum sysfs_menlo_state {
+	SMENLO_IDLE,
+	SMENLO_WRITING,
+	SMENLO_WRITING_MBEXT,
+	SMENLO_READING
+};
+
+struct lpfc_sysfs_menlo_hdr {
+	uint32_t cmd;
+	uint32_t cmdsize;
+	uint32_t rspsize;
+};
+
+struct lpfc_menlo_genreq64 {
+	size_t				offset;
+	struct lpfc_iocbq		*cmdiocbq;
+	struct lpfc_iocbq		*rspiocbq;
+	struct lpfc_dmabuf		*bmp;
+	struct lpfc_dmabufext		*indmp;
+	struct ulp_bde64		*cmdbpl;
+	struct lpfc_dmabufext		*outdmp;
+	uint32_t			timeout;
+	struct list_head		inhead;
+	struct list_head		outhead;
+};
+
+struct lpfc_sysfs_menlo {
+	enum sysfs_menlo_state		state;
+	/* process id of the mgmt application */
+	struct lpfc_sysfs_menlo_hdr	cmdhdr;
+	struct lpfc_menlo_genreq64	cr;
+	struct lpfc_menlo_genreq64	cx;
+	pid_t				pid;
+	struct list_head		list;
+};
+
 
 struct lpfc_hba;
 
@@ -301,12 +364,17 @@ struct lpfc_auth {
 	uint32_t reauth_interval;
 
 	uint8_t security_active;
-	uint8_t auth_state;
-	uint8_t auth_msg_state;
+	enum auth_state auth_state;
+	enum auth_msg_state auth_msg_state;
 	uint32_t trans_id;              /* current transaction id. Can be set
 					   by incomming transactions as well */
 	uint32_t group_id;
 	uint32_t hash_id;
+	uint32_t direction;
+#define AUTH_DIRECTION_NONE	0
+#define AUTH_DIRECTION_REMOTE	0x1
+#define AUTH_DIRECTION_LOCAL	0x2
+#define AUTH_DIRECTION_BIDI	(AUTH_DIRECTION_LOCAL|AUTH_DIRECTION_REMOTE)
 
 	uint8_t *challenge;
 	uint32_t challenge_len;
@@ -434,6 +502,7 @@ struct lpfc_vport {
 	uint32_t cfg_max_luns;
 	uint32_t cfg_enable_da_id;
 	uint32_t cfg_enable_auth;
+	uint32_t cfg_max_scsicmpl_time;
 
 	uint32_t dev_loss_tmo_changed;
 
@@ -504,7 +573,6 @@ struct lpfc_hba {
 
 	uint16_t pci_cfg_value;
 
-
 	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
 
 	uint32_t fc_eventTag;	/* event tag for link attention */
@@ -568,8 +636,9 @@ struct lpfc_hba {
 	uint32_t              work_hs;      /* HS stored in case of ERRAT */
 	uint32_t              work_status[2]; /* Extra status from SLIM */
 
-	wait_queue_head_t    *work_wait;
+	wait_queue_head_t    work_waitq;
 	struct task_struct   *worker_thread;
+	long data_flags;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
 	struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
@@ -623,6 +692,7 @@ struct lpfc_hba {
 
 	/* List of mailbox commands issued through sysfs */
 	struct list_head sysfs_mbox_list;
+	struct list_head sysfs_menlo_list;
 
 	/* fastpath list. */
 	spinlock_t scsi_buf_list_lock;
@@ -649,7 +719,8 @@ struct lpfc_hba {
 	struct list_head port_list;
 	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
 	uint16_t max_vpi;		/* Maximum virtual nports */
-#define LPFC_MAX_VPI	64		/* Max number of VPI supported */
+#define LPFC_MAX_VPI	0xFFFF		/* Max number of VPI supported */
+#define LPFC_INTR_VPI	100		/* Intermediate VPI supported */
 	unsigned long *vpi_bmask;	/* vpi allocation table */
 
 	/* Data structure used by fabric iocb scheduler */
@@ -693,6 +764,8 @@ struct lpfc_hba {
 	 */
 #define QUE_BUFTAG_BIT  (1<<31)
 	uint32_t buffer_tag_count;
+	int	wait_4_mlo_maint_flg;
+	wait_queue_head_t wait_4_mlo_m_q;
 };
 
 static inline struct Scsi_Host *
@@ -718,6 +791,17 @@ lpfc_is_link_up(struct lpfc_hba *phba)
 		phba->link_state == LPFC_HBA_READY;
 }
 
+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+	/* Set the lpfc data pending flag */
+	set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+	/* Wake up worker thread */
+	wake_up(&phba->work_waitq);
+	return;
+}
+
 #define FC_REG_DUMP_EVENT		0x10	/* Register for Dump events */
 #define FC_REG_TEMPERATURE_EVENT	0x20    /* Register for temperature
 						   event */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 6e93fe4..3b54fa9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -50,6 +50,15 @@
 #define LPFC_LINK_SPEED_BITMAP 0x00000117
 #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
 
+extern struct bin_attribute sysfs_menlo_attr;
+
+/*
+ * Write key size should be multiple of 4. If write key is changed
+ * make sure that library write key is also changed.
+ */
+#define LPFC_REG_WRITE_KEY_SIZE	4
+#define LPFC_REG_WRITE_KEY	"EMLX"
+
 static void
 lpfc_jedec_to_ascii(int incr, char hdw[])
 {
@@ -136,6 +145,16 @@ lpfc_programtype_show(struct class_device *cdev, char *buf)
 }
 
 static ssize_t
+lpfc_mlomgmt_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		(phba->sli.sli_flag & LPFC_MENLO_MAINT));
+}
+static ssize_t
 lpfc_vportnum_show(struct class_device *cdev, char *buf)
 {
 	struct Scsi_Host  *shost = class_to_shost(cdev);
@@ -228,8 +247,10 @@ lpfc_state_show(struct class_device *cdev, char *buf)
 					"Unknown\n");
 			break;
 		}
-
-		if (phba->fc_topology == TOPOLOGY_LOOP) {
+		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"   Menlo Maint Mode\n");
+		else if (phba->fc_topology == TOPOLOGY_LOOP) {
 			if (vport->fc_flag & FC_PUBLIC_LOOP)
 				len += snprintf(buf + len, PAGE_SIZE-len,
 						"   Public Loop\n");
@@ -679,6 +700,134 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
 	return strlen(buf);
 }
 
+static ssize_t
+lpfc_auth_state_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	switch (vport->auth.auth_state) {
+	case LPFC_AUTH_UNKNOWN:
+		if (vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE ||
+		    vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE ||
+		    vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY ||
+		    vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY)
+			return snprintf(buf, PAGE_SIZE, "Authenticating\n");
+		else
+			return snprintf(buf, PAGE_SIZE, "Not Authenticated\n");
+	case LPFC_AUTH_FAIL:
+		return snprintf(buf, PAGE_SIZE, "Failed\n");
+	case LPFC_AUTH_SUCCESS:
+		if (vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE ||
+		    vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE ||
+		    vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY ||
+		    vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY)
+			return snprintf(buf, PAGE_SIZE, "Authenticating\n");
+		else if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS)
+			return snprintf(buf, PAGE_SIZE, "Authenticated\n");
+	}
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_auth_dir_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (!vport->cfg_enable_auth ||
+	    vport->auth.auth_state != LPFC_AUTH_SUCCESS)
+		return snprintf(buf, PAGE_SIZE, "Unknown\n");
+	if (vport->auth.direction == AUTH_DIRECTION_LOCAL)
+		return snprintf(buf, PAGE_SIZE, "Local Authenticated\n");
+	else if (vport->auth.direction == AUTH_DIRECTION_REMOTE)
+		return snprintf(buf, PAGE_SIZE, "Remote Authenticated\n");
+	else if (vport->auth.direction == AUTH_DIRECTION_BIDI)
+		return snprintf(buf, PAGE_SIZE, "Bidi Authentication\n");
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_auth_protocol_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (vport->cfg_enable_auth &&
+	    vport->auth.auth_state == LPFC_AUTH_SUCCESS)
+		return snprintf(buf, PAGE_SIZE, "1 (DH-CHAP)\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_auth_dhgroup_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (!vport->cfg_enable_auth ||
+	    vport->auth.auth_state != LPFC_AUTH_SUCCESS)
+		return snprintf(buf, PAGE_SIZE, "Unknown\n");
+	switch (vport->auth.group_id) {
+	case DH_GROUP_NULL:
+		return snprintf(buf, PAGE_SIZE, "0 (NULL)\n");
+	case DH_GROUP_1024:
+		return snprintf(buf, PAGE_SIZE, "1 (1024)\n");
+	case DH_GROUP_1280:
+		return snprintf(buf, PAGE_SIZE, "2 (1280)\n");
+	case DH_GROUP_1536:
+		return snprintf(buf, PAGE_SIZE, "3 (1536)\n");
+	case DH_GROUP_2048:
+		return snprintf(buf, PAGE_SIZE, "4 (2048)\n");
+	}
+	return snprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n",
+			vport->auth.group_id);
+}
+
+static ssize_t
+lpfc_auth_hash_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (!vport->cfg_enable_auth ||
+	    vport->auth.auth_state != LPFC_AUTH_SUCCESS)
+		return snprintf(buf, PAGE_SIZE, "Unknown\n");
+	switch (vport->auth.hash_id) {
+	case FC_SP_HASH_MD5:
+		return snprintf(buf, PAGE_SIZE, "5 (MD5)\n");
+	case FC_SP_HASH_SHA1:
+		return snprintf(buf, PAGE_SIZE, "6 (SHA1)\n");
+	}
+	return snprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n",
+			vport->auth.hash_id);
+}
+static ssize_t
+lpfc_auth_last_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct timeval last_time;
+	if (!vport->cfg_enable_auth || vport->auth.last_auth == 0)
+		return snprintf(buf, PAGE_SIZE, "%d\n", -1);
+	jiffies_to_timeval((jiffies - vport->auth.last_auth), &last_time);
+	return snprintf(buf, PAGE_SIZE, "%ld\n", last_time.tv_sec);
+}
+
+static ssize_t
+lpfc_auth_next_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	unsigned long next_jiff;
+	struct timeval next_time;
+	if (!vport->cfg_enable_auth ||
+	    vport->auth.last_auth == 0 ||
+	    vport->auth.reauth_interval == 0)
+		return snprintf(buf, PAGE_SIZE, "%d\n", -1);
+	/* calculate the amount of time left until next auth */
+	next_jiff = (msecs_to_jiffies(vport->auth.reauth_interval * 60000) +
+		     vport->auth.last_auth) - jiffies;
+	jiffies_to_timeval(next_jiff, &next_time);
+	return snprintf(buf, PAGE_SIZE, "%ld\n", next_time.tv_sec);
+}
+
 #define lpfc_param_show(attr)	\
 static ssize_t \
 lpfc_##attr##_show(struct class_device *cdev, char *buf) \
@@ -783,7 +932,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
 		return 0;\
 	}\
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
-			 "0449 lpfc_"#attr" attribute cannot be set to %d, "\
+			 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
 			 "allowed range is ["#minval", "#maxval"]\n", val); \
 	vport->cfg_##attr = default;\
 	return -EINVAL;\
@@ -798,7 +947,7 @@ lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
 		return 0;\
 	}\
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
-			 "0450 lpfc_"#attr" attribute cannot be set to %d, "\
+			 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
 			 "allowed range is ["#minval", "#maxval"]\n", val); \
 	return -EINVAL;\
 }
@@ -922,6 +1071,7 @@ static CLASS_DEVICE_ATTR(option_rom_version, S_IRUGO,
 					lpfc_option_rom_version_show, NULL);
 static CLASS_DEVICE_ATTR(num_discovered_ports, S_IRUGO,
 					lpfc_num_discovered_ports_show, NULL);
+static CLASS_DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
 static CLASS_DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
 static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
 			 NULL);
@@ -939,6 +1089,13 @@ static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
 static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
 static CLASS_DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show,
 			 NULL);
+static CLASS_DEVICE_ATTR(auth_state, S_IRUGO, lpfc_auth_state_show, NULL);
+static CLASS_DEVICE_ATTR(auth_dir, S_IRUGO, lpfc_auth_dir_show, NULL);
+static CLASS_DEVICE_ATTR(auth_protocol, S_IRUGO, lpfc_auth_protocol_show, NULL);
+static CLASS_DEVICE_ATTR(auth_dhgroup, S_IRUGO, lpfc_auth_dhgroup_show, NULL);
+static CLASS_DEVICE_ATTR(auth_hash, S_IRUGO, lpfc_auth_hash_show, NULL);
+static CLASS_DEVICE_ATTR(auth_last, S_IRUGO, lpfc_auth_last_show, NULL);
+static CLASS_DEVICE_ATTR(auth_next, S_IRUGO, lpfc_auth_next_show, NULL);
 
 static int
 lpfc_parse_wwn(const char *ns, uint8_t *nm)
@@ -964,21 +1121,37 @@ lpfc_parse_wwn(const char *ns, uint8_t *nm)
 
 	return 0;
 }
+
 static ssize_t
 lpfc_create_vport(struct class_device *cdev, const char *buf, size_t count)
 {
 	struct Scsi_Host  *shost = class_to_shost(cdev);
 	uint8_t wwnn[8];
 	uint8_t wwpn[8];
+	char vname[LPFC_VNAME_LEN + 1];
 	uint8_t stat;
+	int i = 0;
+	int vname_length = 0;
 
-	stat = lpfc_parse_wwn(&buf[0], wwpn);
+	stat = lpfc_parse_wwn(&buf[i], wwpn);
 	if (stat)
 		return stat;
-	stat = lpfc_parse_wwn(&buf[17], wwnn);
+	/* The wwnn starts one character after the wwpn */
+	i += (sizeof(wwpn) * 2) + 1;
+	stat = lpfc_parse_wwn(&buf[i], wwnn);
 	if (stat)
 		return stat;
-	if (lpfc_vport_create(shost, wwnn, wwpn))
+	/* The vname starts one character after the wwnn */
+	i += (sizeof(wwpn) * 2) + 1;
+	/* Skip the null terminator at the end and see how long the vname is */
+	if (count > (i + 1)) {
+		vname_length = count - (i + 1);
+		if (vname_length > LPFC_VNAME_LEN)
+			vname_length = LPFC_VNAME_LEN;
+		memcpy(vname, &buf[i], vname_length);
+	}
+	vname[vname_length] = '\0';
+	if (lpfc_vport_create(shost, wwnn, wwpn, vname))
 		return -EIO;
 	return count;
 }
@@ -994,11 +1167,14 @@ lpfc_delete_vport(struct class_device *cdev, const char *buf, size_t count)
 	uint8_t wwnn[8];
 	uint8_t wwpn[8];
 	struct lpfc_vport *vport;
+	int i = 0;
 
-	stat = lpfc_parse_wwn(&buf[0], wwpn);
+	stat = lpfc_parse_wwn(&buf[i], wwpn);
 	if (stat)
 		return stat;
-	stat = lpfc_parse_wwn(&buf[17], wwnn);
+	/* The wwnn starts one character after the wwpn */
+	i += (sizeof(wwpn) * 2) + 1;
+	stat = lpfc_parse_wwn(&buf[i], wwnn);
 	if (stat)
 		return stat;
 
@@ -1045,6 +1221,21 @@ static CLASS_DEVICE_ATTR(npiv_vports_inuse, S_IRUGO,
 			 lpfc_npiv_vports_inuse_show, NULL);
 static CLASS_DEVICE_ATTR(max_npiv_vports, S_IRUGO, lpfc_max_vpi_show, NULL);
 
+static ssize_t
+lpfc_symbolic_name_show(struct class_device *cdev, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	int length;
+	char symbname[256];
+
+	length = lpfc_vport_symbolic_port_name(vport, symbname, 256);
+	return snprintf(buf, PAGE_SIZE, "%s\n", symbname);
+}
+
+static CLASS_DEVICE_ATTR(lpfc_symbolic_name, S_IRUGO,
+			 lpfc_symbolic_name_show, NULL);
+
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 
 static ssize_t
@@ -1280,7 +1471,7 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
 		vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
 		if (val != LPFC_DEF_DEVLOSS_TMO)
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0402 Ignoring nodev_tmo module "
+					 "0407 Ignoring nodev_tmo module "
 					 "parameter because devloss_tmo is "
 					 "set.\n");
 		return 0;
@@ -1534,7 +1725,7 @@ lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
 {
 	if (val < 0 || val > 1) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0449 lpfc_restrict_login attribute cannot "
+				 "0422 lpfc_restrict_login attribute cannot "
 				 "be set to %d, allowed range is [0, 1]\n",
 				 val);
 		vport->cfg_restrict_login = 1;
@@ -1553,7 +1744,7 @@ lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
 {
 	if (val < 0 || val > 1) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0450 lpfc_restrict_login attribute cannot "
+				 "0425 lpfc_restrict_login attribute cannot "
 				 "be set to %d, allowed range is [0, 1]\n",
 				 val);
 		vport->cfg_restrict_login = 1;
@@ -1685,7 +1876,7 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
 		return 0;
 	}
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"0454 lpfc_link_speed attribute cannot "
+			"0405 lpfc_link_speed attribute cannot "
 			"be set to %d, allowed values are "
 			"["LPFC_LINK_SPEED_STRING"]\n", val);
 	phba->cfg_link_speed = 0;
@@ -1711,6 +1902,48 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
 		   "Use ADISC on rediscovery to authenticate FCP devices");
 
 /*
+# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
+# depth. Default value is 0. When the value of this parameter is zero the
+# SCSI command completion time is not used for controlling I/O queue depth. When
+# the parameter is set to a non-zero value, the I/O queue depth is controlled
+# to limit the I/O completion time to the parameter value.
+# The value is set in milliseconds.
+*/
+static int lpfc_max_scsicmpl_time = 0;
+module_param(lpfc_max_scsicmpl_time, int, 0);
+MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+	"Use command completion time to control queue depth");
+lpfc_vport_param_show(max_scsicmpl_time);
+lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
+static int
+lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+
+	if (val == vport->cfg_max_scsicmpl_time)
+		return 0;
+	if ((val < 0) || (val > 60000))
+		return -EINVAL;
+	vport->cfg_max_scsicmpl_time = val;
+
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			continue;
+		ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+	}
+	spin_unlock_irq(shost->host_lock);
+	return 0;
+}
+lpfc_vport_param_store(max_scsicmpl_time);
+static CLASS_DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
+			 lpfc_max_scsicmpl_time_show,
+			 lpfc_max_scsicmpl_time_store);
+
+/*
 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
 # range is [0,1]. Default value is 0.
 */
@@ -1828,7 +2061,7 @@ lpfc_enable_auth_set(struct lpfc_vport *vport, int val)
 		return 0;
 	}
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-			 "0450 lpfc_enable_auth attribute cannot be set to %d, "
+			 "0426 lpfc_enable_auth attribute cannot be set to %d, "
 			 "allowed range is [0, 1]\n", val);
 	return -EINVAL;
 }
@@ -1932,6 +2165,7 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
 	&class_device_attr_option_rom_version,
 	&class_device_attr_state,
 	&class_device_attr_num_discovered_ports,
+	&class_device_attr_menlo_mgmt_mode,
 	&class_device_attr_lpfc_drvr_version,
 	&class_device_attr_lpfc_temp_sensor,
 	&class_device_attr_lpfc_log_verbose,
@@ -1977,12 +2211,21 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
 	&class_device_attr_max_npiv_vports,
 	&class_device_attr_vport_delete,
 	&class_device_attr_vport_create,
+	&class_device_attr_auth_state,
+	&class_device_attr_auth_dir,
+	&class_device_attr_auth_protocol,
+	&class_device_attr_auth_dhgroup,
+	&class_device_attr_auth_hash,
+	&class_device_attr_auth_last,
+	&class_device_attr_auth_next,
+	&class_device_attr_lpfc_symbolic_name,
 	&class_device_attr_lpfc_soft_wwnn,
 	&class_device_attr_lpfc_soft_wwpn,
 	&class_device_attr_lpfc_soft_wwn_enable,
 	&class_device_attr_lpfc_enable_hba_reset,
 	&class_device_attr_lpfc_enable_hba_heartbeat,
 	&class_device_attr_lpfc_sg_seg_cnt,
+	&class_device_attr_lpfc_max_scsicmpl_time,
 	NULL,
 };
 
@@ -1998,6 +2241,7 @@ struct class_device_attribute *lpfc_hba_attrs_no_npiv[] = {
 	&class_device_attr_option_rom_version,
 	&class_device_attr_state,
 	&class_device_attr_num_discovered_ports,
+	&class_device_attr_menlo_mgmt_mode,
 	&class_device_attr_lpfc_drvr_version,
 	&class_device_attr_lpfc_temp_sensor,
 	&class_device_attr_lpfc_log_verbose,
@@ -2042,6 +2286,15 @@ struct class_device_attribute *lpfc_hba_attrs_no_npiv[] = {
 	&class_device_attr_lpfc_soft_wwnn,
 	&class_device_attr_lpfc_soft_wwpn,
 	&class_device_attr_lpfc_soft_wwn_enable,
+	&class_device_attr_auth_state,
+	&class_device_attr_auth_dir,
+	&class_device_attr_auth_protocol,
+	&class_device_attr_auth_dhgroup,
+	&class_device_attr_auth_hash,
+	&class_device_attr_auth_last,
+	&class_device_attr_auth_next,
+	&class_device_attr_lpfc_symbolic_name,
+	&class_device_attr_lpfc_max_scsicmpl_time,
 	NULL,
 };
 
@@ -2066,7 +2319,16 @@ struct class_device_attribute *lpfc_vport_attrs[] = {
 	&class_device_attr_management_version,
 	&class_device_attr_npiv_info,
 	&class_device_attr_lpfc_enable_da_id,
+	&class_device_attr_lpfc_max_scsicmpl_time,
 	&class_device_attr_lpfc_dev_loss_initiator,
+	&class_device_attr_auth_state,
+	&class_device_attr_auth_dir,
+	&class_device_attr_auth_protocol,
+	&class_device_attr_auth_dhgroup,
+	&class_device_attr_auth_hash,
+	&class_device_attr_auth_last,
+	&class_device_attr_auth_next,
+	&class_device_attr_lpfc_symbolic_name,
 	NULL,
 };
 
@@ -2080,21 +2342,23 @@ sysfs_ctlreg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 
-	if ((off + count) > FF_REG_AREA_SIZE)
+	if ((off + count) > FF_REG_AREA_SIZE + LPFC_REG_WRITE_KEY_SIZE)
 		return -ERANGE;
 
-	if (count == 0) return 0;
+	if (count <= LPFC_REG_WRITE_KEY_SIZE)
+		return 0;
 
 	if (off % 4 || count % 4 || (unsigned long)buf % 4)
 		return -EINVAL;
 
-	if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
-		return -EPERM;
-	}
+	/* This is to protect HBA registers from accidental writes. */
+	if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
+		return -EINVAL;
 
 	spin_lock_irq(&phba->hbalock);
-	for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
-		writel(*((uint32_t *)(buf + buf_off)),
+	for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
+			buf_off += sizeof(uint32_t))
+		writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
 		       phba->ctrl_regs_memmap_p + off + buf_off);
 
 	spin_unlock_irq(&phba->hbalock);
@@ -2167,7 +2431,7 @@ lpfc_get_sysfs_mbox(struct lpfc_hba *phba, uint8_t create)
 		return NULL;
 	}
 	spin_unlock_irq(&phba->hbalock);
-	sysfs_mbox = kzalloc(sizeof( struct lpfc_sysfs_mbox),
+	sysfs_mbox = kzalloc(sizeof(struct lpfc_sysfs_mbox),
 			GFP_KERNEL);
 	if (!sysfs_mbox)
 		return NULL;
@@ -2228,20 +2492,24 @@ lpfc_syfs_mbox_copy_rcv_buff(struct lpfc_hba *phba,
 		return -EAGAIN;
 	}
 
-	size = sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize;
+	if (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG)
+		size = sysfs_mbox->mbox->mb.un.
+			varRdEventLog.rcv_bde64.tus.f.bdeSize;
+	else
+		size = sysfs_mbox->mbox->mb.un.
+			varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize;
+
 
 	if ((count + off) > size) {
 		sysfs_mbox_idle(phba, sysfs_mbox);
 		spin_unlock_irq(&phba->hbalock);
 		return -ERANGE;
 	}
-
-	if (size > LPFC_BPL_SIZE) {
+	if (count > LPFC_BPL_SIZE) {
 		sysfs_mbox_idle(phba, sysfs_mbox);
 		spin_unlock_irq(&phba->hbalock);
 		return -ERANGE;
 	}
-
 	if (sysfs_mbox->extoff != off) {
 		sysfs_mbox_idle(phba, sysfs_mbox);
 		spin_unlock_irq(&phba->hbalock);
@@ -2375,7 +2643,8 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		 * app doesnot know how to do it, use a different
 		 * context.
 		 */
-		if (sysfs_mbox->state == SMBOX_READING_BUFF) {
+		if (sysfs_mbox->state == SMBOX_READING_BUFF ||
+		    sysfs_mbox->state == SMBOX_READING_MBEXT) {
 			spin_lock_irq(&phba->hbalock);
 			sysfs_mbox_idle(phba, sysfs_mbox);
 			spin_unlock_irq(&phba->hbalock);
@@ -2567,6 +2836,33 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
 			}
 			return count;
 	}
+	if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) &&
+		(sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG)) {
+		sysfs_mbox->state = SMBOX_WRITING;
+		spin_unlock_irq(&phba->hbalock);
+
+
+		/* Allocate rcv buffer */
+		sysfs_mbox->rcv_buff =
+			kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!sysfs_mbox->rcv_buff) {
+			spin_lock_irq(&phba->hbalock);
+			sysfs_mbox_idle(phba, sysfs_mbox);
+			spin_unlock_irq(&phba->hbalock);
+			return -ENOMEM;
+		}
+		INIT_LIST_HEAD(&sysfs_mbox->rcv_buff->list);
+		sysfs_mbox->rcv_buff->virt =
+			lpfc_mbuf_alloc(phba, 0,
+				&(sysfs_mbox->rcv_buff->phys));
+		if (!sysfs_mbox->rcv_buff->virt) {
+			spin_lock_irq(&phba->hbalock);
+			sysfs_mbox_idle(phba, sysfs_mbox);
+			spin_unlock_irq(&phba->hbalock);
+			return -ENOMEM;
+		}
+		return count;
+	}
 
 	spin_unlock_irq(&phba->hbalock);
 
@@ -2582,6 +2878,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	int rc;
+	int wait_4_menlo_maint = 0;
 	struct lpfc_sysfs_mbox *sysfs_mbox;
 	ssize_t ret;
 	sysfs_mbox = lpfc_get_sysfs_mbox(phba, 0);
@@ -2604,6 +2901,11 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 	if (sysfs_mbox->state == SMBOX_READING_BUFF) {
 		ret = lpfc_syfs_mbox_copy_rcv_buff(phba, sysfs_mbox,
 					buf, off, count);
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"1245 mbox: cmd 0x%x, 0x%x ret %x\n",
+				sysfs_mbox->mbox->mb.mbxCommand,
+				sysfs_mbox->mbox->mb.un.varWords[0],
+				(uint32_t)ret);
 		return ret;
 	}
 
@@ -2678,7 +2980,21 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		case MBX_BEACON:
 		case MBX_DEL_LD_ENTRY:
 		case MBX_SET_DEBUG:
+			break;
+		case MBX_READ_EVENT_LOG_STATUS:
+			break;
 		case MBX_SET_VARIABLE:
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"1226 mbox: set_variable 0x%x, 0x%x\n",
+				sysfs_mbox->mbox->mb.un.varWords[0],
+				sysfs_mbox->mbox->mb.un.varWords[1]);
+			if ((sysfs_mbox->mbox->mb.un.varWords[0]
+				== SETVAR_MLOMNT)
+				&& (sysfs_mbox->mbox->mb.un.varWords[1]
+				== 1)) {
+				wait_4_menlo_maint = 1;
+				phba->wait_4_mlo_maint_flg = 1;
+				}
 		case MBX_WRITE_WWN:
 		case MBX_UPDATE_CFG:
 			break;
@@ -2706,6 +3022,21 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 				putPaddrLow(sysfs_mbox->rcv_buff->phys);
 			}
 			break;
+		case MBX_WRITE_EVENT_LOG:
+			break;
+		case MBX_READ_EVENT_LOG:
+
+			if (sysfs_mbox->mbox->mb.un.varRdEventLog.
+				rcv_bde64.tus.f.bdeSize) {
+				sysfs_mbox->mbox->mb.un.varRdEventLog.
+					rcv_bde64.addrHigh =
+					putPaddrHigh(sysfs_mbox->
+						rcv_buff->phys);
+				sysfs_mbox->mbox->mb.un.varRdEventLog.
+				rcv_bde64.addrLow =
+				putPaddrLow(sysfs_mbox->rcv_buff->phys);
+			}
+			break;
 
 		case MBX_READ_SPARM64:
 		case MBX_READ_LA:
@@ -2743,22 +3074,22 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 		/* If HBA encountered an error attention, allow only DUMP
 		 * or RESTART mailbox commands until the HBA is restarted.
 		 */
-		if ((phba->pport->stopped) &&
-			(sysfs_mbox->mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
-			 sysfs_mbox->mbox->mb.mbxCommand != MBX_RESTART &&
-			 sysfs_mbox->mbox->mb.mbxCommand != MBX_WRITE_VPARMS)) {
-			sysfs_mbox_idle(phba,sysfs_mbox);
-			spin_unlock_irq(&phba->hbalock);
-			return -EPERM;
-		}
+		if (phba->pport->stopped &&
+		    sysfs_mbox->mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
+		    sysfs_mbox->mbox->mb.mbxCommand != MBX_RESTART &&
+		    sysfs_mbox->mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
+		    sysfs_mbox->mbox->mb.mbxCommand != MBX_WRITE_WWN)
+			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+					"1259 mbox: Issued mailbox cmd "
+					"0x%x while in stopped state.\n",
+					sysfs_mbox->mbox->mb.mbxCommand);
 
 		sysfs_mbox->mbox->vport = vport;
 
 		/* Don't allow mailbox commands to be sent when blocked
 		 * or when in the middle of discovery
 		 */
-		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO ||
-		    vport->fc_flag & FC_NDISC_ACTIVE) {
+		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
 			sysfs_mbox_idle(phba,sysfs_mbox);
 			spin_unlock_irq(&phba->hbalock);
 			return  -EAGAIN;
@@ -2790,6 +3121,33 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 			spin_unlock_irq(&phba->hbalock);
 			return  (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
 		}
+		if (wait_4_menlo_maint) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"1229 waiting for menlo mnt\n");
+			spin_unlock_irq(&phba->hbalock);
+			if (phba->wait_4_mlo_maint_flg)
+				wait_event_interruptible_timeout(
+					phba->wait_4_mlo_m_q,
+					phba->wait_4_mlo_maint_flg ==0,
+					60 * HZ);
+			spin_lock_irq(&phba->hbalock);
+			if (phba->wait_4_mlo_maint_flg) {
+				sysfs_mbox_idle(phba,sysfs_mbox);
+				phba->wait_4_mlo_maint_flg = 0;
+				spin_unlock_irq(&phba->hbalock);
+				return -EINTR;
+			} else
+				spin_unlock_irq(&phba->hbalock);
+
+			spin_lock_irq(&phba->hbalock);
+			if (phba->wait_4_mlo_maint_flg != 0) {
+				sysfs_mbox_idle(phba,sysfs_mbox);
+				phba->wait_4_mlo_maint_flg = 0;
+				spin_unlock_irq(&phba->hbalock);
+				return -ETIME;
+			}
+
+		}
 		sysfs_mbox->state = SMBOX_READING;
 	}
 	else if (sysfs_mbox->offset != off ||
@@ -2804,7 +3162,8 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
 	sysfs_mbox->offset = off + count;
 
 	if ((sysfs_mbox->offset == MAILBOX_CMD_SIZE) &&
-		(sysfs_mbox->mbox->mb.mbxCommand == MBX_RUN_BIU_DIAG64)) {
+		((sysfs_mbox->mbox->mb.mbxCommand == MBX_RUN_BIU_DIAG64) ||
+		(sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG))) {
 		sysfs_mbox->state  = SMBOX_READING_BUFF;
 		sysfs_mbox->extoff = 0;
 		spin_unlock_irq(&phba->hbalock);
@@ -2854,6 +3213,11 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
 	if (error)
 		goto out_remove_ctlreg_attr;
 
+	error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
+				      &sysfs_menlo_attr);
+	if (error)
+		goto out_remove_ctlreg_attr;
+
 	return 0;
 out_remove_ctlreg_attr:
 	sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
@@ -2868,6 +3232,7 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
 
 	sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_mbox_attr);
 	sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
+	sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_menlo_attr);
 }
 
 
@@ -3400,6 +3765,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	/* Also reinitialize the host templates with new values. */
+	lpfc_template_no_npiv.sg_tablesize = phba->cfg_sg_seg_cnt;
 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 	/*
@@ -3425,6 +3791,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
 	lpfc_restrict_login_init(vport, lpfc_restrict_login);
 	lpfc_fcp_class_init(vport, lpfc_fcp_class);
 	lpfc_use_adisc_init(vport, lpfc_use_adisc);
+	lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
 	lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
 	lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
 	lpfc_max_luns_init(vport, lpfc_max_luns);
diff --git a/drivers/scsi/lpfc/lpfc_auth.c b/drivers/scsi/lpfc/lpfc_auth.c
index c734b97..4101836 100644
--- a/drivers/scsi/lpfc/lpfc_auth.c
+++ b/drivers/scsi/lpfc/lpfc_auth.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2006-2007 Emulex.  All rights reserved.           *
+ * Copyright (C) 2006-2008 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -171,6 +171,7 @@ lpfc_dhchap_authenticate(struct Scsi_Host *shost,
 		lpfc_issue_els_auth(vport, ndlp, DHCHAP_SUCCESS,
 				    suc_payload, suc_payload_len);
 		kfree(suc_payload);
+		vport->auth.direction |= AUTH_DIRECTION_LOCAL;
 	} else {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
 				 "1005 AUTHENTICATION_FAILURE Nport:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_auth_access.c b/drivers/scsi/lpfc/lpfc_auth_access.c
index 8fc04e4..d41e611 100644
--- a/drivers/scsi/lpfc/lpfc_auth_access.c
+++ b/drivers/scsi/lpfc/lpfc_auth_access.c
@@ -525,14 +525,14 @@ lpfc_fc_sc_schedule_msg(struct fc_nl_sc_message *fc_nl_sc_msg, int rcvlen)
 	if (!vport) {
 		printk(KERN_WARNING
 			"%s: Host does not exist for msg type %x.\n",
-			__FUNCTION__, fc_nl_sc_msg->snlh.msgtype);
+			__func__, fc_nl_sc_msg->snlh.msgtype);
 		return -EBADR;
 	}
 	shost = lpfc_shost_from_vport(vport);
 
 	if (vport->port_state == FC_PORTSTATE_DELETED) {
 		printk(KERN_WARNING
-		"%s: Host being deleted.\n", __FUNCTION__);
+		"%s: Host being deleted.\n", __func__);
 		return -EBADR;
 	}
 
@@ -655,7 +655,7 @@ lpfc_fc_handle_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int rcvlen)
 
 	default:
 		printk(KERN_WARNING "%s: unknown msg type 0x%x len %d\n",
-		       __FUNCTION__, snlh->msgtype, rcvlen);
+		       __func__, snlh->msgtype, rcvlen);
 		netlink_ack(skb, nlh, -EBADR);
 		skb_pull(skb, rcvlen);
 		break;
@@ -680,20 +680,20 @@ lpfc_fc_nl_rcv_msg(struct sk_buff *skb)
 		if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*snlh))) ||
 		    (skb->len < nlh->nlmsg_len)) {
 			printk(KERN_WARNING "%s: discarding partial skb\n",
-			       __FUNCTION__);
+			       __func__);
 			break;
 		}
 
 		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
 		if (rlen > skb->len) {
 			printk(KERN_WARNING "%s: rlen > skb->len\n",
-				 __FUNCTION__);
+				 __func__);
 			rlen = skb->len;
 		}
 
 		if (nlh->nlmsg_type != FC_TRANSPORT_MSG) {
 			printk(KERN_WARNING "%s: Not FC_TRANSPORT_MSG\n",
-			       __FUNCTION__);
+			       __func__);
 			err = -EBADMSG;
 			goto next_msg;
 		}
@@ -702,14 +702,14 @@ lpfc_fc_nl_rcv_msg(struct sk_buff *skb)
 		if ((snlh->version != SCSI_NL_VERSION) ||
 		    (snlh->magic != SCSI_NL_MAGIC)) {
 			printk(KERN_WARNING "%s: Bad Version or Magic number\n",
-			       __FUNCTION__);
+			       __func__);
 			err = -EPROTOTYPE;
 			goto next_msg;
 		}
 
 next_msg:
 		if (err) {
-			printk(KERN_WARNING "%s: err %d\n", __FUNCTION__, err);
+			printk(KERN_WARNING "%s: err %d\n", __func__, err);
 			netlink_ack(skb, nlh, err);
 			skb_pull(skb, rlen);
 			continue;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index e51f059..c60a116 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -227,6 +227,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
 			struct lpfc_iocbq *, uint32_t);
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
 			     struct lpfc_dmabuf *);
 struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -298,6 +299,7 @@ extern int lpfc_sli_mode;
 extern int lpfc_enable_npiv;
 
 int  lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
+int  lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
 void lpfc_terminate_rport_io(struct fc_rport *);
 void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
 
@@ -347,6 +349,8 @@ void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
 void lpfc_adjust_queue_depth(struct lpfc_hba *);
 void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
 void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
+void lpfc_scsi_dev_block(struct lpfc_hba *);
+void lpfc_scsi_dev_rescan(struct lpfc_hba *);
 
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 5434d87..dd9225a 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -437,7 +437,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
 				    (!(vport->ct_flags & FC_CT_RFF_ID)) ||
 				    (!vport->cfg_restrict_login)) {
 					ndlp = lpfc_setup_disc_node(vport, Did);
-					if (ndlp) {
+					if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 						lpfc_debugfs_disc_trc(vport,
 						LPFC_DISC_TRC_CT,
 						"Parse GID_FTrsp: "
@@ -763,7 +763,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	/* This is a target port, unregistered port, or the GFF_ID failed */
 	ndlp = lpfc_setup_disc_node(vport, did);
-	if (ndlp) {
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 				 "0242 Process x%x GFF "
 				 "NameServer Rsp Data: x%x x%x x%x\n",
@@ -860,7 +860,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 		retry++;
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-				 "0216 Retrying NS cmd %x\n", cmdcode);
+				 "0250 Retrying NS cmd %x\n", cmdcode);
 		rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
 		if (rc == 0)
 			goto out;
@@ -989,7 +989,7 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	return;
 }
 
-static int
+int
 lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
 	size_t size)
 {
@@ -1678,20 +1678,18 @@ lpfc_fdmi_tmo(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	spin_lock_irqsave(&vport->work_port_lock, iflag);
-	if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
+	tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_FDMI_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
-	else
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 90272e6..a0af4a1 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -863,7 +863,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 		atomic_set(&lpfc_debugfs_hba_count, 0);
 		if (!lpfc_debugfs_root) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0409 Cannot create debugfs root\n");
+					 "0408 Cannot create debugfs root\n");
 			goto debug_failed;
 		}
 	}
@@ -877,7 +877,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			debugfs_create_dir(name, lpfc_debugfs_root);
 		if (!phba->hba_debugfs_root) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0409 Cannot create debugfs hba\n");
+					 "0412 Cannot create debugfs hba\n");
 			goto debug_failed;
 		}
 		atomic_inc(&lpfc_debugfs_hba_count);
@@ -891,7 +891,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 				 phba, &lpfc_debugfs_op_hbqinfo);
 		if (!phba->debug_hbqinfo) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0409 Cannot create debugfs hbqinfo\n");
+				"0411 Cannot create debugfs hbqinfo\n");
 			goto debug_failed;
 		}
 
@@ -903,7 +903,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 				 phba, &lpfc_debugfs_op_dumpHBASlim);
 		if (!phba->debug_dumpHBASlim) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0409 Cannot create debugfs dumpHBASlim\n");
+				"0413 Cannot create debugfs dumpHBASlim\n");
 			goto debug_failed;
 		}
 
@@ -915,7 +915,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 				 phba, &lpfc_debugfs_op_dumpHostSlim);
 		if (!phba->debug_dumpHostSlim) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				"0409 Cannot create debugfs dumpHostSlim\n");
+				"0414 Cannot create debugfs dumpHostSlim\n");
 			goto debug_failed;
 		}
 
@@ -945,7 +945,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 				 phba, &lpfc_debugfs_op_slow_ring_trc);
 		if (!phba->debug_slow_ring_trc) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0409 Cannot create debugfs "
+					 "0415 Cannot create debugfs "
 					 "slow_ring_trace\n");
 			goto debug_failed;
 		}
@@ -956,7 +956,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 				GFP_KERNEL);
 			if (!phba->slow_ring_trc) {
 				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-						 "0409 Cannot create debugfs "
+						 "0416 Cannot create debugfs "
 						 "slow_ring buffer\n");
 				goto debug_failed;
 			}
@@ -973,7 +973,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			debugfs_create_dir(name, phba->hba_debugfs_root);
 		if (!vport->vport_debugfs_root) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0409 Cant create debugfs");
+					 "0417 Cant create debugfs");
 			goto debug_failed;
 		}
 		atomic_inc(&phba->debugfs_vport_count);
@@ -1002,7 +1002,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 
 	if (!vport->disc_trc) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0409 Cannot create debugfs disc trace "
+				 "0418 Cannot create debugfs disc trace "
 				 "buffer\n");
 		goto debug_failed;
 	}
@@ -1015,7 +1015,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 				 vport, &lpfc_debugfs_op_disc_trc);
 	if (!vport->debug_disc_trc) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0409 Cannot create debugfs "
+				 "0419 Cannot create debugfs "
 				 "discovery_trace\n");
 		goto debug_failed;
 	}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 7112e08..d0614c1 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -94,6 +94,9 @@ struct lpfc_nodelist {
 	unsigned long last_q_full_time;		/* jiffy of last queue full */
 	struct kref     kref;
 	unsigned long unreg_time;		/* Last time ndlp was unreged */
+	atomic_t cmd_pending;
+	uint32_t cmd_qdepth;
+	unsigned long last_change_time;
 };
 
 /* Defines for nlp_flag (uint32) */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 981373e..786fc8a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -382,7 +382,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		 */
 		list_for_each_entry_safe(np, next_np,
 					&vport->fc_nodes, nlp_listp) {
-			if (!NLP_CHK_NODE_ACT(ndlp))
+			if (!NLP_CHK_NODE_ACT(np))
 				continue;
 			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
 				   !(np->nlp_flag & NLP_NPR_ADISC))
@@ -739,7 +739,8 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
 		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
 		    icmd->un.elsreq64.bdl.ulpIoTag32) {
 			ndlp = (struct lpfc_nodelist *)(iocb->context1);
-			if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+			    (ndlp->nlp_DID == Fabric_DID)) {
 				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 			}
 		}
@@ -1264,6 +1265,71 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	return 0;
 }
 
+static void
+lpfc_rscn_disc(struct lpfc_vport *vport)
+{
+	lpfc_can_disctmo(vport);
+
+	/* RSCN discovery */
+	/* go thru NPR nodes and issue ELS PLOGIs */
+	if (vport->fc_npr_cnt)
+		if (lpfc_els_disc_plogi(vport))
+			return;
+
+	lpfc_end_rscn(vport);
+}
+
+/**
+ * lpfc_adisc_done: Complete the adisc phase of discovery.
+ * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
+ *
+ * This function is called when the final ADISC is completed during discovery.
+ * This function handles clearing link attention or issuing reg_vpi depending
+ * on whether npiv is enabled. This function also kicks off the PLOGI phase of
+ * discovery.
+ * This function is called with no locks held.
+ **/
+void
+lpfc_adisc_done(struct lpfc_vport *vport)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+
+	/*
+	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
+	 * and continue discovery.
+	 */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    !(vport->fc_flag & FC_RSCN_MODE)) {
+		lpfc_issue_reg_vpi(phba, vport);
+		return;
+	}
+	/*
+	* For SLI2, we need to set port_state to READY
+	* and continue discovery.
+	*/
+	if (vport->port_state < LPFC_VPORT_READY) {
+		/* If we get here, there is nothing to ADISC */
+		if (vport->port_type == LPFC_PHYSICAL_PORT)
+			lpfc_issue_clear_la(phba, vport);
+		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+			vport->num_disc_nodes = 0;
+			/* go thru NPR list, issue ELS PLOGIs */
+			if (vport->fc_npr_cnt)
+				lpfc_els_disc_plogi(vport);
+			if (!vport->num_disc_nodes) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+				lpfc_end_rscn(vport);
+			}
+		}
+		vport->port_state = LPFC_VPORT_READY;
+	} else
+		lpfc_rscn_disc(vport);
+}
+
 void
 lpfc_more_adisc(struct lpfc_vport *vport)
 {
@@ -1283,24 +1349,12 @@ lpfc_more_adisc(struct lpfc_vport *vport)
 		/* go thru NPR nodes and issue any remaining ELS ADISCs */
 		sentadisc = lpfc_els_disc_adisc(vport);
 	}
+	if (!vport->num_disc_nodes)
+		lpfc_adisc_done(vport);
 	return;
 }
 
 static void
-lpfc_rscn_disc(struct lpfc_vport *vport)
-{
-	lpfc_can_disctmo(vport);
-
-	/* RSCN discovery */
-	/* go thru NPR nodes and issue ELS PLOGIs */
-	if (vport->fc_npr_cnt)
-		if (lpfc_els_disc_plogi(vport))
-			return;
-
-	lpfc_end_rscn(vport);
-}
-
-static void
 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		    struct lpfc_iocbq *rspiocb)
 {
@@ -1364,52 +1418,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 					NLP_EVT_CMPL_ADISC);
 
-	if (disc && vport->num_disc_nodes) {
-		/* Check to see if there are more ADISCs to be sent */
+	/* Check to see if there are more ADISCs to be sent */
+	if (disc && vport->num_disc_nodes)
 		lpfc_more_adisc(vport);
-
-		/* Check to see if we are done with ADISC authentication */
-		if (vport->num_disc_nodes == 0) {
-			/* If we get here, there is nothing left to ADISC */
-			/*
-			 * For NPIV, cmpl_reg_vpi will set port_state to READY,
-			 * and continue discovery.
-			 */
-			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-			   !(vport->fc_flag & FC_RSCN_MODE)) {
-				lpfc_issue_reg_vpi(phba, vport);
-				goto out;
-			}
-			/*
-			 * For SLI2, we need to set port_state to READY
-			 * and continue discovery.
-			 */
-			if (vport->port_state < LPFC_VPORT_READY) {
-				/* If we get here, there is nothing to ADISC */
-				if (vport->port_type == LPFC_PHYSICAL_PORT)
-					lpfc_issue_clear_la(phba, vport);
-
-				if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
-					vport->num_disc_nodes = 0;
-					/* go thru NPR list, issue ELS PLOGIs */
-					if (vport->fc_npr_cnt)
-						lpfc_els_disc_plogi(vport);
-
-					if (!vport->num_disc_nodes) {
-						spin_lock_irq(shost->host_lock);
-						vport->fc_flag &=
-							~FC_NDISC_ACTIVE;
-						spin_unlock_irq(
-							shost->host_lock);
-						lpfc_can_disctmo(vport);
-					}
-				}
-				vport->port_state = LPFC_VPORT_READY;
-			} else {
-				lpfc_rscn_disc(vport);
-			}
-		}
-	}
 out:
 	lpfc_els_free_iocb(phba, cmdiocb);
 	return;
@@ -1785,35 +1796,37 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_work_evt *evtp;
 
+	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
+		return;
 	spin_lock_irq(shost->host_lock);
 	nlp->nlp_flag &= ~NLP_DELAY_TMO;
 	spin_unlock_irq(shost->host_lock);
 	del_timer_sync(&nlp->nlp_delayfunc);
 	nlp->nlp_last_elscmd = 0;
-
 	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
 		list_del_init(&nlp->els_retry_evt.evt_listp);
 		/* Decrement nlp reference count held for the delayed retry */
 		evtp = &nlp->els_retry_evt;
 		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
 	}
-
 	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
 		spin_lock_irq(shost->host_lock);
 		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
 		if (vport->num_disc_nodes) {
-			/* Check to see if there are more
-			 * PLOGIs to be sent
-			 */
-			lpfc_more_plogi(vport);
-
-			if (vport->num_disc_nodes == 0) {
-				spin_lock_irq(shost->host_lock);
-				vport->fc_flag &= ~FC_NDISC_ACTIVE;
-				spin_unlock_irq(shost->host_lock);
-				lpfc_can_disctmo(vport);
-				lpfc_end_rscn(vport);
+			if (vport->port_state < LPFC_VPORT_READY) {
+				/* Check if there are more ADISCs to be sent */
+				lpfc_more_adisc(vport);
+			} else {
+				/* Check if there are more PLOGIs to be sent */
+				lpfc_more_plogi(vport);
+				if (vport->num_disc_nodes == 0) {
+					spin_lock_irq(shost->host_lock);
+					vport->fc_flag &= ~FC_NDISC_ACTIVE;
+					spin_unlock_irq(shost->host_lock);
+					lpfc_can_disctmo(vport);
+					lpfc_end_rscn(vport);
+				}
 			}
 		}
 	}
@@ -1829,10 +1842,6 @@ lpfc_els_retry_delay(unsigned long ptr)
 	unsigned long flags;
 	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
 
-	ndlp = (struct lpfc_nodelist *) ptr;
-	phba = ndlp->vport->phba;
-	evtp = &ndlp->els_retry_evt;
-
 	spin_lock_irqsave(&phba->hbalock, flags);
 	if (!list_empty(&evtp->evt_listp)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1846,8 +1855,7 @@ lpfc_els_retry_delay(unsigned long ptr)
 	if (evtp->evt_arg1) {
 		evtp->evt = LPFC_EVT_ELS_RETRY;
 		list_add_tail(&evtp->evt_listp, &phba->work_list);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
+		lpfc_worker_wake_up(phba);
 	}
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	return;
@@ -2082,7 +2090,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
 			  ) {
 				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-						 "0123 FDISC Failed (x%x). "
+						 "0122 FDISC Failed (x%x). "
 						 "Fabric Detected Bad WWN\n",
 						 stat.un.lsRjtError);
 				lpfc_vport_set_state(vport,
@@ -2141,7 +2149,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		}
 
 		phba->fc_stat.elsXmitRetry++;
-		if (ndlp && delay) {
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
 			phba->fc_stat.elsDelayRetry++;
 			ndlp->nlp_retry = cmdiocb->retry;
 
@@ -2171,7 +2179,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
 			return 1;
 		case ELS_CMD_PLOGI:
-			if (ndlp) {
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 				ndlp->nlp_prev_state = ndlp->nlp_state;
 				lpfc_nlp_set_state(vport, ndlp,
 						   NLP_STE_PLOGI_ISSUE);
@@ -2200,7 +2208,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			ndlp->nlp_prev_state = ndlp->nlp_state;
 			ndlp->nlp_state = NLP_STE_NPR_NODE;
 			lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-					"0122 Authentication LS_RJT Logical "
+					"0143 Authentication LS_RJT Logical "
 					"busy\n");
 			lpfc_start_authentication(vport, ndlp);
 			return 1;
@@ -2364,7 +2372,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	mempool_free(pmb, phba->mbox_mem_pool);
-	if (ndlp) {
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_nlp_put(ndlp);
 		/* This is the end of the default RPI cleanup logic for this
 		 * ndlp. If no other discovery threads are using this ndlp.
@@ -2397,7 +2405,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	 * function can have cmdiocb->contest1 (ndlp) field set to NULL.
 	 */
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
-	if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
 		/* A LS_RJT associated with Default RPI cleanup has its own
 		 * seperate code path.
 		 */
@@ -2406,7 +2415,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	}
 
 	/* Check to see if link went down during discovery */
-	if (!ndlp || lpfc_els_chk_latt(vport)) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
 		if (mbox) {
 			mp = (struct lpfc_dmabuf *) mbox->context1;
 			if (mp) {
@@ -2415,7 +2424,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			}
 			mempool_free(mbox, phba->mbox_mem_pool);
 		}
-		if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
 			if (lpfc_nlp_not_used(ndlp)) {
 				ndlp = NULL;
 				/* Indicate the node has already released,
@@ -2505,7 +2515,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		mempool_free(mbox, phba->mbox_mem_pool);
 	}
 out:
-	if (ndlp) {
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
 		spin_unlock_irq(shost->host_lock);
@@ -2802,10 +2812,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	npr = (PRLI *) pcmd;
 	vpd = &phba->vpd;
 	/*
-	 * If our firmware version is 3.20 or later,
-	 * set the following bits for FC-TAPE support.
+	 * If the remote port is a target and our firmware version is 3.20 or
+	 * later, set the following bits for FC-TAPE support.
 	 */
-	if (vpd->rev.feaLevelHigh >= 0x02) {
+	if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+	    (vpd->rev.feaLevelHigh >= 0x02)) {
 		npr->ConfmComplAllowed = 1;
 		npr->Retry = 1;
 		npr->TaskRetryIdReq = 1;
@@ -3097,27 +3108,16 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
 {
 	struct lpfc_nodelist *ndlp = NULL;
 
-	/* Look at all nodes effected by pending RSCNs and move
-	 * them to NPR state.
-	 */
-
+	/* Move all affected nodes by pending RSCNs to NPR state. */
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		if (!NLP_CHK_NODE_ACT(ndlp) ||
-		    ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
-		    lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
+		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
 			continue;
-
 		lpfc_disc_state_machine(vport, ndlp, NULL,
-						NLP_EVT_DEVICE_RECOVERY);
-
-		/*
-		 * Make sure NLP_DELAY_TMO is NOT running after a device
-		 * recovery event.
-		 */
-		if (ndlp->nlp_flag & NLP_DELAY_TMO)
-			lpfc_cancel_retry_delay_tmo(vport, ndlp);
+					NLP_EVT_DEVICE_RECOVERY);
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	}
-
 	return 0;
 }
 
@@ -3180,7 +3180,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		if (rscn_id == hba_id) {
 			/* ALL NPortIDs in RSCN are on HBA */
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-					 "0214 Ignore RSCN "
+					 "0219 Ignore RSCN "
 					 "Data: x%x x%x x%x x%x\n",
 					 vport->fc_flag, payload_len,
 					 *lp, vport->fc_rscn_id_cnt);
@@ -3594,14 +3594,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 	struct ls_rjt stat;
 
 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
-	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
-		stat.un.b.lsRjtRsvd0 = 0;
-		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
-		stat.un.b.vendorUnique = 0;
-		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
-			NULL);
-	}
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RPS request and done with it */
+		goto reject_out;
 
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
@@ -3634,6 +3629,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 			mempool_free(mbox, phba->mbox_mem_pool);
 		}
 	}
+
+reject_out:
+	/* issue rejection response */
 	stat.un.b.lsRjtRsvd0 = 0;
 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
@@ -3708,12 +3706,15 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 
 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+		/* issue rejection response */
 		stat.un.b.lsRjtRsvd0 = 0;
 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
 		stat.un.b.vendorUnique = 0;
 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
 			NULL);
+		/* rejected the unsolicited RPL request and done with it */
+		return 0;
 	}
 
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -3822,91 +3823,27 @@ static int
 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		 struct lpfc_nodelist *fan_ndlp)
 {
-	struct lpfc_dmabuf *pcmd;
+	struct lpfc_hba *phba = vport->phba;
 	uint32_t *lp;
-	IOCB_t *icmd;
-	uint32_t cmd, did;
 	FAN *fp;
-	struct lpfc_nodelist *ndlp, *next_ndlp;
-	struct lpfc_hba *phba = vport->phba;
-
-	/* FAN received */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "0265 FAN received\n");
-	icmd = &cmdiocb->iocb;
-	did = icmd->un.elsreq64.remoteID;
-	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
-	lp = (uint32_t *)pcmd->virt;
-
-	cmd = *lp++;
-	fp = (FAN *) lp;
 
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
+	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+	fp = (FAN *) ++lp;
 	/* FAN received; Fan does not have a reply sequence */
-
-	if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
+	if ((vport == phba->pport) &&
+	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
 		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
-			sizeof(struct lpfc_name)) != 0) ||
+			    sizeof(struct lpfc_name))) ||
 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
-			sizeof(struct lpfc_name)) != 0)) {
-			/*
-			 * This node has switched fabrics.  FLOGI is required
-			 * Clean up the old rpi's
-			 */
-
-			list_for_each_entry_safe(ndlp, next_ndlp,
-						 &vport->fc_nodes, nlp_listp) {
-				if (!NLP_CHK_NODE_ACT(ndlp))
-					continue;
-				if (ndlp->nlp_state != NLP_STE_NPR_NODE)
-					continue;
-				if (ndlp->nlp_type & NLP_FABRIC) {
-					/*
-					 * Clean up old Fabric, Nameserver and
-					 * other NLP_FABRIC logins
-					 */
-					lpfc_drop_node(vport, ndlp);
-
-				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
-					/* Fail outstanding I/O now since this
-					 * device is marked for PLOGI
-					 */
-					lpfc_unreg_rpi(vport, ndlp);
-				}
-			}
-
+			    sizeof(struct lpfc_name)))) {
+			/* This port has switched fabrics. FLOGI is required */
 			lpfc_initial_flogi(vport);
-			return 0;
-		}
-		/* Discovery not needed,
-		 * move the nodes to their original state.
-		 */
-		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
-					 nlp_listp) {
-			if (!NLP_CHK_NODE_ACT(ndlp))
-				continue;
-			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
-				continue;
-
-			switch (ndlp->nlp_prev_state) {
-			case NLP_STE_UNMAPPED_NODE:
-				ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-				lpfc_nlp_set_state(vport, ndlp,
-						   NLP_STE_UNMAPPED_NODE);
-				break;
-
-			case NLP_STE_MAPPED_NODE:
-				ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-				lpfc_nlp_set_state(vport, ndlp,
-						   NLP_STE_MAPPED_NODE);
-				break;
-
-			default:
-				break;
-			}
+		} else {
+			/* FAN verified - skip FLOGI */
+			vport->fc_myDID = vport->fc_prevDID;
+			lpfc_issue_fabric_reglogin(vport);
 		}
-
-		/* Start discovery - this should just do CLEAR_LA */
-		lpfc_disc_start(vport);
 	}
 	return 0;
 }
@@ -3916,20 +3853,17 @@ lpfc_els_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	spin_lock_irqsave(&vport->work_port_lock, iflag);
-	if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
+	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_ELS_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
-	else
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -3974,9 +3908,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
 		    els_command == ELS_CMD_FDISC)
 			continue;
 
-		if (vport != piocb->vport)
-			continue;
-
 		if (piocb->drvrTimeout > 0) {
 			if (piocb->drvrTimeout >= timeout)
 				piocb->drvrTimeout -= timeout;
@@ -3991,7 +3922,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
 		else {
 			struct lpfc_nodelist *ndlp;
 			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
-			if (ndlp)
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp))
 				remote_ID = ndlp->nlp_DID;
 		}
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -4138,6 +4069,7 @@ lpfc_els_rcv_auth_neg(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		lpfc_issue_els_auth_reject(vport, ndlp, reason, explanation);
 		return;
 	}
+	vport->auth.direction = AUTH_DIRECTION_NONE;
 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY,
 			 "1033 Received auth_negotiate from Nport:x%x\n",
 			 ndlp->nlp_DID);
@@ -4208,6 +4140,7 @@ lpfc_els_rcv_chap_chal(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		lpfc_issue_els_auth_reject(vport, ndlp, reason, explanation);
 		return;
 	}
+	vport->auth.direction = AUTH_DIRECTION_NONE;
 
 	fc_req_len = (sizeof(struct fc_auth_req) +
 		      vport->auth.challenge_len +
@@ -4467,6 +4400,7 @@ lpfc_els_rcv_chap_suc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 			mod_timer(&ndlp->nlp_reauth_tmr, jiffies +
 				vport->auth.reauth_interval * 60 * HZ);
 	}
+	vport->auth.direction |= AUTH_DIRECTION_REMOTE;
 }
 static void
 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
@@ -5086,7 +5020,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			goto out;
 		/* FDISC failed */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-				 "0124 FDISC failed. (%d/%d)\n",
+				 "0126 FDISC failed. (%d/%d)\n",
 				 irsp->ulpStatus, irsp->un.ulpWord[4]);
 		goto fdisc_failed;
 	}
@@ -5306,18 +5240,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
 	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
 	unsigned long iflags;
 	uint32_t tmo_posted;
+
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
 	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
 	if (!tmo_posted)
 		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
 
-	if (!tmo_posted) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 static void
@@ -5332,11 +5264,12 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
 repeat:
 	iocb = NULL;
 	spin_lock_irqsave(&phba->hbalock, iflags);
-				/* Post any pending iocb to the SLI layer */
+	/* Post any pending iocb to the SLI layer */
 	if (atomic_read(&phba->fabric_iocb_count) == 0) {
 		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
 				 list);
 		if (iocb)
+			/* Increment fabric iocb count to hold the position */
 			atomic_inc(&phba->fabric_iocb_count);
 	}
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -5383,9 +5316,7 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
 	int blocked;
 
 	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
-				/* Start a timer to unblock fabric
-				 * iocbs after 100ms
-				 */
+	/* Start a timer to unblock fabric iocbs after 100ms */
 	if (!blocked)
 		mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
 
@@ -5433,8 +5364,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	atomic_dec(&phba->fabric_iocb_count);
 	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
-				/* Post any pending iocbs to HBA */
-		    lpfc_resume_fabric_iocbs(phba);
+		/* Post any pending iocbs to HBA */
+		lpfc_resume_fabric_iocbs(phba);
 	}
 }
 
@@ -5453,6 +5384,9 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
 		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
 
+	if (ready)
+		/* Increment fabric iocb count to hold the position */
+		atomic_inc(&phba->fabric_iocb_count);
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	if (ready) {
 		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
@@ -5463,7 +5397,6 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 			"Fabric sched2:   ste:x%x",
 			iocb->vport->port_state, 0, 0);
 
-		atomic_inc(&phba->fabric_iocb_count);
 		ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
 
 		if (ret == IOCB_ERROR) {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 350b32d..e772a90 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -56,6 +56,8 @@ static uint8_t lpfcAlpaArray[] = {
 	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
 };
 
+extern void lpfc_check_menlo_cfg(struct lpfc_hba *phba);
+
 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
 
 extern void lpfc_dev_loss_delay(unsigned long ptr);
@@ -70,7 +72,7 @@ lpfc_start_discovery(struct lpfc_vport *vport)
 	if (vport->auth.security_active &&
 	    vport->auth.auth_state != LPFC_AUTH_SUCCESS) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
-				 "0266 Authentication not complete.\n");
+				 "0285 Authentication not complete.\n");
 		return;
 	}
 	if (vport->port_type == LPFC_NPIV_PORT) {
@@ -112,7 +114,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
 	rdata = rport->dd_data;
 	ndlp = rdata->pnode;
 
-	if (!ndlp) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
 			printk(KERN_ERR "Cannot find remote node"
 			" to terminate I/O Data x%x\n",
@@ -157,7 +159,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 
 	rdata = rport->dd_data;
 	ndlp = rdata->pnode;
-	if (!ndlp)
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
 		return;
 
 	vport = ndlp->vport;
@@ -199,8 +201,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	if (evtp->evt_arg1) {
 		evtp->evt = LPFC_EVT_DEV_LOSS;
 		list_add_tail(&evtp->evt_listp, &phba->work_list);
-		if (phba->work_wait)
-			wake_up(phba->work_wait);
+		lpfc_worker_wake_up(phba);
 	}
 	spin_unlock_irq(&phba->hbalock);
 
@@ -261,7 +262,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 
 	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-				 "0219 Devloss timeout Ignored on "
+				 "0284 Devloss timeout Ignored on "
 				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
 				 "NPort x%x\n",
 				 *name, *(name+1), *(name+2), *(name+3),
@@ -280,7 +281,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		       msecs_to_jiffies(vport->cfg_devloss_tmo * 1000),
 		       jiffies)) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
-				 "0213 Devloss timeout Skipped on "
+				 "0283 Devloss timeout Skipped on "
 				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
 				 "NPort x%x Time %d\n",
 				 *name, *(name+1), *(name+2), *(name+3),
@@ -331,8 +332,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 	}
-	if (vport->load_flag & FC_UNLOADING)
-		warn_on = 0;
 
 	if (warn_on) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -370,14 +369,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }
 
-
-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-	wake_up(phba->work_wait);
-	return;
-}
-
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
@@ -560,6 +551,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
 			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 			lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -590,54 +583,29 @@ lpfc_work_done(struct lpfc_hba *phba)
 	lpfc_work_list_done(phba);
 }
 
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-	struct lpfc_vport *vport;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(vport, &phba->port_list, listentry) {
-		if (vport->work_port_events) {
-			spin_unlock_irq(&phba->hbalock);
-			return 1;
-		}
-	}
-	if (phba->work_ha || (!list_empty(&phba->work_list)) ||
-	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-		spin_unlock_irq(&phba->hbalock);
-		return 1;
-	}
-	spin_unlock_irq(&phba->hbalock);
-	return 0;
-}
-
-
 int
 lpfc_do_work(void *p)
 {
 	struct lpfc_hba *phba = p;
 	int rc;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
 	set_user_nice(current, -20);
-	phba->work_wait = &work_waitq;
+	phba->data_flags = 0;
 
 	while (1) {
-
-		rc = wait_event_interruptible(work_waitq,
-					      check_work_wait_done(phba));
-
+		/* wait and check worker queue activities */
+		rc = wait_event_interruptible(phba->work_waitq,
+					(test_and_clear_bit(LPFC_DATA_READY,
+							    &phba->data_flags)
+					 || kthread_should_stop()));
 		BUG_ON(rc);
 
 		if (kthread_should_stop())
 			break;
 
+		/* Attend pending lpfc data processing */
 		lpfc_work_done(phba);
 	}
-	spin_lock_irq(&phba->hbalock);
-	phba->work_wait = NULL;
-	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
 
@@ -667,10 +635,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
+	lpfc_worker_wake_up(phba);
+
 	return 1;
 }
 
@@ -1103,6 +1071,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
+		if (phba->cfg_enable_npiv)
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1310 Link Up Event npiv not supported in loop "
+				"topology\n");
 		/* Get Loop Map information */
 		if (la->il)
 			vport->fc_flag |= FC_LBIT;
@@ -1195,14 +1167,10 @@ out:
 }
 
 static void
-lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+lpfc_enable_la(struct lpfc_hba *phba)
 {
 	uint32_t control;
 	struct lpfc_sli *psli = &phba->sli;
-
-	lpfc_linkdown(phba);
-
-	/* turn on Link Attention interrupts - no CLEAR_LA needed */
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag |= LPFC_PROCESS_LA;
 	control = readl(phba->HCregaddr);
@@ -1212,6 +1180,15 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 }
 
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+	lpfc_linkdown(phba);
+	lpfc_enable_la(phba);
+	/* turn on Link Attention interrupts - no CLEAR_LA needed */
+}
+
+
 /*
  * This routine handles processing a READ_LA mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1227,6 +1204,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb = &pmb->mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
+	/* Unblock ELS traffic */
+	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
 	/* Check for error */
 	if (mb->mbxStatus) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1257,8 +1236,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	}
 
 	phba->fc_eventTag = la->eventTag;
+	if (la->mm)
+		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+	else
+		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 
-	if (la->attType == AT_LINK_UP) {
+	if (la->attType == AT_LINK_UP && (!la->mm)) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -1270,17 +1253,19 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		} else {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 					"1303 Link Up Event x%x received "
-					"Data: x%x x%x x%x x%x\n",
+					"Data: x%x x%x x%x x%x x%x x%x %d\n",
 					la->eventTag, phba->fc_eventTag,
 					la->granted_AL_PA, la->UlnkSpeed,
-					phba->alpa_map[0]);
+					phba->alpa_map[0],
+					la->mm, la->fa,
+					phba->wait_4_mlo_maint_flg);
 		}
 		lpfc_mbx_process_link_up(phba, la);
-	} else {
+	} else if (la->attType == AT_LINK_DOWN) {
 		phba->fc_stat.LinkDown++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-				"1308 Link Down Event in loop back mode "
+				"1300 Link Down Event in loop back mode "
 				"x%x received "
 				"Data: x%x x%x x%x\n",
 				la->eventTag, phba->fc_eventTag,
@@ -1289,13 +1274,50 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		else {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 				"1305 Link Down Event x%x received "
+				"Data: x%x x%x x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag,
+				la->mm, la->fa);
+		}
+		lpfc_mbx_issue_link_down(phba);
+	}
+	if (la->mm && la->attType == AT_LINK_UP) {
+		if (phba->link_state != LPFC_LINK_DOWN) {
+			phba->fc_stat.LinkDown++;
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1309 Link Down Event x%x received "
 				"Data: x%x x%x x%x\n",
 				la->eventTag, phba->fc_eventTag,
 				phba->pport->port_state, vport->fc_flag);
+			lpfc_mbx_issue_link_down(phba);
+		} else
+			lpfc_enable_la(phba);
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1308 Menlo Maint Mode Link up Event x%x rcvd "
+				"Data: x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag);
+		/*
+		 * The cmnd that triggered this will be waiting for this
+		 * signal.
+		 */
+		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
+		if ( phba->wait_4_mlo_maint_flg) {
+			phba->wait_4_mlo_maint_flg = 0;
+			wake_up_interruptible(&phba->wait_4_mlo_m_q);
 		}
-		lpfc_mbx_issue_link_down(phba);
 	}
 
+	if (la->fa ) {
+		if (la->mm)
+			lpfc_issue_clear_la(phba, vport);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+				"1311 fa %d\n", la->fa);
+		lpfc_check_menlo_cfg(phba);
+	}
+
+
 lpfc_mbx_cmpl_read_la_free_mbuf:
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
@@ -1768,7 +1790,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		ndlp->nlp_DID, old_state, state);
 
 	if (old_state == NLP_STE_NPR_NODE &&
-	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
 	    state != NLP_STE_NPR_NODE)
 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (old_state == NLP_STE_UNMAPPED_NODE) {
@@ -1805,8 +1826,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
 	spin_lock_irq(shost->host_lock);
@@ -1819,8 +1839,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 void
 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
@@ -2252,10 +2271,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	del_timer_sync(&ndlp->nlp_reauth_tmr);
 	del_timer_sync(&ndlp->nlp_initiator_tmr);
 
-	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
-		list_del_init(&ndlp->els_retry_evt.evt_listp);
-	if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
-		list_del_init(&ndlp->dev_loss_evt.evt_listp);
+	list_del_init(&ndlp->els_retry_evt.evt_listp);
+	list_del_init(&ndlp->dev_loss_evt.evt_listp);
 	if (!list_empty(&ndlp->els_reauth_evt.evt_listp))
 		list_del_init(&ndlp->els_reauth_evt.evt_listp);
 
@@ -2277,10 +2294,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	LPFC_MBOXQ_t *mbox;
 	int rc;
 
-	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
-	}
-
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
@@ -2450,8 +2464,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 			/* Since this node is marked for discovery,
 			 * delay timeout is not needed.
 			 */
-			if (ndlp->nlp_flag & NLP_DELAY_TMO)
-				lpfc_cancel_retry_delay_tmo(vport, ndlp);
+			lpfc_cancel_retry_delay_tmo(vport, ndlp);
 		} else
 			ndlp = NULL;
 	} else {
@@ -2775,21 +2788,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long flags = 0;
 
 	if (unlikely(!phba))
 		return;
 
-	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-		spin_lock_irqsave(&vport->work_port_lock, flags);
+	spin_lock_irqsave(&vport->work_port_lock, flags);
+	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_DISC_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, flags);
+	spin_unlock_irqrestore(&vport->work_port_lock, flags);
 
-		spin_lock_irqsave(&phba->hbalock, flags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -2968,7 +2980,7 @@ restart_disc:
 
 	default:
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
-				 "0229 Unexpected discovery timeout, "
+				 "0273 Unexpected discovery timeout, "
 				 "vport State x%x\n", vport->port_state);
 		break;
 	}
@@ -3181,8 +3193,7 @@ lpfc_dev_loss_delay(unsigned long ptr)
 	if (evtp->evt_arg1) {
 		evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
 		list_add_tail(&evtp->evt_listp, &phba->work_list);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
+		lpfc_worker_wake_up(phba);
 	}
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	return;
@@ -3214,6 +3225,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	INIT_LIST_HEAD(&ndlp->nlp_listp);
 	kref_init(&ndlp->kref);
 	NLP_INT_NODE_ACT(ndlp);
+	atomic_set(&ndlp->cmd_pending, 0);
+	ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
 		"node init:       did:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index e88dd79..8ef0f9c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,7 @@
 #define SLI3_IOCB_CMD_SIZE	128
 #define SLI3_IOCB_RSP_SIZE	64
 
+#define BUF_SZ_4K		4096
 
 /* Common Transport structures and definitions */
 
@@ -1131,6 +1132,8 @@ typedef struct {
 /* Start FireFly Register definitions */
 #define PCI_VENDOR_ID_EMULEX        0x10df
 #define PCI_DEVICE_ID_FIREFLY       0x1ae5
+#define PCI_DEVICE_ID_PROTEUS_VF    0xe100
+#define PCI_DEVICE_ID_PROTEUS_PF    0xe180
 #define PCI_DEVICE_ID_SAT_SMB       0xf011
 #define PCI_DEVICE_ID_SAT_MID       0xf015
 #define PCI_DEVICE_ID_RFLY          0xf095
@@ -1157,10 +1160,12 @@ typedef struct {
 #define PCI_DEVICE_ID_LP11000S      0xfc10
 #define PCI_DEVICE_ID_LPE11000S     0xfc20
 #define PCI_DEVICE_ID_SAT_S         0xfc40
+#define PCI_DEVICE_ID_PROTEUS_S     0xfc50
 #define PCI_DEVICE_ID_HELIOS        0xfd00
 #define PCI_DEVICE_ID_HELIOS_SCSP   0xfd11
 #define PCI_DEVICE_ID_HELIOS_DCSP   0xfd12
 #define PCI_DEVICE_ID_ZEPHYR        0xfe00
+#define PCI_DEVICE_ID_HORNET        0xfe05
 #define PCI_DEVICE_ID_ZEPHYR_SCSP   0xfe11
 #define PCI_DEVICE_ID_ZEPHYR_DCSP   0xfe12
 
@@ -1178,6 +1183,7 @@ typedef struct {
 #define ZEPHYR_JEDEC_ID             0x0577
 #define VIPER_JEDEC_ID              0x4838
 #define SATURN_JEDEC_ID             0x1004
+#define HORNET_JDEC_ID              0x2057706D
 
 #define JEDEC_ID_MASK               0x0FFFF000
 #define JEDEC_ID_SHIFT              12
@@ -1312,6 +1318,10 @@ typedef struct {		/* FireFly BIU registers */
 #define MBX_HEARTBEAT       0x31
 #define MBX_WRITE_VPARMS    0x32
 #define MBX_ASYNCEVT_ENABLE 0x33
+#define MBX_READ_EVENT_LOG_STATUS 0x37
+#define MBX_READ_EVENT_LOG  0x38
+#define MBX_WRITE_EVENT_LOG 0x39
+
 
 #define MBX_CONFIG_HBQ	    0x7C
 #define MBX_LOAD_AREA       0x81
@@ -1621,6 +1631,14 @@ typedef struct {
 	} un;
 } BIU_DIAG_VAR;
 
+/* Structure for MB command READ_EVENT_LOG (0x38) */
+typedef struct {
+	uint32_t rsvd1;
+	uint32_t offset;
+	struct ulp_bde64 rcv_bde64;
+}READ_EVENT_LOG_VAR;
+
+
 /* Structure for MB Command INIT_LINK (05) */
 
 typedef struct {
@@ -2226,7 +2244,10 @@ typedef struct {
 typedef struct {
 	uint32_t eventTag;	/* Event tag */
 #ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd1:22;
+	uint32_t rsvd1:19;
+	uint32_t fa:1;
+	uint32_t mm:1;		/* Menlo Maintenance mode enabled */
+	uint32_t rx:1;
 	uint32_t pb:1;
 	uint32_t il:1;
 	uint32_t attType:8;
@@ -2234,7 +2255,10 @@ typedef struct {
 	uint32_t attType:8;
 	uint32_t il:1;
 	uint32_t pb:1;
-	uint32_t rsvd1:22;
+	uint32_t rx:1;
+	uint32_t mm:1;
+	uint32_t fa:1;
+	uint32_t rsvd1:19;
 #endif
 
 #define AT_RESERVED    0x00	/* Reserved - attType */
@@ -2255,6 +2279,7 @@ typedef struct {
 
 #define TOPOLOGY_PT_PT 0x01	/* Topology is pt-pt / pt-fabric */
 #define TOPOLOGY_LOOP  0x02	/* Topology is FC-AL */
+#define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephyr to menlo */
 
 	union {
 		struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer
@@ -2709,7 +2734,7 @@ typedef struct {
 /* Union of all Mailbox Command types */
 #define MAILBOX_CMD_WSIZE	32
 #define MAILBOX_CMD_SIZE	(MAILBOX_CMD_WSIZE * sizeof(uint32_t))
-#define MAILBOX_EXT_WSIZE	256
+#define MAILBOX_EXT_WSIZE	512
 #define MAILBOX_EXT_SIZE	(MAILBOX_EXT_WSIZE * sizeof(uint32_t))
 #define MAILBOX_HBA_EXT_OFFSET  0x100
 #define MAILBOX_MAX_XMIT_SIZE   1024
@@ -2751,6 +2776,7 @@ typedef union {
 	REG_VPI_VAR varRegVpi;		/* cmd = 0x96 (REG_VPI) */
 	UNREG_VPI_VAR varUnregVpi;	/* cmd = 0x97 (UNREG_VPI) */
 	ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
+	READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38 (READ_EVENT_LOG) */
 } MAILVARIANTS;
 
 /*
@@ -3364,3 +3390,10 @@ lpfc_error_lost_link(IOCB_t *iocbp)
 		 iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
 		 iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
 }
+
+#define MENLO_TRANSPORT_TYPE 0xfe
+#define MENLO_CONTEXT 0
+#define MENLO_PU 3
+#define MENLO_TIMEOUT 30
+#define SETVAR_MLOMNT 0x103107
+#define SETVAR_MLORST 0x103007
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b730edb..e97148e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -109,6 +109,26 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
 			pci_release_region(pdev, i);
 }
 
+/*
+ * lpfc_hba_max_vpi - Get the maximum supported VPI for an HBA
+ * @device: The PCI device ID for this HBA
+ *
+ * Description:
+ * This routine will return the maximum supported VPI limit for each HBA. In
+ * most cases the maximum VPI limit will be 0xFFFF, which indicates that the
+ * driver supports whatever the HBA can support. In some cases the driver
+ * supports fewer VPIs than the HBA supports.
+ */
+static inline uint16_t
+lpfc_hba_max_vpi(unsigned short device)
+{
+	if ((device == PCI_DEVICE_ID_HELIOS) ||
+	    (device == PCI_DEVICE_ID_ZEPHYR))
+		return LPFC_INTR_VPI;
+	else
+		return LPFC_MAX_VPI;
+}
+
 /************************************************************************/
 /*                                                                      */
 /*    lpfc_config_port_prep                                             */
@@ -202,8 +222,10 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
 		return -ERESTART;
 	}
 
-	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
+	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
+		mempool_free(pmb, phba->mbox_mem_pool);
 		return -EINVAL;
+	}
 
 	/* Save information as VPD data */
 	vp->rev.rBit = 1;
@@ -463,7 +485,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	if (vport->cfg_enable_auth) {
 		if (lpfc_security_service_state == SECURITY_OFFLINE) {
 			lpfc_printf_log(vport->phba, KERN_ERR, LOG_SECURITY,
-				"1029 Authentication is enabled but "
+				"1000 Authentication is enabled but "
 				"authentication service is not running\n");
 			vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN;
 			phba->link_state = LPFC_HBA_ERROR;
@@ -480,7 +502,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 		lpfc_printf_log(phba,
 				KERN_ERR,
 				LOG_INIT,
-				"1031 Adapter failed to init, mbxCmd x%x "
+				"1001 Adapter failed to init, mbxCmd x%x "
 				"INIT_LINK, mbxStatus x%x\n",
 				mb->mbxCommand, mb->mbxStatus);
 
@@ -619,18 +641,18 @@ void
 lpfc_hb_timeout(unsigned long ptr)
 {
 	struct lpfc_hba *phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	phba = (struct lpfc_hba *)ptr;
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
-	if (!(phba->pport->work_port_events & WORKER_HB_TMO))
+	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
+	if (!tmo_posted)
 		phba->pport->work_port_events |= WORKER_HB_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -789,6 +811,10 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
 	struct temp_event temp_event_data;
 	struct Scsi_Host  *shost;
 
+	/* If the pci channel is offline, ignore possible errors,
+	 * since we cannot communicate with the pci card anyway. */
+	if (phba->pcidev->error_state != pci_channel_io_normal)
+		return;
 
 	/* If resets are disabled then leave the HBA alone and return */
 	if (!phba->cfg_enable_hba_reset)
@@ -834,7 +860,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
 		temp_event_data.data = (uint32_t)temperature;
 
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0459 Adapter maximum temperature exceeded "
+				"0406 Adapter maximum temperature exceeded "
 				"(%ld), taking this port offline "
 				"Data: x%x x%x x%x\n",
 				temperature, phba->work_hs,
@@ -916,6 +942,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
 	lpfc_read_la(phba, pmb, mp);
 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
 	pmb->vport = vport;
+	/* Block ELS IOCBs until we have processed this mbox command */
+	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
 	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
 		rc = 4;
@@ -931,6 +959,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
 	return;
 
 lpfc_handle_latt_free_mbuf:
+	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 lpfc_handle_latt_free_mp:
 	kfree(mp);
@@ -1106,6 +1135,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	lpfc_vpd_t *vp;
 	uint16_t dev_id = phba->pcidev->device;
 	int max_speed;
+	int GE = 0;
 	struct {
 		char * name;
 		int    max_speed;
@@ -1237,6 +1267,19 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	case PCI_DEVICE_ID_SAT_S:
 		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
 		break;
+	case PCI_DEVICE_ID_HORNET:
+		m = (typeof(m)){"LP21000", max_speed, "PCIe"};
+		GE = 1;
+		break;
+	case PCI_DEVICE_ID_PROTEUS_VF:
+		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+		break;
+	case PCI_DEVICE_ID_PROTEUS_PF:
+		m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+		break;
+	case PCI_DEVICE_ID_PROTEUS_S:
+		m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
+		break;
 	default:
 		m = (typeof(m)){ NULL };
 		break;
@@ -1246,8 +1289,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 		snprintf(mdp, 79,"%s", m.name);
 	if (descp && descp[0] == '\0')
 		snprintf(descp, 255,
-			 "Emulex %s %dGb %s Fibre Channel Adapter",
-			 m.name, m.max_speed, m.bus);
+			"Emulex %s %d%s %s %s",
+			m.name, m.max_speed,
+			(GE) ? "GE":"Gb",
+			m.bus,
+			(GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
 }
 
 /**************************************************/
@@ -1520,6 +1566,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
 
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					     NLP_EVT_DEVICE_RM);
+
 	}
 
 	/* At this point, ALL ndlp's should be gone
@@ -1535,7 +1582,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
 						&vport->fc_nodes, nlp_listp) {
 				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
 						LOG_NODE,
-						"0282: did:x%x ndlp:x%p "
+						"0282 did:x%x ndlp:x%p "
 						"usgmap:x%x refcnt:%d\n",
 						ndlp->nlp_DID, (void *)ndlp,
 						ndlp->nlp_usg_map,
@@ -2076,11 +2123,16 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 
 	phba->pcidev = pdev;
 
+	/* Workaround for driver backward compatibility with RHEL5.1 */
+	if (!pdev->error_state)
+		pdev->error_state = pci_channel_io_normal;
+
 	/* Assign an unused board number */
 	if ((phba->brd_no = lpfc_get_instance()) < 0)
 		goto out_free_phba;
 
 	INIT_LIST_HEAD(&phba->port_list);
+	init_waitqueue_head(&phba->wait_4_mlo_m_q);
 	/*
 	 * Get all the module params for configuring this host and then
 	 * establish the host.
@@ -2090,7 +2142,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	/* Check if we need to change the DMA length */
 	lpfc_setup_max_dma_length(phba);
 
-	phba->max_vpi = LPFC_MAX_VPI;
+	phba->max_vpi = lpfc_hba_max_vpi(phba->pcidev->device);
 
 	/* Initialize timers used by driver */
 	init_timer(&phba->hb_tmofunc);
@@ -2193,7 +2245,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 		if (iocbq_entry == NULL) {
 			printk(KERN_ERR "%s: only allocated %d iocbs of "
 				"expected %d count. Unloading driver.\n",
-				__FUNCTION__, i, LPFC_IOCB_LIST_CNT);
+				__func__, i, LPFC_IOCB_LIST_CNT);
 			error = -ENOMEM;
 			goto out_free_iocbq;
 		}
@@ -2203,7 +2255,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 			kfree (iocbq_entry);
 			printk(KERN_ERR "%s: failed to allocate IOTAG. "
 			       "Unloading driver.\n",
-				__FUNCTION__);
+				__func__);
 			error = -ENOMEM;
 			goto out_free_iocbq;
 		}
@@ -2224,6 +2276,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
 
+	/* Initialize the wait queue head for the kernel thread */
+	init_waitqueue_head(&phba->work_waitq);
+
 	/* Startup the kernel thread for this host adapter. */
 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
 				       "lpfc_worker_%d", phba->brd_no);
@@ -2244,6 +2299,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 
 	/* Initialize list of sysfs mailbox commands */
 	INIT_LIST_HEAD(&phba->sysfs_mbox_list);
+	/* Initialize list of sysfs menlo commands */
+	INIT_LIST_HEAD(&phba->sysfs_menlo_list);
 
 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 	if (!vport)
@@ -2415,6 +2472,15 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	lpfcdfc_host_del(phba->dfc_host);
 	phba->dfc_host = NULL;
 
+	/* In case PCI channel permanently disabled, rescan SCSI devices
+	 * to force all the SCSI hosts devloss timeout for terminating any
+	 * on-going SCSI sessions and user processes properly before
+	 * proceeding to fc_remove_host() and scsi_remove_host(), which
+	 * do not clean up on-going SCSI sessions and user processes.
+	 */
+	if (pdev->error_state == pci_channel_io_perm_failure)
+		lpfc_scsi_dev_rescan(phba);
+
 	spin_lock_irq(&phba->hbalock);
 	vport->load_flag |= FC_UNLOADING;
 	spin_unlock_irq(&phba->hbalock);
@@ -2504,8 +2570,15 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring  *pring;
 
-	if (state == pci_channel_io_perm_failure)
+	if (state == pci_channel_io_perm_failure) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0472 PCI channel I/O permanent failure\n");
+		/* Block all SCSI devices' I/Os on the host */
+		lpfc_scsi_dev_block(phba);
+		/* Clean up all driver's outstanding SCSI I/Os */
+		lpfc_sli_flush_fcp_rings(phba);
 		return PCI_ERS_RESULT_DISCONNECT;
+	}
 
 	pci_disable_device(pdev);
 	/*
@@ -2544,9 +2617,6 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
 
 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
 
-	/* Workaround on core EEH code not settng pdev->error_state properly */
-	pdev->error_state = pci_channel_io_normal;
-
 	if (pci_enable_device_bars(pdev, bars)) {
 		printk(KERN_ERR "lpfc: Cannot re-enable "
 			"PCI device after reset.\n");
@@ -2649,6 +2719,8 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
@@ -2679,6 +2751,12 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 }
 };
 
diff --git a/drivers/scsi/lpfc/lpfc_ioctl.c b/drivers/scsi/lpfc/lpfc_ioctl.c
index e344708..479ccde 100644
--- a/drivers/scsi/lpfc/lpfc_ioctl.c
+++ b/drivers/scsi/lpfc/lpfc_ioctl.c
@@ -96,20 +96,6 @@ struct lpfcdfc_host {
 };
 
 
-struct lpfc_timedout_iocb_ctxt {
-	struct lpfc_iocbq *rspiocbq;
-	struct lpfc_dmabuf *mp;
-	struct lpfc_dmabuf *bmp;
-	struct lpfc_scsi_buf *lpfc_cmd;
-	struct lpfc_dmabufext *outdmp;
-	struct lpfc_dmabufext *indmp;
-};
-
-struct lpfc_dmabufext {
-	struct lpfc_dmabuf dma;
-	uint32_t size;
-	uint32_t flag;
-};
 
 
 static void lpfc_ioctl_timeout_iocb_cmpl(struct lpfc_hba *,
@@ -536,7 +522,7 @@ lpfc_ioctl_send_mgmt_rsp(struct lpfc_hba * phba,
 	int rc = 0;
 	unsigned long iflag;
 
-	if (!reqbfrcnt || (reqbfrcnt > (80 * 4096))) {
+	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
 		rc = ERANGE;
 		return rc;
 	}
@@ -608,7 +594,8 @@ lpfc_ioctl_send_mgmt_cmd(struct lpfc_hba * phba,
 	reqbfrcnt = cip->lpfc_arg4;
 	snsbfrcnt = cip->lpfc_arg5;
 
-	if (!reqbfrcnt || !snsbfrcnt || (reqbfrcnt + snsbfrcnt > 80 * 4096)) {
+	if (!reqbfrcnt || !snsbfrcnt
+		|| (reqbfrcnt + snsbfrcnt > 80 * BUF_SZ_4K)) {
 		rc = ERANGE;
 		goto send_mgmt_cmd_exit;
 	}
@@ -829,7 +816,7 @@ lpfc_ioctl_send_mgmt_cmd(struct lpfc_hba * phba,
 		rc = ERANGE;
 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 				"1209 C_CT Request error Data: x%x x%x\n",
-				outdmp->flag, 4096);
+				outdmp->flag, BUF_SZ_4K);
 		goto send_mgmt_cmd_free_outdmp;
 	}
 
@@ -1328,7 +1315,7 @@ static int lpfcdfc_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
 	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
 	ctreq->FsSubType = 0;
 	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
-	ctreq->CommandResponse.bits.Size   = 0;
+	ctreq->CommandResponse.bits.Size = 0;
 
 
 	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
@@ -1550,7 +1537,7 @@ lpfc_ioctl_loopback_test(struct lpfc_hba *phba,
 	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE))
 		return EACCES;
 
-	if ((size == 0) || (size > 80 * 4096))
+	if ((size == 0) || (size > 80 * BUF_SZ_4K))
 		return  ERANGE;
 
 	mutex_lock(&lpfcdfc_lock);
@@ -1634,6 +1621,7 @@ lpfc_ioctl_loopback_test(struct lpfc_hba *phba,
 				    + current_offset,
 				    segment_len - segment_offset)) {
 			rc = EIO;
+			list_del(&head);
 			goto err_loopback_test_exit;
 		}
 
@@ -1750,8 +1738,8 @@ dfc_rsp_data_copy(struct lpfc_hba * phba,
 			break;
 
 		/* We copy chucks of 4K */
-		if (size > 4096)
-			cnt = 4096;
+		if (size > BUF_SZ_4K)
+			cnt = BUF_SZ_4K;
 		else
 			cnt = size;
 
@@ -2029,9 +2017,10 @@ error_unsol_ct_exit:
 }
 
 
-static struct lpfc_dmabufext *
-dfc_cmd_data_alloc(struct lpfc_hba * phba,
-		   char *indataptr, struct ulp_bde64 * bpl, uint32_t size)
+struct lpfc_dmabufext *
+__dfc_cmd_data_alloc(struct lpfc_hba * phba,
+		   char *indataptr, struct ulp_bde64 * bpl, uint32_t size,
+		   int nocopydata)
 {
 	struct lpfc_dmabufext *mlist = NULL;
 	struct lpfc_dmabufext *dmp;
@@ -2041,9 +2030,9 @@ dfc_cmd_data_alloc(struct lpfc_hba * phba,
 	pcidev = phba->pcidev;
 
 	while (size) {
-		/* We get chucks of 4K */
-		if (size > 4096)
-			cnt = 4096;
+		/* We get chunks of 4K */
+		if (size > BUF_SZ_4K)
+			cnt = BUF_SZ_4K;
 		else
 			cnt = size;
 
@@ -2071,13 +2060,14 @@ dfc_cmd_data_alloc(struct lpfc_hba * phba,
 
 		dmp->size = cnt;
 
-		if (indataptr) {
-			/* Copy data from user space in */
-			if (copy_from_user
-			    ((uint8_t *) dmp->dma.virt,
-			     (void __user *) (indataptr + offset), cnt)) {
-				goto out;
-			}
+		if (indataptr || nocopydata) {
+			if (indataptr)
+				/* Copy data from user space in */
+				if (copy_from_user ((uint8_t *) dmp->dma.virt,
+					(void __user *) (indataptr + offset),
+					cnt)) {
+					goto out;
+				}
 			bpl->tus.f.bdeFlags = 0;
 
 			pci_dma_sync_single_for_device(phba->pcidev,
@@ -2107,6 +2097,20 @@ out:
 	return NULL;
 }
 
+static struct lpfc_dmabufext *
+dfc_cmd_data_alloc(struct lpfc_hba * phba,
+		   char *indataptr, struct ulp_bde64 * bpl, uint32_t size)
+{
+	/* if indataptr is null it is a rsp buffer. */
+	return __dfc_cmd_data_alloc(phba, indataptr, bpl, size,
+					0 /* don't copy user data */);
+}
+
+int
+__dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist)
+{
+	return dfc_cmd_data_free(phba, mlist);
+}
 static int
 dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist)
 {
@@ -2297,7 +2301,7 @@ lpfcdfc_do_ioctl(struct lpfcCmdInput *cip)
 	if (dfchba)
 		dfchba->ref_count++;
 	mutex_unlock(&lpfcdfc_lock);
-	if (cip->lpfc_outsz >= 4096) {
+	if (cip->lpfc_outsz >= BUF_SZ_4K) {
 
 		/*
 		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
@@ -2312,7 +2316,7 @@ lpfcdfc_do_ioctl(struct lpfcCmdInput *cip)
 			total_mem = 64 * 1024;
 	} else {
 		/* Allocate memory for ioctl data */
-		total_mem = 4096;
+		total_mem = BUF_SZ_4K;
 	}
 
 	/*
@@ -2502,7 +2506,7 @@ lpfc_cdev_init(void)
 	lpfcdfc_major = register_chrdev(0,  LPFC_CHAR_DEV_NAME, &lpfc_fops);
 	if (lpfcdfc_major < 0) {
 		printk(KERN_ERR "%s:%d Unable to register \"%s\" device.\n",
-				__FUNCTION__, __LINE__, LPFC_CHAR_DEV_NAME);
+		       __func__, __LINE__, LPFC_CHAR_DEV_NAME);
 		return lpfcdfc_major;
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_menlo.c b/drivers/scsi/lpfc/lpfc_menlo.c
new file mode 100644
index 0000000..cb30091
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_menlo.c
@@ -0,0 +1,1176 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2007-2008 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_auth_access.h"
+
+#define MENLO_CMD_FW_DOWNLOAD                   0x00000002
+
+static void lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *,
+			struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+extern int
+__dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist);
+
+extern struct lpfc_dmabufext *
+__dfc_cmd_data_alloc(struct lpfc_hba * phba,
+		   char *indataptr, struct ulp_bde64 * bpl, uint32_t size,
+		   int nocopydata);
+/*
+ * The size for the menlo interface is set at 336k because it only uses
+ * one bpl. A bpl can contain 85 BDE descriptors. Each BDE can represent
+ * up to 4k. I used 84 BDE entries to do this calculation because the
+ * 1st sysfs_menlo_write is for just the cmd header which is 12 bytes.
+ * size = PAGE_SZ * (sizeof(bpl) / sizeof(BDE)) -1;
+ */
+#define SYSFS_MENLO_ATTR_SIZE 344064
+typedef struct menlo_get_cmd
+{
+	uint32_t code;          /* Command code */
+	uint32_t context;       /* Context */
+	uint32_t length;        /* Max response length */
+} menlo_get_cmd_t;
+
+typedef struct menlo_init_rsp
+{
+	uint32_t code;
+	uint32_t bb_credit;     /* Menlo FC BB Credit */
+	uint32_t frame_size;    /* Menlo FC receive frame size */
+	uint32_t fw_version;    /* Menlo firmware version   */
+	uint32_t reset_status;  /* Reason for previous reset */
+
+#define MENLO_RESET_STATUS_NORMAL               0
+#define MENLO_RESET_STATUS_PANIC                1
+
+	uint32_t maint_status;  /* Menlo Maintenance Mode status at link up */
+
+
+#define MENLO_MAINTENANCE_MODE_DISABLE  0
+#define MENLO_MAINTENANCE_MODE_ENABLE   1
+	uint32_t fw_type;
+	uint32_t fru_data_valid; /* 0=invalid, 1=valid */
+} menlo_init_rsp_t;
+
+#define MENLO_CMD_GET_INIT 0x00000007
+#define MENLO_FW_TYPE_OPERATIONAL 0xABCD0001
+#define MENLO_FW_TYPE_GOLDEN    0xABCD0002
+#define MENLO_FW_TYPE_DIAG      0xABCD0003
+
+void
+BE_swap32_buffer(void *srcp, uint32_t cnt)
+{
+	uint32_t *src = srcp;
+	uint32_t *dest = srcp;
+	uint32_t ldata;
+	int i;
+
+	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
+		ldata = *src;
+		ldata = cpu_to_le32(ldata);
+		*dest = ldata;
+		src++;
+		dest++;
+	}
+}
+
+
+static int
+lpfc_alloc_menlo_genrequest64(struct lpfc_hba * phba,
+			struct lpfc_menlo_genreq64 *sysfs_menlo,
+			struct lpfc_sysfs_menlo_hdr *cmdhdr)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
+	struct ulp_bde64 *bpl = NULL;
+	IOCB_t *cmd = NULL, *rsp = NULL;
+	struct lpfc_sli *psli = NULL;
+	struct lpfc_sli_ring *pring = NULL;
+	int rc = 0;
+	uint32_t cmdsize;
+	uint32_t rspsize;
+
+	psli = &phba->sli;
+	pring = &psli->ring[LPFC_ELS_RING];
+
+	if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
+		rc = EACCES;
+		goto send_menlomgmt_cmd_exit;
+	}
+
+	if (!sysfs_menlo) {
+		rc = EINVAL;
+		goto send_menlomgmt_cmd_exit;
+	}
+
+	cmdsize = cmdhdr->cmdsize;
+	rspsize = cmdhdr->rspsize;
+
+	if (!cmdsize || !rspsize || (cmdsize + rspsize > 80 * BUF_SZ_4K)) {
+		rc = ERANGE;
+		goto send_menlomgmt_cmd_exit;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	sysfs_menlo->cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!sysfs_menlo->cmdiocbq) {
+		rc = ENOMEM;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1202 alloc_menlo_genreq64: couldn't alloc cmdiocbq\n");
+		goto send_menlomgmt_cmd_exit;
+	}
+	cmd = &sysfs_menlo->cmdiocbq->iocb;
+
+	sysfs_menlo->rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!sysfs_menlo->rspiocbq) {
+		rc = ENOMEM;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1203 alloc_menlo_genreq64: couldn't alloc rspiocbq\n");
+		goto send_menlomgmt_cmd_exit;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	rsp = &sysfs_menlo->rspiocbq->iocb;
+
+
+	sysfs_menlo->bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (!sysfs_menlo->bmp) {
+		rc = ENOMEM;
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1204 alloc_menlo_genreq64: couldn't alloc bmp\n");
+		goto send_menlomgmt_cmd_exit;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	sysfs_menlo->bmp->virt = lpfc_mbuf_alloc(phba, 0,
+					&sysfs_menlo->bmp->phys);
+	if (!sysfs_menlo->bmp->virt) {
+		rc = ENOMEM;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1205 alloc_menlo_genreq64: couldn't alloc bpl\n");
+		goto send_menlomgmt_cmd_exit;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	INIT_LIST_HEAD(&sysfs_menlo->bmp->list);
+	bpl = (struct ulp_bde64 *) sysfs_menlo->bmp->virt;
+	memset((uint8_t*)bpl, 0 , 1024);
+	sysfs_menlo->indmp = __dfc_cmd_data_alloc(phba, NULL, bpl, cmdsize, 1);
+	if (!sysfs_menlo->indmp) {
+		rc = ENOMEM;
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1206 alloc_menlo_genreq64: couldn't alloc cmdbuf\n");
+		goto send_menlomgmt_cmd_exit;
+	}
+	sysfs_menlo->cmdbpl = bpl;
+	INIT_LIST_HEAD(&sysfs_menlo->inhead);
+	list_add_tail(&sysfs_menlo->inhead, &sysfs_menlo->indmp->dma.list);
+
+	/* flag contains total number of BPLs for xmit */
+
+	bpl += sysfs_menlo->indmp->flag;
+
+	sysfs_menlo->outdmp = __dfc_cmd_data_alloc(phba, NULL, bpl, rspsize, 0);
+	if (!sysfs_menlo->outdmp) {
+		rc = ENOMEM;
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1207 alloc_menlo_genreq64: couldn't alloc rspbuf\n");
+		goto send_menlomgmt_cmd_exit;
+	}
+	INIT_LIST_HEAD(&sysfs_menlo->outhead);
+	list_add_tail(&sysfs_menlo->outhead, &sysfs_menlo->outdmp->dma.list);
+
+	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(sysfs_menlo->bmp->phys);
+	cmd->un.genreq64.bdl.addrLow = putPaddrLow(sysfs_menlo->bmp->phys);
+	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+	cmd->un.genreq64.bdl.bdeSize =
+	    (sysfs_menlo->outdmp->flag + sysfs_menlo->indmp->flag)
+		* sizeof(struct ulp_bde64);
+	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_FCP_CMND;
+	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
+	cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
+	cmd->ulpBdeCount = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpContext = MENLO_CONTEXT; /* 0 */
+	cmd->ulpOwner = OWN_CHIP;
+	cmd->ulpPU = MENLO_PU; /* 3 */
+	cmd->ulpLe = 1; /* LE bit: last element of the BDE chain */
+	sysfs_menlo->cmdiocbq->vport = phba->pport;
+	sysfs_menlo->cmdiocbq->context1 = NULL;
+	sysfs_menlo->cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	/* We want the firmware to timeout before we do */
+	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
+
+	sysfs_menlo->timeout = cmd->ulpTimeout;
+
+send_menlomgmt_cmd_exit:
+	return rc;
+}
+
+void
+sysfs_menlo_genreq_free(struct lpfc_hba *phba,
+		struct lpfc_menlo_genreq64 *sysfs_menlo)
+{
+	if ( !list_empty(&sysfs_menlo->outhead))
+		list_del_init( &sysfs_menlo->outhead);
+
+	if (!list_empty(&sysfs_menlo->inhead))
+		list_del_init( &sysfs_menlo->inhead);
+
+	if (sysfs_menlo->outdmp) {
+		__dfc_cmd_data_free(phba, sysfs_menlo->outdmp);
+		sysfs_menlo->outdmp = NULL;
+	}
+	if (sysfs_menlo->indmp) {
+		__dfc_cmd_data_free(phba, sysfs_menlo->indmp);
+		sysfs_menlo->indmp = NULL;
+	}
+	if (sysfs_menlo->bmp) {
+		lpfc_mbuf_free(phba, sysfs_menlo->bmp->virt,
+				sysfs_menlo->bmp->phys);
+		kfree(sysfs_menlo->bmp);
+		sysfs_menlo->bmp = NULL;
+	}
+	if (sysfs_menlo->rspiocbq) {
+		lpfc_sli_release_iocbq(phba, sysfs_menlo->rspiocbq);
+		sysfs_menlo->rspiocbq = NULL;
+	}
+
+	if (sysfs_menlo->cmdiocbq) {
+		lpfc_sli_release_iocbq(phba, sysfs_menlo->cmdiocbq);
+		sysfs_menlo->cmdiocbq = NULL;
+	}
+}
+
+static void
+sysfs_menlo_idle(struct lpfc_hba *phba,
+		struct lpfc_sysfs_menlo *sysfs_menlo)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
+
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&sysfs_menlo->list);
+	spin_unlock_irq(&phba->hbalock);
+	spin_lock_irq(shost->host_lock);
+
+	if (sysfs_menlo->cr.cmdiocbq)
+		sysfs_menlo_genreq_free(phba, &sysfs_menlo->cr);
+	if (sysfs_menlo->cx.cmdiocbq)
+		sysfs_menlo_genreq_free(phba, &sysfs_menlo->cx);
+
+	spin_unlock_irq(shost->host_lock);
+	kfree(sysfs_menlo);
+}
+
+static void
+lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *phba,
+					struct lpfc_iocbq *cmdq,
+					struct lpfc_iocbq *rspq)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+		"1241 Menlo IOCB timeout: deleting %p\n",
+		cmdq->context3);
+	sysfs_menlo_idle(phba, (struct lpfc_sysfs_menlo *)cmdq->context3);
+}
+
+static void
+lpfc_menlo_iocb_cmpl(struct lpfc_hba *phba,
+					struct lpfc_iocbq *cmdq,
+					struct lpfc_iocbq *rspq)
+{
+	struct lpfc_sysfs_menlo * sysfs_menlo =
+		(struct lpfc_sysfs_menlo *)cmdq->context2;
+	struct lpfc_dmabufext *mlast = NULL;
+	IOCB_t *rsp = NULL;
+	IOCB_t *cmd = NULL;
+	uint32_t * tmpptr = NULL;
+	menlo_init_rsp_t *mlorsp = NULL;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+		"1254 Menlo IOCB complete: %p\n",
+		cmdq->context2);
+	rsp = &rspq->iocb;
+	cmd = &cmdq->iocb;
+	if ( !sysfs_menlo ) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1255 Menlo IOCB complete:NULL CTX \n");
+		return;
+	}
+	if ( rsp->ulpStatus ) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1242 iocb async cmpl: ulpStatus 0x%x "
+			"ulpWord[4] 0x%x\n",
+			rsp->ulpStatus, rsp->un.ulpWord[4]);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1260 cr:%.08x %.08x %.08x %.08x "
+			"%.08x %.08x %.08x %.08x\n",
+			cmd->un.ulpWord[0], cmd->un.ulpWord[1],
+			cmd->un.ulpWord[2], cmd->un.ulpWord[3],
+			cmd->un.ulpWord[4], cmd->un.ulpWord[5],
+			cmd->un.ulpWord[6], cmd->un.ulpWord[7]);
+		mlast = list_get_first(&sysfs_menlo->cr.inhead,
+				struct lpfc_dmabufext,
+				dma.list);
+		if (!mlast) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"1231 bad bpl:\n");
+			goto lpfc_menlo_iocb_cmpl_ext;
+		}
+		tmpptr = ( uint32_t *) mlast->dma.virt;
+		BE_swap32_buffer ((uint8_t *) tmpptr,
+			sizeof( menlo_get_cmd_t));
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1261 cmd:%.08x %.08x %.08x\n",
+			*tmpptr, *(tmpptr+1), *(tmpptr+2));
+		goto lpfc_menlo_iocb_cmpl_ext;
+	}
+
+	mlast = list_get_first(&sysfs_menlo->cr.outhead,
+				struct lpfc_dmabufext,
+				dma.list);
+	if (!mlast) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1256 bad bpl:\n");
+		goto lpfc_menlo_iocb_cmpl_ext;
+	}
+	mlorsp = ( menlo_init_rsp_t *) mlast->dma.virt;
+	BE_swap32_buffer ((uint8_t *) mlorsp,
+		sizeof( menlo_init_rsp_t));
+
+	if (mlorsp->code != 0) {
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+			"1243 Menlo command error. code=%d.\n", mlorsp->code);
+		goto lpfc_menlo_iocb_cmpl_ext;
+
+	}
+
+	switch (mlorsp->fw_type)
+	{
+	case MENLO_FW_TYPE_OPERATIONAL:	/* Menlo Operational */
+		break;
+	case MENLO_FW_TYPE_GOLDEN:	/* Menlo Golden */
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+			"1246 FCoE chip is running golden firmware. "
+			"Update FCoE chip firmware immediately %x\n",
+			mlorsp->fw_type);
+		break;
+	case MENLO_FW_TYPE_DIAG:	/* Menlo Diag */
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+			"1247 FCoE chip is running diagnostic "
+			"firmware. Operational use suspended. %x\n",
+			mlorsp->fw_type);
+		break;
+	default:
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+			"1248 FCoE chip is running unknown "
+			"firmware x%x.\n", mlorsp->fw_type);
+		break;
+	}
+	if (!mlorsp->fru_data_valid
+		&& (mlorsp->fw_type == MENLO_FW_TYPE_OPERATIONAL)
+		&& (!mlorsp->maint_status))
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+			"1249 Invalid FRU data found on adapter."
+			"Return adapter to Emulex for repair\n");
+
+lpfc_menlo_iocb_cmpl_ext:
+	sysfs_menlo_idle(phba, (struct lpfc_sysfs_menlo *)cmdq->context2);
+}
+
+static struct lpfc_sysfs_menlo *
+lpfc_get_sysfs_menlo(struct lpfc_hba *phba, uint8_t create)
+{
+	struct lpfc_sysfs_menlo *sysfs_menlo;
+	pid_t pid;
+
+	pid = current->pid;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(sysfs_menlo, &phba->sysfs_menlo_list, list) {
+		if (sysfs_menlo->pid == pid) {
+			spin_unlock_irq(&phba->hbalock);
+			return sysfs_menlo;
+		}
+	}
+	if (!create) {
+		spin_unlock_irq(&phba->hbalock);
+		return NULL;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	sysfs_menlo = kzalloc(sizeof(struct lpfc_sysfs_menlo),
+			GFP_KERNEL);
+	if (!sysfs_menlo)
+		return NULL;
+	sysfs_menlo->state = SMENLO_IDLE;
+	sysfs_menlo->pid = pid;
+	spin_lock_irq(&phba->hbalock);
+	list_add_tail(&sysfs_menlo->list, &phba->sysfs_menlo_list);
+
+	spin_unlock_irq(&phba->hbalock);
+	return sysfs_menlo;
+
+}
+
+static ssize_t
+lpfc_menlo_write(struct lpfc_hba *phba,
+		 char *buf, loff_t off, size_t count)
+{
+	struct lpfc_sysfs_menlo *sysfs_menlo;
+	struct lpfc_dmabufext *mlast = NULL;
+	struct lpfc_sysfs_menlo_hdr cmdhdrCR;
+	struct lpfc_menlo_genreq64 *genreq = NULL;
+	loff_t temp_off = 0;
+	struct ulp_bde64 *bpl = NULL;
+	int mlastcnt = 0;
+	uint32_t * tmpptr = NULL;
+	uint32_t addr_high = 0;
+	uint32_t addr_low = 0;
+	int hdr_offset = sizeof(struct lpfc_sysfs_menlo_hdr);
+
+	if (off % 4 ||  count % 4 || (unsigned long)buf % 4)
+		return -EINVAL;
+
+	if (count == 0)
+		return 0;
+
+	if (off == 0) {
+		ssize_t rc;
+		struct lpfc_sysfs_menlo_hdr *cmdhdr =
+			(struct lpfc_sysfs_menlo_hdr *)buf;
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1208 menlo_write: cmd %x cmdsz %d rspsz %d\n",
+				cmdhdr->cmd, cmdhdr->cmdsize,
+				cmdhdr->rspsize);
+		if (count != sizeof(struct lpfc_sysfs_menlo_hdr)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"1210 Invalid cmd size: cmd %x "
+				"cmdsz %d rspsz %d\n",
+				cmdhdr->cmd, cmdhdr->cmdsize,
+				cmdhdr->rspsize);
+			return -EINVAL;
+		}
+
+		sysfs_menlo = lpfc_get_sysfs_menlo(phba, 1);
+		if (!sysfs_menlo)
+			return -ENOMEM;
+		sysfs_menlo->cmdhdr = *cmdhdr;
+		if (cmdhdr->cmd == MENLO_CMD_FW_DOWNLOAD) {
+			sysfs_menlo->cmdhdr.cmdsize
+				-= sizeof(struct lpfc_sysfs_menlo_hdr);
+
+			rc = lpfc_alloc_menlo_genrequest64(phba,
+					&sysfs_menlo->cx,
+					&sysfs_menlo->cmdhdr);
+			if (rc != 0) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"1211 genreq alloc failed: %d\n",
+					(int) rc);
+				sysfs_menlo_idle(phba,sysfs_menlo);
+				return -ENOMEM;
+			}
+			cmdhdrCR.cmd = cmdhdr->cmd;
+			cmdhdrCR.cmdsize = sizeof(struct lpfc_sysfs_menlo_hdr);
+			cmdhdrCR.rspsize = 4;
+		} else
+			cmdhdrCR = *cmdhdr;
+
+		rc = lpfc_alloc_menlo_genrequest64(phba,
+				&sysfs_menlo->cr,&cmdhdrCR);
+		if (rc != 0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"1223 menlo_write: couldn't alloc genreq %d\n",
+				(int) rc);
+			sysfs_menlo_idle(phba,sysfs_menlo);
+			return -ENOMEM;
+		}
+	} else {
+		sysfs_menlo = lpfc_get_sysfs_menlo(phba, 0);
+		if (!sysfs_menlo)
+			return -EAGAIN;
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1212 menlo_write: sysfs_menlo %p cmd %x cmdsz %d"
+			" rspsz %d cr-off %d cx-off %d count %d\n",
+			sysfs_menlo,
+			sysfs_menlo->cmdhdr.cmd,
+			sysfs_menlo->cmdhdr.cmdsize,
+			sysfs_menlo->cmdhdr.rspsize,
+			(int)sysfs_menlo->cr.offset,
+			(int)sysfs_menlo->cx.offset,
+			(int)count);
+	}
+
+	if ((count + sysfs_menlo->cr.offset) > sysfs_menlo->cmdhdr.cmdsize) {
+		if ( sysfs_menlo->cmdhdr.cmdsize != 4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+			"1213 FCoE cmd overflow: off %d + cnt %d > cmdsz %d\n",
+			(int)sysfs_menlo->cr.offset,
+			(int)count,
+			(int)sysfs_menlo->cmdhdr.cmdsize);
+		sysfs_menlo_idle(phba, sysfs_menlo);
+		return -ERANGE;
+		}
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD)
+		genreq = &sysfs_menlo->cx;
+	else
+		genreq = &sysfs_menlo->cr;
+
+	if (off == 0) {
+		if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) {
+			tmpptr = NULL;
+			genreq = &sysfs_menlo->cr;
+
+			if (!mlast) {
+			 mlast = list_get_first(&genreq->inhead,
+						struct lpfc_dmabufext,
+						dma.list);
+			}
+			if (mlast) {
+				bpl = genreq->cmdbpl;
+				memcpy((uint8_t *) mlast->dma.virt, buf, count);
+				genreq->offset += count;
+				tmpptr = (uint32_t *)mlast->dma.virt;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"1258 cmd %x cmdsz %d rspsz %d "
+					"copied %d addrL:%x addrH:%x\n",
+					*tmpptr,
+					*(tmpptr+1),
+					*(tmpptr+2),
+					(int)count,
+					bpl->addrLow,bpl->addrHigh);
+			} else {
+				lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"1230 Could not find buffer for FCoE"
+					" cmd:off %d indmp %p %d\n", (int)off,
+					genreq->indmp,(int)count);
+			}
+		}
+
+		sysfs_menlo->state = SMENLO_WRITING;
+		spin_unlock_irq(&phba->hbalock);
+		return count;
+	} else {
+		ssize_t adj_off = off - sizeof(struct lpfc_sysfs_menlo_hdr);
+		int found = 0;
+		if (sysfs_menlo->state  != SMENLO_WRITING ||
+		    genreq->offset != adj_off) {
+			spin_unlock_irq(&phba->hbalock);
+			sysfs_menlo_idle(phba, sysfs_menlo);
+			return -EAGAIN;
+		}
+		mlast = NULL;
+		temp_off = sizeof(struct lpfc_sysfs_menlo_hdr);
+		if (genreq->indmp) {
+			list_for_each_entry(mlast,
+				&genreq->inhead, dma.list) {
+				if (temp_off == off)
+					break;
+				else
+					temp_off += BUF_SZ_4K;
+				mlastcnt++;
+			}
+		}
+		addr_low = le32_to_cpu( putPaddrLow(mlast->dma.phys) );
+		addr_high = le32_to_cpu( putPaddrHigh(mlast->dma.phys) );
+		bpl = genreq->cmdbpl;
+		bpl += mlastcnt;
+		if (bpl->addrLow != addr_low ||  bpl->addrHigh != addr_high) {
+			mlast = NULL;
+			list_for_each_entry(mlast,
+				&genreq->inhead, dma.list) {
+
+				addr_low = le32_to_cpu(
+					putPaddrLow(mlast->dma.phys) );
+				addr_high = le32_to_cpu(
+					putPaddrHigh(mlast->dma.phys) );
+				if (bpl->addrLow == addr_low
+					&&  bpl->addrHigh == addr_high) {
+					found = 1;
+					break;
+				}
+			if ( mlastcnt < 3 )
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"1234 menlo_write: off:%d "
+					" mlastcnt:%d addl:%x addl:%x "
+					" addrh:%x addrh:%x mlast:%p\n",
+					(int)genreq->offset,
+					mlastcnt,
+					bpl->addrLow,
+					addr_low,
+					bpl->addrHigh,
+					addr_high,mlast);
+			}
+		} else
+			found = 1;
+
+		if (!found) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"1235 Could not find buffer for FCoE"
+				" cmd: off:%d  poff:%d cnt:%d"
+				" mlastcnt:%d addl:%x addh:%x mdsz:%d \n",
+				(int)genreq->offset,
+				(int)off,
+				(int)count,
+				mlastcnt,
+				bpl->addrLow,
+				bpl->addrHigh,
+				(int)sysfs_menlo->cmdhdr.cmdsize);
+			mlast = NULL;
+		}
+
+	}
+
+	if (mlast) {
+		if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD ) {
+			bpl = genreq->cmdbpl;
+			bpl += mlastcnt;
+			tmpptr = (uint32_t *)mlast->dma.virt;
+			if ( genreq->offset < hdr_offset ) {
+				memcpy((uint8_t *) mlast->dma.virt,
+					 buf+hdr_offset,
+					 count-hdr_offset);
+				bpl->tus.f.bdeSize = (ushort)count-hdr_offset;
+				mlast->size = (ushort)count-hdr_offset;
+				bpl->tus.f.bdeFlags = 0;
+				bpl->tus.w = le32_to_cpu(bpl->tus.w);
+			} else {
+				bpl->tus.f.bdeSize = (ushort)count;
+				mlast->size = (ushort)count;
+				bpl->tus.f.bdeFlags = 0;
+				bpl->tus.w = le32_to_cpu(bpl->tus.w);
+				memcpy((uint8_t *) mlast->dma.virt, buf, count);
+			}
+
+		} else
+			memcpy((uint8_t *) mlast->dma.virt, buf, count);
+
+		if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD
+			&& genreq->offset < hdr_offset) {
+			if (sysfs_menlo->cr.indmp
+				&& sysfs_menlo->cr.indmp->dma.virt) {
+				mlast = sysfs_menlo->cr.indmp;
+				memcpy((uint8_t *) mlast->dma.virt,
+					buf, hdr_offset);
+				tmpptr = (uint32_t *)mlast->dma.virt;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"1237 cmd %x cmd1 %x cmd2 %x "
+					"copied %d\n",
+					*tmpptr,
+					*(tmpptr+1),
+					*(tmpptr+2),
+					hdr_offset);
+			}
+		}
+		genreq->offset += count;
+	} else {
+		spin_unlock_irq(&phba->hbalock);
+		sysfs_menlo_idle(phba,sysfs_menlo);
+		return -ERANGE;
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	return count;
+
+}
+
+
+static ssize_t
+sysfs_menlo_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+	struct class_device *cdev = container_of(kobj, struct class_device,
+						 kobj);
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return lpfc_menlo_write(phba, buf, off, count);
+}
+
+
+static ssize_t
+sysfs_menlo_issue_iocb_wait(struct lpfc_hba *phba,
+		struct lpfc_menlo_genreq64 *req,
+		struct lpfc_sysfs_menlo *sysfs_menlo)
+{
+	struct lpfc_sli *psli = NULL;
+	struct lpfc_sli_ring *pring = NULL;
+	int rc = 0;
+	IOCB_t *rsp = NULL;
+	struct lpfc_iocbq *cmdiocbq = NULL;
+
+	psli = &phba->sli;
+	pring = &psli->ring[LPFC_ELS_RING];
+	rsp = &req->rspiocbq->iocb;
+	cmdiocbq = req->cmdiocbq;
+
+	rc = lpfc_sli_issue_iocb_wait(phba, pring, req->cmdiocbq, req->rspiocbq,
+			req->timeout);
+
+	if (rc == IOCB_TIMEDOUT) {
+
+		cmdiocbq->context2 = NULL;
+		cmdiocbq->context3 = sysfs_menlo;
+		cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_timeout_cmpl;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+			"1227 FCoE IOCB TMO: handler set for %p\n",
+			cmdiocbq->context3);
+		return -EACCES;
+	}
+
+	if (rc != IOCB_SUCCESS) {
+		rc =  -EFAULT;
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+			"1216 FCoE IOCB failed: off %d rc=%d \n",
+			(int)req->offset, rc);
+		goto sysfs_menlo_issue_iocb_wait_exit;
+	}
+
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EFAULT;
+				break;
+			}
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"1217 mlo_issueIocb:2 off %d rc=%d "
+				"ulpWord[4] 0x%x\n",
+				(int)req->offset, rc, rsp->un.ulpWord[4]);
+		}
+	}
+sysfs_menlo_issue_iocb_wait_exit:
+	return rc;
+}
+
+
+static ssize_t
+sysfs_menlo_issue_iocb(struct lpfc_hba *phba, struct lpfc_menlo_genreq64 *req,
+		struct lpfc_sysfs_menlo *sysfs_menlo)
+{
+	struct lpfc_sli *psli = NULL;
+	struct lpfc_sli_ring *pring = NULL;
+	int rc = 0;
+	IOCB_t *rsp = NULL;
+	struct lpfc_iocbq *cmdiocbq = NULL;
+
+	psli = &phba->sli;
+	pring = &psli->ring[LPFC_ELS_RING];
+	rsp = &req->rspiocbq->iocb;
+	cmdiocbq = req->cmdiocbq;
+	cmdiocbq->context2 = sysfs_menlo;
+	cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_cmpl;
+	lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+		"1257 lpfc_menlo_issue_iocb: handler set for %p\n",
+		cmdiocbq->context3);
+
+	rc = lpfc_sli_issue_iocb(phba, pring, req->cmdiocbq, 0);
+
+	if (rc == IOCB_TIMEDOUT) {
+
+		cmdiocbq->context2 = NULL;
+		cmdiocbq->context3 = sysfs_menlo;
+		cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_timeout_cmpl;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+			"1228 FCoE IOCB TMO: handler set for %p\n",
+			cmdiocbq->context3);
+		return -EACCES;
+	}
+
+	if (rc != IOCB_SUCCESS) {
+		rc =  -EFAULT;
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+			"1238 FCoE IOCB failed: off %d rc=%d \n",
+			(int)req->offset, rc);
+		goto sysfs_menlo_issue_iocb_exit;
+	}
+
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EFAULT;
+				break;
+			}
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"1239 mlo_issueIocb:2 off %d rc=%d "
+				"ulpWord[4] 0x%x\n",
+				(int)req->offset, rc, rsp->un.ulpWord[4]);
+		}
+	}
+sysfs_menlo_issue_iocb_exit:
+	return rc;
+}
+
+static ssize_t
+lpfc_menlo_read(struct lpfc_hba *phba, char *buf, loff_t off, size_t count,
+	int wait)
+{
+	struct lpfc_sli *psli = NULL;
+	struct lpfc_sli_ring *pring = NULL;
+	int rc = 0;
+	struct lpfc_sysfs_menlo *sysfs_menlo;
+	struct lpfc_dmabufext *mlast = NULL;
+	loff_t temp_off = 0;
+	struct lpfc_menlo_genreq64 *genreq = NULL;
+	IOCB_t *cmd = NULL, *rsp = NULL;
+	uint32_t * uptr = NULL;
+
+
+	psli = &phba->sli;
+	pring = &psli->ring[LPFC_ELS_RING];
+
+	if (off > SYSFS_MENLO_ATTR_SIZE)
+		return -ERANGE;
+
+	if ((count + off) > SYSFS_MENLO_ATTR_SIZE)
+		count =  SYSFS_MENLO_ATTR_SIZE - off;
+
+	if (off % 4 ||  count % 4 || (unsigned long)buf % 4)
+		return -EINVAL;
+
+	if (off && count == 0)
+		return 0;
+
+	sysfs_menlo = lpfc_get_sysfs_menlo(phba, 0);
+
+	if (!sysfs_menlo)
+		return -EPERM;
+
+	if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
+		sysfs_menlo_idle(phba, sysfs_menlo);
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+			"1214 Can not issue FCoE cmd,"
+			" SLI not active: off %d rc= -EACCESS\n",
+			(int)off);
+		return -EACCES;
+	}
+
+
+	if ((phba->link_state < LPFC_LINK_UP)
+		&& !(psli->sli_flag & LPFC_MENLO_MAINT)
+		&& wait) {
+		rc =  -EPERM;
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+			"1215 Can not issue FCoE cmd:"
+			" not ready or not in maint mode"
+			" off %d rc=%d \n",
+			(int)off, rc);
+		spin_lock_irq(&phba->hbalock);
+		goto lpfc_menlo_read_err_exit;
+	}
+
+	if (off == 0 && sysfs_menlo->state == SMENLO_WRITING) {
+		if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) {
+			spin_lock_irq(&phba->hbalock);
+			genreq = &sysfs_menlo->cr;
+			spin_unlock_irq(&phba->hbalock);
+		}
+		if ( wait )
+			rc = sysfs_menlo_issue_iocb_wait(phba,
+							&sysfs_menlo->cr,
+							sysfs_menlo);
+		else {
+			rc = sysfs_menlo_issue_iocb(phba,
+							&sysfs_menlo->cr,
+							sysfs_menlo);
+			return rc;
+		}
+
+		spin_lock_irq(&phba->hbalock);
+		if (rc < 0) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"1224 FCoE iocb failed: off %d rc=%d \n",
+				(int)off, rc);
+			if (rc != -EACCES)
+				goto lpfc_menlo_read_err_exit;
+			else {
+				spin_unlock_irq(&phba->hbalock);
+				return rc;
+			}
+		}
+
+		if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) {
+			cmd = &sysfs_menlo->cx.cmdiocbq->iocb;
+			rsp = &sysfs_menlo->cr.rspiocbq->iocb;
+			mlast = list_get_first(&sysfs_menlo->cr.outhead,
+				struct lpfc_dmabufext,
+				dma.list);
+			if ( *((uint32_t *) mlast->dma.virt) != 0 ) {
+				memcpy(buf,(uint8_t *) mlast->dma.virt, count);
+				goto lpfc_menlo_read_err_exit;
+			}
+			mlast = NULL;
+
+			cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
+			cmd->ulpContext = rsp->ulpContext;
+			cmd->ulpPU = 1;  /* RelOffset */
+			cmd->un.ulpWord[4] = 0; /* offset 0 */
+
+			spin_unlock_irq(&phba->hbalock);
+			rc = sysfs_menlo_issue_iocb_wait(phba, &sysfs_menlo->cx,
+					sysfs_menlo);
+			spin_lock_irq(&phba->hbalock);
+			if (rc < 0) {
+				uptr = (uint32_t *) rsp;
+
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"1225 menlo_read: off %d rc=%d "
+					"rspxri %d cmdxri %d \n",
+					(int)off, rc, rsp->ulpContext,
+					cmd->ulpContext);
+				uptr = (uint32_t *)
+					&sysfs_menlo->cr.cmdiocbq->iocb;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"1236 cr:%.08x %.08x %.08x %.08x "
+					"%.08x %.08x %.08x %.08x %.08x\n",
+					*uptr, *(uptr+1), *(uptr+2),
+					*(uptr+3), *(uptr+4), *(uptr+5),
+					*(uptr+6), *(uptr+7), *(uptr+8));
+				uptr = (uint32_t *)rsp;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"1232 cr-rsp:%.08x %.08x %.08x %.08x "
+					"%.08x %.08x %.08x %.08x %.08x\n",
+					*uptr, *(uptr+1), *(uptr+2),
+					*(uptr+3), *(uptr+4), *(uptr+5),
+					*(uptr+6), *(uptr+7), *(uptr+8));
+				uptr = (uint32_t *)cmd;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"1233 cx:%.08x %.08x %.08x %.08x "
+					"%.08x %.08x %.08x %.08x %.08x\n",
+					*uptr, *(uptr+1), *(uptr+2),
+					*(uptr+3), *(uptr+4), *(uptr+5),
+					*(uptr+6), *(uptr+7), *(uptr+8));
+				if (rc != -EACCES)
+					goto lpfc_menlo_read_err_exit;
+				else {
+					spin_unlock_irq(&phba->hbalock);
+					return rc;
+				}
+			}
+		}
+		sysfs_menlo->state = SMENLO_READING;
+		sysfs_menlo->cr.offset = 0;
+
+	} else
+		spin_lock_irq(&phba->hbalock);
+
+	if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD)
+		genreq = &sysfs_menlo->cx;
+	else
+		genreq = &sysfs_menlo->cr;
+
+	/* Copy back response data */
+	if (sysfs_menlo->cmdhdr.rspsize > count) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"1218 MloMgnt Rqst err Data: x%x %d %d %d %d\n",
+				genreq->outdmp->flag,
+				sysfs_menlo->cmdhdr.rspsize,
+				(int)count, (int)off, (int)genreq->offset);
+	}
+
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+		rc =   -EAGAIN;
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1219 menlo_read:4 off %d rc=%d \n",
+			(int)off, rc);
+		goto lpfc_menlo_read_err_exit;
+	}
+	else if ( sysfs_menlo->state  != SMENLO_READING) {
+		rc =  -EAGAIN;
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1220 menlo_read:5 off %d reg off %d rc=%d state %x\n",
+			(int)off,(int)genreq->offset, rc, sysfs_menlo->state);
+		goto lpfc_menlo_read_err_exit;
+	}
+	temp_off = 0;
+	mlast = NULL;
+	list_for_each_entry(mlast, &genreq->outhead, dma.list) {
+		if (temp_off == off)
+			break;
+		else
+			temp_off += BUF_SZ_4K;
+	}
+	if (mlast)
+		memcpy(buf,(uint8_t *) mlast->dma.virt, count);
+	else {
+		rc = -ERANGE;
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1221 menlo_read:6 off %d rc=%d \n",
+			(int)off, rc);
+		goto lpfc_menlo_read_err_exit;
+	}
+	genreq->offset += count;
+
+
+	if (genreq->offset >= sysfs_menlo->cmdhdr.rspsize) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"1222 menlo_read: done off %d rc=%d"
+			" cnt %d rsp_code %x\n",
+			(int)off, rc, (int)count,*((uint32_t *)buf));
+		rc = count;
+		goto lpfc_menlo_read_err_exit;
+	}
+
+	if (count >= sysfs_menlo->cmdhdr.rspsize)
+		rc = sysfs_menlo->cmdhdr.rspsize;
+	else /* Can there be a > 4k response */
+		rc = count;
+	if (genreq->offset < sysfs_menlo->cmdhdr.rspsize) {
+		spin_unlock_irq(&phba->hbalock);
+		return rc;
+	}
+
+lpfc_menlo_read_err_exit:
+	spin_unlock_irq(&phba->hbalock);
+	sysfs_menlo_idle(phba,sysfs_menlo);
+	return rc;
+}
+
+
+static ssize_t
+sysfs_menlo_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+{
+	struct class_device *cdev = container_of(kobj, struct class_device,
+						 kobj);
+	struct Scsi_Host  *shost = class_to_shost(cdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return lpfc_menlo_read(phba, buf, off, count, 1);
+}
+int need_non_blocking = 0;
+void lpfc_check_menlo_cfg(struct lpfc_hba *phba)
+{
+	uint32_t cmd_size;
+	uint32_t rsp_size;
+	menlo_get_cmd_t *cmd = NULL;
+	menlo_init_rsp_t *rsp = NULL;
+	int rc = 0;
+
+	lpfc_printf_log (phba, KERN_INFO, LOG_LINK_EVENT,
+		"1253 Checking FCoE chip firmware.\n");
+	if ( need_non_blocking ) /* Need non blocking issue_iocb */
+		return;
+
+	cmd_size = sizeof (menlo_get_cmd_t);
+	cmd = kmalloc(cmd_size, GFP_KERNEL);
+	if (!cmd ) {
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+		"1240 Unable to allocate command buffer memory.\n");
+		return;
+	}
+
+	rsp_size = sizeof (menlo_init_rsp_t);
+	rsp = kmalloc(rsp_size, GFP_KERNEL);
+	if (!rsp ) {
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+		"1244 Unable to allocate response buffer memory.\n");
+		kfree(cmd); /* rsp is NULL here; free cmd to avoid leak */
+		return;
+	}
+
+	memset(cmd,0, cmd_size);
+	memset(rsp,0, rsp_size);
+
+	cmd->code = MENLO_CMD_GET_INIT;
+	cmd->context = cmd_size;
+	cmd->length = rsp_size;
+	rc = lpfc_menlo_write (phba, (char *) cmd, 0, cmd_size);
+	if ( rc != cmd_size ) {
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+			"1250 Menlo command error. code=%d.\n", rc);
+
+		kfree (cmd);
+		kfree (rsp);
+		return;
+	}
+	cmd->code = MENLO_CMD_GET_INIT;
+	cmd->context = 0;
+	cmd->length = rsp_size;
+	BE_swap32_buffer ((uint8_t *) cmd, cmd_size);
+	rc = lpfc_menlo_write (phba, (char *) cmd, cmd_size, cmd_size);
+	if ( rc != cmd_size ) {
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+			"1251 Menlo command error. code=%d.\n", rc);
+
+		kfree (cmd);
+		kfree (rsp);
+		return;
+	}
+	rc = lpfc_menlo_read (phba, (char *) rsp, 0, rsp_size,0);
+	if ( rc && rc != rsp_size ) {
+		lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
+			"1252 Menlo command error. code=%d.\n", rc);
+
+	}
+	kfree (cmd);
+	kfree (rsp);
+	return;
+}
+
+struct bin_attribute sysfs_menlo_attr = {
+	.attr = {
+		.name = "menlo",
+		.mode = S_IRUSR | S_IWUSR,
+		.owner = THIS_MODULE,
+	},
+	.size = SYSFS_MENLO_ATTR_SIZE,
+	.read = sysfs_menlo_read,
+	.write = sysfs_menlo_write,
+};
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 11583f3..83ffa75 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -235,10 +235,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 			(iocb->iocb_cmpl) (phba, iocb, iocb);
 		}
 	}
-
-	/* If we are delaying issuing an ELS command, cancel it */
-	if (ndlp->nlp_flag & NLP_DELAY_TMO)
-		lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
 	return 0;
 }
 
@@ -249,7 +246,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba    *phba = vport->phba;
 	struct lpfc_dmabuf *pcmd;
-	struct lpfc_work_evt *evtp;
 	uint32_t *lp;
 	IOCB_t *icmd;
 	struct serv_parm *sp;
@@ -425,73 +421,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			ndlp, mbox);
 		return 1;
 	}
-
-	/* If the remote NPort logs into us, before we can initiate
-	 * discovery to them, cleanup the NPort from discovery accordingly.
-	 */
-	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
-		spin_lock_irq(shost->host_lock);
-		ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-		spin_unlock_irq(shost->host_lock);
-		del_timer_sync(&ndlp->nlp_delayfunc);
-		ndlp->nlp_last_elscmd = 0;
-
-		if (!list_empty(&ndlp->els_retry_evt.evt_listp)) {
-			list_del_init(&ndlp->els_retry_evt.evt_listp);
-			/* Decrement ndlp reference count held for the
-			 * delayed retry
-			 */
-			evtp = &ndlp->els_retry_evt;
-			lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
-		}
-
-		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
-			spin_lock_irq(shost->host_lock);
-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-			spin_unlock_irq(shost->host_lock);
-
-			if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
-			    (vport->num_disc_nodes)) {
-				/* Check to see if there are more
-				 * ADISCs to be sent
-				 */
-				lpfc_more_adisc(vport);
-
-				if ((vport->num_disc_nodes == 0) &&
-					(vport->fc_npr_cnt))
-					lpfc_els_disc_plogi(vport);
-
-				if (vport->num_disc_nodes == 0) {
-					spin_lock_irq(shost->host_lock);
-					vport->fc_flag &= ~FC_NDISC_ACTIVE;
-					spin_unlock_irq(shost->host_lock);
-					lpfc_can_disctmo(vport);
-					lpfc_end_rscn(vport);
-				}
-			}
-		}
-	} else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
-		   (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-		   (vport->num_disc_nodes)) {
-		spin_lock_irq(shost->host_lock);
-		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-		spin_unlock_irq(shost->host_lock);
-		/* Check to see if there are more
-		 * PLOGIs to be sent
-		 */
-		lpfc_more_plogi(vport);
-		if (vport->num_disc_nodes == 0) {
-			spin_lock_irq(shost->host_lock);
-			vport->fc_flag &= ~FC_NDISC_ACTIVE;
-			spin_unlock_irq(shost->host_lock);
-			lpfc_can_disctmo(vport);
-			lpfc_end_rscn(vport);
-		}
-	}
-
 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
 	return 1;
-
 out:
 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
@@ -574,7 +505,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	else
 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 
-	if (!(ndlp->nlp_type & NLP_FABRIC) ||
+	if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+	     ((ndlp->nlp_type & NLP_FCP_TARGET) ||
+	      !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
 	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
 		/* Only try to re-login if this is NOT a Fabric Node */
 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
@@ -751,6 +684,7 @@ static uint32_t
 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			   void *arg, uint32_t evt)
 {
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocb = arg;
 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -776,7 +710,22 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
 			NULL);
 	} else {
-		lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
+		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+		    (vport->num_disc_nodes)) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+			/* Check if there are more PLOGIs to be sent */
+			lpfc_more_plogi(vport);
+			if (vport->num_disc_nodes == 0) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+				lpfc_end_rscn(vport);
+			}
+		}
 	} /* If our portname was less */
 
 	return ndlp->nlp_state;
@@ -1040,6 +989,7 @@ static uint32_t
 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			   void *arg, uint32_t evt)
 {
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocb;
 
@@ -1048,9 +998,16 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
-	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
+	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+			if (vport->num_disc_nodes)
+				lpfc_more_adisc(vport);
+		}
 		return ndlp->nlp_state;
-
+	}
 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -1742,24 +1699,21 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
 
 	/* Ignore PLOGI if we have an outstanding LOGO */
-	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
+	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
 		return ndlp->nlp_state;
-	}
-
 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 		spin_lock_irq(shost->host_lock);
-		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
 		spin_unlock_irq(shost->host_lock);
-		return ndlp->nlp_state;
-	}
-
-	/* send PLOGI immediately, move to PLOGI issue state */
-	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
-		ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
-		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+		/* send PLOGI immediately, move to PLOGI issue state */
+		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+		}
 	}
-
 	return ndlp->nlp_state;
 }
 
@@ -1810,7 +1764,6 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
 
 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
-
 	/*
 	 * Do not start discovery if discovery is about to start
 	 * or discovery in progress for this node. Starting discovery
@@ -1978,9 +1931,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(shost->host_lock);
-	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
-	}
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	return ndlp->nlp_state;
 }
 
@@ -2197,7 +2148,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		lpfc_nlp_put(ndlp);
 	} else {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-			"0212 DSM out state %d on NPort free\n", rc);
+			"0213 DSM out state %d on NPort free\n", rc);
 
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
 			"DSM out:         ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ef25645..0e0ee86 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -50,6 +50,7 @@ void
 lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 {
 	unsigned long flags;
+	uint32_t evt_posted;
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	atomic_inc(&phba->num_rsrc_err);
@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-		WORKER_RAMP_DOWN_QUEUE) == 0) {
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+	if (!evt_posted)
 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
-	}
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
-
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
 {
 	unsigned long flags;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t evt_posted;
 	atomic_inc(&phba->num_cmd_success);
 
 	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-		WORKER_RAMP_UP_QUEUE) == 0) {
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
+	if (!evt_posted)
 		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
-	}
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
@@ -187,6 +183,43 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 	atomic_set(&phba->num_cmd_success, 0);
 }
 
+void
+lpfc_scsi_dev_block(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host  *shost;
+	struct scsi_device *sdev;
+	struct fc_rport *rport;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			shost_for_each_device(sdev, shost) {
+				rport = starget_to_rport(scsi_target(sdev));
+				fc_remote_port_delete(rport);
+			}
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+void
+lpfc_scsi_dev_rescan(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host  *shost;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			scsi_scan_host(shost);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
 /*
  * This routine allocates a scsi buffer, which contains all the necessary
  * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
@@ -348,7 +381,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
 			printk(KERN_ERR "%s: Too many sg segments from "
 			       "dma_map_sg.  Config %d, seg_cnt %d",
-			       __FUNCTION__, phba->cfg_sg_seg_cnt,
+			       __func__, phba->cfg_sg_seg_cnt,
 			       lpfc_cmd->seg_cnt);
 			dma_unmap_sg(&phba->pcidev->dev, sgel,
 				     lpfc_cmd->seg_cnt, datadir);
@@ -478,7 +511,7 @@ lpfc_handle_fcp_err(struct lpfc_hba *phba, struct lpfc_vport *vport,
 		logit = LOG_FCP;
 
 	lpfc_printf_log(phba, KERN_WARNING, logit,
-			"(%d)0730 FCP command x%x failed: x%x SNS x%x x%x "
+			"(%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
 			"Data: x%x x%x x%x x%x x%x\n",
 			(vport ? vport->vpi : 0),
 			cmnd->cmnd[0], scsi_status,
@@ -502,7 +535,7 @@ lpfc_handle_fcp_err(struct lpfc_hba *phba, struct lpfc_vport *vport,
 		cmnd->resid = be32_to_cpu(fcprsp->rspResId);
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-				"(%d)0716 FCP Read Underrun, expected %d, "
+				"(%d):0716 FCP Read Underrun, expected %d, "
 				"residual %d Data: x%x x%x x%x\n",
 				(vport ? vport->vpi : 0),
 				be32_to_cpu(fcpcmd->fcpDl),
@@ -518,7 +551,7 @@ lpfc_handle_fcp_err(struct lpfc_hba *phba, struct lpfc_vport *vport,
 			(cmnd->resid != fcpi_parm)) {
 			lpfc_printf_log(phba, KERN_WARNING,
 					LOG_FCP | LOG_FCP_ERROR,
-					"(%d)0735 FCP Read Check Error "
+					"(%d):0735 FCP Read Check Error "
 					"and Underrun Data: x%x x%x x%x x%x\n",
 					(vport ? vport->vpi : 0),
 					be32_to_cpu(fcpcmd->fcpDl),
@@ -537,7 +570,7 @@ lpfc_handle_fcp_err(struct lpfc_hba *phba, struct lpfc_vport *vport,
 		    (scsi_status == SAM_STAT_GOOD) &&
 		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-					"(%d)0717 FCP command x%x residual "
+					"(%d):0717 FCP command x%x residual "
 					"underrun converted to error "
 					"Data: x%x x%x x%x\n",
 					(vport ? vport->vpi : 0),
@@ -547,7 +580,7 @@ lpfc_handle_fcp_err(struct lpfc_hba *phba, struct lpfc_vport *vport,
 		}
 	} else if (resp_info & RESID_OVER) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-				"(%d)0720 FCP command x%x residual "
+				"(%d):0720 FCP command x%x residual "
 				"overrun error. Data: x%x x%x \n",
 				(vport ? vport->vpi : 0),
 				cmnd->cmnd[0],
@@ -562,7 +595,7 @@ lpfc_handle_fcp_err(struct lpfc_hba *phba, struct lpfc_vport *vport,
 			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
 		lpfc_printf_log(phba, KERN_WARNING,
 				LOG_FCP | LOG_FCP_ERROR,
-				"(%d)0734 FCP Read Check Error Data: "
+				"(%d):0734 FCP Read Check Error Data: "
 				"x%x x%x x%x x%x\n",
 				(vport ? vport->vpi : 0),
 				be32_to_cpu(fcpcmd->fcpDl),
@@ -593,6 +626,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+	atomic_dec(&pnode->cmd_pending);
 
 	if (lpfc_cmd->status) {
 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -618,23 +652,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			break;
 		case IOSTAT_NPORT_BSY:
 		case IOSTAT_FABRIC_BSY:
-			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
+			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
 			break;
 		case IOSTAT_LOCAL_REJECT:
 			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
 			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
 				cmd->result = ScsiResult(DID_REQUEUE, 0);
-			break;
-		} /* else: fall through */
+				break;
+			} /* else: fall through */
 		default:
 			cmd->result = ScsiResult(DID_ERROR, 0);
 			break;
 		}
 
-		if ((pnode == NULL )
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
-			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
+			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
+			SAM_STAT_BUSY);
 	} else {
 		cmd->result = ScsiResult(DID_OK, 0);
 	}
@@ -652,10 +687,30 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 	result = cmd->result;
 	sdev = cmd->device;
+	if (vport->cfg_max_scsicmpl_time &&
+	   time_after(jiffies, lpfc_cmd->start_time +
+		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+		spin_lock_irqsave(sdev->host->host_lock, flags);
+		if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
+		    (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
+		    ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
+			pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
+
+		pnode->last_change_time = jiffies;
+		spin_unlock_irqrestore(sdev->host->host_lock, flags);
+	} else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+		   time_after(jiffies, pnode->last_change_time +
+			msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+		spin_lock_irqsave(sdev->host->host_lock, flags);
+		pnode->cmd_qdepth += pnode->cmd_qdepth *
+			LPFC_TGTQ_RAMPUP_PCENT / 100;
+		if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
+			pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+		pnode->last_change_time = jiffies;
+		spin_unlock_irqrestore(sdev->host->host_lock, flags);
+	}
+
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
-	spin_lock_irqsave(sdev->host->host_lock, flags);
-	lpfc_cmd->pCmd = NULL;	/* This must be done before scsi_done */
-	spin_unlock_irqrestore(sdev->host->host_lock, flags);
 	cmd->scsi_done(cmd);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -664,6 +719,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		 * wake up the thread.
 		 */
 		spin_lock_irqsave(sdev->host->host_lock, flags);
+		lpfc_cmd->pCmd = NULL;
 		if (lpfc_cmd->waitq)
 			wake_up(lpfc_cmd->waitq);
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -677,7 +733,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	if (!result)
 		lpfc_rampup_queue_depth(vport, sdev);
 
-	if (!result && pnode != NULL &&
+	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
 	   ((jiffies - pnode->last_ramp_up_time) >
 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
 	   ((jiffies - pnode->last_q_full_time) >
@@ -705,7 +761,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	 * Check for queue full.  If the lun is reporting queue full, then
 	 * back off the lun queue depth to prevent target overloads.
 	 */
-	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
+	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
+	    NLP_CHK_NODE_ACT(pnode)) {
 		pnode->last_q_full_time = jiffies;
 
 		shost_for_each_device(tmp_sdev, sdev->host) {
@@ -736,6 +793,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	 * wake up the thread.
 	 */
 	spin_lock_irqsave(sdev->host->host_lock, flags);
+	lpfc_cmd->pCmd = NULL;
 	if (lpfc_cmd->waitq)
 		wake_up(lpfc_cmd->waitq);
 	spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -755,6 +813,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	int datadir = scsi_cmnd->sc_data_direction;
 	char tag[2];
 
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return;
+
 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
 	/* clear task management bits */
 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
@@ -852,9 +913,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
 
-	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
 		return 0;
-	}
 
 	piocbq = &(lpfc_cmd->cur_iocbq);
 	piocbq->vport = vport;
@@ -908,14 +969,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
+	int status;
 
-	if (!rdata->pnode)
+	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
 		return FAILED;
 
 	lpfc_cmd->rdata = rdata;
-	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
+	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
 					   FCP_TARGET_RESET);
-	if (!ret)
+	if (!status)
 		return FAILED;
 
 	iocbq = &lpfc_cmd->cur_iocbq;
@@ -928,12 +990,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
 			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
-	ret = lpfc_sli_issue_iocb_wait(phba,
+	status = lpfc_sli_issue_iocb_wait(phba,
 				       &phba->sli.ring[phba->sli.fcp_ring],
 				       iocbq, iocbqrsp, lpfc_cmd->timeout);
-	if (ret != IOCB_SUCCESS) {
-		if (ret == IOCB_TIMEDOUT)
+	if (status != IOCB_SUCCESS) {
+		if (status == IOCB_TIMEDOUT) {
 			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+			ret = TIMEOUT_ERROR;
+		} else
+			ret = FAILED;
 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
 	} else {
 		ret = SUCCESS;
@@ -1047,10 +1112,13 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	 * Catch race where our node has transitioned, but the
 	 * transport is still transitioning.
 	 */
-	if (!ndlp) {
-		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
 		goto out_fail_command;
 	}
+	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
+		goto out_host_busy;
+
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL) {
 		lpfc_adjust_queue_depth(phba);
@@ -1068,6 +1136,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	lpfc_cmd->pCmd  = cmnd;
 	lpfc_cmd->rdata = rdata;
 	lpfc_cmd->timeout = 0;
+	lpfc_cmd->start_time = jiffies;
 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
 	cmnd->scsi_done = done;
 
@@ -1080,6 +1149,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	if ((cmnd->cmnd[0] == REPORT_LUNS) && phba->cfg_enable_npiv)
 		mod_timer(&cmnd->eh_timeout, jiffies + 60 * HZ);
 
+	atomic_inc(&ndlp->cmd_pending);
 	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err)
@@ -1094,6 +1164,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	return 0;
 
  out_host_busy_free_buf:
+	atomic_dec(&ndlp->cmd_pending);
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
  out_host_busy:
@@ -1226,121 +1297,96 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_iocbq *iocbq, *iocbqrsp;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
-	uint32_t cmd_result = 0, cmd_status = 0;
-	int ret = FAILED;
-	int iocb_status = IOCB_SUCCESS;
-	int cnt, loopcnt;
+	unsigned long later;
+	int ret = SUCCESS;
+	int status;
+	int cnt;
 
 	lpfc_block_error_handler(cmnd);
-	loopcnt = 0;
 	/*
 	 * If target is not in a MAPPED state, delay the reset until
 	 * target is rediscovered or devloss timeout expires.
 	 */
-	while (1) {
-		if (!pnode)
-			goto out;
-
-		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
-			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-			loopcnt++;
-			rdata = cmnd->device->hostdata;
-			if (!rdata ||
-				(loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
-				lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-						 "0721 LUN Reset rport "
-						 "failure: cnt x%x rdata x%p\n",
-						 loopcnt, rdata);
-				goto out;
-			}
-			pnode = rdata->pnode;
-			if (!pnode)
-				goto out;
-		}
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies)) {
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+			return FAILED;
 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
 			break;
+		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+		rdata = cmnd->device->hostdata;
+		if (!rdata)
+			break;
+		pnode = rdata->pnode;
+	}
+	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "0721 LUN Reset rport "
+				 "failure: msec x%x rdata x%p\n",
+				 jiffies_to_msecs(jiffies - later), rdata);
+		return FAILED;
 	}
-
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL)
-		goto out;
-
+		return FAILED;
 	lpfc_cmd->timeout = 60;
 	lpfc_cmd->rdata = rdata;
 
-	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
-					   FCP_TARGET_RESET);
-	if (!ret)
-		goto out_free_scsi_buf;
-
+	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
+					      cmnd->device->lun,
+					      FCP_TARGET_RESET);
+	if (!status) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
 	iocbq = &lpfc_cmd->cur_iocbq;
 
 	/* get a buffer for this IOCB command response */
 	iocbqrsp = lpfc_sli_get_iocbq(phba);
-	if (iocbqrsp == NULL)
-		goto out_free_scsi_buf;
-
+	if (iocbqrsp == NULL) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			 "0703 Issue target reset to TGT %d LUN %d "
 			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
 			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
-	iocb_status = lpfc_sli_issue_iocb_wait(phba,
-				       &phba->sli.ring[phba->sli.fcp_ring],
-				       iocbq, iocbqrsp, lpfc_cmd->timeout);
-
-	if (iocb_status == IOCB_TIMEDOUT)
+	status = lpfc_sli_issue_iocb_wait(phba,
+					  &phba->sli.ring[phba->sli.fcp_ring],
+					  iocbq, iocbqrsp, lpfc_cmd->timeout);
+	if (status == IOCB_TIMEDOUT) {
 		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
-
-	if (iocb_status == IOCB_SUCCESS)
-		ret = SUCCESS;
-	else
-		ret = iocb_status;
-
-	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
-	cmd_status = iocbqrsp->iocb.ulpStatus;
-
+		ret = TIMEOUT_ERROR;
+	} else {
+		if (status != IOCB_SUCCESS)
+			ret = FAILED;
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+	}
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "0713 SCSI layer issued device reset (%d, %d) "
+			 "return x%x status x%x result x%x\n",
+			 cmnd->device->id, cmnd->device->lun, ret,
+			 iocbqrsp->iocb.ulpStatus,
+			 iocbqrsp->iocb.un.ulpWord[4]);
 	lpfc_sli_release_iocbq(phba, iocbqrsp);
-
-	/*
-	 * All outstanding txcmplq I/Os should have been aborted by the device.
-	 * Unfortunately, some targets do not abide by this forcing the driver
-	 * to double check.
-	 */
 	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
-				LPFC_CTX_LUN);
+				LPFC_CTX_TGT);
 	if (cnt)
 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 				    cmnd->device->id, cmnd->device->lun,
-				    LPFC_CTX_LUN);
-	loopcnt = 0;
-	while(cnt) {
-		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-
-		if (++loopcnt
-		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
-			break;
-
+				    LPFC_CTX_TGT);
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies) && cnt) {
+		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
-					cmnd->device->lun, LPFC_CTX_LUN);
+					cmnd->device->lun, LPFC_CTX_TGT);
 	}
-
 	if (cnt) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				 "0719 device reset I/O flush failure: "
 				 "cnt x%x\n", cnt);
 		ret = FAILED;
 	}
-
-out_free_scsi_buf:
-	if (iocb_status != IOCB_TIMEDOUT) {
-		lpfc_release_scsi_buf(phba, lpfc_cmd);
-	}
-	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-			 "0713 SCSI layer issued device reset (%d, %d) "
-			 "return x%x status x%x result x%x\n",
-			 cmnd->device->id, cmnd->device->lun, ret,
-			 cmd_status, cmd_result);
-out:
 	return ret;
 }
 
@@ -1352,19 +1398,12 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_nodelist *ndlp = NULL;
 	int match;
-	int ret = FAILED, i, err_count = 0;
-	int cnt, loopcnt;
+	int ret = SUCCESS, status, i;
+	int cnt;
 	struct lpfc_scsi_buf * lpfc_cmd;
+	unsigned long later;
 
 	lpfc_block_error_handler(cmnd);
-
-	lpfc_cmd = lpfc_get_scsi_buf(phba);
-	if (lpfc_cmd == NULL)
-		goto out;
-
-	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
-	lpfc_cmd->timeout = 60;
-
 	/*
 	 * Since the driver manages a single bus device, reset all
 	 * targets known to the driver.  Should any target reset
@@ -1378,7 +1417,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 			if (!NLP_CHK_NODE_ACT(ndlp))
 				continue;
 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
-			    i == ndlp->nlp_sid &&
+			    ndlp->nlp_sid == i &&
 			    ndlp->rport) {
 				match = 1;
 				break;
@@ -1387,27 +1426,22 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 		spin_unlock_irq(shost->host_lock);
 		if (!match)
 			continue;
-
-		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
-					  cmnd->device->lun,
-					  ndlp->rport->dd_data);
-		if (ret != SUCCESS) {
+		lpfc_cmd = lpfc_get_scsi_buf(phba);
+		if (lpfc_cmd) {
+			lpfc_cmd->timeout = 60;
+			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
+						     cmnd->device->lun,
+						     ndlp->rport->dd_data);
+			if (status != TIMEOUT_ERROR)
+				lpfc_release_scsi_buf(phba, lpfc_cmd);
+		}
+		if (!lpfc_cmd || status != SUCCESS) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 					 "0700 Bus Reset on target %d failed\n",
 					 i);
-			err_count++;
-			break;
+			ret = FAILED;
 		}
 	}
-
-	if (ret != IOCB_TIMEDOUT)
-		lpfc_release_scsi_buf(phba, lpfc_cmd);
-
-	if (err_count == 0)
-		ret = SUCCESS;
-	else
-		ret = FAILED;
-
 	/*
 	 * All outstanding txcmplq I/Os should have been aborted by
 	 * the targets.  Unfortunately, some targets do not abide by
@@ -1417,27 +1451,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	if (cnt)
 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 				    0, 0, LPFC_CTX_HOST);
-	loopcnt = 0;
-	while(cnt) {
-		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-
-		if (++loopcnt
-		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
-			break;
-
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies) && cnt) {
+		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
 	}
-
 	if (cnt) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				 "0715 Bus Reset I/O flush failure: "
 				 "cnt x%x left x%x\n", cnt, i);
 		ret = FAILED;
 	}
-
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
-out:
 	return ret;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index daba923..17cd6d3 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -20,6 +20,10 @@
 
 #include <asm/byteorder.h>
 
+#ifndef DID_TRANSPORT_DISRUPTED
+#define DID_TRANSPORT_DISRUPTED DID_BUS_BUSY
+#endif
+
 struct lpfc_hba;
 
 #define list_remove_head(list, entry, type, member)		\
@@ -139,6 +143,7 @@ struct lpfc_scsi_buf {
 	 */
 	struct lpfc_iocbq cur_iocbq;
 	wait_queue_head_t *waitq;
+	unsigned long start_time;
 };
 
 #define LPFC_SCSI_DMA_EXT_SIZE 264
diff --git a/drivers/scsi/lpfc/lpfc_security.c b/drivers/scsi/lpfc/lpfc_security.c
index 29c2194..b5e24c7 100644
--- a/drivers/scsi/lpfc/lpfc_security.c
+++ b/drivers/scsi/lpfc/lpfc_security.c
@@ -245,8 +245,7 @@ lpfc_reauth_node(unsigned long ptr)
 	if (evtp->evt_arg1) {
 		evtp->evt = LPFC_EVT_REAUTH;
 		list_add_tail(&evtp->evt_listp, &phba->work_list);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
+		lpfc_worker_wake_up(phba);
 	}
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	return;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index c8ba14d..9496887 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -219,7 +219,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
 	case CMD_IOCB_LOGENTRY_CN:
 	case CMD_IOCB_LOGENTRY_ASYNC_CN:
 		printk("%s - Unhandled SLI-3 Command x%x\n",
-				__FUNCTION__, iocb_cmnd);
+				__func__, iocb_cmnd);
 		type = LPFC_UNKNOWN_IOCB;
 		break;
 	default:
@@ -324,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 			phba->work_ha |= HA_ERATT;
 			phba->work_hs = HS_FFER3;
 
-			/* hbalock should already be held */
-			if (phba->work_wait)
-				lpfc_worker_wake_up(phba);
+			lpfc_worker_wake_up(phba);
 
 			return NULL;
 		}
@@ -789,6 +787,9 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
 	case MBX_REG_VPI:
 	case MBX_UNREG_VPI:
 	case MBX_HEARTBEAT:
+	case MBX_READ_EVENT_LOG_STATUS:
+	case MBX_READ_EVENT_LOG:
+	case MBX_WRITE_EVENT_LOG:
 		ret = mbxCommand;
 		break;
 	default:
@@ -1311,9 +1312,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 	phba->work_ha |= HA_ERATT;
 	phba->work_hs = HS_FFER3;
 
-	/* hbalock should already be held */
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
+	lpfc_worker_wake_up(phba);
 
 	return;
 }
@@ -1719,7 +1718,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
 		rspiocbp = __lpfc_sli_get_iocbq(phba);
 		if (rspiocbp == NULL) {
 			printk(KERN_ERR "%s: out of buffers! Failing "
-			       "completion.\n", __FUNCTION__);
+			       "completion.\n", __func__);
 			break;
 		}
 
@@ -1947,6 +1946,73 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 	}
 }
 
+void
+lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+{
+	LIST_HEAD(txq);
+	LIST_HEAD(txcmplq);
+	struct lpfc_iocbq *iocb;
+	IOCB_t *cmd = NULL;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+
+	/* Currently, only one fcp ring */
+	pring = &psli->ring[psli->fcp_ring];
+
+	spin_lock_irq(&phba->hbalock);
+	/* Retrieve everything on txq */
+	list_splice_init(&pring->txq, &txq);
+	pring->txq_cnt = 0;
+
+	/* Retrieve everything on the txcmplq */
+	list_splice_init(&pring->txcmplq, &txcmplq);
+	pring->txcmplq_cnt = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Flush the txq */
+	while (!list_empty(&txq)) {
+		iocb = list_get_first(&txq, struct lpfc_iocbq, list);
+		cmd = &iocb->iocb;
+		list_del_init(&iocb->list);
+
+		if (!iocb->iocb_cmpl)
+			lpfc_sli_release_iocbq(phba, iocb);
+		else {
+			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+			cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
+			(iocb->iocb_cmpl) (phba, iocb, iocb);
+		}
+	}
+
+	/* Flush the txcmpq */
+	while (!list_empty(&txcmplq)) {
+		iocb = list_get_first(&txcmplq, struct lpfc_iocbq, list);
+		cmd = &iocb->iocb;
+		list_del_init(&iocb->list);
+
+		if (!iocb->iocb_cmpl)
+			lpfc_sli_release_iocbq(phba, iocb);
+		else {
+			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+			cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
+			(iocb->iocb_cmpl) (phba, iocb, iocb);
+		}
+	}
+}
+
+/**
+ * lpfc_sli_brdready: Check for host status bits.
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function reads the host status register and compares
+ * with the provided bit mask to check if HBA completed
+ * the restart. This function will wait in a loop for the
+ * HBA to complete restart. If the HBA does not restart within
+ * 15 iterations, the function will reset the HBA again. The
+ * function returns 1 when HBA fail to restart otherwise returns
+ * zero.
+ **/
 int
 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
 {
@@ -2670,12 +2736,9 @@ lpfc_mbox_timeout(unsigned long ptr)
 		phba->pport->work_port_events |= WORKER_MBOX_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
-	if (!tmo_posted) {
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
@@ -2761,6 +2824,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
 		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
 		if(!pmbox->vport) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 			lpfc_printf_log(phba, KERN_ERR,
 					LOG_MBOX | LOG_VPORT,
 					"1806 Mbox x%x failed. No vport\n",
@@ -2770,6 +2834,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 		}
 	}
 
+	/* If the PCI channel is in offline state, do not post mbox. */
+	if (unlikely(phba->pcidev->error_state != pci_channel_io_normal)) {
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+		goto out_not_finished;
+	}
 
 	psli = &phba->sli;
 
@@ -3122,6 +3191,9 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		return IOCB_ERROR;
 	}
 
+	/* If the PCI channel is in offline state, do not post iocbs. */
+	if (unlikely(phba->pcidev->error_state != pci_channel_io_normal))
+		return IOCB_ERROR;
 
 	/*
 	 * We should never get an IOCB if we are in a < LINK_DOWN state
@@ -3142,6 +3214,17 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		 * can be issued if the link is not up.
 		 */
 		switch (piocb->iocb.ulpCommand) {
+		case CMD_GEN_REQUEST64_CR:
+		case CMD_GEN_REQUEST64_CX:
+			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
+				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
+					FC_FCP_CMND) ||
+				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
+					MENLO_TRANSPORT_TYPE))
+
+				goto iocb_busy;
+			break;
+
 		case CMD_QUE_RING_BUF_CN:
 		case CMD_QUE_RING_BUF64_CN:
 			/*
@@ -3457,8 +3540,12 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
 		prev_pring_flag = pring->flag;
-		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
 		/*
 		 * Error everything on the txq since these iocbs have not been
 		 * given to the FW yet.
@@ -3517,8 +3604,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 	spin_lock_irqsave(&phba->hbalock, flags);
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
-		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
 
 		/*
 		 * Error everything on the txq since these iocbs have not been
@@ -3647,7 +3738,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 	spin_unlock_irq(&phba->hbalock);
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"0410 Cannot find virtual addr for buffer tag on "
+			"0402 Cannot find virtual addr for buffer tag on "
 			"ring %d Data x%lx x%p x%p x%x\n",
 			pring->ringno, (unsigned long) tag,
 			slp->next, slp->prev, pring->postbufq_cnt);
@@ -3755,7 +3846,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	/* ELS cmd tag <ulpIoTag> completes */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"0133 Ignoring ELS cmd tag x%x completion Data: "
+			"0139 Ignoring ELS cmd tag x%x completion Data: "
 			"x%x x%x x%x\n",
 			irsp->ulpIoTag, irsp->ulpStatus,
 			irsp->un.ulpWord[4], irsp->ulpTimeout);
@@ -3850,7 +3941,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
 			   lpfc_ctx_cmd ctx_cmd)
 {
 	struct lpfc_scsi_buf *lpfc_cmd;
-	struct scsi_cmnd *cmnd;
+	struct scsi_lun fcp_lun;
 	int rc = 1;
 
 	if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
@@ -3860,21 +3951,22 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
 		return rc;
 
 	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
-	cmnd = lpfc_cmd->pCmd;
 
-	if (cmnd == NULL)
+	if (lpfc_cmd->pCmd == NULL)
 		return rc;
 
 	switch (ctx_cmd) {
 	case LPFC_CTX_LUN:
-		if (cmnd->device &&
-		    (cmnd->device->id == tgt_id) &&
-		    (cmnd->device->lun == lun_id))
+		int_to_scsilun(lun_id, &fcp_lun);
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
+		    (!memcmp(&lpfc_cmd->fcp_cmnd->fcp_lun, &fcp_lun,
+			     sizeof(fcp_lun))))
 			rc = 0;
 		break;
 	case LPFC_CTX_TGT:
-		if (cmnd->device &&
-			(cmnd->device->id == tgt_id))
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
 			rc = 0;
 		break;
 	case LPFC_CTX_HOST:
@@ -3882,7 +3974,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
 		break;
 	default:
 		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
-			__FUNCTION__, ctx_cmd);
+			__func__, ctx_cmd);
 		break;
 	}
 
@@ -4052,7 +4144,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 		}
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-				":0332 IOCB wait issue failed, Data x%x\n",
+				"0332 IOCB wait issue failed, Data x%x\n",
 				retval);
 		retval = IOCB_ERROR;
 	}
@@ -4084,6 +4176,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 	if (pmboxq->context1)
 		return MBX_NOT_FINISHED;
 
+	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
 	/* setup wake call as IOCB callback */
 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 	/* setup context field to pass wait_queue pointer to wake function  */
@@ -4171,6 +4264,9 @@ lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
 	if (unlikely(!phba))
 		return IRQ_NONE;
 
+	/* If the pci channel is offline, ignore all the interrupts. */
+	if (unlikely(phba->pcidev->error_state != pci_channel_io_normal))
+		return IRQ_NONE;
 
 	phba->sli.slistat.sli_intr++;
 
@@ -4246,7 +4342,7 @@ lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
 						"pwork:x%x hawork:x%x wait:x%x",
 						phba->work_ha, work_ha_copy,
 						(uint32_t)((unsigned long)
-						phba->work_wait));
+						&phba->work_waitq));
 
 					control &=
 					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
@@ -4259,7 +4355,7 @@ lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
 						"x%x hawork:x%x wait:x%x",
 						phba->work_ha, work_ha_copy,
 						(uint32_t)((unsigned long)
-						phba->work_wait));
+						&phba->work_waitq));
 				}
 				spin_unlock(&phba->hbalock);
 			}
@@ -4365,7 +4461,7 @@ lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
 							lpfc_printf_log(phba,
 							KERN_ERR,
 							LOG_MBOX | LOG_SLI,
-							"0306 rc should have"
+							"0350 rc should have"
 							"been MBX_BUSY");
 						goto send_current_mbox;
 					}
@@ -4394,9 +4490,8 @@ send_current_mbox:
 
 		spin_lock(&phba->hbalock);
 		phba->work_ha |= work_ha_copy;
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
 		spin_unlock(&phba->hbalock);
+		lpfc_worker_wake_up(phba);
 	}
 
 	ha_copy &= ~(phba->work_ha_mask);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 026a38a..c72420f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -18,7 +18,6 @@
  * included with this package.                                     *
  *******************************************************************/
 
-
 /* forward declaration for LPFC_IOCB_t's use */
 struct lpfc_hba;
 struct lpfc_vport;
@@ -237,6 +236,7 @@ struct lpfc_sli {
 #define LPFC_SLI2_ACTIVE          0x200	/* SLI2 overlay in firmware is active */
 #define LPFC_PROCESS_LA           0x400	/* Able to process link attention */
 #define LPFC_BLOCK_MGMT_IO        0x800	/* Don't allow mgmt mbx or iocb cmds */
+#define LPFC_MENLO_MAINT          0x1000 /* need for menl fw download */
 
 	struct lpfc_sli_ring ring[LPFC_MAX_RING];
 	int fcp_ring;		/* ring used for FCP initiator commands */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index d528d8e..de3cddf 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.2.0.22"
+#define LPFC_DRIVER_VERSION "8.2.0.30"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index b329b53..ef42e04 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -193,9 +193,80 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
 	return 1;
 }
 
+/**
+ * lpfc_discovery_wait: Wait for driver discovery to quiesce.
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * This driver calls this routine specifically from lpfc_vport_delete
+ * to enforce a synchronous execution of vport
+ * delete relative to discovery activities.  The
+ * lpfc_vport_delete routine should not return until it
+ * can reasonably guarantee that discovery has quiesced.
+ * Post FDISC LOGO, the driver must wait until its SAN teardown is
+ * complete and all resources recovered before allowing
+ * cleanup.
+ *
+ * This routine does not require any locks held.
+ **/
+static void lpfc_discovery_wait(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t wait_flags = 0;
+	unsigned long wait_time_max;
+	unsigned long start_time;
+
+	wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
+		     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
+
+	/*
+	 * The time constraint on this loop is a balance between the
+	 * fabric RA_TOV value and dev_loss tmo.  The driver's
+	 * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
+	 */
+	wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+	wait_time_max += jiffies;
+	start_time = jiffies;
+	while (time_before(jiffies, wait_time_max)) {
+		if ((vport->num_disc_nodes > 0)    ||
+		    (vport->fc_flag & wait_flags)  ||
+		    ((vport->port_state > LPFC_VPORT_FAILED) &&
+		     (vport->port_state < LPFC_VPORT_READY))) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+					"1833 Vport discovery quiesce Wait:"
+					" vpi x%x state x%x fc_flags x%x"
+					" num_nodes x%x, waiting 1000 msecs"
+					" total wait msecs x%x\n",
+					vport->vpi, vport->port_state,
+					vport->fc_flag, vport->num_disc_nodes,
+					jiffies_to_msecs(jiffies - start_time));
+			msleep(1000);
+		} else {
+			/* Base case.  Wait variants satisfied.  Break out */
+			lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+					 "1834 Vport discovery quiesced:"
+					 " vpi x%x state x%x fc_flags x%x"
+					 " wait msecs x%x\n",
+					 vport->vpi, vport->port_state,
+					 vport->fc_flag,
+					 jiffies_to_msecs(jiffies
+						- start_time));
+			break;
+		}
+	}
+
+	if (time_after(jiffies, wait_time_max))
+		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+				"1835 Vport discovery quiesce failed:"
+				" vpi x%x state x%x fc_flags x%x"
+				" wait msecs x%x\n",
+				vport->vpi, vport->port_state,
+				vport->fc_flag,
+				jiffies_to_msecs(jiffies - start_time));
+}
+
 int
 lpfc_vport_create(struct Scsi_Host *shost, const uint8_t *wwnn,
-		  const uint8_t *wwpn)
+		  const uint8_t *wwpn, char *vname)
 {
 	struct lpfc_nodelist *ndlp;
 	static uint8_t null_name[8] = { 0, 0, 0, 0, 0, 0, 0, 0, };
@@ -206,6 +277,7 @@ lpfc_vport_create(struct Scsi_Host *shost, const uint8_t *wwnn,
 	int vpi;
 	int rc = VPORT_ERROR;
 	int status;
+	int size;
 
 	if ((phba->sli_rev < 3) || !phba->cfg_enable_npiv) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -267,7 +339,20 @@ lpfc_vport_create(struct Scsi_Host *shost, const uint8_t *wwnn,
 
 	memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
 	memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
-
+	size = strnlen(vname, LPFC_VNAME_LEN);
+	if (size) {
+		vport->vname = kzalloc(size+1, GFP_KERNEL);
+		if (!vport->vname) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+					 "1814 Create VPORT failed. "
+					 "vname allocation failed.\n");
+			rc = VPORT_ERROR;
+			lpfc_free_vpi(phba, vpi);
+			destroy_port(vport);
+			goto error_out;
+		}
+		memcpy(vport->vname, vname, size+1);
+	}
 	if (wwnn && memcmp(wwnn, null_name, 8))
 		memcpy(vport->fc_nodename.u.wwn, wwnn, 8);
 	if (wwpn && memcmp(wwpn, null_name, 8))
@@ -532,11 +617,16 @@ lpfc_vport_delete(struct Scsi_Host *shost)
 		}
 		vport->unreg_vpi_cmpl = VPORT_INVAL;
 		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			goto skip_logo;
 		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
 			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
 				timeout = schedule_timeout(timeout);
 	}
 
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		lpfc_discovery_wait(vport);
+
 skip_logo:
 	lpfc_cleanup(vport);
 	lpfc_sli_host_down(vport);
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 345ce7a..87004dc 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -84,7 +84,8 @@ struct vport_data {
 #define VPORT_NOMEM	-3
 #define VPORT_NORESOURCES	-4
 
-int lpfc_vport_create(struct Scsi_Host *, const uint8_t *, const uint8_t *);
+int lpfc_vport_create(struct Scsi_Host *, const uint8_t *, const uint8_t *,
+		      char *);
 int lpfc_vport_delete(struct Scsi_Host *);
 int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
 int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);