Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James Bottomley:
 "This patch consists of the usual driver updates (megaraid_sas,
  scsi_debug, qla2xxx, qla4xxx, lpfc, bnx2fc, be2iscsi, hpsa, ipr) plus
  an assortment of minor fixes and the first precursors of SCSI-MQ (the
  code path simplifications) and the bug fix for the USB oops on remove
  (which involves an infrastructure change, so is sent via the main tree
  with a delayed backport after a cycle in which it is shown to
  introduce no new bugs)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (196 commits)
  [SCSI] sd: Quiesce mode sense error messages
  [SCSI] add support for per-host cmd pools
  [SCSI] simplify command allocation and freeing a bit
  [SCSI] megaraid: simplify internal command handling
  [SCSI] ses: Use vpd information from scsi_device
  [SCSI] Add EVPD page 0x83 and 0x80 to sysfs
  [SCSI] Return VPD page length in scsi_vpd_inquiry()
  [SCSI] scsi_sysfs: Implement 'is_visible' callback
  [SCSI] hpsa: update driver version to 3.4.4-1
  [SCSI] hpsa: fix bad endif placement in RAID 5 mapper code
  [SCSI] qla2xxx: Fix build errors related to invalid print fields on some architectures.
  [SCSI] bfa: Replace large udelay() with mdelay()
  [SCSI] vmw_pvscsi: Some improvements in pvscsi driver.
  [SCSI] vmw_pvscsi: Add support for I/O requests coalescing.
  [SCSI] vmw_pvscsi: Fix pvscsi_abort() function.
  [SCSI] remove deprecated IRQF_DISABLED from SCSI
  [SCSI] bfa: Updating Maintainers email ids
  [SCSI] ipr: Add new CCIN definition for Grand Canyon support
  [SCSI] ipr: Format HCAM overlay ID 0x21
  [SCSI] ipr: Use pci_enable_msi_range() and pci_enable_msix_range()
  ...
diff --git a/Documentation/ABI/testing/sysfs-class-scsi_host b/Documentation/ABI/testing/sysfs-class-scsi_host
index 29a4f89..0eb255e 100644
--- a/Documentation/ABI/testing/sysfs-class-scsi_host
+++ b/Documentation/ABI/testing/sysfs-class-scsi_host
@@ -11,3 +11,19 @@
 		guaranteed.  The 'isci_id' attribute unambiguously identifies
 		the controller index: '0' for the first controller,
 		'1' for the second.
+
+What:		/sys/class/scsi_host/hostX/acciopath_status
+Date:		November 2013
+Contact:	Stephen M. Cameron <scameron@beardog.cce.hp.com>
+Description:	This file contains the current status of the "SSD Smart Path"
+		feature of HP Smart Array RAID controllers using the hpsa
+		driver.  SSD Smart Path, when enabled, permits the driver to
+		send i/o requests directly to physical devices that are part
+		of a logical drive, bypassing the controller's firmware RAID
+		stack for a performance advantage when possible.  A value of
+		'1' indicates the feature is enabled, and the controller may
+		use the direct i/o path to physical devices.  A value of '0'
+		means the feature is disabled, and the controller may not use
+		the direct i/o path to physical devices.  This setting is
+		controller-wide, affecting all configured logical drives on
+		the controller.  This file is readable and writable.
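
A minimal user-space sketch of exercising this attribute (the host0 index,
the C helper, and its error handling are illustrative assumptions, not part
of the interface definition):

	/* Sketch: enable SSD Smart Path on an assumed host0. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/scsi_host/host0/acciopath_status",
				"r+");

		if (!f) {
			perror("acciopath_status");
			return 1;
		}
		fputs("1", f);	/* '1' enables, '0' disables */
		fclose(f);
		return 0;
	}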
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 6edaa65..91ba58e 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,16 @@
+Release Date    : Mon. Mar 10, 2014 17:00:00 PST 2014 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+			Kashyap Desai
+			Sumit Saxena
+Current Version : 06.803.01.00-rc1
+Old Version     : 06.700.06.00-rc1
+    1. Load correct raid context timeout value for multipathing & clustering.
+    2. Fix megasas_ioc_init_fusion to use local stack variable.
+    3. Return leaked MPT frames to MPT command pool.
+    4. Add Dell PowerEdge VRTX SR-IOV VF device support.
+    5. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Sat. Aug 31, 2013 17:00:00 PST 2013 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/MAINTAINERS b/MAINTAINERS
index 7fe5977..c1b982c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1921,8 +1921,8 @@
 F:	include/linux/bcma/
 
 BROCADE BFA FC SCSI DRIVER
-M:	Anil Gurumurthy <agurumur@brocade.com>
-M:	Vijaya Mohan Guvva <vmohan@brocade.com>
+M:	Anil Gurumurthy <anil.gurumurthy@qlogic.com>
+M:	Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/bfa/
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 1e9d6ad..bcd2238 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -584,7 +584,7 @@
 	NCR5380_setup(instance);
 
 	for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
-		if ((mask & possible) && (request_irq(i, &probe_intr, IRQF_DISABLED, "NCR-probe", NULL) == 0))
+		if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
 			trying_irqs |= mask;
 
 	timeout = jiffies + (250 * HZ / 1000);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9323d05..eaaf870 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 30200
+# define AAC_DRIVER_BUILD 30300
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index dada38a..5c6a870 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -480,7 +480,7 @@
 
 static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
 {
-	u32 var;
+	u32 var = 0;
 
 	if (!(dev->supplement_adapter_info.SupportedOptions2 &
 	  AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
@@ -500,13 +500,14 @@
 		if (bled && (bled != -ETIMEDOUT))
 			return -EINVAL;
 	}
-	if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
+	if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
 		rx_writel(dev, MUnit.reserved2, 3);
 		msleep(5000); /* Delay 5 seconds */
 		var = 0x00000001;
 	}
-	if (var != 0x00000001)
+	if (bled && (var != 0x00000001))
 		return -EINVAL;
+	ssleep(5);
 	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
 		return -ENODEV;
 	if (startup_timeout < 300)
@@ -646,7 +647,7 @@
 	dev->sync_mode = 0;	/* sync. mode not supported */
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+			IRQF_SHARED, "aacraid", dev) < 0) {
 		if (dev->msi)
 			pci_disable_msi(dev->pdev);
 		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 2244f31..e66477c 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -387,8 +387,7 @@
 		goto error_irq;
 	dev->sync_mode = 0;	/* sync. mode not supported */
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED,
-			"aacraid", (void *)dev ) < 0) {
+			IRQF_SHARED, "aacraid", (void *)dev) < 0) {
 		printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
 			name, instance);
 		goto error_iounmap;
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7e17107..9c65aed 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -647,7 +647,7 @@
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+			IRQF_SHARED, "aacraid", dev) < 0) {
 
 		if (dev->msi)
 			pci_disable_msi(dev->pdev);
@@ -804,7 +804,7 @@
 		goto error_iounmap;
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-		IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+		IRQF_SHARED, "aacraid", dev) < 0) {
 		if (dev->msi)
 			pci_disable_msi(dev->pdev);
 		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 3f7b6fe..e86eb6a 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -857,7 +857,7 @@
 	SETPORT(SIMODE0, 0);
 	SETPORT(SIMODE1, 0);
 
-	if( request_irq(shpnt->irq, swintr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+	if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) {
 		printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq);
 		goto out_host_put;
 	}
@@ -891,7 +891,7 @@
 	SETPORT(SSTAT0, 0x7f);
 	SETPORT(SSTAT1, 0xef);
 
-	if ( request_irq(shpnt->irq, intr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) {
+	if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) {
 		printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq);
 		goto out_host_put;
 	}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
index 9df9e2c..8373447 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
@@ -209,7 +209,6 @@
 #define AIC_OP_JC16	0x9105
 #define AIC_OP_JNC16	0x9205
 #define AIC_OP_CALL16	0x9305
-#define AIC_OP_CALL16	0x9305
 
 /* Page extension is low three bits of second opcode byte. */
 #define AIC_OP_JMPF	0xA005
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f6a30b..652b41b 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2500,16 +2500,15 @@
 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
 {
 	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
-	dma_addr_t dma_coherent_handle;
+
 	/*
 	********************************************************************
 	** here we need to tell iop 331 our freeccb.HighPart
 	** if freeccb.HighPart is not zero
 	********************************************************************
 	*/
-	dma_coherent_handle = acb->dma_coherent_handle;
-	cdb_phyaddr = (uint32_t)(dma_coherent_handle);
-	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
+	cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
+	cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
 	/*
 	***********************************************************************
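
The two helpers make the split explicit and, unlike the removed open-coded
version (which truncated the handle to 32 bits before shifting), actually
preserve the high dword.  A worked example, with the handle value assumed
for illustration:

	/* Sketch: splitting an assumed handle 0x0000000123456789ULL. */
	dma_addr_t h = 0x0000000123456789ULL;
	uint32_t lo = lower_32_bits(h);	/* 0x23456789 */
	uint32_t hi = upper_32_bits(h);	/* 0x00000001 */
	/* Old code: cdb_phyaddr was u32, so (cdb_phyaddr >> 16) >> 16 == 0. */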
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 09ba186..059ff47 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2971,7 +2971,7 @@
 	ec->irqaddr	= ashost->fast + INT_REG;
 	ec->irqmask	= 0x0a;
 
-	ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
+	ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost);
 	if (ret) {
 		printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
 			host->host_no, ashost->scsi.irq, ret);
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index b679778..f8e0609 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -262,7 +262,7 @@
 		goto out_unmap;
 	}
 
-	ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
+	ret = request_irq(host->irq, cumanascsi_intr, 0,
 			  "CumanaSCSI-1", host);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 58915f2..abc66f5 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -431,7 +431,7 @@
 		goto out_free;
 
 	ret = request_irq(ec->irq, cumanascsi_2_intr,
-			  IRQF_DISABLED, "cumanascsi2", info);
+			  0, "cumanascsi2", info);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index abc9593..5e1b73e 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -358,7 +358,7 @@
 		goto out_free;
 
 	ret = request_irq(ec->irq, powertecscsi_intr,
-			  IRQF_DISABLED, "powertec", info);
+			  0, "powertec", info);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 2e28f6c..1bfb0bd 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -98,6 +98,14 @@
 	struct be_queue_info cq;
 };
 
+struct beiscsi_mcc_tag_state {
+#define MCC_TAG_STATE_COMPLETED 0x00
+#define MCC_TAG_STATE_RUNNING   0x01
+#define MCC_TAG_STATE_TIMEOUT   0x02
+	uint8_t tag_state;
+	struct be_dma_mem tag_mem_state;
+};
+
 struct be_ctrl_info {
 	u8 __iomem *csr;
 	u8 __iomem *db;		/* Door Bell */
@@ -122,6 +130,8 @@
 	unsigned short mcc_alloc_index;
 	unsigned short mcc_free_index;
 	unsigned int mcc_tag_available;
+
+	struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];
 };
 
 #include "be_cmds.h"
@@ -129,6 +139,7 @@
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
 #define mcc_timeout		120000 /* 12s timeout */
+#define BEISCSI_LOGOUT_SYNC_DELAY	250
 
 /* Returns number of pages spanned by the data starting at the given addr */
 #define PAGES_4K_SPANNED(_address, size)				\
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 3338391..1432ed5 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -138,7 +138,7 @@
  * @phba: Driver private structure
  * @tag: Tag for the MBX Command
  * @wrb: the WRB used for the MBX Command
- * @cmd_hdr: IOCTL Hdr for the MBX Cmd
+ * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
  *
  * Waits for MBX completion with the passed TAG.
  *
@@ -148,21 +148,26 @@
  **/
 int beiscsi_mccq_compl(struct beiscsi_hba *phba,
 		uint32_t tag, struct be_mcc_wrb **wrb,
-		void *cmd_hdr)
+		struct be_dma_mem *mbx_cmd_mem)
 {
 	int rc = 0;
 	uint32_t mcc_tag_response;
 	uint16_t status = 0, addl_status = 0, wrb_num = 0;
 	struct be_mcc_wrb *temp_wrb;
-	struct be_cmd_req_hdr *ioctl_hdr;
-	struct be_cmd_resp_hdr *ioctl_resp_hdr;
+	struct be_cmd_req_hdr *mbx_hdr;
+	struct be_cmd_resp_hdr *mbx_resp_hdr;
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
 	if (beiscsi_error(phba)) {
 		free_mcc_tag(&phba->ctrl, tag);
-		return -EIO;
+		return -EPERM;
 	}
 
+	/* Set MBX Tag state to Active */
+	spin_lock(&phba->ctrl.mbox_lock);
+	phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
+	spin_unlock(&phba->ctrl.mbox_lock);
+
 	/* wait for the mccq completion */
 	rc = wait_event_interruptible_timeout(
 				phba->ctrl.mcc_wait[tag],
@@ -171,56 +176,71 @@
 				BEISCSI_HOST_MBX_TIMEOUT));
 
 	if (rc <= 0) {
+		struct be_dma_mem *tag_mem;
+		/* Set MBX Tag state to timeout */
+		spin_lock(&phba->ctrl.mbox_lock);
+		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
+		spin_unlock(&phba->ctrl.mbox_lock);
+
+		/* Store resource addr to be freed later */
+		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+		if (mbx_cmd_mem) {
+			tag_mem->size = mbx_cmd_mem->size;
+			tag_mem->va = mbx_cmd_mem->va;
+			tag_mem->dma = mbx_cmd_mem->dma;
+		} else
+			tag_mem->size = 0;
+
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 			    BEISCSI_LOG_CONFIG,
 			    "BC_%d : MBX Cmd Completion timed out\n");
-		rc = -EBUSY;
-
-		/* decrement the mccq used count */
-		atomic_dec(&phba->ctrl.mcc_obj.q.used);
-
-		goto release_mcc_tag;
-	} else
+		return -EBUSY;
+	} else {
 		rc = 0;
+		/* Set MBX Tag state to completed */
+		spin_lock(&phba->ctrl.mbox_lock);
+		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+		spin_unlock(&phba->ctrl.mbox_lock);
+	}
 
 	mcc_tag_response = phba->ctrl.mcc_numtag[tag];
 	status = (mcc_tag_response & CQE_STATUS_MASK);
 	addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
 			CQE_STATUS_ADDL_SHIFT);
 
-	if (cmd_hdr) {
-		ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
+	if (mbx_cmd_mem) {
+		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
 	} else {
 		wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
 			   CQE_STATUS_WRB_SHIFT;
 		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
-		ioctl_hdr = embedded_payload(temp_wrb);
+		mbx_hdr = embedded_payload(temp_wrb);
 
 		if (wrb)
 			*wrb = temp_wrb;
 	}
 
 	if (status || addl_status) {
-		beiscsi_log(phba, KERN_ERR,
+		beiscsi_log(phba, KERN_WARNING,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 			    BEISCSI_LOG_CONFIG,
 			    "BC_%d : MBX Cmd Failed for "
 			    "Subsys : %d Opcode : %d with "
 			    "Status : %d and Extd_Status : %d\n",
-			    ioctl_hdr->subsystem,
-			    ioctl_hdr->opcode,
+			    mbx_hdr->subsystem,
+			    mbx_hdr->opcode,
 			    status, addl_status);
 
 		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
-			ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+			mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
 			beiscsi_log(phba, KERN_WARNING,
 				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
 				    BEISCSI_LOG_CONFIG,
 				    "BC_%d : Insufficent Buffer Error "
 				    "Resp_Len : %d Actual_Resp_Len : %d\n",
-				    ioctl_resp_hdr->response_length,
-				    ioctl_resp_hdr->actual_resp_len);
+				    mbx_resp_hdr->response_length,
+				    mbx_resp_hdr->actual_resp_len);
 
 			rc = -EAGAIN;
 			goto release_mcc_tag;
@@ -319,6 +339,7 @@
 int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
 				    struct be_mcc_compl *compl)
 {
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	u16 compl_status, extd_status;
 	unsigned short tag;
 
@@ -338,7 +359,32 @@
 	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
 	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
 	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
-	wake_up_interruptible(&ctrl->mcc_wait[tag]);
+
+	if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
+		wake_up_interruptible(&ctrl->mcc_wait[tag]);
+	} else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
+		struct be_dma_mem *tag_mem;
+		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+
+		beiscsi_log(phba, KERN_WARNING,
+			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+			    BEISCSI_LOG_CONFIG,
+			    "BC_%d : MBX Completion for timeout Command "
+			    "from FW\n");
+		/* Check if memory needs to be freed */
+		if (tag_mem->size)
+			pci_free_consistent(ctrl->pdev, tag_mem->size,
+					    tag_mem->va, tag_mem->dma);
+
+		/* Change tag state */
+		spin_lock(&phba->ctrl.mbox_lock);
+		ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+		spin_unlock(&phba->ctrl.mbox_lock);
+
+		/* Free MCC Tag */
+		free_mcc_tag(ctrl, tag);
+	}
+
 	return 0;
 }
 
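
Condensed, the tag handling above behaves as follows (a sketch of the logic
just added, not additional code): a completion for a live waiter wakes it,
while a completion arriving after beiscsi_mccq_compl() has timed out cleans
up on the departed waiter's behalf:

	struct be_dma_mem *tag_mem = &ctrl->ptag_state[tag].tag_mem_state;

	switch (ctrl->ptag_state[tag].tag_state) {
	case MCC_TAG_STATE_RUNNING:	/* waiter still blocked on mcc_wait */
		wake_up_interruptible(&ctrl->mcc_wait[tag]);
		break;
	case MCC_TAG_STATE_TIMEOUT:	/* waiter already returned -EBUSY */
		if (tag_mem->size)	/* free the deferred command buffer */
			pci_free_consistent(ctrl->pdev, tag_mem->size,
					    tag_mem->va, tag_mem->dma);
		free_mcc_tag(ctrl, tag);
		break;
	}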
@@ -354,8 +400,23 @@
 	return NULL;
 }
 
-static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+/**
+ * be2iscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ *
+ * Depending on adapter state appropriate error flag is passed.
+ **/
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
 {
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	uint32_t iscsi_err_flag;
+
+	if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
+		iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
+	else
+		iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+
-	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+	iscsi_session_failure(cls_session->dd_data, iscsi_err_flag);
 }
 
@@ -386,18 +447,6 @@
 	}
 }
 
-static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
-		       u16 num_popped)
-{
-	u32 val = 0;
-	val |= qid & DB_CQ_RING_ID_MASK;
-	if (arm)
-		val |= 1 << DB_CQ_REARM_SHIFT;
-	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
-	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
-}
-
-
 int beiscsi_process_mcc(struct beiscsi_hba *phba)
 {
 	struct be_mcc_compl *compl;
@@ -428,7 +477,7 @@
 	}
 
 	if (num)
-		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
+		hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
 
 	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
 	return status;
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 627ebbe..7cf7f99 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -103,7 +103,7 @@
 
 /********** MCC door bell ************/
 #define DB_MCCQ_OFFSET 0x140
-#define DB_MCCQ_RING_ID_MASK 0x7FF		/* bits 0 - 10 */
+#define DB_MCCQ_RING_ID_MASK 0xFFFF		/* bits 0 - 15 */
 /* Number of entries posted */
 #define DB_MCCQ_NUM_POSTED_SHIFT 16		/* bits 16 - 29 */
 
@@ -709,7 +709,8 @@
 void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
 
 int beiscsi_mccq_compl(struct beiscsi_hba *phba,
-			uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
+			uint32_t tag, struct be_mcc_wrb **wrb,
+			struct be_dma_mem *mbx_cmd_mem);
 /*ISCSI Functuions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
 int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
@@ -1017,8 +1018,8 @@
 	int *users_final_status;
 } __packed;
 
-#define DB_DEF_PDU_RING_ID_MASK		0x3FF	/* bits 0 - 9 */
-#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 0 - 9 */
+#define DB_DEF_PDU_RING_ID_MASK	0x3FFF	/* bits 0 - 13 */
+#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 16 - 29 */
 #define DB_DEF_PDU_REARM_SHIFT		14
 #define DB_DEF_PDU_EVENT_SHIFT		15
 #define DB_DEF_PDU_CQPROC_SHIFT		16
@@ -1317,4 +1318,5 @@
 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 			u8 subsystem, u8 opcode, int cmd_len);
 
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
 #endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 889066d..a3df433 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -793,7 +793,7 @@
 		ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
 		break;
 	case BE2ISCSI_LINK_SPEED_100MBPS:
-		ihost->port_speed = BE2ISCSI_LINK_SPEED_100MBPS;
+		ihost->port_speed = ISCSI_PORT_SPEED_100MBPS;
 		break;
 	case BE2ISCSI_LINK_SPEED_1GBPS:
 		ihost->port_speed = ISCSI_PORT_SPEED_1GBPS;
@@ -1153,16 +1153,18 @@
 		return -EAGAIN;
 	}
 
-	ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+	ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 			    "BS_%d : mgmt_open_connection Failed");
 
-		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-			    nonemb_cmd.va, nonemb_cmd.dma);
+		if (ret != -EBUSY)
+			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+					    nonemb_cmd.va, nonemb_cmd.dma);
+
 		beiscsi_free_ep(beiscsi_ep);
-		return -EBUSY;
+		return ret;
 	}
 
 	ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
@@ -1359,6 +1361,7 @@
 	beiscsi_mccq_compl(phba, tag, NULL, NULL);
 	beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
 free_ep:
+	msleep(BEISCSI_LOGOUT_SYNC_DELAY);
 	beiscsi_free_ep(beiscsi_ep);
 	beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
 	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 5642a9b..9be818f 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -228,24 +228,25 @@
 	struct invalidate_command_table *inv_tbl;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int cid, tag, num_invalidate;
+	int rc;
 
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!aborted_task || !aborted_task->sc) {
 		/* we raced */
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return SUCCESS;
 	}
 
 	aborted_io_task = aborted_task->dd_data;
 	if (!aborted_io_task->scsi_cmnd) {
 		/* raced or invalid command */
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return SUCCESS;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	/* Invalidate WRB Posted for this Task */
 	AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
 		      aborted_io_task->pwrb_handle->pwrb,
@@ -285,9 +286,11 @@
 		return FAILED;
 	}
 
-	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
-	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-			    nonemb_cmd.va, nonemb_cmd.dma);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+	if (rc != -EBUSY)
+		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
+
 	return iscsi_eh_abort(sc);
 }
 
@@ -303,13 +306,14 @@
 	struct invalidate_command_table *inv_tbl;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int cid, tag, i, num_invalidate;
+	int rc;
 
 	/* invalidate iocbs */
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return FAILED;
 	}
 	conn = session->leadconn;
@@ -338,7 +342,7 @@
 		num_invalidate++;
 		inv_tbl++;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	inv_tbl = phba->inv_tbl;
 
 	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -363,9 +367,10 @@
 		return FAILED;
 	}
 
-	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
-	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-			    nonemb_cmd.va, nonemb_cmd.dma);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+	if (rc != -EBUSY)
+		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
 	return iscsi_eh_device_reset(sc);
 }
 
@@ -674,8 +679,19 @@
 	}
 
 	pci_set_master(pcidev);
-	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
-		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+	if (ret) {
+		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+		if (ret) {
+			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
+			pci_disable_device(pcidev);
+			return ret;
+		} else {
+			ret = pci_set_consistent_dma_mask(pcidev,
+							  DMA_BIT_MASK(32));
+		}
+	} else {
+		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
 		if (ret) {
 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
 			pci_disable_device(pcidev);
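
The 64-bit-then-32-bit fallback above can also be written with the combined
helper available since v3.13; a sketch of that alternative (not what this
patch does):

	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pcidev->dev,
						DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
		pci_disable_device(pcidev);
		return ret;
	}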
@@ -804,14 +820,23 @@
 			   unsigned char rearm, unsigned char event)
 {
 	u32 val = 0;
-	val |= id & DB_EQ_RING_ID_MASK;
+
 	if (rearm)
 		val |= 1 << DB_EQ_REARM_SHIFT;
 	if (clr_interrupt)
 		val |= 1 << DB_EQ_CLR_SHIFT;
 	if (event)
 		val |= 1 << DB_EQ_EVNT_SHIFT;
+
 	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
+	/* Setting lower order EQ_ID Bits */
+	val |= (id & DB_EQ_RING_ID_LOW_MASK);
+
+	/* Setting Higher order EQ_ID Bits */
+	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
+		  DB_EQ_RING_ID_HIGH_MASK)
+		  << DB_EQ_HIGH_SET_SHIFT);
+
 	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
 }
 
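
With ring ids now wider than 9 bits, the id is split around the CLR and
EVNT bits.  For an id of 0x2d3 (the value is an assumed example):

	u32 id = 0x2d3, val = 0;

	val |= id & DB_EQ_RING_ID_LOW_MASK;		/* bits 0-8: 0x0d3 */
	val |= ((id >> DB_EQ_HIGH_FEILD_SHIFT) &	/* id >> 9 == 0x1  */
		DB_EQ_RING_ID_HIGH_MASK) << DB_EQ_HIGH_SET_SHIFT; /* 0x800 */
	/* val == 0x8d3; bits 9 and 10 remain free for CLR and EVNT. */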
@@ -1093,15 +1118,25 @@
 	return ret;
 }
 
-static void hwi_ring_cq_db(struct beiscsi_hba *phba,
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
 			   unsigned int id, unsigned int num_processed,
 			   unsigned char rearm, unsigned char event)
 {
 	u32 val = 0;
-	val |= id & DB_CQ_RING_ID_MASK;
+
 	if (rearm)
 		val |= 1 << DB_CQ_REARM_SHIFT;
+
 	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
+
+	/* Setting lower order CQ_ID Bits */
+	val |= (id & DB_CQ_RING_ID_LOW_MASK);
+
+	/* Setting Higher order CQ_ID Bits */
+	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
+		  DB_CQ_RING_ID_HIGH_MASK)
+		  << DB_CQ_HIGH_SET_SHIFT);
+
 	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
 }
 
@@ -1150,9 +1185,9 @@
 		return 1;
 	}
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 	return 0;
 }
 
@@ -1342,8 +1377,10 @@
 	resid = csol_cqe->res_cnt;
 
 	if (!task->sc) {
-		if (io_task->scsi_cmnd)
+		if (io_task->scsi_cmnd) {
 			scsi_dma_unmap(io_task->scsi_cmnd);
+			io_task->scsi_cmnd = NULL;
+		}
 
 		return;
 	}
@@ -1380,6 +1417,7 @@
 		conn->rxdata_octets += resid;
 unmap:
 	scsi_dma_unmap(io_task->scsi_cmnd);
+	io_task->scsi_cmnd = NULL;
 	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
 }
 
@@ -1568,7 +1606,7 @@
 	pwrb = pwrb_handle->pwrb;
 	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	switch (type) {
 	case HWH_TYPE_IO:
 	case HWH_TYPE_IO_RD:
@@ -1607,7 +1645,7 @@
 		break;
 	}
 
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 }
 
 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
@@ -4360,12 +4398,16 @@
 		goto boot_freemem;
 	}
 
-	ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+	ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
 			    "BM_%d : beiscsi_get_session_info Failed");
-		goto boot_freemem;
+
+		if (ret != -EBUSY)
+			goto boot_freemem;
+		else
+			return ret;
 	}
 
 	session_resp = nonemb_cmd.va ;
@@ -4625,6 +4667,11 @@
 			spin_unlock(&phba->io_sgl_lock);
 			io_task->psgl_handle = NULL;
 		}
+
+		if (io_task->scsi_cmnd) {
+			scsi_dma_unmap(io_task->scsi_cmnd);
+			io_task->scsi_cmnd = NULL;
+		}
 	} else {
 		if (!beiscsi_conn->login_in_progress)
 			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
@@ -4646,9 +4693,9 @@
 	 * login/startup related tasks.
 	 */
 	beiscsi_conn->login_in_progress = 0;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	beiscsi_cleanup_task(task);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 
 	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
 
@@ -5273,6 +5320,8 @@
 		return;
 	}
 
+	phba->state = BE_ADAPTER_STATE_SHUTDOWN;
+	iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
 	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
 	pci_disable_device(pcidev);
 }
@@ -5594,6 +5643,8 @@
 		phba->ctrl.mcc_tag[i] = i + 1;
 		phba->ctrl.mcc_numtag[i + 1] = 0;
 		phba->ctrl.mcc_tag_available++;
+		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
+		       sizeof(struct beiscsi_mcc_tag_state));
 	}
 
 	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 31fa27b..9380b55 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"10.0.659.0"
+#define BUILD_STR		"10.2.125.0"
 #define BE_NAME			"Emulex OneConnect" \
 				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
@@ -97,9 +97,14 @@
 
 #define INVALID_SESS_HANDLE	0xFFFFFFFF
 
+/**
+ * Adapter States
+ **/
 #define BE_ADAPTER_LINK_UP	0x001
 #define BE_ADAPTER_LINK_DOWN	0x002
 #define BE_ADAPTER_PCI_ERR	0x004
+#define BE_ADAPTER_STATE_SHUTDOWN	0x008
+
 
 #define BEISCSI_CLEAN_UNLOAD	0x01
 #define BEISCSI_EEH_UNLOAD	0x02
@@ -135,11 +140,15 @@
 #define DB_RXULP0_OFFSET 0xA0
 /********* Event Q door bell *************/
 #define DB_EQ_OFFSET			DB_CQ_OFFSET
-#define DB_EQ_RING_ID_MASK		0x1FF	/* bits 0 - 8 */
+#define DB_EQ_RING_ID_LOW_MASK		0x1FF	/* bits 0 - 8 */
 /* Clear the interrupt for this eq */
 #define DB_EQ_CLR_SHIFT			(9)	/* bit 9 */
 /* Must be 1 */
 #define DB_EQ_EVNT_SHIFT		(10)	/* bit 10 */
+/* Higher Order EQ_ID bit */
+#define DB_EQ_RING_ID_HIGH_MASK	0x1F /* bits 11 - 15 */
+#define DB_EQ_HIGH_SET_SHIFT	11
+#define DB_EQ_HIGH_FEILD_SHIFT	9
 /* Number of event entries processed */
 #define DB_EQ_NUM_POPPED_SHIFT		(16)	/* bits 16 - 28 */
 /* Rearm bit */
@@ -147,7 +156,12 @@
 
 /********* Compl Q door bell *************/
 #define DB_CQ_OFFSET			0x120
-#define DB_CQ_RING_ID_MASK		0x3FF	/* bits 0 - 9 */
+#define DB_CQ_RING_ID_LOW_MASK		0x3FF	/* bits 0 - 9 */
+/* Higher Order CQ_ID bit */
+#define DB_CQ_RING_ID_HIGH_MASK	0x1F /* bits 11 - 15 */
+#define DB_CQ_HIGH_SET_SHIFT	11
+#define DB_CQ_HIGH_FEILD_SHIFT	10
+
 /* Number of event entries processed */
 #define DB_CQ_NUM_POPPED_SHIFT		(16)	/* bits 16 - 28 */
 /* Rearm bit */
@@ -821,6 +835,9 @@
 void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
 				     struct iscsi_task *task);
 
+void hwi_ring_cq_db(struct beiscsi_hba *phba,
+		     unsigned int id, unsigned int num_processed,
+		     unsigned char rearm, unsigned char event);
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
 	return phba->ue_detected || phba->fw_timeout;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index b2fcac7..088bdf7 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -828,22 +828,25 @@
 	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
 
-	rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
-	if (rc) {
-		/* Check if the IOCTL needs to be re-issued */
-		if (rc == -EAGAIN)
-			return rc;
-
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-			    "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
-
-		goto free_cmd;
-	}
+	rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd);
 
 	if (resp_buf)
 		memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
 
+	if (rc) {
+		/* Check if the MBX Cmd needs to be re-issued */
+		if (rc == -EAGAIN)
+			return rc;
+
+		beiscsi_log(phba, KERN_WARNING,
+			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+			    "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
+
+		if (rc != -EBUSY)
+			goto free_cmd;
+		else
+			return rc;
+	}
 free_cmd:
 	pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
 			    nonemb_cmd->va, nonemb_cmd->dma);
@@ -1348,7 +1351,6 @@
 {
 	int rc;
 	unsigned int tag;
-	struct be_mcc_wrb *wrb = NULL;
 
 	tag = be_cmd_set_vlan(phba, vlan_tag);
 	if (!tag) {
@@ -1358,7 +1360,7 @@
 		return -EBUSY;
 	}
 
-	rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
 	if (rc) {
 		beiscsi_log(phba, KERN_ERR,
 			    (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 65180e1..315d6d6 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3878,7 +3878,7 @@
 		bfa_trc(sfp, sfp->data_valid);
 		if (sfp->data_valid) {
 			u32	size = sizeof(struct sfp_mem_s);
-			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
+			u8 *des = (u8 *)(sfp->sfpmem);
 			memcpy(des, sfp->dbuf_kva, size);
 		}
 		/*
@@ -6851,7 +6851,7 @@
 bfa_flash_status_read(void __iomem *pci_bar)
 {
 	union bfa_flash_dev_status_reg_u	dev_status;
-	u32				status;
+	int				status;
 	u32			ret_status;
 	int				i;
 
@@ -6899,7 +6899,7 @@
 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
 			 char *buf)
 {
-	u32 status;
+	int status;
 
 	/*
 	 * len must be mutiple of 4 and not exceeding fifo size
@@ -7006,7 +7006,7 @@
 	while (!bfa_raw_sem_get(bar)) {
 		if (--n <= 0)
 			return BFA_STATUS_BADFLASH;
-		udelay(10000);
+		mdelay(10);
 	}
 	return BFA_STATUS_OK;
 }
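
udelay() busy-waits and is not guaranteed for arguments this large on all
architectures, so the 10 ms poll interval is better expressed as mdelay();
the loop's total timeout behaviour is unchanged:

	mdelay(10);	/* same wait as udelay(10000), without overflow risk */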
@@ -7021,7 +7021,8 @@
 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
 		       u32 len)
 {
-	u32 n, status;
+	u32 n;
+	int status;
 	u32 off, l, s, residue, fifo_sz;
 
 	residue = len;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 157f604..8994fb8 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2304,8 +2304,10 @@
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	if (bfa_fcport_is_dport(&bfad->bfa))
+	if (bfa_fcport_is_dport(&bfad->bfa)) {
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 		return BFA_STATUS_DPORT_ERR;
+	}
 
 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 9967f9c..f067332 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -73,9 +73,14 @@
 
 		break;
 
-	case BFI_IOIM_STS_ABORTED:
 	case BFI_IOIM_STS_TIMEDOUT:
+		host_status = DID_TIME_OUT;
+		cmnd->result = ScsiResult(host_status, 0);
+		break;
 	case BFI_IOIM_STS_PATHTOV:
+		host_status = DID_TRANSPORT_DISRUPTED;
+		cmnd->result = ScsiResult(host_status, 0);
+		break;
 	default:
 		host_status = DID_ERROR;
 		cmnd->result = ScsiResult(host_status, 0);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 1ebf3fb..6a97665 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"2.4.1"
+#define BNX2FC_VERSION		"2.4.2"
 
 #define PFX			"bnx2fc: "
 
@@ -367,6 +367,7 @@
 	atomic_t num_active_ios;
 	u32 flush_in_prog;
 	unsigned long timestamp;
+	unsigned long retry_delay_timestamp;
 	struct list_head free_task_list;
 	struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
 	struct list_head active_cmd_queue;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9b94850..6287f6a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Sep 17, 2013"
+#define DRV_MODULE_RELDATE	"Dec 11, 2013"
 
 
 static char version[] =
@@ -850,6 +850,9 @@
 				__bnx2fc_destroy(interface);
 		}
 		mutex_unlock(&bnx2fc_dev_lock);
+
+		/* Ensure ALL destroy work has been completed before return */
+		flush_workqueue(bnx2fc_wq);
 		return;
 
 	default:
@@ -2389,6 +2392,9 @@
 			__bnx2fc_destroy(interface);
 	mutex_unlock(&bnx2fc_dev_lock);
 
+	/* Ensure ALL destroy work has been completed before return */
+	flush_workqueue(bnx2fc_wq);
+
 	bnx2fc_ulp_stop(hba);
 	/* unregister cnic device */
 	if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e9279a8..32a5e0a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1871,7 +1871,15 @@
 		rc = SCSI_MLQUEUE_TARGET_BUSY;
 		goto exit_qcmd;
 	}
-
+	if (tgt->retry_delay_timestamp) {
+		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
+			tgt->retry_delay_timestamp = 0;
+		} else {
+			/* If retry_delay timer is active, flow off the ML */
+			rc = SCSI_MLQUEUE_TARGET_BUSY;
+			goto exit_qcmd;
+		}
+	}
 	io_req = bnx2fc_cmd_alloc(tgt);
 	if (!io_req) {
 		rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -1961,6 +1969,15 @@
 				 " fcp_resid = 0x%x\n",
 				io_req->cdb_status, io_req->fcp_resid);
 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+
+			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
+			    io_req->cdb_status == SAM_STAT_BUSY) {
+				/* Set the jiffies + retry_delay_timer * 100ms
+				   for the rport/tgt */
+				tgt->retry_delay_timestamp = jiffies +
+					fcp_rsp->retry_delay_timer * HZ / 10;
+			}
+
 		}
 		if (io_req->fcp_resid)
 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
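
fcp_rsp->retry_delay_timer is expressed in 100 ms units, hence the HZ / 10
scaling; queuecommand then flow-controls the target until the deadline
passes.  The arithmetic, with an assumed timer value of 30 (i.e. 3 s):

	/* SAM_STAT_TASK_SET_FULL with retry_delay_timer == 30 */
	tgt->retry_delay_timestamp = jiffies + 30 * HZ / 10;	/* +3 s */
	/* bnx2fc_queuecommand() returns SCSI_MLQUEUE_TARGET_BUSY until
	 * time_after(jiffies, tgt->retry_delay_timestamp), then clears it. */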
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index d9bae56..6870cf6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -386,6 +386,7 @@
 	tgt->rq_prod_idx = 0x8000;
 	tgt->rq_cons_idx = 0;
 	atomic_set(&tgt->num_active_ios, 0);
+	tgt->retry_delay_timestamp = 0;
 
 	if (rdata->flags & FC_RP_FLAGS_RETRY &&
 	    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index b87a193..b5ffd28 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1361,7 +1361,7 @@
 	u32 datalen = 0;
 
 	resp_cqe = (struct bnx2i_cmd_response *)cqe;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
 	if (!task)
@@ -1432,7 +1432,7 @@
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
 			     conn->data, datalen);
 fail:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 	return 0;
 }
 
@@ -1457,7 +1457,7 @@
 	int pad_len;
 
 	login = (struct bnx2i_login_response *) cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
 	if (!task)
@@ -1500,7 +1500,7 @@
 		bnx2i_conn->gen_pdu.resp_buf,
 		bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1525,7 +1525,7 @@
 	int pad_len;
 
 	text = (struct bnx2i_text_response *) cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
 	if (!task)
 		goto done;
@@ -1561,7 +1561,7 @@
 			     bnx2i_conn->gen_pdu.resp_wr_ptr -
 			     bnx2i_conn->gen_pdu.resp_buf);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1584,7 +1584,7 @@
 	struct iscsi_tm_rsp *resp_hdr;
 
 	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
 	if (!task)
@@ -1600,7 +1600,7 @@
 
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1623,7 +1623,7 @@
 	struct iscsi_logout_rsp *resp_hdr;
 
 	logout = (struct bnx2i_logout_response *) cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
 	if (!task)
@@ -1647,7 +1647,7 @@
 
 	bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	return 0;
 }
 
@@ -1668,12 +1668,12 @@
 	struct iscsi_task *task;
 
 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
 	if (task)
 		__iscsi_put_task(task);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 }
 
 /**
@@ -1712,7 +1712,7 @@
 
 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
 	memset(hdr, 0, sizeof(struct iscsi_hdr));
 	hdr->opcode = nop_in->op_code;
@@ -1738,7 +1738,7 @@
 	}
 done:
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 
 	return tgt_async_nop;
 }
@@ -1771,7 +1771,7 @@
 		return;
 	}
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
 	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
 	resp_hdr->opcode = async_cqe->op_code;
@@ -1790,7 +1790,7 @@
 
 	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
 			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 }
 
 
@@ -1817,7 +1817,7 @@
 	} else
 		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
 	memset(hdr, 0, sizeof(struct iscsi_hdr));
 	hdr->opcode = reject->op_code;
@@ -1828,7 +1828,7 @@
 	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
 			     reject->data_length);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 }
 
 /**
@@ -1848,13 +1848,13 @@
 	struct iscsi_task *task;
 
 	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(conn,
 			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
 	if (!task)
 		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
 			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 	complete(&bnx2i_conn->cmd_cleanup_cmpl);
 }
 
@@ -1921,11 +1921,11 @@
 	int rc = 0;
 	int cpu;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->back_lock);
 	task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
 				 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
 	if (!task || !task->sc) {
-		spin_unlock(&session->lock);
+		spin_unlock(&session->back_lock);
 		return -EINVAL;
 	}
 	sc = task->sc;
@@ -1935,7 +1935,7 @@
 	else
 		cpu = sc->request->cpu;
 
-	spin_unlock(&session->lock);
+	spin_unlock(&session->back_lock);
 
 	p = &per_cpu(bnx2i_percpu, cpu);
 	spin_lock(&p->p_work_lock);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c8b0aff..166543f 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1170,10 +1170,10 @@
 	if (task->state == ISCSI_TASK_ABRT_TMF) {
 		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
 
-		spin_unlock_bh(&conn->session->lock);
+		spin_unlock_bh(&conn->session->back_lock);
 		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
 				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
-		spin_lock_bh(&conn->session->lock);
+		spin_lock_bh(&conn->session->back_lock);
 	}
 	bnx2i_iscsi_unmap_sg_list(task->dd_data);
 }
@@ -2060,7 +2060,7 @@
 		goto out;
 
 	if (session) {
-		spin_lock_bh(&session->lock);
+		spin_lock_bh(&session->frwd_lock);
 		if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
 			if (session->state == ISCSI_STATE_LOGGING_OUT) {
 				if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
@@ -2076,7 +2076,7 @@
 		} else
 			close = 1;
 
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 	}
 
 	bnx2i_ep->state = EP_STATE_DISCONN_START;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a9f842..e8ee5e5 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -175,52 +175,6 @@
 			sizeof(struct fw_ofld_tx_data_wr));
 }
 
-
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
-	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
-	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct cxgbi_device *cdev,
-				struct l2t_entry *l2t)
-{
-	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
-	unsigned int ntuple = 0;
-	u32 viid;
-
-	switch (lldi->filt_mode) {
-
-	/* default filter mode */
-	case HW_TPL_FR_MT_PR_IV_P_FC:
-		if (l2t->vlan == VLAN_NONE)
-			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-		else {
-			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		}
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	case HW_TPL_FR_MT_PR_OV_P_FC: {
-		viid = cxgb4_port_viid(l2t->neigh->dev);
-
-		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
-		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
-			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-		break;
-	}
-	default:
-		break;
-	}
-	return ntuple;
-}
-
 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 				struct l2t_entry *e)
 {
@@ -248,8 +202,6 @@
 		struct cpl_act_open_req *req =
 				(struct cpl_act_open_req *)skb->head;
 
-		req = (struct cpl_act_open_req *)skb->head;
-
 		INIT_TP_WR(req, 0);
 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
 					qid_atid));
@@ -258,7 +210,9 @@
 		req->local_ip = csk->saddr.sin_addr.s_addr;
 		req->peer_ip = csk->daddr.sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		req->params = cpu_to_be32(cxgb4_select_ntuple(
+					csk->cdev->ports[csk->port_id],
+					csk->l2t));
 		opt2 |= 1 << 22;
 		req->opt2 = cpu_to_be32(opt2);
 
@@ -271,8 +225,6 @@
 		struct cpl_t5_act_open_req *req =
 				(struct cpl_t5_act_open_req *)skb->head;
 
-		req = (struct cpl_t5_act_open_req *)skb->head;
-
 		INIT_TP_WR(req, 0);
 		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
 					qid_atid));
@@ -281,7 +233,10 @@
 		req->local_ip = csk->saddr.sin_addr.s_addr;
 		req->peer_ip = csk->daddr.sin_addr.s_addr;
 		req->opt0 = cpu_to_be64(opt0);
-		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
+		req->params = cpu_to_be64(V_FILTER_TUPLE(
+				cxgb4_select_ntuple(
+					csk->cdev->ports[csk->port_id],
+					csk->l2t)));
 		opt2 |= 1 << 31;
 		req->opt2 = cpu_to_be32(opt2);
 
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index d01f016..eb29fe7 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -277,7 +277,7 @@
 		/* With interrupts enabled, it will sometimes hang when doing heavy
 		 * reads. So better not enable them until I finger it out. */
 		if (instance->irq != SCSI_IRQ_NONE)
-			if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED,
+			if (request_irq(instance->irq, dtc_intr, 0,
 					"dtc", instance)) {
 				printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 				instance->irq = SCSI_IRQ_NONE;
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 94de889..ebf5736 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1221,7 +1221,7 @@
 
 	/* Board detected, allocate its IRQ */
 	if (request_irq(irq, do_interrupt_handler,
-			IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
+			(subversion == ESA) ? IRQF_SHARED : 0,
 			driver_name, (void *)&sha[j])) {
 		printk("%s: unable to allocate IRQ %u, detaching.\n", name,
 		       irq);
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 1663173..8319d2b 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -687,7 +687,7 @@
 		return 0;
 
 	if (!reg_IRQ[gc->IRQ]) {	/* Interrupt already registered ? */
-		if (!request_irq(gc->IRQ, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", sh)) {
+		if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) {
 			reg_IRQ[gc->IRQ]++;
 			if (!gc->IRQ_TR)
 				reg_IRQL[gc->IRQ] = 1;	/* IRQ is edge triggered */
@@ -921,7 +921,7 @@
 
 	for (i = 0; i < MAXIRQ; i++)
 		if (reg_IRQ[i])
-			request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
+			request_irq(i, do_eata_pio_int_handler, 0, "EATA-PIO", NULL);
 
 	HBA_ptr = first_HBA;
 
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index b9750e2..6776931 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -231,7 +231,7 @@
 
 static void esas2r_claim_interrupts(struct esas2r_adapter *a)
 {
-	unsigned long flags = IRQF_DISABLED;
+	unsigned long flags = 0;
 
 	if (a->intr_mode == INTR_MODE_LEGACY)
 		flags |= IRQF_SHARED;
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
index 9bf285d..a82030a 100644
--- a/drivers/scsi/esas2r/esas2r_log.c
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -165,13 +165,9 @@
 
 		/*
 		 * Put a line break at the end of the formatted string so that
-		 * we don't wind up with run-on messages.  only append if there
-		 * is enough space in the buffer.
+		 * we don't wind up with run-on messages.
 		 */
-		if (strlen(event_buffer) < buflen)
-			strcat(buffer, "\n");
-
-		printk(event_buffer);
+		printk("%s\n", event_buffer);
 
 		spin_unlock_irqrestore(&event_buffer_lock, flags);
 	}
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 5cec6c6..7176365 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -461,7 +461,7 @@
 
 		if (instance->irq != SCSI_IRQ_NONE)
 			if (request_irq(instance->irq, generic_NCR5380_intr,
-					IRQF_DISABLED, "NCR5380", instance)) {
+					0, "NCR5380", instance)) {
 				printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 				instance->irq = SCSI_IRQ_NONE;
 			}
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index ce5ef0190..0f1ae13 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4711,7 +4711,7 @@
 	printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
 		isa_bios, ha->irq, ha->drq);
 
-	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
 	if (error) {
 		printk("GDT-ISA: Unable to allocate IRQ\n");
 		goto out_host_put;
@@ -4843,7 +4843,7 @@
 	printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
 		eisa_slot >> 12, ha->irq);
 
-	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
 	if (error) {
 		printk("GDT-EISA: Unable to allocate IRQ\n");
 		goto out_host_put;
@@ -4979,7 +4979,7 @@
 		ha->irq);
 
 	error = request_irq(ha->irq, gdth_interrupt,
-				IRQF_DISABLED|IRQF_SHARED, "gdth", ha);
+				IRQF_SHARED, "gdth", ha);
 	if (error) {
 		printk("GDT-PCI: Unable to allocate IRQ\n");
 		goto out_host_put;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f28ea07..3cbb57a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -398,7 +398,7 @@
 	shost->ordered_tag = sht->ordered_tag;
 	shost->no_write_same = sht->no_write_same;
 
-	if (shost_eh_deadline == -1)
+	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
 		shost->eh_deadline = -1;
 	else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
 		shost_printk(KERN_WARNING, shost,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 868318a..8cf4a0c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,6 +1,6 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -47,13 +47,13 @@
 #include <linux/string.h>
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
-#include <linux/kthread.h>
 #include <linux/jiffies.h>
+#include <asm/div64.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "3.4.0-1"
+#define HPSA_DRIVER_VERSION "3.4.4-1"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -118,6 +118,11 @@
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
+	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
+	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
@@ -163,6 +168,11 @@
 	{0x21C7103C, "Smart Array", &SA5_access},
 	{0x21C8103C, "Smart Array", &SA5_access},
 	{0x21C9103C, "Smart Array", &SA5_access},
+	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
+	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
+	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
+	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
+	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
@@ -182,8 +192,9 @@
 static struct CommandList *cmd_alloc(struct ctlr_info *h);
 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
-	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 	int cmd_type);
+#define VPD_PAGE (1 << 8)
 
 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 static void hpsa_scan_start(struct Scsi_Host *);
@@ -204,7 +215,7 @@
 	struct CommandList *c);
 /* performant mode helper functions */
 static void calc_bucket_map(int *bucket, int num_buckets,
-	int nsgs, int *bucket_map);
+	int nsgs, int min_blocks, int *bucket_map);
 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
 static inline u32 next_command(struct ctlr_info *h, u8 q);
 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
@@ -216,8 +227,14 @@
 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
 				     int wait_for_ready);
 static inline void finish_cmd(struct CommandList *c);
+static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
 #define BOARD_NOT_READY 0
 #define BOARD_READY 1
+static void hpsa_drain_accel_commands(struct ctlr_info *h);
+static void hpsa_flush_cache(struct ctlr_info *h);
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr);
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 {
@@ -280,6 +297,55 @@
 	return 1;
 }
 
+static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	int status, len;
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+	char tmpbuf[10];
+
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+		return -EACCES;
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	if (sscanf(tmpbuf, "%d", &status) != 1)
+		return -EINVAL;
+	h = shost_to_hba(shost);
+	h->acciopath_status = !!status;
+	dev_warn(&h->pdev->dev,
+		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
+		h->acciopath_status ? "enabled" : "disabled");
+	return count;
+}
+
+static ssize_t host_store_raid_offload_debug(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	int debug_level, len;
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+	char tmpbuf[10];
+
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+		return -EACCES;
+	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+	strncpy(tmpbuf, buf, len);
+	tmpbuf[len] = '\0';
+	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+		return -EINVAL;
+	if (debug_level < 0)
+		debug_level = 0;
+	h = shost_to_hba(shost);
+	h->raid_offload_debug = debug_level;
+	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
+		h->raid_offload_debug);
+	return count;
+}
+
 static ssize_t host_store_rescan(struct device *dev,
 				 struct device_attribute *attr,
 				 const char *buf, size_t count)
@@ -327,6 +393,17 @@
 			"performant" : "simple");
 }
 
+static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	h = shost_to_hba(shost);
+	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
+		(h->acciopath_status == 1) ?  "enabled" : "disabled");
+}
+
 /* List of controllers which cannot be hard reset on kexec with reset_devices */
 static u32 unresettable_controller[] = {
 	0x324a103C, /* Smart Array P712m */
@@ -416,6 +493,13 @@
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 	"1(ADM)", "UNKNOWN"
 };
+#define HPSA_RAID_0	0
+#define HPSA_RAID_4	1
+#define HPSA_RAID_1	2	/* also used for RAID 10 */
+#define HPSA_RAID_5	3	/* also used for RAID 50 */
+#define HPSA_RAID_51	4
+#define HPSA_RAID_6	5	/* also used for RAID 60 */
+#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
 
 static ssize_t raid_level_show(struct device *dev,
@@ -504,10 +588,39 @@
 			sn[12], sn[13], sn[14], sn[15]);
 }
 
+static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
+	     struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct scsi_device *sdev;
+	struct hpsa_scsi_dev_t *hdev;
+	unsigned long flags;
+	int offload_enabled;
+
+	sdev = to_scsi_device(dev);
+	h = sdev_to_hba(sdev);
+	spin_lock_irqsave(&h->lock, flags);
+	hdev = sdev->hostdata;
+	if (!hdev) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		return -ENODEV;
+	}
+	offload_enabled = hdev->offload_enabled;
+	spin_unlock_irqrestore(&h->lock, flags);
+	return snprintf(buf, 20, "%d\n", offload_enabled);
+}
+
 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
+			host_show_hp_ssd_smart_path_enabled, NULL);
+static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO,
+		host_show_hp_ssd_smart_path_status,
+		host_store_hp_ssd_smart_path_status);
+static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
+			host_store_raid_offload_debug);
 static DEVICE_ATTR(firmware_revision, S_IRUGO,
 	host_show_firmware_revision, NULL);
 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
@@ -521,6 +634,7 @@
 	&dev_attr_raid_level,
 	&dev_attr_lunid,
 	&dev_attr_unique_id,
+	&dev_attr_hp_ssd_smart_path_enabled,
 	NULL,
 };
 
@@ -530,6 +644,8 @@
 	&dev_attr_commands_outstanding,
 	&dev_attr_transport_mode,
 	&dev_attr_resettable,
+	&dev_attr_hp_ssd_smart_path_status,
+	&dev_attr_raid_offload_debug,
 	NULL,
 };
 
@@ -570,6 +686,9 @@
 	struct reply_pool *rq = &h->reply_queue[q];
 	unsigned long flags;
 
+	if (h->transMethod & CFGTBL_Trans_io_accel1)
+		return h->access.command_completed(h, q);
+
 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
 		return h->access.command_completed(h, q);
 
@@ -590,6 +709,32 @@
 	return a;
 }
 
+/*
+ * There are some special bits in the bus address of the
+ * command that we have to set for the controller to know
+ * how to process the command:
+ *
+ * Normal performant mode:
+ * bit 0: 1 means performant mode, 0 means simple mode.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 0)
+ *
+ * ioaccel1 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 110)
+ * (command type is needed because ioaccel1 mode
+ * commands are submitted through the same register as normal
+ * mode commands, so this is how the controller knows whether
+ * the command is normal mode or ioaccel1 mode.)
+ *
+ * ioaccel2 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-4 = block fetch table entry (note extra bit)
+ * bits 4-6 = not needed, because ioaccel2 mode has
+ * a separate special register for submitting commands.
+ */
+
 /* set_performant_mode: Modify the tag for cciss performant
  * set bit 0 for pull model, bits 3-1 for block fetch
  * register number
@@ -598,12 +743,47 @@
 {
 	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-		if (likely(h->msix_vector))
+		if (likely(h->msix_vector > 0))
 			c->Header.ReplyQueue =
 				raw_smp_processor_id() % h->nreply_queues;
 	}
 }
 
+static void set_ioaccel1_performant_mode(struct ctlr_info *h,
+						struct CommandList *c)
+{
+	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+
+	/* Tell the controller to post the reply to the queue for this
+	 * processor.  This seems to give the best I/O throughput.
+	 */
+	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
+	/* Set the bits in the address sent down to include:
+	 *  - performant mode bit (bit 0)
+	 *  - pull count (bits 1-3)
+	 *  - command type (bits 4-6)
+	 */
+	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
+					IOACCEL1_BUSADDR_CMDTYPE;
+}
+
+static void set_ioaccel2_performant_mode(struct ctlr_info *h,
+						struct CommandList *c)
+{
+	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+	/* Tell the controller to post the reply to the queue for this
+	 * processor.  This seems to give the best I/O throughput.
+	 */
+	cp->reply_queue = smp_processor_id() % h->nreply_queues;
+	/* Set the bits in the address sent down to include:
+	 *  - performant mode bit not used in ioaccel mode 2
+	 *  - pull count (bits 0-3)
+	 *  - command type isn't needed for ioaccel2
+	 */
+	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
+}
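
As a quick reference for the three submission-tag layouts documented
above, here is a minimal userspace sketch with made-up bus-address and
fetch-table values; PERFORMANT_MODE_BIT and IOACCEL1_CMDTYPE_BITS are
illustrative names standing in for the driver's constants:

#include <stdint.h>
#include <stdio.h>

#define PERFORMANT_MODE_BIT	0x01
#define IOACCEL1_CMDTYPE_BITS	0x60	/* 110b in bits 4-6 (assumed) */

int main(void)
{
	uint32_t busaddr = 0x12345680;	/* hypothetical command address,
					 * low 7 bits clear as required */
	uint32_t fetch = 3;		/* hypothetical fetch table entry */

	/* normal performant mode: bit 0 set, fetch entry in bits 1-3 */
	uint32_t perf = busaddr | PERFORMANT_MODE_BIT | (fetch << 1);

	/* ioaccel1: performant layout plus command type in bits 4-6 */
	uint32_t ioaccel1 = perf | IOACCEL1_CMDTYPE_BITS;

	/* ioaccel2: the fetch table value is OR'd in unshifted, as in
	 * set_ioaccel2_performant_mode() above
	 */
	uint32_t ioaccel2 = busaddr | fetch;

	printf("perf=%#x ioaccel1=%#x ioaccel2=%#x\n",
	       perf, ioaccel1, ioaccel2);
	return 0;
}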
+
 static int is_firmware_flash_cmd(u8 *cdb)
 {
 	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
@@ -638,7 +818,16 @@
 {
 	unsigned long flags;
 
-	set_performant_mode(h, c);
+	switch (c->cmd_type) {
+	case CMD_IOACCEL1:
+		set_ioaccel1_performant_mode(h, c);
+		break;
+	case CMD_IOACCEL2:
+		set_ioaccel2_performant_mode(h, c);
+		break;
+	default:
+		set_performant_mode(h, c);
+	}
 	dial_down_lockup_detection_during_fw_flash(h, c);
 	spin_lock_irqsave(&h->lock, flags);
 	addQ(&h->reqQ, c);
@@ -782,6 +971,14 @@
 
 	/* Raid level changed. */
 	h->dev[entry]->raid_level = new_entry->raid_level;
+
+	/* Raid offload parameters changed. */
+	h->dev[entry]->offload_config = new_entry->offload_config;
+	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
+	h->dev[entry]->raid_map = new_entry->raid_map;
+
 	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
 		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
 		new_entry->target, new_entry->lun);
@@ -902,6 +1099,10 @@
 	 */
 	if (dev1->raid_level != dev2->raid_level)
 		return 1;
+	if (dev1->offload_config != dev2->offload_config)
+		return 1;
+	if (dev1->offload_enabled != dev2->offload_enabled)
+		return 1;
 	return 0;
 }
 
@@ -932,6 +1133,9 @@
 					return DEVICE_UPDATED;
 				return DEVICE_SAME;
 			} else {
+				/* Keep offline devices offline */
+				if (needle->volume_offline)
+					return DEVICE_NOT_FOUND;
 				return DEVICE_CHANGED;
 			}
 		}
@@ -940,6 +1144,110 @@
 	return DEVICE_NOT_FOUND;
 }
 
+static void hpsa_monitor_offline_device(struct ctlr_info *h,
+					unsigned char scsi3addr[])
+{
+	struct offline_device_entry *device;
+	unsigned long flags;
+
+	/* Check to see if device is already on the list */
+	spin_lock_irqsave(&h->offline_device_lock, flags);
+	list_for_each_entry(device, &h->offline_device_list, offline_list) {
+		if (memcmp(device->scsi3addr, scsi3addr,
+			sizeof(device->scsi3addr)) == 0) {
+			spin_unlock_irqrestore(&h->offline_device_lock, flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&h->offline_device_lock, flags);
+
+	/* Device is not on the list, add it. */
+	device = kmalloc(sizeof(*device), GFP_KERNEL);
+	if (!device) {
+		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+		return;
+	}
+	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+	spin_lock_irqsave(&h->offline_device_lock, flags);
+	list_add_tail(&device->offline_list, &h->offline_device_list);
+	spin_unlock_irqrestore(&h->offline_device_lock, flags);
+}
+
+/* Print a message explaining various offline volume states */
+static void hpsa_show_volume_status(struct ctlr_info *h,
+	struct hpsa_scsi_dev_t *sd)
+{
+	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+	switch (sd->volume_offline) {
+	case HPSA_LV_OK:
+		break;
+	case HPSA_LV_UNDERGOING_ERASE:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_UNDERGOING_RPI:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PENDING_RPI:
+		dev_info(&h->pdev->dev,
+				"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
+				h->scsi_host->host_no,
+				sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_ENCRYPTED_NO_KEY:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_UNDERGOING_ENCRYPTION:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PENDING_ENCRYPTION:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
+	}
+}
+
 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 	struct hpsa_scsi_dev_t *sd[], int nsds)
 {
@@ -1004,6 +1312,20 @@
 	for (i = 0; i < nsds; i++) {
 		if (!sd[i]) /* if already added above. */
 			continue;
+
+		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
+		 * as the SCSI mid-layer does not handle such devices well.
+		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
+		 * at 160Hz, and prevents the system from coming up.
+		 */
+		if (sd[i]->volume_offline) {
+			hpsa_show_volume_status(h, sd[i]);
+			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
+				h->scsi_host->host_no,
+				sd[i]->bus, sd[i]->target, sd[i]->lun);
+			continue;
+		}
+
 		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
 					h->ndevices, &entry);
 		if (device_change == DEVICE_NOT_FOUND) {
@@ -1022,6 +1344,17 @@
 	}
 	spin_unlock_irqrestore(&h->devlock, flags);
 
+	/* Monitor devices which are in one of several NOT READY states to be
+	 * brought online later. This must be done without holding h->devlock,
+	 * so don't touch h->dev[]
+	 */
+	for (i = 0; i < nsds; i++) {
+		if (!sd[i]) /* if already added above. */
+			continue;
+		if (sd[i]->volume_offline)
+			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
+	}
+
 	/* Don't notify scsi mid layer of any changes the first time through
 	 * (or if there are no changes) scsi_scan_host will do it later the
 	 * first time through.
@@ -1187,11 +1520,163 @@
 	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
 }
 
+
+/* Decode the various types of errors on ioaccel2 path.
+ * Return 1 for any error that should generate a RAID path retry.
+ * Return 0 for errors that don't require a RAID path retry.
+ */
+static int handle_ioaccel_mode2_error(struct ctlr_info *h,
+					struct CommandList *c,
+					struct scsi_cmnd *cmd,
+					struct io_accel2_cmd *c2)
+{
+	int data_len;
+	int retry = 0;
+
+	switch (c2->error_data.serv_response) {
+	case IOACCEL2_SERV_RESPONSE_COMPLETE:
+		switch (c2->error_data.status) {
+		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with check condition.\n",
+				"HP SSD Smart Path");
+			if (c2->error_data.data_present !=
+					IOACCEL2_SENSE_DATA_PRESENT)
+				break;
+			/* copy the sense data */
+			data_len = c2->error_data.sense_data_len;
+			if (data_len > SCSI_SENSE_BUFFERSIZE)
+				data_len = SCSI_SENSE_BUFFERSIZE;
+			if (data_len > sizeof(c2->error_data.sense_data_buff))
+				data_len =
+					sizeof(c2->error_data.sense_data_buff);
+			memcpy(cmd->sense_buffer,
+				c2->error_data.sense_data_buff, data_len);
+			cmd->result |= SAM_STAT_CHECK_CONDITION;
+			retry = 1;
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with BUSY status.\n",
+				"HP SSD Smart Path");
+			retry = 1;
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with reservation conflict.\n",
+				"HP SSD Smart Path");
+			retry = 1;
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
+			/* Make scsi midlayer do unlimited retries */
+			cmd->result = DID_IMM_RETRY << 16;
+			break;
+		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with aborted status.\n",
+				"HP SSD Smart Path");
+			retry = 1;
+			break;
+		default:
+			dev_warn(&h->pdev->dev,
+				"%s: task complete with unrecognized status: 0x%02x\n",
+				"HP SSD Smart Path", c2->error_data.status);
+			retry = 1;
+			break;
+		}
+		break;
+	case IOACCEL2_SERV_RESPONSE_FAILURE:
+		/* don't expect to get here. */
+		dev_warn(&h->pdev->dev,
+			"unexpected delivery or target failure, status = 0x%02x\n",
+			c2->error_data.status);
+		retry = 1;
+		break;
+	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
+		break;
+	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
+		break;
+	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
+		dev_warn(&h->pdev->dev, "task management function rejected.\n");
+		retry = 1;
+		break;
+	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
+		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
+		break;
+	default:
+		dev_warn(&h->pdev->dev,
+			"%s: Unrecognized server response: 0x%02x\n",
+			"HP SSD Smart Path",
+			c2->error_data.serv_response);
+		retry = 1;
+		break;
+	}
+
+	return retry;	/* retry on raid path? */
+}
+
+static void process_ioaccel2_completion(struct ctlr_info *h,
+		struct CommandList *c, struct scsi_cmnd *cmd,
+		struct hpsa_scsi_dev_t *dev)
+{
+	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+	int raid_retry = 0;
+
+	/* check for good status */
+	if (likely(c2->error_data.serv_response == 0 &&
+			c2->error_data.status == 0)) {
+		cmd_free(h, c);
+		cmd->scsi_done(cmd);
+		return;
+	}
+
+	/* Any RAID offload error results in retry which will use
+	 * the normal I/O path so the controller can handle whatever's
+	 * wrong.
+	 */
+	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
+		c2->error_data.serv_response ==
+			IOACCEL2_SERV_RESPONSE_FAILURE) {
+		if (c2->error_data.status ==
+			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+			dev_warn(&h->pdev->dev,
+				"%s: Path is unavailable, retrying on standard path.\n",
+				"HP SSD Smart Path");
+		else
+			dev_warn(&h->pdev->dev,
+				"%s: Error 0x%02x, retrying on standard path.\n",
+				"HP SSD Smart Path", c2->error_data.status);
+
+		dev->offload_enabled = 0;
+		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
+		cmd->result = DID_SOFT_ERROR << 16;
+		cmd_free(h, c);
+		cmd->scsi_done(cmd);
+		return;
+	}
+	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
+	/* If error found, disable Smart Path, schedule a rescan,
+	 * and force a retry on the standard path.
+	 */
+	if (raid_retry) {
+		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
+			"HP SSD Smart Path");
+		dev->offload_enabled = 0; /* Disable Smart Path */
+		h->drv_req_rescan = 1;	  /* schedule controller rescan */
+		cmd->result = DID_SOFT_ERROR << 16;
+	}
+	cmd_free(h, c);
+	cmd->scsi_done(cmd);
+}
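
The fallback contract implemented above is: any ioaccel2 error flagged
for retry disables Smart Path on the device and completes the command
with DID_SOFT_ERROR, so the midlayer resubmits it down the standard
RAID path.  A toy userspace model of just that decision (DID_SOFT_ERROR
matches scsi.h; the device struct is a stand-in):

#include <stdio.h>

#define DID_SOFT_ERROR 0x0b	/* host byte value from scsi.h */

struct dev_state {
	int offload_enabled;	/* stand-in for the hpsa_scsi_dev_t field */
};

static int complete_or_retry(struct dev_state *dev, int raid_retry)
{
	if (!raid_retry)
		return 0;		/* complete with good status */
	dev->offload_enabled = 0;	/* force RAID path from now on */
	return DID_SOFT_ERROR << 16;	/* midlayer retries the command */
}

int main(void)
{
	struct dev_state d = { .offload_enabled = 1 };

	printf("result=%#x offload=%d\n",
	       complete_or_retry(&d, 1), d.offload_enabled);
	return 0;
}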
+
 static void complete_scsi_command(struct CommandList *cp)
 {
 	struct scsi_cmnd *cmd;
 	struct ctlr_info *h;
 	struct ErrorInfo *ei;
+	struct hpsa_scsi_dev_t *dev;
 
 	unsigned char sense_key;
 	unsigned char asc;      /* additional sense code */
@@ -1201,13 +1686,19 @@
 	ei = cp->err_info;
 	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
 	h = cp->h;
+	dev = cmd->device->hostdata;
 
 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
-	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
+	if ((cp->cmd_type == CMD_SCSI) &&
+		(cp->Header.SGTotal > h->max_cmd_sg_entries))
 		hpsa_unmap_sg_chain_block(h, cp);
 
 	cmd->result = (DID_OK << 16); 		/* host byte */
 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
+
+	if (cp->cmd_type == CMD_IOACCEL2)
+		return process_ioaccel2_completion(h, cp, cmd, dev);
+
 	cmd->result |= ei->ScsiStatus;
 
 	/* copy the sense data whether we need to or not. */
@@ -1227,6 +1718,32 @@
 		return;
 	}
 
+	/* For I/O accelerator commands, copy over some fields to the normal
+	 * CISS header used below for error handling.
+	 */
+	if (cp->cmd_type == CMD_IOACCEL1) {
+		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
+		cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
+		cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
+		cp->Header.Tag.lower = c->Tag.lower;
+		cp->Header.Tag.upper = c->Tag.upper;
+		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
+		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
+
+		/* Any RAID offload error results in retry which will use
+		 * the normal I/O path so the controller can handle whatever's
+		 * wrong.
+		 */
+		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
+			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
+				dev->offload_enabled = 0;
+			cmd->result = DID_SOFT_ERROR << 16;
+			cmd_free(h, cp);
+			cmd->scsi_done(cmd);
+			return;
+		}
+	}
+
 	/* an error has occurred */
 	switch (ei->CommandStatus) {
 
@@ -1389,6 +1906,14 @@
 		cmd->result = DID_ERROR << 16;
 		dev_warn(&h->pdev->dev, "Command unabortable\n");
 		break;
+	case CMD_IOACCEL_DISABLED:
+		/* This only handles the direct pass-through case since RAID
+		 * offload is handled above.  Just attempt a retry.
+		 */
+		cmd->result = DID_SOFT_ERROR << 16;
+		dev_warn(&h->pdev->dev,
+				"cp %p had HP SSD Smart Path error\n", cp);
+		break;
 	default:
 		cmd->result = DID_ERROR << 16;
 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1438,6 +1963,7 @@
 	cp->SG[0].Addr.upper =
 	  (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
 	cp->SG[0].Len = buflen;
+	cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
 	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
 	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
 	return 0;
@@ -1490,17 +2016,37 @@
 	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
 }
 
-static void hpsa_scsi_interpret_error(struct CommandList *cp)
+static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
+				struct CommandList *c)
 {
-	struct ErrorInfo *ei;
-	struct device *d = &cp->h->pdev->dev;
+	const u8 *cdb = c->Request.CDB;
+	const u8 *lun = c->Header.LUN.LunAddrBytes;
 
-	ei = cp->err_info;
+	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
+	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		txt, lun[0], lun[1], lun[2], lun[3],
+		lun[4], lun[5], lun[6], lun[7],
+		cdb[0], cdb[1], cdb[2], cdb[3],
+		cdb[4], cdb[5], cdb[6], cdb[7],
+		cdb[8], cdb[9], cdb[10], cdb[11],
+		cdb[12], cdb[13], cdb[14], cdb[15]);
+}
+
+static void hpsa_scsi_interpret_error(struct ctlr_info *h,
+			struct CommandList *cp)
+{
+	const struct ErrorInfo *ei = cp->err_info;
+	struct device *d = &cp->h->pdev->dev;
+	const u8 *sd = ei->SenseInfo;
+
 	switch (ei->CommandStatus) {
 	case CMD_TARGET_STATUS:
-		dev_warn(d, "cmd %p has completed with errors\n", cp);
-		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
-				ei->ScsiStatus);
+		hpsa_print_cmd(h, "SCSI status", cp);
+		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
+			dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
+				sd[2] & 0x0f, sd[12], sd[13]);
+		else
+			dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
 		if (ei->ScsiStatus == 0)
 			dev_warn(d, "SCSI status is abnormally zero.  "
 			"(probably indicates selection timeout "
@@ -1508,54 +2054,51 @@
 			"firmware bug, circa July, 2001.)\n");
 		break;
 	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
-			dev_info(d, "UNDERRUN\n");
 		break;
 	case CMD_DATA_OVERRUN:
-		dev_warn(d, "cp %p has completed with data overrun\n", cp);
+		hpsa_print_cmd(h, "overrun condition", cp);
 		break;
 	case CMD_INVALID: {
 		/* controller unfortunately reports SCSI passthru's
 		 * to non-existent targets as invalid commands.
 		 */
-		dev_warn(d, "cp %p is reported invalid (probably means "
-			"target device no longer present)\n", cp);
-		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
-		print_cmd(cp);  */
+		hpsa_print_cmd(h, "invalid command", cp);
+		dev_warn(d, "probably means device no longer present\n");
 		}
 		break;
 	case CMD_PROTOCOL_ERR:
-		dev_warn(d, "cp %p has protocol error \n", cp);
+		hpsa_print_cmd(h, "protocol error", cp);
 		break;
 	case CMD_HARDWARE_ERR:
-		/* cmd->result = DID_ERROR << 16; */
-		dev_warn(d, "cp %p had hardware error\n", cp);
+		hpsa_print_cmd(h, "hardware error", cp);
 		break;
 	case CMD_CONNECTION_LOST:
-		dev_warn(d, "cp %p had connection lost\n", cp);
+		hpsa_print_cmd(h, "connection lost", cp);
 		break;
 	case CMD_ABORTED:
-		dev_warn(d, "cp %p was aborted\n", cp);
+		hpsa_print_cmd(h, "aborted", cp);
 		break;
 	case CMD_ABORT_FAILED:
-		dev_warn(d, "cp %p reports abort failed\n", cp);
+		hpsa_print_cmd(h, "abort failed", cp);
 		break;
 	case CMD_UNSOLICITED_ABORT:
-		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
+		hpsa_print_cmd(h, "unsolicited abort", cp);
 		break;
 	case CMD_TIMEOUT:
-		dev_warn(d, "cp %p timed out\n", cp);
+		hpsa_print_cmd(h, "timed out", cp);
 		break;
 	case CMD_UNABORTABLE:
-		dev_warn(d, "Command unabortable\n");
+		hpsa_print_cmd(h, "unabortable", cp);
 		break;
 	default:
-		dev_warn(d, "cp %p returned unknown status %x\n", cp,
+		hpsa_print_cmd(h, "unknown status", cp);
+		dev_warn(d, "Unknown command status %x\n",
 				ei->CommandStatus);
 	}
 }
 
 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
-			unsigned char page, unsigned char *buf,
+			u16 page, unsigned char *buf,
 			unsigned char bufsize)
 {
 	int rc = IO_OK;
@@ -1577,7 +2120,7 @@
 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
 	ei = c->err_info;
 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		hpsa_scsi_interpret_error(c);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
 	}
 out:
@@ -1585,7 +2128,39 @@
 	return rc;
 }
 
-static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
+		unsigned char *scsi3addr, unsigned char page,
+		struct bmic_controller_parameters *buf, size_t bufsize)
+{
+	int rc = IO_OK;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+
+	if (c == NULL) {			/* trouble... */
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -ENOMEM;
+	}
+
+	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
+			page, scsi3addr, TYPE_CMD)) {
+		rc = -1;
+		goto out;
+	}
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+	ei = c->err_info;
+	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		hpsa_scsi_interpret_error(h, c);
+		rc = -1;
+	}
+out:
+	cmd_special_free(h, c);
+	return rc;
+}
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+	u8 reset_type)
 {
 	int rc = IO_OK;
 	struct CommandList *c;
@@ -1599,14 +2174,15 @@
 	}
 
 	/* fill_cmd can't fail here, no data buffer to map. */
-	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
-			NULL, 0, 0, scsi3addr, TYPE_MSG);
+	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+			scsi3addr, TYPE_MSG);
+	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
 	hpsa_scsi_do_simple_cmd_core(h, c);
 	/* no unmap needed here because no data xfer. */
 
 	ei = c->err_info;
 	if (ei->CommandStatus != 0) {
-		hpsa_scsi_interpret_error(c);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
 	}
 	cmd_special_free(h, c);
@@ -1623,7 +2199,7 @@
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
 		return;
-	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
 	if (rc == 0)
 		*raid_level = buf[8];
 	if (*raid_level > RAID_UNKNOWN)
@@ -1632,6 +2208,204 @@
 	return;
 }
 
+#define HPSA_MAP_DEBUG
+#ifdef HPSA_MAP_DEBUG
+static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
+				struct raid_map_data *map_buff)
+{
+	struct raid_map_disk_data *dd = &map_buff->data[0];
+	int map, row, col;
+	u16 map_cnt, row_cnt, disks_per_row;
+
+	if (rc != 0)
+		return;
+
+	/* Show details only if debugging has been activated. */
+	if (h->raid_offload_debug < 2)
+		return;
+
+	dev_info(&h->pdev->dev, "structure_size = %u\n",
+				le32_to_cpu(map_buff->structure_size));
+	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
+			le32_to_cpu(map_buff->volume_blk_size));
+	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
+			le64_to_cpu(map_buff->volume_blk_cnt));
+	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
+			map_buff->phys_blk_shift);
+	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
+			map_buff->parity_rotation_shift);
+	dev_info(&h->pdev->dev, "strip_size = %u\n",
+			le16_to_cpu(map_buff->strip_size));
+	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
+			le64_to_cpu(map_buff->disk_starting_blk));
+	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
+			le64_to_cpu(map_buff->disk_blk_cnt));
+	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
+			le16_to_cpu(map_buff->data_disks_per_row));
+	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
+			le16_to_cpu(map_buff->metadata_disks_per_row));
+	dev_info(&h->pdev->dev, "row_cnt = %u\n",
+			le16_to_cpu(map_buff->row_cnt));
+	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
+			le16_to_cpu(map_buff->layout_map_count));
+	dev_info(&h->pdev->dev, "flags = %u\n",
+			le16_to_cpu(map_buff->flags));
+	if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
+		dev_info(&h->pdev->dev, "encrypytion = ON\n");
+	else
+		dev_info(&h->pdev->dev, "encrypytion = OFF\n");
+	dev_info(&h->pdev->dev, "dekindex = %u\n",
+			le16_to_cpu(map_buff->dekindex));
+
+	map_cnt = le16_to_cpu(map_buff->layout_map_count);
+	for (map = 0; map < map_cnt; map++) {
+		dev_info(&h->pdev->dev, "Map%u:\n", map);
+		row_cnt = le16_to_cpu(map_buff->row_cnt);
+		for (row = 0; row < row_cnt; row++) {
+			dev_info(&h->pdev->dev, "  Row%u:\n", row);
+			disks_per_row =
+				le16_to_cpu(map_buff->data_disks_per_row);
+			for (col = 0; col < disks_per_row; col++, dd++)
+				dev_info(&h->pdev->dev,
+					"    D%02u: h=0x%04x xor=%u,%u\n",
+					col, dd->ioaccel_handle,
+					dd->xor_mult[0], dd->xor_mult[1]);
+			disks_per_row =
+				le16_to_cpu(map_buff->metadata_disks_per_row);
+			for (col = 0; col < disks_per_row; col++, dd++)
+				dev_info(&h->pdev->dev,
+					"    M%02u: h=0x%04x xor=%u,%u\n",
+					col, dd->ioaccel_handle,
+					dd->xor_mult[0], dd->xor_mult[1]);
+		}
+	}
+}
+#else
+static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
+			__attribute__((unused)) int rc,
+			__attribute__((unused)) struct raid_map_data *map_buff)
+{
+}
+#endif
+
+static int hpsa_get_raid_map(struct ctlr_info *h,
+	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+	int rc = 0;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+	if (c == NULL) {
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -ENOMEM;
+	}
+	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
+			sizeof(this_device->raid_map), 0,
+			scsi3addr, TYPE_CMD)) {
+		dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
+		cmd_special_free(h, c);
+		return -ENOMEM;
+	}
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+	ei = c->err_info;
+	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		hpsa_scsi_interpret_error(h, c);
+		cmd_special_free(h, c);
+		return -1;
+	}
+	cmd_special_free(h, c);
+
+	/* @todo in the future, dynamically allocate RAID map memory */
+	if (le32_to_cpu(this_device->raid_map.structure_size) >
+				sizeof(this_device->raid_map)) {
+		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
+		rc = -1;
+	}
+	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
+	return rc;
+}
+
+static int hpsa_vpd_page_supported(struct ctlr_info *h,
+	unsigned char scsi3addr[], u8 page)
+{
+	int rc;
+	int i;
+	int pages;
+	unsigned char *buf, bufsize;
+
+	buf = kzalloc(256, GFP_KERNEL);
+	if (!buf)
+		return 0;
+
+	/* Get the size of the page list first */
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+				buf, HPSA_VPD_HEADER_SZ);
+	if (rc != 0)
+		goto exit_unsupported;
+	pages = buf[3];
+	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
+		bufsize = pages + HPSA_VPD_HEADER_SZ;
+	else
+		bufsize = 255;
+
+	/* Get the whole VPD page list */
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+				buf, bufsize);
+	if (rc != 0)
+		goto exit_unsupported;
+
+	pages = buf[3];
+	for (i = 1; i <= pages; i++)
+		if (buf[3 + i] == page)
+			goto exit_supported;
+exit_unsupported:
+	kfree(buf);
+	return 0;
+exit_supported:
+	kfree(buf);
+	return 1;
+}
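
The walk above follows the standard INQUIRY VPD page-0 layout: byte 3
holds the page-list length and the supported page codes start at byte
4.  A self-contained sketch of the same scan over a canned response
(the sample buffer is made up):

#include <stdint.h>
#include <stdio.h>

static int vpd_has_page(const uint8_t *buf, uint8_t page)
{
	int i, pages = buf[3];	/* byte 3 = number of page codes */

	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			return 1;
	return 0;
}

int main(void)
{
	/* hypothetical response advertising pages 0x00, 0x83, 0xC1 */
	uint8_t buf[8] = { 0x00, 0x00, 0x00, 3, 0x00, 0x83, 0xC1, 0x00 };

	printf("page 0xC1 supported: %d\n", vpd_has_page(buf, 0xC1));
	return 0;
}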
+
+static void hpsa_get_ioaccel_status(struct ctlr_info *h,
+	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+	int rc;
+	unsigned char *buf;
+	u8 ioaccel_status;
+
+	this_device->offload_config = 0;
+	this_device->offload_enabled = 0;
+
+	buf = kzalloc(64, GFP_KERNEL);
+	if (!buf)
+		return;
+	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
+		goto out;
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
+	if (rc != 0)
+		goto out;
+
+#define IOACCEL_STATUS_BYTE 4
+#define OFFLOAD_CONFIGURED_BIT 0x01
+#define OFFLOAD_ENABLED_BIT 0x02
+	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+	this_device->offload_config =
+		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+	if (this_device->offload_config) {
+		this_device->offload_enabled =
+			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+		if (hpsa_get_raid_map(h, scsi3addr, this_device))
+			this_device->offload_enabled = 0;
+	}
+out:
+	kfree(buf);
+	return;
+}
+
 /* Get the device id from inquiry page 0x83 */
 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
 	unsigned char *device_id, int buflen)
@@ -1644,7 +2418,7 @@
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
 		return -1;
-	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
 	if (rc == 0)
 		memcpy(device_id, &buf[8], buflen);
 	kfree(buf);
@@ -1678,8 +2452,16 @@
 	ei = c->err_info;
 	if (ei->CommandStatus != 0 &&
 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		hpsa_scsi_interpret_error(c);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
+	} else {
+		if (buf->extended_response_flag != extended_response) {
+			dev_err(&h->pdev->dev,
+				"report luns requested format %u, got %u\n",
+				extended_response,
+				buf->extended_response_flag);
+			rc = -1;
+		}
 	}
 out:
 	cmd_special_free(h, c);
@@ -1707,6 +2489,117 @@
 	device->lun = lun;
 }
 
+/* Use VPD inquiry to get details of volume status */
+static int hpsa_get_volume_status(struct ctlr_info *h,
+					unsigned char scsi3addr[])
+{
+	int rc;
+	int status;
+	int size;
+	unsigned char *buf;
+
+	buf = kzalloc(64, GFP_KERNEL);
+	if (!buf)
+		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+
+	/* Does controller have VPD for logical volume status? */
+	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
+		dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
+		goto exit_failed;
+	}
+
+	/* Get the size of the VPD return buffer */
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+					buf, HPSA_VPD_HEADER_SZ);
+	if (rc != 0) {
+		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+		goto exit_failed;
+	}
+	size = buf[3];
+
+	/* Now get the whole VPD buffer */
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+					buf, size + HPSA_VPD_HEADER_SZ);
+	if (rc != 0) {
+		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+		goto exit_failed;
+	}
+	status = buf[4]; /* status byte */
+
+	kfree(buf);
+	return status;
+exit_failed:
+	kfree(buf);
+	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+}
+
+/* Determine offline status of a volume.
+ * Return either:
+ *  0 (not offline)
+ * -1 (offline for unknown reasons)
+ *  # (integer code indicating one of several NOT READY states
+ *     describing why a volume is to be kept offline)
+ */
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
+					unsigned char scsi3addr[])
+{
+	struct CommandList *c;
+	unsigned char *sense, sense_key, asc, ascq;
+	int ldstat = 0;
+	u16 cmd_status;
+	u8 scsi_status;
+#define ASC_LUN_NOT_READY 0x04
+#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
+#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
+
+	c = cmd_alloc(h);
+	if (!c)
+		return 0;
+	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
+	hpsa_scsi_do_simple_cmd_core(h, c);
+	sense = c->err_info->SenseInfo;
+	sense_key = sense[2];
+	asc = sense[12];
+	ascq = sense[13];
+	cmd_status = c->err_info->CommandStatus;
+	scsi_status = c->err_info->ScsiStatus;
+	cmd_free(h, c);
+	/* Is the volume 'not ready'? */
+	if (cmd_status != CMD_TARGET_STATUS ||
+		scsi_status != SAM_STAT_CHECK_CONDITION ||
+		sense_key != NOT_READY ||
+		asc != ASC_LUN_NOT_READY)  {
+		return 0;
+	}
+
+	/* Determine the reason for not ready state */
+	ldstat = hpsa_get_volume_status(h, scsi3addr);
+
+	/* Keep volume offline in certain cases: */
+	switch (ldstat) {
+	case HPSA_LV_UNDERGOING_ERASE:
+	case HPSA_LV_UNDERGOING_RPI:
+	case HPSA_LV_PENDING_RPI:
+	case HPSA_LV_ENCRYPTED_NO_KEY:
+	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+	case HPSA_LV_UNDERGOING_ENCRYPTION:
+	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+		return ldstat;
+	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
+		/* If VPD status page isn't available,
+		 * use ASC/ASCQ to determine state
+		 */
+		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
+			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
+			return ldstat;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
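
In short: a TEST UNIT READY that fails with sense NOT READY / ASC 0x04
is refined by the VPD status page or, when that page is unsupported,
by the ASCQ alone.  A sketch of only the ASCQ fallback (constant values
match the #defines above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define NOT_READY			0x02	/* sense key */
#define ASC_LUN_NOT_READY		0x04
#define ASCQ_FORMAT_IN_PROGRESS		0x04

static int keep_offline(uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	if (sense_key != NOT_READY || asc != ASC_LUN_NOT_READY)
		return 0;	/* volume is usable */
	return ascq == ASCQ_FORMAT_IN_PROGRESS;
}

int main(void)
{
	/* format in progress: keep the volume offline */
	printf("%d\n", keep_offline(NOT_READY, 0x04, 0x04));
	return 0;
}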
+
 static int hpsa_update_device_info(struct ctlr_info *h,
 	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
 	unsigned char *is_OBDR_device)
@@ -1745,10 +2638,18 @@
 		sizeof(this_device->device_id));
 
 	if (this_device->devtype == TYPE_DISK &&
-		is_logical_dev_addr_mode(scsi3addr))
+		is_logical_dev_addr_mode(scsi3addr)) {
 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
-	else
+		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
+			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
+		this_device->volume_offline =
+			hpsa_volume_offline(h, scsi3addr);
+	} else {
 		this_device->raid_level = RAID_UNKNOWN;
+		this_device->offload_config = 0;
+		this_device->offload_enabled = 0;
+		this_device->volume_offline = 0;
+	}
 
 	if (is_OBDR_device) {
 		/* See if this is a One-Button-Disaster-Recovery device
@@ -1878,6 +2779,105 @@
 }
 
 /*
+ * Get address of physical disk used for an ioaccel2 mode command:
+ *	1. Extract ioaccel2 handle from the command.
+ *	2. Find a matching ioaccel2 handle from list of physical disks.
+ *	3. Return:
+ *		1 and set scsi3addr to address of matching physical
+ *		0 if no matching physical disk was found.
+ */
+static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
+	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
+{
+	struct ReportExtendedLUNdata *physicals = NULL;
+	int responsesize = 24;	/* size of physical extended response */
+	int extended = 2;	/* flag forces reporting 'other dev info'. */
+	int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
+	u32 nphysicals = 0;	/* number of reported physical devs */
+	int found = 0;		/* found match (1) or not (0) */
+	u32 find;		/* handle we need to match */
+	int i;
+	struct scsi_cmnd *scmd;	/* scsi command within request being aborted */
+	struct hpsa_scsi_dev_t *d; /* device of request being aborted */
+	struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
+	u32 it_nexus;		/* 4 byte device handle for the ioaccel2 cmd */
+	u32 scsi_nexus;		/* 4 byte device handle for the ioaccel2 cmd */
+
+	if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
+		return 0; /* no match */
+
+	/* point to the ioaccel2 device handle */
+	c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
+	if (c2a == NULL)
+		return 0; /* no match */
+
+	scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
+	if (scmd == NULL)
+		return 0; /* no match */
+
+	d = scmd->device->hostdata;
+	if (d == NULL)
+		return 0; /* no match */
+
+	it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
+	scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
+	find = c2a->scsi_nexus;
+
+	if (h->raid_offload_debug > 0)
+		dev_info(&h->pdev->dev,
+			"%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+			__func__, scsi_nexus,
+			d->device_id[0], d->device_id[1], d->device_id[2],
+			d->device_id[3], d->device_id[4], d->device_id[5],
+			d->device_id[6], d->device_id[7], d->device_id[8],
+			d->device_id[9], d->device_id[10], d->device_id[11],
+			d->device_id[12], d->device_id[13], d->device_id[14],
+			d->device_id[15]);
+
+	/* Get the list of physical devices */
+	physicals = kzalloc(reportsize, GFP_KERNEL);
+	if (physicals == NULL)
+		return 0; /* no match */
+	if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
+		reportsize, extended)) {
+		dev_err(&h->pdev->dev,
+			"Can't lookup %s device handle: report physical LUNs failed.\n",
+			"HP SSD Smart Path");
+		kfree(physicals);
+		return 0;
+	}
+	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
+							responsesize;
+
+	/* find ioaccel2 handle in list of physicals: */
+	for (i = 0; i < nphysicals; i++) {
+		/* handle is in bytes 20-23 of each 24-byte lun entry */
+		if (memcmp(&((struct ReportExtendedLUNdata *)
+				physicals)->LUN[i][20], &find, 4) != 0) {
+			continue; /* didn't match */
+		}
+		found = 1;
+		memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
+					physicals)->LUN[i][0], 8);
+		if (h->raid_offload_debug > 0)
+			dev_info(&h->pdev->dev,
+				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+				__func__, find,
+				((struct ReportExtendedLUNdata *)
+					physicals)->LUN[i][20],
+				scsi3addr[0], scsi3addr[1], scsi3addr[2],
+				scsi3addr[3], scsi3addr[4], scsi3addr[5],
+				scsi3addr[6], scsi3addr[7]);
+		break; /* found it */
+	}
+
+	kfree(physicals);
+	return found;
+}
+
+/*
  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
  * logdev.  The number of luns in physdev and logdev are returned in
  * *nphysicals and *nlogicals, respectively.
@@ -1885,14 +2885,26 @@
  */
 static int hpsa_gather_lun_info(struct ctlr_info *h,
 	int reportlunsize,
-	struct ReportLUNdata *physdev, u32 *nphysicals,
+	struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
 	struct ReportLUNdata *logdev, u32 *nlogicals)
 {
-	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
+	int physical_entry_size = 8;
+
+	*physical_mode = 0;
+
+	/* For I/O accelerator mode we need to read physical device handles */
+	if (h->transMethod & CFGTBL_Trans_io_accel1 ||
+		h->transMethod & CFGTBL_Trans_io_accel2) {
+		*physical_mode = HPSA_REPORT_PHYS_EXTENDED;
+		physical_entry_size = 24;
+	}
+	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
+							*physical_mode)) {
 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
 		return -1;
 	}
-	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
+	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
+							physical_entry_size;
 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
 			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
@@ -1923,7 +2935,8 @@
 }
 
 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
-	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
+	int nphysicals, int nlogicals,
+	struct ReportExtendedLUNdata *physdev_list,
 	struct ReportLUNdata *logdev_list)
 {
 	/* Helper function, figure out where the LUN ID info is coming from
@@ -1947,6 +2960,24 @@
 	return NULL;
 }
 
+static int hpsa_hba_mode_enabled(struct ctlr_info *h)
+{
+	int rc;
+	struct bmic_controller_parameters *ctlr_params;
+	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
+		GFP_KERNEL);
+
+	if (!ctlr_params)
+		return 0;
+	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
+		sizeof(struct bmic_controller_parameters));
+	if (rc != 0) {
+		kfree(ctlr_params);
+		return 0;
+	}
+	rc = ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
+	kfree(ctlr_params);
+	return rc;
+}
+
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 {
 	/* the idea here is we could get notified
@@ -1959,16 +2990,18 @@
 	 * tell which devices we already know about, vs. new
 	 * devices, vs.  disappearing devices.
 	 */
-	struct ReportLUNdata *physdev_list = NULL;
+	struct ReportExtendedLUNdata *physdev_list = NULL;
 	struct ReportLUNdata *logdev_list = NULL;
 	u32 nphysicals = 0;
 	u32 nlogicals = 0;
+	int physical_mode = 0;
 	u32 ndev_allocated = 0;
 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
 	int ncurrent = 0;
-	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
 	int i, n_ext_target_devs, ndevs_to_allocate;
 	int raid_ctlr_position;
+	u8 rescan_hba_mode;
 	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
 
 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -1982,8 +3015,18 @@
 	}
 	memset(lunzerobits, 0, sizeof(lunzerobits));
 
-	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
-			logdev_list, &nlogicals))
+	rescan_hba_mode = hpsa_hba_mode_enabled(h);
+
+	if (!h->hba_mode_enabled && rescan_hba_mode)
+		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
+	else if (h->hba_mode_enabled && !rescan_hba_mode)
+		dev_warn(&h->pdev->dev, "HBA mode disabled\n");
+
+	h->hba_mode_enabled = rescan_hba_mode;
+
+	if (hpsa_gather_lun_info(h, reportlunsize,
+			(struct ReportLUNdata *) physdev_list, &nphysicals,
+			&physical_mode, logdev_list, &nlogicals))
 		goto out;
 
 	/* We might see up to the maximum number of logical and physical disks
@@ -2064,9 +3107,28 @@
 				ncurrent++;
 			break;
 		case TYPE_DISK:
-			if (i < nphysicals)
+			if (h->hba_mode_enabled) {
+				/* never use raid mapper in HBA mode */
+				this_device->offload_enabled = 0;
+				ncurrent++;
 				break;
-			ncurrent++;
+			} else if (h->acciopath_status) {
+				if (i >= nphysicals) {
+					ncurrent++;
+					break;
+				}
+			} else {
+				if (i < nphysicals)
+					break;
+				ncurrent++;
+				break;
+			}
+			if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
+				memcpy(&this_device->ioaccel_handle,
+					&lunaddrbytes[20],
+					sizeof(this_device->ioaccel_handle));
+				ncurrent++;
+			}
 			break;
 		case TYPE_TAPE:
 		case TYPE_MEDIUM_CHANGER:
@@ -2136,7 +3198,7 @@
 		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
 		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
 		curr_sg->Len = len;
-		curr_sg->Ext = 0;  /* we are not chaining */
+		curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
 		curr_sg++;
 	}
 
@@ -2160,6 +3222,726 @@
 	return 0;
 }
 
+#define IO_ACCEL_INELIGIBLE (1)
+static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
+{
+	int is_write = 0;
+	u32 block;
+	u32 block_cnt;
+
+	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
+	switch (cdb[0]) {
+	case WRITE_6:
+	case WRITE_12:
+		is_write = 1;
+	case READ_6:
+	case READ_12:
+		if (*cdb_len == 6) {
+			block = (((u32) cdb[1] & 0x1f) << 16) |
+				(((u32) cdb[2]) << 8) |
+				cdb[3];
+			block_cnt = cdb[4];
+		} else {
+			BUG_ON(*cdb_len != 12);
+			block = (((u32) cdb[2]) << 24) |
+				(((u32) cdb[3]) << 16) |
+				(((u32) cdb[4]) << 8) |
+				cdb[5];
+			block_cnt =
+				(((u32) cdb[6]) << 24) |
+				(((u32) cdb[7]) << 16) |
+				(((u32) cdb[8]) << 8) |
+				cdb[9];
+		}
+		if (block_cnt > 0xffff)
+			return IO_ACCEL_INELIGIBLE;
+
+		cdb[0] = is_write ? WRITE_10 : READ_10;
+		cdb[1] = 0;
+		cdb[2] = (u8) (block >> 24);
+		cdb[3] = (u8) (block >> 16);
+		cdb[4] = (u8) (block >> 8);
+		cdb[5] = (u8) (block);
+		cdb[6] = 0;
+		cdb[7] = (u8) (block_cnt >> 8);
+		cdb[8] = (u8) (block_cnt);
+		cdb[9] = 0;
+		*cdb_len = 10;
+		break;
+	}
+	return 0;
+}
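
To make the fixup concrete: a 21-bit READ(6) LBA (low five bits of
byte 1, then bytes 2-3) is re-encoded as a READ(10) with a 4-byte LBA
and 2-byte count.  A standalone sketch with a made-up LBA and count
(opcode values as in scsi/scsi.h):

#include <stdint.h>
#include <stdio.h>

#define READ_6	0x08
#define READ_10	0x28

int main(void)
{
	/* READ(6) for LBA 0x12345, 16 blocks (illustrative values) */
	uint8_t cdb[16] = { READ_6, 0x01, 0x23, 0x45, 16, 0 };
	uint32_t block = ((uint32_t)(cdb[1] & 0x1f) << 16) |
			 ((uint32_t)cdb[2] << 8) | cdb[3];
	uint32_t cnt = cdb[4];

	/* rewrite in place as READ(10) */
	cdb[0] = READ_10;
	cdb[1] = 0;
	cdb[2] = (uint8_t)(block >> 24);
	cdb[3] = (uint8_t)(block >> 16);
	cdb[4] = (uint8_t)(block >> 8);
	cdb[5] = (uint8_t)block;
	cdb[6] = 0;
	cdb[7] = (uint8_t)(cnt >> 8);
	cdb[8] = (uint8_t)cnt;
	cdb[9] = 0;

	printf("LBA=%#x count=%u opcode=%#x\n", block, cnt, cdb[0]);
	return 0;
}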
+
+static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+	unsigned int len;
+	unsigned int total_len = 0;
+	struct scatterlist *sg;
+	u64 addr64;
+	int use_sg, i;
+	struct SGDescriptor *curr_sg;
+	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
+
+	/* TODO: implement chaining support */
+	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+		return IO_ACCEL_INELIGIBLE;
+
+	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
+
+	if (fixup_ioaccel_cdb(cdb, &cdb_len))
+		return IO_ACCEL_INELIGIBLE;
+
+	c->cmd_type = CMD_IOACCEL1;
+
+	/* Adjust the DMA address to point to the accelerated command buffer */
+	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
+				(c->cmdindex * sizeof(*cp));
+	BUG_ON(c->busaddr & 0x0000007F);
+
+	use_sg = scsi_dma_map(cmd);
+	if (use_sg < 0)
+		return use_sg;
+
+	if (use_sg) {
+		curr_sg = cp->SG;
+		scsi_for_each_sg(cmd, sg, use_sg, i) {
+			addr64 = (u64) sg_dma_address(sg);
+			len  = sg_dma_len(sg);
+			total_len += len;
+			curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
+			curr_sg->Addr.upper =
+				(u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
+			curr_sg->Len = len;
+
+			if (i == (scsi_sg_count(cmd) - 1))
+				curr_sg->Ext = HPSA_SG_LAST;
+			else
+				curr_sg->Ext = 0;  /* we are not chaining */
+			curr_sg++;
+		}
+
+		switch (cmd->sc_data_direction) {
+		case DMA_TO_DEVICE:
+			control |= IOACCEL1_CONTROL_DATA_OUT;
+			break;
+		case DMA_FROM_DEVICE:
+			control |= IOACCEL1_CONTROL_DATA_IN;
+			break;
+		case DMA_NONE:
+			control |= IOACCEL1_CONTROL_NODATAXFER;
+			break;
+		default:
+			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+			cmd->sc_data_direction);
+			BUG();
+			break;
+		}
+	} else {
+		control |= IOACCEL1_CONTROL_NODATAXFER;
+	}
+
+	c->Header.SGList = use_sg;
+	/* Fill out the command structure to submit */
+	cp->dev_handle = ioaccel_handle & 0xFFFF;
+	cp->transfer_len = total_len;
+	cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
+			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
+	cp->control = control;
+	memcpy(cp->CDB, cdb, cdb_len);
+	memcpy(cp->CISS_LUN, scsi3addr, 8);
+	/* Tag was already set at init time. */
+	enqueue_cmd_and_start_io(h, c);
+	return 0;
+}
+
+/*
+ * Queue a command directly to a device behind the controller using the
+ * I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+
+	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
+		cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
+}
+
+/*
+ * Set encryption parameters for the ioaccel2 request
+ */
+static void set_encrypt_ioaccel2(struct ctlr_info *h,
+	struct CommandList *c, struct io_accel2_cmd *cp)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+	struct raid_map_data *map = &dev->raid_map;
+	u64 first_block;
+
+	BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+	/* Are we doing encryption on this device */
+	if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+		return;
+	/* Set the data encryption key index. */
+	cp->dekindex = map->dekindex;
+
+	/* Set the encryption enable flag, encoded into direction field. */
+	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
+
+	/* Set encryption tweak values based on logical block address
+	 * If block size is 512, tweak value is LBA.
+	 * For other block sizes, tweak is (LBA * block size)/ 512)
+	 */
+	switch (cmd->cmnd[0]) {
+	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
+	case WRITE_6:
+	case READ_6:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 8) |
+					cmd->cmnd[3];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 8) |
+					cmd->cmnd[3];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	case WRITE_10:
+	case READ_10:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 24) |
+				(((u64) cmd->cmnd[3]) << 16) |
+				(((u64) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
+	case WRITE_12:
+	case READ_12:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 24) |
+				(((u64) cmd->cmnd[3]) << 16) |
+				(((u64) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	case WRITE_16:
+	case READ_16:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[6]) << 24) |
+				(((u32) cmd->cmnd[7]) << 16) |
+				(((u32) cmd->cmnd[8]) << 8) |
+					cmd->cmnd[9];
+			cp->tweak_upper =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 56) |
+				(((u64) cmd->cmnd[3]) << 48) |
+				(((u64) cmd->cmnd[4]) << 40) |
+				(((u64) cmd->cmnd[5]) << 32) |
+				(((u64) cmd->cmnd[6]) << 24) |
+				(((u64) cmd->cmnd[7]) << 16) |
+				(((u64) cmd->cmnd[8]) << 8) |
+					cmd->cmnd[9];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	default:
+		dev_err(&h->pdev->dev,
+			"ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
+			__func__);
+		BUG();
+		break;
+	}
+}
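
The tweak rule above reduces to one line of arithmetic: on a
512-byte-block volume the tweak is the LBA itself; otherwise it is
(LBA * block size) / 512, split into 32-bit halves.  With assumed
values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical: LBA 1000 on a 4096-byte-block volume */
	uint64_t lba = 1000, blk = 4096;
	uint64_t tweak = (lba * blk) / 512;	/* = 8000 */

	printf("tweak_lower=%#x tweak_upper=%#x\n",
	       (uint32_t)tweak, (uint32_t)(tweak >> 32));
	return 0;
}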
+
+static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+	struct ioaccel2_sg_element *curr_sg;
+	int use_sg, i;
+	struct scatterlist *sg;
+	u64 addr64;
+	u32 len;
+	u32 total_len = 0;
+
+	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+		return IO_ACCEL_INELIGIBLE;
+
+	if (fixup_ioaccel_cdb(cdb, &cdb_len))
+		return IO_ACCEL_INELIGIBLE;
+	c->cmd_type = CMD_IOACCEL2;
+	/* Adjust the DMA address to point to the accelerated command buffer */
+	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
+				(c->cmdindex * sizeof(*cp));
+	BUG_ON(c->busaddr & 0x0000007F);
+
+	memset(cp, 0, sizeof(*cp));
+	cp->IU_type = IOACCEL2_IU_TYPE;
+
+	use_sg = scsi_dma_map(cmd);
+	if (use_sg < 0)
+		return use_sg;
+
+	if (use_sg) {
+		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
+		curr_sg = cp->sg;
+		scsi_for_each_sg(cmd, sg, use_sg, i) {
+			addr64 = (u64) sg_dma_address(sg);
+			len  = sg_dma_len(sg);
+			total_len += len;
+			curr_sg->address = cpu_to_le64(addr64);
+			curr_sg->length = cpu_to_le32(len);
+			curr_sg->reserved[0] = 0;
+			curr_sg->reserved[1] = 0;
+			curr_sg->reserved[2] = 0;
+			curr_sg->chain_indicator = 0;
+			curr_sg++;
+		}
+
+		switch (cmd->sc_data_direction) {
+		case DMA_TO_DEVICE:
+			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+			cp->direction |= IOACCEL2_DIR_DATA_OUT;
+			break;
+		case DMA_FROM_DEVICE:
+			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+			cp->direction |= IOACCEL2_DIR_DATA_IN;
+			break;
+		case DMA_NONE:
+			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+			cp->direction |= IOACCEL2_DIR_NO_DATA;
+			break;
+		default:
+			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+				cmd->sc_data_direction);
+			BUG();
+			break;
+		}
+	} else {
+		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+		cp->direction |= IOACCEL2_DIR_NO_DATA;
+	}
+
+	/* Set encryption parameters, if necessary */
+	set_encrypt_ioaccel2(h, c, cp);
+
+	cp->scsi_nexus = ioaccel_handle;
+	cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
+				DIRECT_LOOKUP_BIT;
+	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
+
+	/* fill in sg elements */
+	cp->sg_count = (u8) use_sg;
+
+	cp->data_len = cpu_to_le32(total_len);
+	cp->err_ptr = cpu_to_le64(c->busaddr +
+			offsetof(struct io_accel2_cmd, error_data));
+	cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
+
+	enqueue_cmd_and_start_io(h, c);
+	return 0;
+}
+
+/*
+ * Queue a command to the correct I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr)
+{
+	if (h->transMethod & CFGTBL_Trans_io_accel1)
+		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
+						cdb, cdb_len, scsi3addr);
+	else
+		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
+						cdb, cdb_len, scsi3addr);
+}
+
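+/* Steer *map_index into the mirror group selected by offload_to_mirror:
+ * each group sits data_disks_per_row entries further along the map row,
+ * wrapping back to the first group past the last layout map.
+ */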
+static void raid_map_helper(struct raid_map_data *map,
+		int offload_to_mirror, u32 *map_index, u32 *current_group)
+{
+	if (offload_to_mirror == 0)  {
+		/* use physical disk in the first mirrored group. */
+		*map_index %= map->data_disks_per_row;
+		return;
+	}
+	do {
+		/* determine mirror group that *map_index indicates */
+		*current_group = *map_index / map->data_disks_per_row;
+		if (offload_to_mirror == *current_group)
+			continue;
+		if (*current_group < (map->layout_map_count - 1)) {
+			/* select map index from next group */
+			*map_index += map->data_disks_per_row;
+			(*current_group)++;
+		} else {
+			/* select map index from first group */
+			*map_index %= map->data_disks_per_row;
+			*current_group = 0;
+		}
+	} while (offload_to_mirror != *current_group);
+}
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+	struct raid_map_data *map = &dev->raid_map;
+	struct raid_map_disk_data *dd = &map->data[0];
+	int is_write = 0;
+	u32 map_index;
+	u64 first_block, last_block;
+	u32 block_cnt;
+	u32 blocks_per_row;
+	u64 first_row, last_row;
+	u32 first_row_offset, last_row_offset;
+	u32 first_column, last_column;
+	u64 r0_first_row, r0_last_row;
+	u32 r5or6_blocks_per_row;
+	u64 r5or6_first_row, r5or6_last_row;
+	u32 r5or6_first_row_offset, r5or6_last_row_offset;
+	u32 r5or6_first_column, r5or6_last_column;
+	u32 total_disks_per_row;
+	u32 stripesize;
+	u32 first_group, last_group, current_group;
+	u32 map_row;
+	u32 disk_handle;
+	u64 disk_block;
+	u32 disk_block_cnt;
+	u8 cdb[16];
+	u8 cdb_len;
+#if BITS_PER_LONG == 32
+	u64 tmpdiv;
+#endif
+	int offload_to_mirror;
+
+	BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+	/* check for valid opcode, get LBA and block count */
+	switch (cmd->cmnd[0]) {
+	case WRITE_6:
+		is_write = 1;
+	case READ_6:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 8) |
+			cmd->cmnd[3];
+		block_cnt = cmd->cmnd[4];
+		break;
+	case WRITE_10:
+		is_write = 1;
+	case READ_10:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 24) |
+			(((u64) cmd->cmnd[3]) << 16) |
+			(((u64) cmd->cmnd[4]) << 8) |
+			cmd->cmnd[5];
+		block_cnt =
+			(((u32) cmd->cmnd[7]) << 8) |
+			cmd->cmnd[8];
+		break;
+	case WRITE_12:
+		is_write = 1;
+	case READ_12:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 24) |
+			(((u64) cmd->cmnd[3]) << 16) |
+			(((u64) cmd->cmnd[4]) << 8) |
+			cmd->cmnd[5];
+		block_cnt =
+			(((u32) cmd->cmnd[6]) << 24) |
+			(((u32) cmd->cmnd[7]) << 16) |
+			(((u32) cmd->cmnd[8]) << 8) |
+			cmd->cmnd[9];
+		break;
+	case WRITE_16:
+		is_write = 1;
+	case READ_16:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 56) |
+			(((u64) cmd->cmnd[3]) << 48) |
+			(((u64) cmd->cmnd[4]) << 40) |
+			(((u64) cmd->cmnd[5]) << 32) |
+			(((u64) cmd->cmnd[6]) << 24) |
+			(((u64) cmd->cmnd[7]) << 16) |
+			(((u64) cmd->cmnd[8]) << 8) |
+			cmd->cmnd[9];
+		block_cnt =
+			(((u32) cmd->cmnd[10]) << 24) |
+			(((u32) cmd->cmnd[11]) << 16) |
+			(((u32) cmd->cmnd[12]) << 8) |
+			cmd->cmnd[13];
+		break;
+	default:
+		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
+	}
+	BUG_ON(block_cnt == 0);
+	last_block = first_block + block_cnt - 1;
+
+	/* check for write to non-RAID-0 */
+	if (is_write && dev->raid_level != 0)
+		return IO_ACCEL_INELIGIBLE;
+
+	/* check for invalid block or wraparound */
+	if (last_block >= map->volume_blk_cnt || last_block < first_block)
+		return IO_ACCEL_INELIGIBLE;
+
+	/* calculate stripe information for the request */
+	blocks_per_row = map->data_disks_per_row * map->strip_size;
+#if BITS_PER_LONG == 32
+	tmpdiv = first_block;
+	(void) do_div(tmpdiv, blocks_per_row);
+	first_row = tmpdiv;
+	tmpdiv = last_block;
+	(void) do_div(tmpdiv, blocks_per_row);
+	last_row = tmpdiv;
+	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+	tmpdiv = first_row_offset;
+	(void) do_div(tmpdiv,  map->strip_size);
+	first_column = tmpdiv;
+	tmpdiv = last_row_offset;
+	(void) do_div(tmpdiv, map->strip_size);
+	last_column = tmpdiv;
+#else
+	first_row = first_block / blocks_per_row;
+	last_row = last_block / blocks_per_row;
+	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+	first_column = first_row_offset / map->strip_size;
+	last_column = last_row_offset / map->strip_size;
+#endif
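+	/* Illustrative example: with strip_size = 128 blocks and
+	 * data_disks_per_row = 3, blocks_per_row = 384; an I/O at
+	 * first_block = 500 maps to first_row = 1, row offset 116,
+	 * first_column = 0.
+	 */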
+
+	/* if this isn't a single row/column then give to the controller */
+	if ((first_row != last_row) || (first_column != last_column))
+		return IO_ACCEL_INELIGIBLE;
+
+	/* proceeding with driver mapping */
+	total_disks_per_row = map->data_disks_per_row +
+				map->metadata_disks_per_row;
+	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+				map->row_cnt;
+	map_index = (map_row * total_disks_per_row) + first_column;
+
+	switch (dev->raid_level) {
+	case HPSA_RAID_0:
+		break; /* nothing special to do */
+	case HPSA_RAID_1:
+		/* Handles load balance across RAID 1 members.
+		 * (2-drive R1 and R10 with even # of drives.)
+		 * Appropriate for SSDs, not optimal for HDDs
+		 */
+		BUG_ON(map->layout_map_count != 2);
+		if (dev->offload_to_mirror)
+			map_index += map->data_disks_per_row;
+		dev->offload_to_mirror = !dev->offload_to_mirror;
+		break;
+	case HPSA_RAID_ADM:
+		/* Handles N-way mirrors (R1-ADM)
+		 * and R10 with # of drives divisible by 3.
+		 */
+		BUG_ON(map->layout_map_count != 3);
+
+		offload_to_mirror = dev->offload_to_mirror;
+		raid_map_helper(map, offload_to_mirror,
+				&map_index, &current_group);
+		/* set mirror group to use next time */
+		offload_to_mirror =
+			(offload_to_mirror >= map->layout_map_count - 1)
+			? 0 : offload_to_mirror + 1;
+		/* FIXME: remove after debug/dev */
+		BUG_ON(offload_to_mirror >= map->layout_map_count);
+		dev_warn(&h->pdev->dev,
+			"DEBUG: Using physical disk map index %d from mirror group %d\n",
+			map_index, offload_to_mirror);
+		dev->offload_to_mirror = offload_to_mirror;
+		/* Avoid direct use of dev->offload_to_mirror within this
+		 * function since multiple threads might simultaneously
+		 * increment it beyond the range of map->layout_map_count - 1.
+		 */
+		break;
+	case HPSA_RAID_5:
+	case HPSA_RAID_6:
+		if (map->layout_map_count <= 1)
+			break;
+
+		/* Verify first and last block are in same RAID group */
+		r5or6_blocks_per_row =
+			map->strip_size * map->data_disks_per_row;
+		BUG_ON(r5or6_blocks_per_row == 0);
+		stripesize = r5or6_blocks_per_row * map->layout_map_count;
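+		/* e.g. a RAID 50 volume with layout_map_count = 2 parity
+		 * groups alternates groups every r5or6_blocks_per_row blocks.
+		 */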
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		first_group = do_div(tmpdiv, stripesize);
+		tmpdiv = first_group;
+		(void) do_div(tmpdiv, r5or6_blocks_per_row);
+		first_group = tmpdiv;
+		tmpdiv = last_block;
+		last_group = do_div(tmpdiv, stripesize);
+		tmpdiv = last_group;
+		(void) do_div(tmpdiv, r5or6_blocks_per_row);
+		last_group = tmpdiv;
+#else
+		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+#endif
+		if (first_group != last_group)
+			return IO_ACCEL_INELIGIBLE;
+
+		/* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		(void) do_div(tmpdiv, stripesize);
+		first_row = r5or6_first_row = r0_first_row = tmpdiv;
+		tmpdiv = last_block;
+		(void) do_div(tmpdiv, stripesize);
+		r5or6_last_row = r0_last_row = tmpdiv;
+#else
+		first_row = r5or6_first_row = r0_first_row =
+						first_block / stripesize;
+		r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+		if (r5or6_first_row != r5or6_last_row)
+			return IO_ACCEL_INELIGIBLE;
+
+		/* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		first_row_offset = do_div(tmpdiv, stripesize);
+		tmpdiv = first_row_offset;
+		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
+		r5or6_first_row_offset = first_row_offset;
+		tmpdiv = last_block;
+		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+		tmpdiv = r5or6_last_row_offset;
+		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+		tmpdiv = r5or6_first_row_offset;
+		(void) do_div(tmpdiv, map->strip_size);
+		first_column = r5or6_first_column = tmpdiv;
+		tmpdiv = r5or6_last_row_offset;
+		(void) do_div(tmpdiv, map->strip_size);
+		r5or6_last_column = tmpdiv;
+#else
+		first_row_offset = r5or6_first_row_offset =
+			(u32)((first_block % stripesize) %
+						r5or6_blocks_per_row);
+
+		r5or6_last_row_offset =
+			(u32)((last_block % stripesize) %
+						r5or6_blocks_per_row);
+
+		first_column = r5or6_first_column =
+			r5or6_first_row_offset / map->strip_size;
+		r5or6_last_column =
+			r5or6_last_row_offset / map->strip_size;
+#endif
+		if (r5or6_first_column != r5or6_last_column)
+			return IO_ACCEL_INELIGIBLE;
+
+		/* Request is eligible */
+		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+			map->row_cnt;
+
+		map_index = (first_group *
+			(map->row_cnt * total_disks_per_row)) +
+			(map_row * total_disks_per_row) + first_column;
+		break;
+	default:
+		return IO_ACCEL_INELIGIBLE;
+	}
+
+	disk_handle = dd[map_index].ioaccel_handle;
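+	/* Convert volume-relative geometry to a physical-disk LBA:
+	 * blocks preceding the volume, plus whole strips above this
+	 * row, plus the offset within the strip.
+	 */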
+	disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
+			(first_row_offset - (first_column * map->strip_size));
+	disk_block_cnt = block_cnt;
+
+	/* handle differing logical/physical block sizes */
+	if (map->phys_blk_shift) {
+		disk_block <<= map->phys_blk_shift;
+		disk_block_cnt <<= map->phys_blk_shift;
+	}
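+	/* The count must fit in 16 bits: the 10-byte CDB built below
+	 * has only a 16-bit transfer length field.
+	 */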
+	BUG_ON(disk_block_cnt > 0xffff);
+
+	/* build the new CDB for the physical disk I/O */
+	if (disk_block > 0xffffffff) {
+		cdb[0] = is_write ? WRITE_16 : READ_16;
+		cdb[1] = 0;
+		cdb[2] = (u8) (disk_block >> 56);
+		cdb[3] = (u8) (disk_block >> 48);
+		cdb[4] = (u8) (disk_block >> 40);
+		cdb[5] = (u8) (disk_block >> 32);
+		cdb[6] = (u8) (disk_block >> 24);
+		cdb[7] = (u8) (disk_block >> 16);
+		cdb[8] = (u8) (disk_block >> 8);
+		cdb[9] = (u8) (disk_block);
+		cdb[10] = (u8) (disk_block_cnt >> 24);
+		cdb[11] = (u8) (disk_block_cnt >> 16);
+		cdb[12] = (u8) (disk_block_cnt >> 8);
+		cdb[13] = (u8) (disk_block_cnt);
+		cdb[14] = 0;
+		cdb[15] = 0;
+		cdb_len = 16;
+	} else {
+		cdb[0] = is_write ? WRITE_10 : READ_10;
+		cdb[1] = 0;
+		cdb[2] = (u8) (disk_block >> 24);
+		cdb[3] = (u8) (disk_block >> 16);
+		cdb[4] = (u8) (disk_block >> 8);
+		cdb[5] = (u8) (disk_block);
+		cdb[6] = 0;
+		cdb[7] = (u8) (disk_block_cnt >> 8);
+		cdb[8] = (u8) (disk_block_cnt);
+		cdb[9] = 0;
+		cdb_len = 10;
+	}
+	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
+						dev->scsi3addr);
+}
 
 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	void (*done)(struct scsi_cmnd *))
@@ -2169,6 +3951,7 @@
 	unsigned char scsi3addr[8];
 	struct CommandList *c;
 	unsigned long flags;
+	int rc = 0;
 
 	/* Get the ptr to our adapter structure out of cmd->host. */
 	h = sdev_to_hba(cmd->device);
@@ -2203,6 +3986,32 @@
 
 	c->cmd_type = CMD_SCSI;
 	c->scsi_cmd = cmd;
+
+	/* Call alternate submit routine for I/O accelerated commands.
+	 * Retries always go down the normal I/O path.
+	 */
+	if (likely(cmd->retries == 0 &&
+		cmd->request->cmd_type == REQ_TYPE_FS &&
+		h->acciopath_status)) {
+		if (dev->offload_enabled) {
+			rc = hpsa_scsi_ioaccel_raid_map(h, c);
+			if (rc == 0)
+				return 0; /* Sent on ioaccel path */
+			if (rc < 0) {   /* scsi_dma_map failed. */
+				cmd_free(h, c);
+				return SCSI_MLQUEUE_HOST_BUSY;
+			}
+		} else if (dev->ioaccel_handle) {
+			rc = hpsa_scsi_ioaccel_direct_map(h, c);
+			if (rc == 0)
+				return 0; /* Sent on direct map path */
+			if (rc < 0) {   /* scsi_dma_map failed. */
+				cmd_free(h, c);
+				return SCSI_MLQUEUE_HOST_BUSY;
+			}
+		}
+	}
+
 	c->Header.ReplyQueue = 0;  /* unused in simple mode */
 	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
 	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
@@ -2262,11 +4071,38 @@
 
 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
 
+static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	/*
+	 * Don't let rescans be initiated on a controller known
+	 * to be locked up.  If the controller locks up *during*
+	 * a rescan, that thread is probably hosed, but at least
+	 * we can prevent new rescan threads from piling up on a
+	 * locked up controller.
+	 */
+	spin_lock_irqsave(&h->lock, flags);
+	if (unlikely(h->lockup_detected)) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		spin_lock_irqsave(&h->scan_lock, flags);
+		h->scan_finished = 1;
+		wake_up_all(&h->scan_wait_queue);
+		spin_unlock_irqrestore(&h->scan_lock, flags);
+		return 1;
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return 0;
+}
+
 static void hpsa_scan_start(struct Scsi_Host *sh)
 {
 	struct ctlr_info *h = shost_to_hba(sh);
 	unsigned long flags;
 
+	if (do_not_scan_if_controller_locked_up(h))
+		return;
+
 	/* wait until any scan already in progress is finished. */
 	while (1) {
 		spin_lock_irqsave(&h->scan_lock, flags);
@@ -2283,6 +4119,9 @@
 	h->scan_finished = 0; /* mark scan as in progress */
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 
+	if (do_not_scan_if_controller_locked_up(h))
+		return;
+
 	hpsa_update_scsi_devices(h, h->scsi_host->host_no);
 
 	spin_lock_irqsave(&h->scan_lock, flags);
@@ -2346,7 +4185,10 @@
 	sh->max_lun = HPSA_MAX_LUN;
 	sh->max_id = HPSA_MAX_LUN;
 	sh->can_queue = h->nr_cmds;
-	sh->cmd_per_lun = h->nr_cmds;
+	if (h->hba_mode_enabled)
+		sh->cmd_per_lun = 7;
+	else
+		sh->cmd_per_lun = h->nr_cmds;
 	sh->sg_tablesize = h->maxsgentries;
 	h->scsi_host = sh;
 	sh->hostdata[0] = (unsigned long) h;
@@ -2372,7 +4214,7 @@
 static int wait_for_device_to_become_ready(struct ctlr_info *h,
 	unsigned char lunaddr[])
 {
-	int rc = 0;
+	int rc;
 	int count = 0;
 	int waittime = 1; /* seconds */
 	struct CommandList *c;
@@ -2392,6 +4234,7 @@
 		 */
 		msleep(1000 * waittime);
 		count++;
+		rc = 0; /* Device ready. */
 
 		/* Increase wait time with each try, up to a point. */
 		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
@@ -2448,7 +4291,7 @@
 	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
 		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
 	/* send a reset to the SCSI LUN which the command was sent to */
-	rc = hpsa_send_reset(h, dev->scsi3addr);
+	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
 	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
 		return SUCCESS;
 
@@ -2471,12 +4314,36 @@
 	tag[7] = original_tag[4];
 }
 
+static void hpsa_get_tag(struct ctlr_info *h,
+	struct CommandList *c, u32 *taglower, u32 *tagupper)
+{
+	if (c->cmd_type == CMD_IOACCEL1) {
+		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
+			&h->ioaccel_cmd_pool[c->cmdindex];
+		*tagupper = cm1->Tag.upper;
+		*taglower = cm1->Tag.lower;
+		return;
+	}
+	if (c->cmd_type == CMD_IOACCEL2) {
+		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
+			&h->ioaccel2_cmd_pool[c->cmdindex];
+		/* upper tag not used in ioaccel2 mode */
+		memset(tagupper, 0, sizeof(*tagupper));
+		*taglower = cm2->Tag;
+		return;
+	}
+	*tagupper = c->Header.Tag.upper;
+	*taglower = c->Header.Tag.lower;
+}
+
 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
 	struct CommandList *abort, int swizzle)
 {
 	int rc = IO_OK;
 	struct CommandList *c;
 	struct ErrorInfo *ei;
+	u32 tagupper, taglower;
 
 	c = cmd_special_alloc(h);
 	if (c == NULL) {	/* trouble... */
@@ -2490,8 +4357,9 @@
 	if (swizzle)
 		swizzle_abort_tag(&c->Request.CDB[4]);
 	hpsa_scsi_do_simple_cmd_core(h, c);
+	hpsa_get_tag(h, abort, &taglower, &tagupper);
 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
-		__func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
+		__func__, tagupper, taglower);
 	/* no unmap needed here because no data xfer. */
 
 	ei = c->err_info;
@@ -2503,15 +4371,14 @@
 		break;
 	default:
 		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
-			__func__, abort->Header.Tag.upper,
-			abort->Header.Tag.lower);
-		hpsa_scsi_interpret_error(c);
+			__func__, tagupper, taglower);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
 		break;
 	}
 	cmd_special_free(h, c);
-	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
-		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
+		__func__, tagupper, taglower);
 	return rc;
 }
 
@@ -2565,6 +4432,83 @@
 	return NULL;
 }
 
+/* ioaccel2 path firmware cannot handle abort task requests.
+ * Change abort requests to physical target reset, and send to the
+ * address of the physical disk used for the ioaccel 2 command.
+ * Return 0 on success (IO_OK)
+ *	 -1 on failure
+ */
+
+static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
+	unsigned char *scsi3addr, struct CommandList *abort)
+{
+	int rc = IO_OK;
+	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
+	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
+	unsigned char *psa = &phys_scsi3addr[0];
+
+	/* Get a pointer to the hpsa logical device. */
+	scmd = (struct scsi_cmnd *) abort->scsi_cmd;
+	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
+	if (dev == NULL) {
+		dev_warn(&h->pdev->dev,
+			"Cannot abort: no device pointer for command.\n");
+		return -1; /* not abortable */
+	}
+
+	if (h->raid_offload_debug > 0)
+		dev_info(&h->pdev->dev,
+			"Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
+			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
+			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+
+	if (!dev->offload_enabled) {
+		dev_warn(&h->pdev->dev,
+			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
+		return -1; /* not abortable */
+	}
+
+	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
+	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
+		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
+		return -1; /* not abortable */
+	}
+
+	/* send the reset */
+	if (h->raid_offload_debug > 0)
+		dev_info(&h->pdev->dev,
+			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
+	if (rc != 0) {
+		dev_warn(&h->pdev->dev,
+			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+		return rc; /* failed to reset */
+	}
+
+	/* wait for device to recover */
+	if (wait_for_device_to_become_ready(h, psa) != 0) {
+		dev_warn(&h->pdev->dev,
+			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+		return -1;  /* failed to recover */
+	}
+
+	/* device recovered */
+	dev_info(&h->pdev->dev,
+		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		psa[0], psa[1], psa[2], psa[3],
+		psa[4], psa[5], psa[6], psa[7]);
+
+	return rc; /* success */
+}
+
 /* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
  * tell which kind we're dealing with, so we send the abort both ways.  There
  * shouldn't be any collisions between swizzled and unswizzled tags due to the
@@ -2578,6 +4522,14 @@
 	struct CommandList *c;
 	int rc = 0, rc2 = 0;
 
+	/* I/O accelerator mode 2 commands should be aborted via the
+	 * accelerated path, since RAID path is unaware of these commands,
+	 * but underlying firmware can't handle abort TMF.
+	 * Change abort to physical device reset.
+	 */
+	if (abort->cmd_type == CMD_IOACCEL2)
+		return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
+
 	/* we do not expect to find the swizzled tag in our queue, but
 	 * check anyway just to be sure the assumptions which make this
 	 * the case haven't become wrong.
@@ -2616,6 +4568,7 @@
 	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
 	char msg[256];		/* For debug messaging. */
 	int ml = 0;
+	u32 tagupper, taglower;
 
 	/* Find the controller of the command to be aborted */
 	h = sdev_to_hba(sc->device);
@@ -2648,9 +4601,8 @@
 				msg);
 		return FAILED;
 	}
-
-	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
-		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	hpsa_get_tag(h, abort, &taglower, &tagupper);
+	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
 	as  = (struct scsi_cmnd *) abort->scsi_cmd;
 	if (as != NULL)
 		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
@@ -2776,6 +4728,7 @@
 		return NULL;
 	memset(c, 0, sizeof(*c));
 
+	c->cmd_type = CMD_SCSI;
 	c->cmdindex = -1;
 
 	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
@@ -3038,7 +4991,7 @@
 		c->SG[0].Addr.lower = temp64.val32.lower;
 		c->SG[0].Addr.upper = temp64.val32.upper;
 		c->SG[0].Len = iocommand.buf_size;
-		c->SG[0].Ext = 0; /* we are not chaining*/
+		c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
 	}
 	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
 	if (iocommand.buf_size > 0)
@@ -3168,8 +5121,7 @@
 			c->SG[i].Addr.lower = temp64.val32.lower;
 			c->SG[i].Addr.upper = temp64.val32.upper;
 			c->SG[i].Len = buff_size[i];
-			/* we are not chaining */
-			c->SG[i].Ext = 0;
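+			/* Only the final SG element is marked HPSA_SG_LAST. */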
+			c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
 		}
 	}
 	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
@@ -3304,7 +5256,7 @@
 }
 
 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
-	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
 	int cmd_type)
 {
 	int pci_dir = XFER_NONE;
@@ -3327,9 +5279,9 @@
 		switch (cmd) {
 		case HPSA_INQUIRY:
 			/* are we trying to read a vital product page */
-			if (page_code != 0) {
+			if (page_code & VPD_PAGE) {
 				c->Request.CDB[1] = 0x01;
-				c->Request.CDB[2] = page_code;
+				c->Request.CDB[2] = (page_code & 0xff);
 			}
 			c->Request.CDBLen = 6;
 			c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3369,6 +5321,28 @@
 			c->Request.Type.Direction = XFER_NONE;
 			c->Request.Timeout = 0;
 			break;
+		case HPSA_GET_RAID_MAP:
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = HPSA_CISS_READ;
+			c->Request.CDB[1] = cmd;
+			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+			c->Request.CDB[7] = (size >> 16) & 0xFF;
+			c->Request.CDB[8] = (size >> 8) & 0xFF;
+			c->Request.CDB[9] = size & 0xFF;
+			break;
+		case BMIC_SENSE_CONTROLLER_PARAMETERS:
+			c->Request.CDBLen = 10;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = BMIC_READ;
+			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
+			c->Request.CDB[7] = (size >> 16) & 0xFF;
+			c->Request.CDB[8] = (size >> 8) & 0xFF;
+			break;
 		default:
 			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
 			BUG();
@@ -3562,7 +5536,8 @@
 	spin_unlock_irqrestore(&h->lock, flags);
 
 	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
-	if (likely(c->cmd_type == CMD_SCSI))
+	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
+			|| c->cmd_type == CMD_IOACCEL2))
 		complete_scsi_command(c);
 	else if (c->cmd_type == CMD_IOCTL_PEND)
 		complete(c->waiting);
@@ -4169,21 +6144,24 @@
 		goto default_int_mode;
 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
 		dev_info(&h->pdev->dev, "MSIX\n");
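+		/* Request one vector per reply queue; if fewer are
+		 * available, retry with however many the system grants.
+		 */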
+		h->msix_vector = MAX_REPLY_QUEUES;
 		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
-						MAX_REPLY_QUEUES);
-		if (!err) {
-			for (i = 0; i < MAX_REPLY_QUEUES; i++)
-				h->intr[i] = hpsa_msix_entries[i].vector;
-			h->msix_vector = 1;
-			return;
-		}
+				      h->msix_vector);
 		if (err > 0) {
 			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
 			       "available\n", err);
-			goto default_int_mode;
+			h->msix_vector = err;
+			err = pci_enable_msix(h->pdev, hpsa_msix_entries,
+					      h->msix_vector);
+		}
+		if (!err) {
+			for (i = 0; i < h->msix_vector; i++)
+				h->intr[i] = hpsa_msix_entries[i].vector;
+			return;
 		} else {
 			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
 			       err);
+			h->msix_vector = 0;
 			goto default_int_mode;
 		}
 	}
@@ -4336,6 +6314,7 @@
 	hpsa_get_max_perf_mode_cmds(h);
 	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
 	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
 	/*
-	 * Limit in-command s/g elements to 32 save dma'able memory.
-	 * Howvever spec says if 0, use 31
+	 * Limit in-command s/g elements to 32 to save dma'able memory.
+	 * However spec says if 0, use 31
@@ -4352,6 +6331,10 @@
 
 	/* Find out what task management functions are supported and cache */
 	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
+	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
+		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
+	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
 }
 
 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
@@ -4390,6 +6373,23 @@
 	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
 }
 
+static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
+{
+	int i;
+	u32 doorbell_value;
+	unsigned long flags;
+	/* wait until the clear_event_notify bit 6 is cleared by controller. */
+	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+		spin_lock_irqsave(&h->lock, flags);
+		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+		spin_unlock_irqrestore(&h->lock, flags);
+		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
+			break;
+		/* delay and try again */
+		msleep(20);
+	}
+}
+
 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
 {
 	int i;
@@ -4420,18 +6420,20 @@
 		return -ENOTSUPP;
 
 	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+
 	/* Update the field, and then ring the doorbell */
 	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 	hpsa_wait_for_mode_change_ack(h);
 	print_cfg_table(&h->pdev->dev, h->cfgtable);
-	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
-		dev_warn(&h->pdev->dev,
-			"unable to get board into simple mode\n");
-		return -ENODEV;
-	}
+	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
+		goto error;
 	h->transMethod = CFGTBL_Trans_Simple;
 	return 0;
+error:
+	dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
+	return -ENODEV;
 }
 
 static int hpsa_pci_init(struct ctlr_info *h)
@@ -4577,11 +6579,19 @@
 		pci_free_consistent(h->pdev,
 			    h->nr_cmds * sizeof(struct CommandList),
 			    h->cmd_pool, h->cmd_pool_dhandle);
+	if (h->ioaccel2_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
 	if (h->errinfo_pool)
 		pci_free_consistent(h->pdev,
 			    h->nr_cmds * sizeof(struct ErrorInfo),
 			    h->errinfo_pool,
 			    h->errinfo_pool_dhandle);
+	if (h->ioaccel_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(struct io_accel1_cmd),
+			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
 }
 
 static int hpsa_request_irq(struct ctlr_info *h,
@@ -4597,15 +6607,15 @@
 	for (i = 0; i < MAX_REPLY_QUEUES; i++)
 		h->q[i] = (u8) i;
 
-	if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
+	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
 		/* If performant mode and MSI-X, use multiple reply queues */
-		for (i = 0; i < MAX_REPLY_QUEUES; i++)
+		for (i = 0; i < h->msix_vector; i++)
 			rc = request_irq(h->intr[i], msixhandler,
 					0, h->devname,
 					&h->q[i]);
 	} else {
 		/* Use single reply pool */
-		if (h->msix_vector || h->msi_vector) {
+		if (h->msix_vector > 0 || h->msi_vector) {
 			rc = request_irq(h->intr[h->intr_mode],
 				msixhandler, 0, h->devname,
 				&h->q[h->intr_mode]);
@@ -4658,7 +6668,7 @@
 		return;
 	}
 
-	for (i = 0; i < MAX_REPLY_QUEUES; i++)
+	for (i = 0; i < h->msix_vector; i++)
 		free_irq(h->intr[i], &h->q[i]);
 }
 
@@ -4681,6 +6691,7 @@
 	hpsa_free_irqs_and_disable_msix(h);
 	hpsa_free_sg_chain_blocks(h);
 	hpsa_free_cmd_pool(h);
+	kfree(h->ioaccel1_blockFetchTable);
 	kfree(h->blockFetchTable);
 	pci_free_consistent(h->pdev, h->reply_pool_size,
 		h->reply_pool, h->reply_pool_dhandle);
@@ -4760,6 +6771,92 @@
 	h->last_heartbeat_timestamp = now;
 }
 
+static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+{
+	int i;
+	char *event_type;
+
+	/* Clear the driver-requested rescan flag */
+	h->drv_req_rescan = 0;
+
+	/* Ask the controller to clear the events we're handling. */
+	if ((h->transMethod & (CFGTBL_Trans_io_accel1
+			| CFGTBL_Trans_io_accel2)) &&
+		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
+		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
+
+		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
+			event_type = "state change";
+		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
+			event_type = "configuration change";
+		/* Stop sending new RAID offload reqs via the IO accelerator */
+		scsi_block_requests(h->scsi_host);
+		for (i = 0; i < h->ndevices; i++)
+			h->dev[i]->offload_enabled = 0;
+		hpsa_drain_accel_commands(h);
+		/* Log and acknowledge the events we just handled */
+		dev_warn(&h->pdev->dev,
+			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
+			h->events, event_type);
+		writel(h->events, &(h->cfgtable->clear_event_notify));
+		/* Set the "clear event notify field update" bit 6 */
+		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+		/* Wait until ctlr clears 'clear event notify field', bit 6 */
+		hpsa_wait_for_clear_event_notify_ack(h);
+		scsi_unblock_requests(h->scsi_host);
+	} else {
+		/* Acknowledge controller notification events. */
+		writel(h->events, &(h->cfgtable->clear_event_notify));
+		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+		hpsa_wait_for_clear_event_notify_ack(h);
+#if 0
+		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+		hpsa_wait_for_mode_change_ack(h);
+#endif
+	}
+	return;
+}
+
+/* Check a register on the controller to see if there are configuration
+ * changes (added/changed/removed logical drives, etc.) which mean that
+ * we should rescan the controller for devices.
+ * Also check flag for driver-initiated rescan.
+ */
+static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
+{
+	if (h->drv_req_rescan)
+		return 1;
+
+	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
+		return 0;
+
+	h->events = readl(&(h->cfgtable->event_notify));
+	return h->events & RESCAN_REQUIRED_EVENT_BITS;
+}
+
+/*
+ * Check if any of the offline devices have become ready
+ */
+static int hpsa_offline_devices_ready(struct ctlr_info *h)
+{
+	unsigned long flags;
+	struct offline_device_entry *d;
+	struct list_head *this, *tmp;
+
+	spin_lock_irqsave(&h->offline_device_lock, flags);
+	list_for_each_safe(this, tmp, &h->offline_device_list) {
+		d = list_entry(this, struct offline_device_entry,
+				offline_list);
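+		/* Drop the lock while testing the volume:
+		 * hpsa_volume_offline() issues commands to the
+		 * controller and may sleep.
+		 */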
+		spin_unlock_irqrestore(&h->offline_device_lock, flags);
+		if (!hpsa_volume_offline(h, d->scsi3addr))
+			return 1;
+		spin_lock_irqsave(&h->offline_device_lock, flags);
+	}
+	spin_unlock_irqrestore(&h->offline_device_lock, flags);
+	return 0;
+}
+
 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
 {
 	unsigned long flags;
@@ -4768,6 +6865,15 @@
 	detect_controller_lockup(h);
 	if (h->lockup_detected)
 		return;
+
+	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
+		scsi_host_get(h->scsi_host);
+		h->drv_req_rescan = 0;
+		hpsa_ack_ctlr_events(h);
+		hpsa_scan_start(h->scsi_host);
+		scsi_host_put(h->scsi_host);
+	}
+
 	spin_lock_irqsave(&h->lock, flags);
 	if (h->remove_in_progress) {
 		spin_unlock_irqrestore(&h->lock, flags);
@@ -4807,7 +6913,7 @@
-	 * the 5 lower bits of the address are used by the hardware. and by
-	 * the driver.  See comments in hpsa.h for more info.
+	 * the 7 lower bits of the address are used by the hardware and by
+	 * the driver.  See comments in hpsa.h for more info.
 	 */
-#define COMMANDLIST_ALIGNMENT 32
+#define COMMANDLIST_ALIGNMENT 128
 	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
 	h = kzalloc(sizeof(*h), GFP_KERNEL);
 	if (!h)
@@ -4817,7 +6923,9 @@
 	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
 	INIT_LIST_HEAD(&h->cmpQ);
 	INIT_LIST_HEAD(&h->reqQ);
+	INIT_LIST_HEAD(&h->offline_device_list);
 	spin_lock_init(&h->lock);
+	spin_lock_init(&h->offline_device_lock);
 	spin_lock_init(&h->scan_lock);
 	spin_lock_init(&h->passthru_count_lock);
 	rc = hpsa_pci_init(h);
@@ -4859,6 +6967,7 @@
 
 	pci_set_drvdata(pdev, h);
 	h->ndevices = 0;
+	h->hba_mode_enabled = 0;
 	h->scsi_host = NULL;
 	spin_lock_init(&h->devlock);
 	hpsa_put_ctlr_into_performant_mode(h);
@@ -4918,6 +7027,11 @@
 		goto reinit_after_soft_reset;
 	}
 
+	/* Enable Accelerated IO path at driver layer */
+	h->acciopath_status = 1;
+
+	h->drv_req_rescan = 0;
+
 	/* Turn the interrupts on so we can service requests */
 	h->access.set_intr_mask(h, HPSA_INTR_ON);
 
@@ -5034,6 +7148,8 @@
 		h->reply_pool, h->reply_pool_dhandle);
 	kfree(h->cmd_pool_bits);
 	kfree(h->blockFetchTable);
+	kfree(h->ioaccel1_blockFetchTable);
+	kfree(h->ioaccel2_blockFetchTable);
 	kfree(h->hba_inquiry_data);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
@@ -5074,20 +7190,17 @@
  * bits of the command address.
  */
 static void  calc_bucket_map(int bucket[], int num_buckets,
-	int nsgs, int *bucket_map)
+	int nsgs, int min_blocks, int *bucket_map)
 {
 	int i, j, b, size;
 
-	/* even a command with 0 SGs requires 4 blocks */
-#define MINIMUM_TRANSFER_BLOCKS 4
-#define NUM_BUCKETS 8
 	/* Note, bucket_map must have nsgs+1 entries. */
 	for (i = 0; i <= nsgs; i++) {
 		/* Compute size of a command with i SG entries */
-		size = i + MINIMUM_TRANSFER_BLOCKS;
+		size = i + min_blocks;
 		b = num_buckets; /* Assume the biggest bucket */
 		/* Find the bucket that is just big enough */
-		for (j = 0; j < 8; j++) {
+		for (j = 0; j < num_buckets; j++) {
 			if (bucket[j] >= size) {
 				b = j;
 				break;
@@ -5098,10 +7211,16 @@
 	}
 }
 
-static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
+static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 {
 	int i;
 	unsigned long register_value;
+	unsigned long transMethod = CFGTBL_Trans_Performant |
+			(trans_support & CFGTBL_Trans_use_short_tags) |
+				CFGTBL_Trans_enable_directed_msix |
+			(trans_support & (CFGTBL_Trans_io_accel1 |
+				CFGTBL_Trans_io_accel2));
+	struct access_method access = SA5_performant_access;
 
 	/* This is a bit complicated.  There are 8 registers on
 	 * the controller which we write to to tell it 8 different
@@ -5121,6 +7240,16 @@
 	 * sizes for small commands, and fewer sizes for larger commands.
 	 */
 	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
+#define MIN_IOACCEL2_BFT_ENTRY 5
+#define HPSA_IOACCEL2_HEADER_SZ 4
+	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
+			13, 14, 15, 16, 17, 18, 19,
+			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
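+	/* Each bft2[] entry is a command size in 16-byte units:
+	 * HPSA_IOACCEL2_HEADER_SZ units of header plus one unit per
+	 * embedded SG element.
+	 */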
+	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
+	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
+	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
+				 16 * MIN_IOACCEL2_BFT_ENTRY);
+	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
 	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
 	/*  5 = 1 s/g entry or 4k
 	 *  6 = 2 s/g entry or 8k
@@ -5133,7 +7262,7 @@
 
 	bft[7] = SG_ENTRIES_IN_CMD + 4;
 	calc_bucket_map(bft, ARRAY_SIZE(bft),
-				SG_ENTRIES_IN_CMD, h->blockFetchTable);
+				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
 	for (i = 0; i < 8; i++)
 		writel(bft[i], &h->transtable->BlockFetch[i]);
 
@@ -5150,9 +7279,22 @@
 			&h->transtable->RepQAddr[i].lower);
 	}
 
-	writel(CFGTBL_Trans_Performant | use_short_tags |
-		CFGTBL_Trans_enable_directed_msix,
-		&(h->cfgtable->HostWrite.TransportRequest));
+	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
+	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
+	/*
+	 * enable outbound interrupt coalescing in accelerator mode;
+	 */
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+		access = SA5_ioaccel_mode1_access;
+		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+	} else {
+		if (trans_support & CFGTBL_Trans_io_accel2) {
+			access = SA5_ioaccel_mode2_access;
+			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+		}
+	}
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 	hpsa_wait_for_mode_change_ack(h);
 	register_value = readl(&(h->cfgtable->TransportActive));
@@ -5162,23 +7304,186 @@
 		return;
 	}
 	/* Change the access methods to the performant access methods */
-	h->access = SA5_performant_access;
-	h->transMethod = CFGTBL_Trans_Performant;
+	h->access = access;
+	h->transMethod = transMethod;
+
+	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
+		(trans_support & CFGTBL_Trans_io_accel2)))
+		return;
+
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+		/* Set up I/O accelerator mode */
+		for (i = 0; i < h->nreply_queues; i++) {
+			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
+			h->reply_queue[i].current_entry =
+				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
+		}
+		bft[7] = h->ioaccel_maxsg + 8;
+		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
+				h->ioaccel1_blockFetchTable);
+
+		/* initialize all reply queue entries to unused */
+		memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
+				h->reply_pool_size);
+
+		/* set all the constant fields in the accelerator command
+		 * frames once at init time to save CPU cycles later.
+		 */
+		for (i = 0; i < h->nr_cmds; i++) {
+			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
+
+			cp->function = IOACCEL1_FUNCTION_SCSIIO;
+			cp->err_info = (u32) (h->errinfo_pool_dhandle +
+					(i * sizeof(struct ErrorInfo)));
+			cp->err_info_len = sizeof(struct ErrorInfo);
+			cp->sgl_offset = IOACCEL1_SGLOFFSET;
+			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
+			cp->timeout_sec = 0;
+			cp->ReplyQueue = 0;
+			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
+						DIRECT_LOOKUP_BIT;
+			cp->Tag.upper = 0;
+			cp->host_addr.lower =
+				(u32) (h->ioaccel_cmd_pool_dhandle +
+					(i * sizeof(struct io_accel1_cmd)));
+			cp->host_addr.upper = 0;
+		}
+	} else if (trans_support & CFGTBL_Trans_io_accel2) {
+		u64 cfg_offset, cfg_base_addr_index;
+		u32 bft2_offset, cfg_base_addr;
+		int rc;
+
+		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+			&cfg_base_addr_index, &cfg_offset);
+		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
+		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
+		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
+				4, h->ioaccel2_blockFetchTable);
+		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
+		BUILD_BUG_ON(offsetof(struct CfgTable,
+				io_accel_request_size_offset) != 0xb8);
+		h->ioaccel2_bft2_regs =
+			remap_pci_mem(pci_resource_start(h->pdev,
+					cfg_base_addr_index) +
+					cfg_offset + bft2_offset,
+					ARRAY_SIZE(bft2) *
+					sizeof(*h->ioaccel2_bft2_regs));
+		for (i = 0; i < ARRAY_SIZE(bft2); i++)
+			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
+	}
+	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+	hpsa_wait_for_mode_change_ack(h);
+}
+
+static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
+{
+	h->ioaccel_maxsg =
+		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
+		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
+
+	/* Command structures must be aligned on a 128-byte boundary
+	 * because the 7 lower bits of the address are used by the
+	 * hardware.
+	 */
+#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
+	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
+			IOACCEL1_COMMANDLIST_ALIGNMENT);
+	h->ioaccel_cmd_pool =
+		pci_alloc_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+			&(h->ioaccel_cmd_pool_dhandle));
+
+	h->ioaccel1_blockFetchTable =
+		kmalloc(((h->ioaccel_maxsg + 1) *
+				sizeof(u32)), GFP_KERNEL);
+
+	if ((h->ioaccel_cmd_pool == NULL) ||
+		(h->ioaccel1_blockFetchTable == NULL))
+		goto clean_up;
+
+	memset(h->ioaccel_cmd_pool, 0,
+		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
+	return 0;
+
+clean_up:
+	if (h->ioaccel_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
+	kfree(h->ioaccel1_blockFetchTable);
+	return 1;
+}
+
+static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
+{
+	/* Allocate ioaccel2 mode command blocks and block fetch table */
+
+	h->ioaccel_maxsg =
+		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
+		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
+
+#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
+	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
+			IOACCEL2_COMMANDLIST_ALIGNMENT);
+	h->ioaccel2_cmd_pool =
+		pci_alloc_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+			&(h->ioaccel2_cmd_pool_dhandle));
+
+	h->ioaccel2_blockFetchTable =
+		kmalloc(((h->ioaccel_maxsg + 1) *
+				sizeof(u32)), GFP_KERNEL);
+
+	if ((h->ioaccel2_cmd_pool == NULL) ||
+		(h->ioaccel2_blockFetchTable == NULL))
+		goto clean_up;
+
+	memset(h->ioaccel2_cmd_pool, 0,
+		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
+	return 0;
+
+clean_up:
+	if (h->ioaccel2_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
+	kfree(h->ioaccel2_blockFetchTable);
+	return 1;
 }
 
 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 {
 	u32 trans_support;
+	unsigned long transMethod = CFGTBL_Trans_Performant |
+					CFGTBL_Trans_use_short_tags;
 	int i;
 
 	if (hpsa_simple_mode)
 		return;
 
 	trans_support = readl(&(h->cfgtable->TransportSupport));
 	if (!(trans_support & PERFORMANT_MODE))
 		return;
+
+	/* Check for I/O accelerator mode support */
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+		transMethod |= CFGTBL_Trans_io_accel1 |
+				CFGTBL_Trans_enable_directed_msix;
+		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
+			goto clean_up;
+	} else if (trans_support & CFGTBL_Trans_io_accel2) {
+		transMethod |= CFGTBL_Trans_io_accel2 |
+				CFGTBL_Trans_enable_directed_msix;
+		if (ioaccel2_alloc_cmds_and_bft(h))
+			goto clean_up;
+	}
 
-	h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
+	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
 	hpsa_get_max_perf_mode_cmds(h);
 	/* Performant mode ring buffer and supporting data structures */
 	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
@@ -5200,9 +7505,7 @@
 		|| (h->blockFetchTable == NULL))
 		goto clean_up;
 
-	hpsa_enter_performant_mode(h,
-		trans_support & CFGTBL_Trans_use_short_tags);
-
+	hpsa_enter_performant_mode(h, trans_support);
 	return;
 
 clean_up:
@@ -5212,6 +7515,31 @@
 	kfree(h->blockFetchTable);
 }
 
+static int is_accelerated_cmd(struct CommandList *c)
+{
+	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
+}
+
+static void hpsa_drain_accel_commands(struct ctlr_info *h)
+{
+	struct CommandList *c = NULL;
+	unsigned long flags;
+	int accel_cmds_out;
+
+	do { /* wait for all outstanding commands to drain out */
+		accel_cmds_out = 0;
+		spin_lock_irqsave(&h->lock, flags);
+		list_for_each_entry(c, &h->cmpQ, list)
+			accel_cmds_out += is_accelerated_cmd(c);
+		list_for_each_entry(c, &h->reqQ, list)
+			accel_cmds_out += is_accelerated_cmd(c);
+		spin_unlock_irqrestore(&h->lock, flags);
+		if (accel_cmds_out <= 0)
+			break;
+		msleep(100);
+	} while (1);
+}
+
 /*
  *  This is it.  Register the PCI driver information for the cards we control
  *  the OS will call our registered routines when it finds one of our cards.
@@ -5226,5 +7554,83 @@
 	pci_unregister_driver(&hpsa_pci_driver);
 }
 
+static void __attribute__((unused)) verify_offsets(void)
+{
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
+
+	VERIFY_OFFSET(structure_size, 0);
+	VERIFY_OFFSET(volume_blk_size, 4);
+	VERIFY_OFFSET(volume_blk_cnt, 8);
+	VERIFY_OFFSET(phys_blk_shift, 16);
+	VERIFY_OFFSET(parity_rotation_shift, 17);
+	VERIFY_OFFSET(strip_size, 18);
+	VERIFY_OFFSET(disk_starting_blk, 20);
+	VERIFY_OFFSET(disk_blk_cnt, 28);
+	VERIFY_OFFSET(data_disks_per_row, 36);
+	VERIFY_OFFSET(metadata_disks_per_row, 38);
+	VERIFY_OFFSET(row_cnt, 40);
+	VERIFY_OFFSET(layout_map_count, 42);
+	VERIFY_OFFSET(flags, 44);
+	VERIFY_OFFSET(dekindex, 46);
+	/* VERIFY_OFFSET(reserved, 48); */
+	VERIFY_OFFSET(data, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
+
+	VERIFY_OFFSET(IU_type, 0);
+	VERIFY_OFFSET(direction, 1);
+	VERIFY_OFFSET(reply_queue, 2);
+	/* VERIFY_OFFSET(reserved1, 3);  */
+	VERIFY_OFFSET(scsi_nexus, 4);
+	VERIFY_OFFSET(Tag, 8);
+	VERIFY_OFFSET(cdb, 16);
+	VERIFY_OFFSET(cciss_lun, 32);
+	VERIFY_OFFSET(data_len, 40);
+	VERIFY_OFFSET(cmd_priority_task_attr, 44);
+	VERIFY_OFFSET(sg_count, 45);
+	/* VERIFY_OFFSET(reserved3 */
+	VERIFY_OFFSET(err_ptr, 48);
+	VERIFY_OFFSET(err_len, 56);
+	/* VERIFY_OFFSET(reserved4  */
+	VERIFY_OFFSET(sg, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
+
+	VERIFY_OFFSET(dev_handle, 0x00);
+	VERIFY_OFFSET(reserved1, 0x02);
+	VERIFY_OFFSET(function, 0x03);
+	VERIFY_OFFSET(reserved2, 0x04);
+	VERIFY_OFFSET(err_info, 0x0C);
+	VERIFY_OFFSET(reserved3, 0x10);
+	VERIFY_OFFSET(err_info_len, 0x12);
+	VERIFY_OFFSET(reserved4, 0x13);
+	VERIFY_OFFSET(sgl_offset, 0x14);
+	VERIFY_OFFSET(reserved5, 0x15);
+	VERIFY_OFFSET(transfer_len, 0x1C);
+	VERIFY_OFFSET(reserved6, 0x20);
+	VERIFY_OFFSET(io_flags, 0x24);
+	VERIFY_OFFSET(reserved7, 0x26);
+	VERIFY_OFFSET(LUN, 0x34);
+	VERIFY_OFFSET(control, 0x3C);
+	VERIFY_OFFSET(CDB, 0x40);
+	VERIFY_OFFSET(reserved8, 0x50);
+	VERIFY_OFFSET(host_context_flags, 0x60);
+	VERIFY_OFFSET(timeout_sec, 0x62);
+	VERIFY_OFFSET(ReplyQueue, 0x64);
+	VERIFY_OFFSET(reserved9, 0x65);
+	VERIFY_OFFSET(Tag, 0x68);
+	VERIFY_OFFSET(host_addr, 0x70);
+	VERIFY_OFFSET(CISS_LUN, 0x78);
+	VERIFY_OFFSET(SG, 0x78 + 8);
+#undef VERIFY_OFFSET
+}
+
 module_init(hpsa_init);
 module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 01c3283..44235a2 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,6 +1,6 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -46,6 +46,15 @@
 	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
 	unsigned char model[16];        /* bytes 16-31 of inquiry data */
 	unsigned char raid_level;	/* from inquiry page 0xC1 */
+	unsigned char volume_offline;	/* discovered via TUR or VPD */
+	u32 ioaccel_handle;
+	int offload_config;		/* I/O accel RAID offload configured */
+	int offload_enabled;		/* I/O accel RAID offload enabled */
+	int offload_to_mirror;		/* Send next I/O accelerator RAID
+					 * offload request to mirror drive
+					 */
+	struct raid_map_data raid_map;	/* I/O accelerator RAID map */
+
 };
 
 struct reply_pool {
@@ -55,6 +64,46 @@
 	u32 current_entry;
 };
 
+#pragma pack(1)
+struct bmic_controller_parameters {
+	u8   led_flags;
+	u8   enable_command_list_verification;
+	u8   backed_out_write_drives;
+	u16  stripes_for_parity;
+	u8   parity_distribution_mode_flags;
+	u16  max_driver_requests;
+	u16  elevator_trend_count;
+	u8   disable_elevator;
+	u8   force_scan_complete;
+	u8   scsi_transfer_mode;
+	u8   force_narrow;
+	u8   rebuild_priority;
+	u8   expand_priority;
+	u8   host_sdb_asic_fix;
+	u8   pdpi_burst_from_host_disabled;
+	char software_name[64];
+	char hardware_name[32];
+	u8   bridge_revision;
+	u8   snapshot_priority;
+	u32  os_specific;
+	u8   post_prompt_timeout;
+	u8   automatic_drive_slamming;
+	u8   reserved1;
+	u8   nvram_flags;
+	u8   cache_nvram_flags;
+	u8   drive_config_flags;
+	u16  reserved2;
+	u8   temp_warning_level;
+	u8   temp_shutdown_level;
+	u8   temp_condition_reset;
+	u8   max_coalesce_commands;
+	u32  max_coalesce_delay;
+	u8   orca_password[4];
+	u8   access_id[16];
+	u8   reserved[356];
+};
+#pragma pack()
+
 struct ctlr_info {
 	int	ctlr;
 	char	devname[8];
@@ -80,6 +129,7 @@
 	unsigned int msi_vector;
 	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
 	struct access_method access;
+	char hba_mode_enabled;
 
 	/* queue and queue Info */
 	struct list_head reqQ;
@@ -95,6 +145,10 @@
 	/* pointers to command and error info pool */
 	struct CommandList 	*cmd_pool;
 	dma_addr_t		cmd_pool_dhandle;
+	struct io_accel1_cmd	*ioaccel_cmd_pool;
+	dma_addr_t		ioaccel_cmd_pool_dhandle;
+	struct io_accel2_cmd	*ioaccel2_cmd_pool;
+	dma_addr_t		ioaccel2_cmd_pool_dhandle;
 	struct ErrorInfo 	*errinfo_pool;
 	dma_addr_t		errinfo_pool_dhandle;
 	unsigned long  		*cmd_pool_bits;
@@ -128,7 +182,14 @@
 	u8 nreply_queues;
 	dma_addr_t reply_pool_dhandle;
 	u32 *blockFetchTable;
+	u32 *ioaccel1_blockFetchTable;
+	u32 *ioaccel2_blockFetchTable;
+	u32 *ioaccel2_bft2_regs;
 	unsigned char *hba_inquiry_data;
+	u32 driver_support;
+	u32 fw_support;
+	int ioaccel_support;
+	int ioaccel_maxsg;
 	u64 last_intr_timestamp;
 	u32 last_heartbeat;
 	u64 last_heartbeat_timestamp;
@@ -161,7 +222,35 @@
 #define HPSATMF_LOG_QRY_TASK    (1 << 23)
 #define HPSATMF_LOG_QRY_TSET    (1 << 24)
 #define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
+	u32 events;
+#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
+#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
+#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
+#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
+#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
+#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
+#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)
+
+#define RESCAN_REQUIRED_EVENT_BITS \
+		(CTLR_STATE_CHANGE_EVENT | \
+		CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
+		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
+		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
+		CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
+		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
+		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
+	spinlock_t offline_device_lock;
+	struct list_head offline_device_list;
+	int	acciopath_status;
+	int	drv_req_rescan;	/* flag for driver to request rescan event */
+	int	raid_offload_debug;
 };
+
+struct offline_device_entry {
+	unsigned char scsi3addr[8];
+	struct list_head offline_list;
+};
+
 #define HPSA_ABORT_MSG 0
 #define HPSA_DEVICE_RESET_MSG 1
 #define HPSA_RESET_TYPE_CONTROLLER 0x00
@@ -242,6 +331,14 @@
 
 #define HPSA_INTR_ON 	1
 #define HPSA_INTR_OFF	0
+
+/*
+ * Inbound Post Queue offsets for IO Accelerator Mode 2
+ */
+#define IOACCEL2_INBOUND_POSTQ_32	0x48
+#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
+#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4
+
 /*
 	Send the command to the hardware
 */
@@ -254,6 +351,18 @@
 	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
 }
 
+static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
+	struct CommandList *c)
+{
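+	/* Mode-2 accelerated commands post to the ioaccel2 inbound
+	 * queue; everything else uses the standard request FIFO.
+	 */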
+	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
+		c->Header.Tag.lower);
+	if (c->cmd_type == CMD_IOACCEL2)
+		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
+	else
+		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+}
+
 /*
  *  This card is the opposite of the other cards.
  *   0 turns interrupts on...
@@ -387,6 +496,50 @@
 	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
 }
 
+#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT    0x100
+
+static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
+{
+	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
+		true : false;
+}
+
+#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
+#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
+#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
+#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL
+
+static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
+{
+	u64 register_value;
+	struct reply_pool *rq = &h->reply_queue[q];
+	unsigned long flags;
+
+	BUG_ON(q >= h->nreply_queues);
+
+	register_value = rq->head[rq->current_entry];
+	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
+		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
+		if (++rq->current_entry == rq->size)
+			rq->current_entry = 0;
+		/*
+		 * @todo
+		 *
+		 * Don't really need to write the new index after each command,
+		 * but with current driver design this is easiest.
+		 */
+		wmb();
+		writel((q << 24) | rq->current_entry, h->vaddr +
+				IOACCEL_MODE1_CONSUMER_INDEX);
+		spin_lock_irqsave(&h->lock, flags);
+		h->commands_outstanding--;
+		spin_unlock_irqrestore(&h->lock, flags);
+	}
+	return (unsigned long) register_value;
+}
+
 static struct access_method SA5_access = {
 	SA5_submit_command,
 	SA5_intr_mask,
@@ -395,6 +548,22 @@
 	SA5_completed,
 };
 
+static struct access_method SA5_ioaccel_mode1_access = {
+	SA5_submit_command,
+	SA5_performant_intr_mask,
+	SA5_fifo_full,
+	SA5_ioaccel_mode1_intr_pending,
+	SA5_ioaccel_mode1_completed,
+};
+
+static struct access_method SA5_ioaccel_mode2_access = {
+	SA5_submit_command_ioaccel2,
+	SA5_performant_intr_mask,
+	SA5_fifo_full,
+	SA5_performant_intr_pending,
+	SA5_performant_completed,
+};
+
 static struct access_method SA5_performant_access = {
 	SA5_submit_command,
 	SA5_performant_intr_mask,
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index bfc8c4e..b5cc705 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,6 +1,6 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
 #define SENSEINFOBYTES          32 /* may vary between hbas */
 #define SG_ENTRIES_IN_CMD	32 /* Max SG entries excluding chain blocks */
 #define HPSA_SG_CHAIN		0x80000000
+#define HPSA_SG_LAST		0x40000000
 #define MAXREPLYQS              256
 
 /* Command Status value */
@@ -41,6 +42,8 @@
 #define CMD_UNSOLICITED_ABORT   0x000A
 #define CMD_TIMEOUT             0x000B
 #define CMD_UNABORTABLE		0x000C
+#define CMD_IOACCEL_DISABLED	0x000E
+
 
 /* Unit Attentions ASC's as defined for the MSA2012sa */
 #define POWER_OR_RESET			0x29
@@ -79,8 +82,9 @@
 #define ATTR_ACA                0x07
 
 /* cdb type */
-#define TYPE_CMD				0x00
-#define TYPE_MSG				0x01
+#define TYPE_CMD		0x00
+#define TYPE_MSG		0x01
+#define TYPE_IOACCEL2_CMD	0x81 /* 0x81 is not used by hardware */
 
 /* Message Types  */
 #define HPSA_TASK_MANAGEMENT    0x00
@@ -125,9 +129,12 @@
 #define CFGTBL_AccCmds          0x00000001l
 #define DOORBELL_CTLR_RESET	0x00000004l
 #define DOORBELL_CTLR_RESET2	0x00000020l
+#define DOORBELL_CLEAR_EVENTS	0x00000040l
 
 #define CFGTBL_Trans_Simple     0x00000002l
 #define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_io_accel1	0x00000080l
+#define CFGTBL_Trans_io_accel2	0x00000100l
 #define CFGTBL_Trans_use_short_tags 0x20000000l
 #define CFGTBL_Trans_enable_directed_msix (1 << 30)
 
@@ -135,6 +142,28 @@
 #define CFGTBL_BusType_Ultra3   0x00000002l
 #define CFGTBL_BusType_Fibre1G  0x00000100l
 #define CFGTBL_BusType_Fibre2G  0x00000200l
+
+/* VPD Inquiry types */
+#define HPSA_VPD_SUPPORTED_PAGES        0x00
+#define HPSA_VPD_LV_DEVICE_GEOMETRY     0xC1
+#define HPSA_VPD_LV_IOACCEL_STATUS      0xC2
+#define HPSA_VPD_LV_STATUS		0xC3
+#define HPSA_VPD_HEADER_SZ              4
+
+/* Logical volume states */
+#define HPSA_VPD_LV_STATUS_UNSUPPORTED			-1
+#define HPSA_LV_OK                                      0x0
+#define HPSA_LV_UNDERGOING_ERASE			0x0F
+#define HPSA_LV_UNDERGOING_RPI				0x12
+#define HPSA_LV_PENDING_RPI				0x13
+#define HPSA_LV_ENCRYPTED_NO_KEY			0x14
+#define HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER	0x15
+#define HPSA_LV_UNDERGOING_ENCRYPTION			0x16
+#define HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING		0x17
+#define HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER	0x18
+#define HPSA_LV_PENDING_ENCRYPTION			0x19
+#define HPSA_LV_PENDING_ENCRYPTION_REKEYING		0x1A
+
 struct vals32 {
 	u32   lower;
 	u32   upper;
@@ -162,9 +191,50 @@
 
 #define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
 #define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
+#define HPSA_REPORT_PHYS_EXTENDED 0x02
+#define HPSA_CISS_READ	0xc0	/* CISS Read */
+#define HPSA_GET_RAID_MAP 0xc8	/* CISS Get RAID Layout Map */
+
+#define RAID_MAP_MAX_ENTRIES   256
+
+struct raid_map_disk_data {
+	u32   ioaccel_handle;         /**< Handle to access this disk via the
+					*  I/O accelerator */
+	u8    xor_mult[2];            /**< XOR multipliers for this position,
+					*  valid for data disks only */
+	u8    reserved[2];
+};
+
+struct raid_map_data {
+	u32   structure_size;		/* Size of entire structure in bytes */
+	u32   volume_blk_size;		/* bytes / block in the volume */
+	u64   volume_blk_cnt;		/* logical blocks on the volume */
+	u8    phys_blk_shift;		/* Shift factor to convert between
+					 * units of logical blocks and physical
+					 * disk blocks */
+	u8    parity_rotation_shift;	/* Shift factor to convert between units
+					 * of logical stripes and physical
+					 * stripes */
+	u16   strip_size;		/* blocks used on each disk / stripe */
+	u64   disk_starting_blk;	/* First disk block used in volume */
+	u64   disk_blk_cnt;		/* disk blocks used by volume / disk */
+	u16   data_disks_per_row;	/* data disk entries / row in the map */
+	u16   metadata_disks_per_row;	/* mirror/parity disk entries / row
+					 * in the map */
+	u16   row_cnt;			/* rows in each layout map */
+	u16   layout_map_count;		/* layout maps (1 map per mirror/parity
+					 * group) */
+	u16   flags;			/* Bit 0 set if encryption enabled */
+#define RAID_MAP_FLAG_ENCRYPT_ON  0x01
+	u16   dekindex;			/* Data encryption key index. */
+	u8    reserved[16];
+	struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
+};
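
A hedged sketch of how these fields combine, for the plain striped
(RAID 0) case only; the real mapping code in hpsa.c additionally handles
mirrors (layout_map_count > 1), RAID 5/6 parity rotation, the
phys_blk_shift block-size conversion and the encryption tweak, and
performs the le16/le32 endian conversions omitted here:

	/* Sketch: find the ioaccel handle backing logical block
	 * 'first_block' of a striped volume described by 'map'. */
	u32 blocks_per_row = map->strip_size * map->data_disks_per_row;
	u64 row = first_block / blocks_per_row;
	u32 column = (u32)((first_block % blocks_per_row) / map->strip_size);
	u32 disks_per_row = map->data_disks_per_row +
			    map->metadata_disks_per_row;
	u32 map_index = (u32)(row % map->row_cnt) * disks_per_row + column;
	u32 handle = map->data[map_index].ioaccel_handle;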
+
 struct ReportLUNdata {
 	u8 LUNListLength[4];
-	u32 reserved;
+	u8 extended_response_flag;
+	u8 reserved[3];
 	u8 LUN[HPSA_MAX_LUN][8];
 };
 
@@ -187,6 +257,7 @@
 #define BMIC_CACHE_FLUSH 0xc2
 #define HPSA_CACHE_FLUSH 0x01	/* C2 was already being used by HPSA */
 #define BMIC_FLASH_FIRMWARE 0xF7
+#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
 
 /* Command List Structure */
 union SCSI3Addr {
@@ -283,6 +354,8 @@
 /* Command types */
 #define CMD_IOCTL_PEND  0x01
 #define CMD_SCSI	0x03
+#define CMD_IOACCEL1	0x04
+#define CMD_IOACCEL2	0x05
 
 #define DIRECT_LOOKUP_SHIFT 5
 #define DIRECT_LOOKUP_BIT 0x10
@@ -314,7 +387,6 @@
 	int			   cmd_type;
 	long			   cmdindex;
 	struct list_head list;
-	struct request *rq;
 	struct completion *waiting;
 	void   *scsi_cmd;
 
@@ -327,16 +399,183 @@
  */
 #define IS_32_BIT ((8 - sizeof(long))/4)
 #define IS_64_BIT (!IS_32_BIT)
-#define PAD_32 (4)
-#define PAD_64 (4)
+#define PAD_32 (40)
+#define PAD_64 (12)
 #define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
 	u8 pad[COMMANDLIST_PAD];
 };
 
+/* Max S/G elements in I/O accelerator command */
+#define IOACCEL1_MAXSGENTRIES           24
+#define IOACCEL2_MAXSGENTRIES		28
+
+/*
+ * Structure for I/O accelerator (mode 1) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+struct io_accel1_cmd {
+	u16 dev_handle;			/* 0x00 - 0x01 */
+	u8  reserved1;			/* 0x02 */
+	u8  function;			/* 0x03 */
+	u8  reserved2[8];		/* 0x04 - 0x0B */
+	u32 err_info;			/* 0x0C - 0x0F */
+	u8  reserved3[2];		/* 0x10 - 0x11 */
+	u8  err_info_len;		/* 0x12 */
+	u8  reserved4;			/* 0x13 */
+	u8  sgl_offset;			/* 0x14 */
+	u8  reserved5[7];		/* 0x15 - 0x1B */
+	u32 transfer_len;		/* 0x1C - 0x1F */
+	u8  reserved6[4];		/* 0x20 - 0x23 */
+	u16 io_flags;			/* 0x24 - 0x25 */
+	u8  reserved7[14];		/* 0x26 - 0x33 */
+	u8  LUN[8];			/* 0x34 - 0x3B */
+	u32 control;			/* 0x3C - 0x3F */
+	u8  CDB[16];			/* 0x40 - 0x4F */
+	u8  reserved8[16];		/* 0x50 - 0x5F */
+	u16 host_context_flags;		/* 0x60 - 0x61 */
+	u16 timeout_sec;		/* 0x62 - 0x63 */
+	u8  ReplyQueue;			/* 0x64 */
+	u8  reserved9[3];		/* 0x65 - 0x67 */
+	struct vals32 Tag;		/* 0x68 - 0x6F */
+	struct vals32 host_addr;	/* 0x70 - 0x77 */
+	u8  CISS_LUN[8];		/* 0x78 - 0x7F */
+	struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
+#define IOACCEL1_PAD_64 0
+#define IOACCEL1_PAD_32 0
+#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
+			IS_64_BIT * IOACCEL1_PAD_64)
+	u8 pad[IOACCEL1_PAD];
+};
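
The "must be 128-byte aligned in size" note above is the kind of
invariant best pinned down at compile time (presumably so that commands
allocated back-to-back in a pool all stay 128-byte aligned for the
hardware). A sketch of the guard; hpsa.c is expected to assert
something equivalent:

	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % 128 != 0);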
+
+#define IOACCEL1_FUNCTION_SCSIIO        0x00
+#define IOACCEL1_SGLOFFSET              32
+
+#define IOACCEL1_IOFLAGS_IO_REQ         0x4000
+#define IOACCEL1_IOFLAGS_CDBLEN_MASK    0x001F
+#define IOACCEL1_IOFLAGS_CDBLEN_MAX     16
+
+#define IOACCEL1_CONTROL_NODATAXFER     0x00000000
+#define IOACCEL1_CONTROL_DATA_OUT       0x01000000
+#define IOACCEL1_CONTROL_DATA_IN        0x02000000
+#define IOACCEL1_CONTROL_TASKPRIO_MASK  0x00007800
+#define IOACCEL1_CONTROL_TASKPRIO_SHIFT 11
+#define IOACCEL1_CONTROL_SIMPLEQUEUE    0x00000000
+#define IOACCEL1_CONTROL_HEADOFQUEUE    0x00000100
+#define IOACCEL1_CONTROL_ORDEREDQUEUE   0x00000200
+#define IOACCEL1_CONTROL_ACA            0x00000400
+
+#define IOACCEL1_HCFLAGS_CISS_FORMAT    0x0013
+
+#define IOACCEL1_BUSADDR_CMDTYPE        0x00000060
+
+struct ioaccel2_sg_element {
+	u64 address;
+	u32 length;
+	u8 reserved[3];
+	u8 chain_indicator;
+#define IOACCEL2_CHAIN 0x80
+};
+
+/*
+ * SCSI Response Format structure for IO Accelerator Mode 2
+ */
+struct io_accel2_scsi_response {
+	u8 IU_type;
+#define IOACCEL2_IU_TYPE_SRF			0x60
+	u8 reserved1[3];
+	u8 req_id[4];		/* request identifier */
+	u8 reserved2[4];
+	u8 serv_response;		/* service response */
+#define IOACCEL2_SERV_RESPONSE_COMPLETE		0x000
+#define IOACCEL2_SERV_RESPONSE_FAILURE		0x001
+#define IOACCEL2_SERV_RESPONSE_TMF_COMPLETE	0x002
+#define IOACCEL2_SERV_RESPONSE_TMF_SUCCESS	0x003
+#define IOACCEL2_SERV_RESPONSE_TMF_REJECTED	0x004
+#define IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN	0x005
+	u8 status;			/* status */
+#define IOACCEL2_STATUS_SR_TASK_COMP_GOOD	0x00
+#define IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND	0x02
+#define IOACCEL2_STATUS_SR_TASK_COMP_BUSY	0x08
+#define IOACCEL2_STATUS_SR_TASK_COMP_RES_CON	0x18
+#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL	0x28
+#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED	0x40
+#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED	0x0E
+	u8 data_present;		/* low 2 bits */
+#define IOACCEL2_NO_DATAPRESENT		0x000
+#define IOACCEL2_RESPONSE_DATAPRESENT	0x001
+#define IOACCEL2_SENSE_DATA_PRESENT	0x002
+#define IOACCEL2_RESERVED		0x003
+	u8 sense_data_len;		/* sense/response data length */
+	u8 resid_cnt[4];		/* residual count */
+	u8 sense_data_buff[32];		/* sense/response data buffer */
+};
+
+#define IOACCEL2_64_PAD 76
+#define IOACCEL2_32_PAD 76
+#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
+			IS_64_BIT * IOACCEL2_64_PAD)
+/*
+ * Structure for I/O accelerator (mode 2 or m2) commands.
+ * Note that this structure must be 128-byte aligned in size.
+ */
+struct io_accel2_cmd {
+	u8  IU_type;			/* IU Type */
+	u8  direction;			/* direction, memtype, and encryption */
+#define IOACCEL2_DIRECTION_MASK		0x03 /* bits 0,1: direction  */
+#define IOACCEL2_DIRECTION_MEMTYPE_MASK	0x04 /* bit 2: memtype source/dest */
+					     /*     0b=PCIe, 1b=DDR */
+#define IOACCEL2_DIRECTION_ENCRYPT_MASK	0x08 /* bit 3: encryption flag */
+					     /*     0=off, 1=on */
+	u8  reply_queue;		/* Reply Queue ID */
+	u8  reserved1;			/* Reserved */
+	u32 scsi_nexus;			/* Device Handle */
+	u32 Tag;			/* cciss tag, lower 4 bytes only */
+	u32 tweak_lower;		/* Encryption tweak, lower 4 bytes */
+	u8  cdb[16];			/* SCSI Command Descriptor Block */
+	u8  cciss_lun[8];		/* 8 byte SCSI address */
+	u32 data_len;			/* Total bytes to transfer */
+	u8  cmd_priority_task_attr;	/* priority and task attrs */
+#define IOACCEL2_PRIORITY_MASK 0x78
+#define IOACCEL2_ATTR_MASK 0x07
+	u8  sg_count;			/* Number of sg elements */
+	u16 dekindex;			/* Data encryption key index */
+	u64 err_ptr;			/* Error Pointer */
+	u32 err_len;			/* Error Length*/
+	u32 tweak_upper;		/* Encryption tweak, upper 4 bytes */
+	struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
+	struct io_accel2_scsi_response error_data;
+	u8 pad[IOACCEL2_PAD];
+};
+
+/*
+ * defines for Mode 2 command struct
+ * FIXME: this can't be all I need mfm
+ */
+#define IOACCEL2_IU_TYPE	0x40
+#define IOACCEL2_IU_TMF_TYPE	0x41
+#define IOACCEL2_DIR_NO_DATA	0x00
+#define IOACCEL2_DIR_DATA_IN	0x01
+#define IOACCEL2_DIR_DATA_OUT	0x02
+/*
+ * SCSI Task Management Request format for Accelerator Mode 2
+ */
+struct hpsa_tmf_struct {
+	u8 iu_type;		/* Information Unit Type */
+	u8 reply_queue;		/* Reply Queue ID */
+	u8 tmf;			/* Task Management Function */
+	u8 reserved1;		/* byte 3 Reserved */
+	u32 it_nexus;		/* SCSI I-T Nexus */
+	u8 lun_id[8];		/* LUN ID for TMF request */
+	struct vals32 Tag;	/* cciss tag associated w/ request */
+	struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */
+	u64 error_ptr;		/* Error Pointer */
+	u32 error_len;		/* Error Length */
+};
+
 /* Configuration Table Structure */
 struct HostWrite {
 	u32 TransportRequest;
-	u32 Reserved;
+	u32 command_pool_addr_hi;
 	u32 CoalIntDelay;
 	u32 CoalIntCount;
 };
@@ -344,6 +583,9 @@
 #define SIMPLE_MODE     0x02
 #define PERFORMANT_MODE 0x04
 #define MEMQ_MODE       0x08
+#define IOACCEL_MODE_1  0x80
+
+#define DRIVER_SUPPORT_UA_ENABLE        0x00000001
 
 struct CfgTable {
 	u8            Signature[4];
@@ -373,8 +615,18 @@
 	u32		misc_fw_support; /* offset 0x78 */
 #define			MISC_FW_DOORBELL_RESET (0x02)
 #define			MISC_FW_DOORBELL_RESET2 (0x010)
+#define			MISC_FW_RAID_OFFLOAD_BASIC (0x020)
+#define			MISC_FW_EVENT_NOTIFY (0x080)
 	u8		driver_version[32];
-
+	u32             max_cached_write_size;
+	u8              driver_scratchpad[16];
+	u32             max_error_info_length;
+	u32		io_accel_max_embedded_sg_count;
+	u32		io_accel_request_size_offset;
+	u32		event_notify;
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
+#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
+	u32		clear_event_notify;
 };
 
 #define NUM_BLOCKFETCH_ENTRIES 8
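
A hedged sketch of the acknowledge handshake the new event fields imply:
read the posted events, report back which were consumed, then ring the
clear-events doorbell. SA5_DOORBELL is assumed to be the controller
doorbell register from hpsa.h; hpsa.c's event worker does the real
version of this, including waiting for the controller to clear the
doorbell bit:

	u32 events = readl(&h->cfgtable->event_notify);

	if (events & (HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE |
		      HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
		writel(events, &h->cfgtable->clear_event_notify);
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
	}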
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index bf9eca8..56f8a86 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -589,7 +589,7 @@
 	}
 
 	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
-			  IRQF_DISABLED, "ibmvstgt", target);
+			  0, "ibmvstgt", target);
 	if (err)
 		goto req_irq_failed;
 
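Note: IRQF_DISABLED has been a no-op for years (genirq has run all
handlers with interrupts disabled since around 2.6.35), so this change,
like the identical in2000 and initio changes below, removes a dead flag
rather than altering behavior.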
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index bf02821..b1c4d83 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2015,7 +2015,7 @@
 		write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
 		write1_io(0, IO_INTR_MASK);	/* allow all ints */
 		x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
-		if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) {
+		if (request_irq(x, in2000_intr, 0, "in2000", instance)) {
 			printk("in2000_detect: Unable to allocate IRQ.\n");
 			detect_count--;
 			continue;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 280d5af..e5dae7b 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2931,7 +2931,7 @@
 	shost->base = host->addr;
 	shost->sg_tablesize = TOTAL_SG_ENTRY;
 
-	error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
+	error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", shost);
 	if (error < 0) {
 		printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
 		goto out_free_scbs;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3f5b56a..2f8dd8e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1143,6 +1143,7 @@
 	res->add_to_ml = 0;
 	res->del_from_ml = 0;
 	res->resetting_device = 0;
+	res->reset_occurred = 0;
 	res->sdev = NULL;
 	res->sata_port = NULL;
 
@@ -2367,6 +2368,42 @@
 }
 
 /**
+ * ipr_log_sis64_device_error - Log a sis64 device error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
+					 struct ipr_hostrcb *hostrcb)
+{
+	struct ipr_hostrcb_type_21_error *error;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+	error = &hostrcb->hcam.u.error64.u.type_21_error;
+
+	ipr_err("-----Failing Device Information-----\n");
+	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
+		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
+		 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
+	ipr_err("Device Resource Path: %s\n",
+		__ipr_format_res_path(error->res_path,
+				      buffer, sizeof(buffer)));
+	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
+	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
+	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
+	ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
+	ipr_err("SCSI Sense Data:\n");
+	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
+	ipr_err("SCSI Command Descriptor Block: \n");
+	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
+
+	ipr_err("Additional IOA Data:\n");
+	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
+}
+
+/**
  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
  * @ioasc:	IOASC
  *
@@ -2467,6 +2504,9 @@
 	case IPR_HOST_RCB_OVERLAY_ID_20:
 		ipr_log_fabric_error(ioa_cfg, hostrcb);
 		break;
+	case IPR_HOST_RCB_OVERLAY_ID_21:
+		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
+		break;
 	case IPR_HOST_RCB_OVERLAY_ID_23:
 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
 		break;
@@ -5015,6 +5055,7 @@
 	} else
 		rc = ipr_device_reset(ioa_cfg, res);
 	res->resetting_device = 0;
+	res->reset_occurred = 1;
 
 	LEAVE;
 	return rc ? FAILED : SUCCESS;
@@ -6183,8 +6224,10 @@
 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
-		if (ipr_is_gscsi(res))
+		if (ipr_is_gscsi(res) && res->reset_occurred) {
+			res->reset_occurred = 0;
 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
+		}
 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
 		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
 	}
@@ -8641,6 +8684,25 @@
 }
 
 /**
+ * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
+ * @pdev:	PCI device struct
+ *
+ * Description: This routine is called to tell us that the MMIO
+ * access to the IOA has been restored.
+ */
+static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
+{
+	unsigned long flags = 0;
+	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+	if (!ioa_cfg->probe_done)
+		pci_save_state(pdev);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
  * @pdev:	PCI device struct
  *
@@ -8654,7 +8716,8 @@
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
+	if (ioa_cfg->probe_done)
+		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
 
@@ -8672,11 +8735,14 @@
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	if (ioa_cfg->needs_warm_reset)
-		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
-	else
-		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
-					IPR_SHUTDOWN_NONE);
+	if (ioa_cfg->probe_done) {
+		if (ioa_cfg->needs_warm_reset)
+			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+		else
+			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
+						IPR_SHUTDOWN_NONE);
+	} else
+		wake_up_all(&ioa_cfg->eeh_wait_q);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 	return PCI_ERS_RESULT_RECOVERED;
 }
@@ -8695,17 +8761,20 @@
 	int i;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
-	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
-		ioa_cfg->sdt_state = ABORT_DUMP;
-	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
-	ioa_cfg->in_ioa_bringdown = 1;
-	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
-		spin_lock(&ioa_cfg->hrrq[i]._lock);
-		ioa_cfg->hrrq[i].allow_cmds = 0;
-		spin_unlock(&ioa_cfg->hrrq[i]._lock);
-	}
-	wmb();
-	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+	if (ioa_cfg->probe_done) {
+		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
+			ioa_cfg->sdt_state = ABORT_DUMP;
+		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
+		ioa_cfg->in_ioa_bringdown = 1;
+		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+			spin_lock(&ioa_cfg->hrrq[i]._lock);
+			ioa_cfg->hrrq[i].allow_cmds = 0;
+			spin_unlock(&ioa_cfg->hrrq[i]._lock);
+		}
+		wmb();
+		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+	} else
+		wake_up_all(&ioa_cfg->eeh_wait_q);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
 
@@ -8725,7 +8794,7 @@
 	switch (state) {
 	case pci_channel_io_frozen:
 		ipr_pci_frozen(pdev);
-		return PCI_ERS_RESULT_NEED_RESET;
+		return PCI_ERS_RESULT_CAN_RECOVER;
 	case pci_channel_io_perm_failure:
 		ipr_pci_perm_failure(pdev);
 		return PCI_ERS_RESULT_DISCONNECT;
@@ -8755,6 +8824,7 @@
 	ENTER;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
+	ioa_cfg->probe_done = 1;
 	if (ioa_cfg->needs_hard_reset) {
 		ioa_cfg->needs_hard_reset = 0;
 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -9030,16 +9100,6 @@
 	if (!ioa_cfg->vpd_cbs)
 		goto out_free_res_entries;
 
-	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
-		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
-		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
-		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
-		if (i == 0)
-			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
-		else
-			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
-	}
-
 	if (ipr_alloc_cmd_blks(ioa_cfg))
 		goto out_free_vpd_cbs;
 
@@ -9140,61 +9200,18 @@
 }
 
 /**
- * ipr_init_ioa_cfg - Initialize IOA config struct
+ * ipr_init_regs - Initialize IOA registers
  * @ioa_cfg:	ioa config struct
- * @host:		scsi host struct
- * @pdev:		PCI dev struct
  *
  * Return value:
- * 	none
+ *	none
  **/
-static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
-			     struct Scsi_Host *host, struct pci_dev *pdev)
+static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
 {
 	const struct ipr_interrupt_offsets *p;
 	struct ipr_interrupts *t;
 	void __iomem *base;
 
-	ioa_cfg->host = host;
-	ioa_cfg->pdev = pdev;
-	ioa_cfg->log_level = ipr_log_level;
-	ioa_cfg->doorbell = IPR_DOORBELL;
-	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
-	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
-	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
-	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
-	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
-	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
-
-	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
-	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
-	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
-	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
-	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
-	init_waitqueue_head(&ioa_cfg->reset_wait_q);
-	init_waitqueue_head(&ioa_cfg->msi_wait_q);
-	ioa_cfg->sdt_state = INACTIVE;
-
-	ipr_initialize_bus_attr(ioa_cfg);
-	ioa_cfg->max_devs_supported = ipr_max_devs;
-
-	if (ioa_cfg->sis64) {
-		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
-		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
-		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
-			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
-	} else {
-		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
-		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
-		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
-			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
-	}
-	host->max_channel = IPR_MAX_BUS_TO_SCAN;
-	host->unique_id = host->host_no;
-	host->max_cmd_len = IPR_MAX_CDB_LEN;
-	host->can_queue = ioa_cfg->max_cmds;
-	pci_set_drvdata(pdev, ioa_cfg);
-
 	p = &ioa_cfg->chip_cfg->regs;
 	t = &ioa_cfg->regs;
 	base = ioa_cfg->hdw_dma_regs;
@@ -9225,6 +9242,79 @@
 }
 
 /**
+ * ipr_init_ioa_cfg - Initialize IOA config struct
+ * @ioa_cfg:	ioa config struct
+ * @host:		scsi host struct
+ * @pdev:		PCI dev struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
+			     struct Scsi_Host *host, struct pci_dev *pdev)
+{
+	int i;
+
+	ioa_cfg->host = host;
+	ioa_cfg->pdev = pdev;
+	ioa_cfg->log_level = ipr_log_level;
+	ioa_cfg->doorbell = IPR_DOORBELL;
+	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
+	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
+	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
+	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
+	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
+	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
+
+	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
+	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
+	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
+	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
+	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
+	init_waitqueue_head(&ioa_cfg->reset_wait_q);
+	init_waitqueue_head(&ioa_cfg->msi_wait_q);
+	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
+	ioa_cfg->sdt_state = INACTIVE;
+
+	ipr_initialize_bus_attr(ioa_cfg);
+	ioa_cfg->max_devs_supported = ipr_max_devs;
+
+	if (ioa_cfg->sis64) {
+		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
+		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
+		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
+			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
+		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
+					   + ((sizeof(struct ipr_config_table_entry64)
+					       * ioa_cfg->max_devs_supported)));
+	} else {
+		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
+		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
+		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
+			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
+		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
+					   + ((sizeof(struct ipr_config_table_entry)
+					       * ioa_cfg->max_devs_supported)));
+	}
+
+	host->max_channel = IPR_MAX_BUS_TO_SCAN;
+	host->unique_id = host->host_no;
+	host->max_cmd_len = IPR_MAX_CDB_LEN;
+	host->can_queue = ioa_cfg->max_cmds;
+	pci_set_drvdata(pdev, ioa_cfg);
+
+	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
+		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+		if (i == 0)
+			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+		else
+			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
+	}
+}
+
+/**
  * ipr_get_chip_info - Find adapter chip information
  * @dev_id:		PCI device id struct
  *
@@ -9243,54 +9333,63 @@
 	return NULL;
 }
 
+/**
+ * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
+ *						during probe time
+ * @ioa_cfg:	ioa config struct
+ *
+ * Return value:
+ * 	None
+ **/
+static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
+{
+	struct pci_dev *pdev = ioa_cfg->pdev;
+
+	if (pci_channel_offline(pdev)) {
+		wait_event_timeout(ioa_cfg->eeh_wait_q,
+				   !pci_channel_offline(pdev),
+				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
+		pci_restore_state(pdev);
+	}
+}
+
 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
-	int i, err, vectors;
+	int i, vectors;
 
 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
 		entries[i].entry = i;
 
-	vectors = ipr_number_of_msix;
-
-	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
-			vectors = err;
-
-	if (err < 0) {
-		pci_disable_msix(ioa_cfg->pdev);
-		return err;
+	vectors = pci_enable_msix_range(ioa_cfg->pdev,
+					entries, 1, ipr_number_of_msix);
+	if (vectors < 0) {
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
+		return vectors;
 	}
 
-	if (!err) {
-		for (i = 0; i < vectors; i++)
-			ioa_cfg->vectors_info[i].vec = entries[i].vector;
-		ioa_cfg->nvectors = vectors;
-	}
+	for (i = 0; i < vectors; i++)
+		ioa_cfg->vectors_info[i].vec = entries[i].vector;
+	ioa_cfg->nvectors = vectors;
 
-	return err;
+	return 0;
 }
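
For reference, the pci_enable_msix_range()/pci_enable_msi_range()
conversion above is what lets both hand-rolled retry loops disappear:

	/* Contract of the range API:
	 *   rc = pci_enable_msix_range(pdev, entries, minvec, maxvec);
	 *   rc >= minvec : success, rc vectors were allocated
	 *   rc < 0       : failure, nothing allocated, nothing to retry
	 * The old pci_enable_msix() instead returned a positive "retry
	 * with this many vectors" hint that every caller had to loop on. */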
 
 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
 {
-	int i, err, vectors;
+	int i, vectors;
 
-	vectors = ipr_number_of_msix;
-
-	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
-			vectors = err;
-
-	if (err < 0) {
-		pci_disable_msi(ioa_cfg->pdev);
-		return err;
+	vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
+	if (vectors < 0) {
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
+		return vectors;
 	}
 
-	if (!err) {
-		for (i = 0; i < vectors; i++)
-			ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
-		ioa_cfg->nvectors = vectors;
-	}
+	for (i = 0; i < vectors; i++)
+		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+	ioa_cfg->nvectors = vectors;
 
-	return err;
+	return 0;
 }
 
 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
@@ -9355,7 +9454,7 @@
  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
  * @pdev:		PCI device struct
  *
- * Description: The return value from pci_enable_msi() can not always be
+ * Description: The return value from pci_enable_msi_range() cannot always be
  * trusted.  This routine sets up and initiates a test interrupt to determine
  * if the interrupt is received via the ipr_test_intr() service routine.
  * If the test fails, the driver will fall back to LSI.
@@ -9434,19 +9533,13 @@
 
 	ENTER;
 
-	if ((rc = pci_enable_device(pdev))) {
-		dev_err(&pdev->dev, "Cannot enable adapter\n");
-		goto out;
-	}
-
 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
-
 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
 
 	if (!host) {
 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
 		rc = -ENOMEM;
-		goto out_disable;
+		goto out;
 	}
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
@@ -9476,6 +9569,8 @@
 
 	ioa_cfg->revid = pdev->revision;
 
+	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
+
 	ipr_regs_pci = pci_resource_start(pdev, 0);
 
 	rc = pci_request_regions(pdev, IPR_NAME);
@@ -9485,22 +9580,35 @@
 		goto out_scsi_host_put;
 	}
 
+	rc = pci_enable_device(pdev);
+
+	if (rc || pci_channel_offline(pdev)) {
+		if (pci_channel_offline(pdev)) {
+			ipr_wait_for_pci_err_recovery(ioa_cfg);
+			rc = pci_enable_device(pdev);
+		}
+
+		if (rc) {
+			dev_err(&pdev->dev, "Cannot enable adapter\n");
+			ipr_wait_for_pci_err_recovery(ioa_cfg);
+			goto out_release_regions;
+		}
+	}
+
 	ipr_regs = pci_ioremap_bar(pdev, 0);
 
 	if (!ipr_regs) {
 		dev_err(&pdev->dev,
 			"Couldn't map memory range of registers\n");
 		rc = -ENOMEM;
-		goto out_release_regions;
+		goto out_disable;
 	}
 
 	ioa_cfg->hdw_dma_regs = ipr_regs;
 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
 
-	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
-
-	pci_set_master(pdev);
+	ipr_init_regs(ioa_cfg);
 
 	if (ioa_cfg->sis64) {
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -9508,7 +9616,6 @@
 			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
 			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		}
-
 	} else
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 
@@ -9522,10 +9629,15 @@
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
 		dev_err(&pdev->dev, "Write of cache line size failed\n");
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
 		rc = -EIO;
 		goto cleanup_nomem;
 	}
 
+	/* Issue MMIO read to ensure card is not in EEH */
+	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
+	ipr_wait_for_pci_err_recovery(ioa_cfg);
+
 	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
 		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
 			IPR_MAX_MSIX_VECTORS);
@@ -9544,10 +9656,22 @@
 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
 	}
 
+	pci_set_master(pdev);
+
+	if (pci_channel_offline(pdev)) {
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
+		pci_set_master(pdev);
+		if (pci_channel_offline(pdev)) {
+			rc = -EIO;
+			goto out_msi_disable;
+		}
+	}
+
 	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
 	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
 		rc = ipr_test_msi(ioa_cfg, pdev);
 		if (rc == -EOPNOTSUPP) {
+			ipr_wait_for_pci_err_recovery(ioa_cfg);
 			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
 				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
 				pci_disable_msi(pdev);
@@ -9577,30 +9701,12 @@
 				(unsigned int)num_online_cpus(),
 				(unsigned int)IPR_MAX_HRRQ_NUM);
 
-	/* Save away PCI config space for use following IOA reset */
-	rc = pci_save_state(pdev);
-
-	if (rc != PCIBIOS_SUCCESSFUL) {
-		dev_err(&pdev->dev, "Failed to save PCI config space\n");
-		rc = -EIO;
-		goto out_msi_disable;
-	}
-
 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
 		goto out_msi_disable;
 
 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
 		goto out_msi_disable;
 
-	if (ioa_cfg->sis64)
-		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
-				+ ((sizeof(struct ipr_config_table_entry64)
-				* ioa_cfg->max_devs_supported)));
-	else
-		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
-				+ ((sizeof(struct ipr_config_table_entry)
-				* ioa_cfg->max_devs_supported)));
-
 	rc = ipr_alloc_mem(ioa_cfg);
 	if (rc < 0) {
 		dev_err(&pdev->dev,
@@ -9608,6 +9714,15 @@
 		goto out_msi_disable;
 	}
 
+	/* Save away PCI config space for use following IOA reset */
+	rc = pci_save_state(pdev);
+
+	if (rc != PCIBIOS_SUCCESSFUL) {
+		dev_err(&pdev->dev, "Failed to save PCI config space\n");
+		rc = -EIO;
+		goto cleanup_nolog;
+	}
+
 	/*
 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
 	 * the card is in an unknown state and needs a hard reset
@@ -9664,18 +9779,19 @@
 cleanup_nolog:
 	ipr_free_mem(ioa_cfg);
 out_msi_disable:
+	ipr_wait_for_pci_err_recovery(ioa_cfg);
 	if (ioa_cfg->intr_flag == IPR_USE_MSI)
 		pci_disable_msi(pdev);
 	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
 		pci_disable_msix(pdev);
 cleanup_nomem:
 	iounmap(ipr_regs);
+out_disable:
+	pci_disable_device(pdev);
 out_release_regions:
 	pci_release_regions(pdev);
 out_scsi_host_put:
 	scsi_host_put(host);
-out_disable:
-	pci_disable_device(pdev);
 	goto out;
 }
 
@@ -9994,6 +10110,8 @@
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
@@ -10005,12 +10123,19 @@
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
 
 static const struct pci_error_handlers ipr_err_handler = {
 	.error_detected = ipr_pci_error_detected,
+	.mmio_enabled = ipr_pci_mmio_enabled,
 	.slot_reset = ipr_pci_slot_reset,
 };
 
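With the new mmio_enabled hook plus the probe_done flag and eeh_wait_q,
ipr now participates in the full EEH sequence instead of trying to
reset an adapter it has not finished probing. The flow, as a sketch:

	/* EEH recovery order after this patch:
	 *   error_detected(frozen) -> PCI_ERS_RESULT_CAN_RECOVER
	 *       (initiates an IOA freeze reset only if probe_done)
	 *   mmio_enabled()         -> PCI_ERS_RESULT_NEED_RESET
	 *       (saves PCI config space if the error hit mid-probe)
	 *   slot_reset()           -> PCI_ERS_RESULT_RECOVERED
	 *       (restarts the IOA, or wakes eeh_wait_q so a probe
	 *        parked in ipr_wait_for_pci_err_recovery() can resume) */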
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9ce38a2..31ed126 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -101,12 +101,16 @@
 #define IPR_SUBS_DEV_ID_57D7    0x03FF
 #define IPR_SUBS_DEV_ID_57D8    0x03FE
 #define IPR_SUBS_DEV_ID_57D9    0x046D
+#define IPR_SUBS_DEV_ID_57DA    0x04CA
 #define IPR_SUBS_DEV_ID_57EB    0x0474
 #define IPR_SUBS_DEV_ID_57EC    0x0475
 #define IPR_SUBS_DEV_ID_57ED    0x0499
 #define IPR_SUBS_DEV_ID_57EE    0x049A
 #define IPR_SUBS_DEV_ID_57EF    0x049B
 #define IPR_SUBS_DEV_ID_57F0    0x049C
+#define IPR_SUBS_DEV_ID_2CCA	0x04C7
+#define IPR_SUBS_DEV_ID_2CD2	0x04C8
+#define IPR_SUBS_DEV_ID_2CCD	0x04C9
 #define IPR_NAME				"ipr"
 
 /*
@@ -230,6 +234,7 @@
 #define IPR_WAIT_FOR_RESET_TIMEOUT		(2 * HZ)
 #define IPR_CHECK_FOR_RESET_TIMEOUT		(HZ / 10)
 #define IPR_WAIT_FOR_BIST_TIMEOUT		(2 * HZ)
+#define IPR_PCI_ERROR_RECOVERY_TIMEOUT	(120 * HZ)
 #define IPR_PCI_RESET_TIMEOUT			(HZ / 2)
 #define IPR_SIS32_DUMP_TIMEOUT			(15 * HZ)
 #define IPR_SIS64_DUMP_TIMEOUT			(40 * HZ)
@@ -897,6 +902,18 @@
 	__be32 ioa_data[236];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb_type_21_error {
+	__be32 wwn[4];
+	u8 res_path[8];
+	u8 primary_problem_desc[32];
+	u8 second_problem_desc[32];
+	__be32 sense_data[8];
+	__be32 cdb[4];
+	__be32 residual_trans_length;
+	__be32 length_of_error;
+	__be32 ioa_data[236];
+}__attribute__((packed, aligned (4)));
+
 struct ipr_hostrcb_type_02_error {
 	struct ipr_vpd ioa_vpd;
 	struct ipr_vpd cfc_vpd;
@@ -1126,6 +1143,7 @@
 		struct ipr_hostrcb_type_ff_error type_ff_error;
 		struct ipr_hostrcb_type_12_error type_12_error;
 		struct ipr_hostrcb_type_17_error type_17_error;
+		struct ipr_hostrcb_type_21_error type_21_error;
 		struct ipr_hostrcb_type_23_error type_23_error;
 		struct ipr_hostrcb_type_24_error type_24_error;
 		struct ipr_hostrcb_type_30_error type_30_error;
@@ -1169,6 +1187,7 @@
 #define IPR_HOST_RCB_OVERLAY_ID_16				0x16
 #define IPR_HOST_RCB_OVERLAY_ID_17				0x17
 #define IPR_HOST_RCB_OVERLAY_ID_20				0x20
+#define IPR_HOST_RCB_OVERLAY_ID_21				0x21
 #define IPR_HOST_RCB_OVERLAY_ID_23				0x23
 #define IPR_HOST_RCB_OVERLAY_ID_24				0x24
 #define IPR_HOST_RCB_OVERLAY_ID_26				0x26
@@ -1252,6 +1271,7 @@
 	u8 add_to_ml:1;
 	u8 del_from_ml:1;
 	u8 resetting_device:1;
+	u8 reset_occurred:1;
 
 	u32 bus;		/* AKA channel */
 	u32 target;		/* AKA id */
@@ -1441,6 +1461,7 @@
 	u8 dump_timeout:1;
 	u8 cfg_locked:1;
 	u8 clear_isr:1;
+	u8 probe_done:1;
 
 	u8 revid;
 
@@ -1519,6 +1540,7 @@
 
 	wait_queue_head_t reset_wait_q;
 	wait_queue_head_t msi_wait_q;
+	wait_queue_head_t eeh_wait_q;
 
 	struct ipr_dump *dump;
 	enum ipr_sdt_state sdt_state;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index d25d0d8..695b34e 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -66,7 +66,7 @@
 #include "probe_roms.h"
 
 #define MAJ 1
-#define MIN 1
+#define MIN 2
 #define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 	__stringify(BUILD)
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 99d2930..56e3809 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2723,13 +2723,9 @@
 	memcpy(resp->ending_fis, fis, sizeof(*fis));
 	ts->buf_valid_size = sizeof(*resp);
 
-	/* If the device fault bit is set in the status register, then
-	 * set the sense data and return.
-	 */
-	if (fis->status & ATA_DF)
+	/* If an error is flagged let libata decode the fis */
+	if (ac_err_mask(fis->status))
 		ts->stat = SAS_PROTO_RESPONSE;
-	else if (fis->status & ATA_ERR)
-		ts->stat = SAM_STAT_CHECK_CONDITION;
 	else
 		ts->stat = SAM_STAT_GOOD;
 
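Note: ac_err_mask() (include/linux/ata.h) returns a non-zero error mask
when the FIS status has ATA_ERR or ATA_DF set (and flags an HSM
violation when BSY/DRQ are still set), so the single SAS_PROTO_RESPONSE
branch above covers both deleted cases and lets libata do the sense
decoding.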
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 14c1c8f..680bf6f 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -490,5 +490,6 @@
 		iscsi_boot_remove_kobj(boot_kobj);
 
 	kset_unregister(boot_kset->kset);
+	kfree(boot_kset);
 }
 EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset);
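
Note: this one-liner fixes a leak; iscsi_boot_create_kset() allocates
the iscsi_boot_kset wrapper with kzalloc(), and the destroy path
previously unregistered the embedded kset but never freed the wrapper
itself.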
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index add6d15..bfb6d07 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -593,9 +593,9 @@
 	iscsi_sw_tcp_conn_restore_callbacks(conn);
 	sock_put(sock->sk);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	tcp_sw_conn->sock = NULL;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	sockfd_put(sock);
 }
 
@@ -663,10 +663,10 @@
 	if (err)
 		goto free_socket;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/* bind iSCSI connection and socket */
 	tcp_sw_conn->sock = sock;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	/* setup Socket parameters */
 	sk = sock->sk;
@@ -726,14 +726,14 @@
 	switch(param) {
 	case ISCSI_PARAM_CONN_PORT:
 	case ISCSI_PARAM_CONN_ADDRESS:
-		spin_lock_bh(&conn->session->lock);
+		spin_lock_bh(&conn->session->frwd_lock);
 		if (!tcp_sw_conn || !tcp_sw_conn->sock) {
-			spin_unlock_bh(&conn->session->lock);
+			spin_unlock_bh(&conn->session->frwd_lock);
 			return -ENOTCONN;
 		}
 		rc = kernel_getpeername(tcp_sw_conn->sock,
 					(struct sockaddr *)&addr, &len);
-		spin_unlock_bh(&conn->session->lock);
+		spin_unlock_bh(&conn->session->frwd_lock);
 		if (rc)
 			return rc;
 
@@ -759,23 +759,26 @@
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_IPADDRESS:
-		spin_lock_bh(&session->lock);
+		if (!session)
+			return -ENOTCONN;
+
+		spin_lock_bh(&session->frwd_lock);
 		conn = session->leadconn;
 		if (!conn) {
-			spin_unlock_bh(&session->lock);
+			spin_unlock_bh(&session->frwd_lock);
 			return -ENOTCONN;
 		}
 		tcp_conn = conn->dd_data;
 
 		tcp_sw_conn = tcp_conn->dd_data;
 		if (!tcp_sw_conn->sock) {
-			spin_unlock_bh(&session->lock);
+			spin_unlock_bh(&session->frwd_lock);
 			return -ENOTCONN;
 		}
 
 		rc = kernel_getsockname(tcp_sw_conn->sock,
 					(struct sockaddr *)&addr, &len);
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		if (rc)
 			return rc;
 
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4046241..5b8605c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -110,16 +110,8 @@
 		session->exp_cmdsn = exp_cmdsn;
 
 	if (max_cmdsn != session->max_cmdsn &&
-	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
 		session->max_cmdsn = max_cmdsn;
-		/*
-		 * if the window closed with IO queued, then kick the
-		 * xmit thread
-		 */
-		if (!list_empty(&session->leadconn->cmdqueue) ||
-		    !list_empty(&session->leadconn->mgmtqueue))
-			iscsi_conn_queue_work(session->leadconn);
-	}
 }
 
 void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
@@ -481,7 +473,7 @@
  * iscsi_free_task - free a task
  * @task: iscsi cmd task
  *
- * Must be called with session lock.
+ * Must be called with session back_lock.
  * This function returns the scsi command to scsi-ml or cleans
  * up mgmt tasks then returns the task to the pool.
  */
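
The libiscsi hunks from here on are all one change: the single
session->lock is split into session->frwd_lock (submission side:
queuecommand, xmit, TMF and timer paths) and session->back_lock
(completion side: RX processing and task release), so completions no
longer serialize against submissions. The pattern this patch repeats
wherever an RX-path function must send a PDU:

	/* RX path holds back_lock; sending a PDU needs frwd_lock,
	 * so swap locks around the send and then swap back. */
	spin_unlock(&session->back_lock);
	spin_lock(&session->frwd_lock);
	iscsi_send_nopout(conn, hdr);
	spin_unlock(&session->frwd_lock);
	spin_lock(&session->back_lock);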
@@ -535,9 +527,10 @@
 {
 	struct iscsi_session *session = task->conn->session;
 
-	spin_lock_bh(&session->lock);
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&session->back_lock);
 	__iscsi_put_task(task);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_put_task);
 
@@ -546,7 +539,7 @@
  * @task: iscsi cmd task
  * @state: state to complete task with
  *
- * Must be called with session lock.
+ * Must be called with session back_lock.
  */
 static void iscsi_complete_task(struct iscsi_task *task, int state)
 {
@@ -585,7 +578,7 @@
  * This is used when drivers do not need or cannot perform
  * lower level pdu processing.
  *
- * Called with session lock
+ * Called with session back_lock
  */
 void iscsi_complete_scsi_task(struct iscsi_task *task,
 			      uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -602,7 +595,7 @@
 
 
 /*
- * session lock must be held and if not called for a task that is
+ * session back_lock must be held and if not called for a task that is
  * still pending or from the xmit thread, then xmit thread must
  * be suspended.
  */
@@ -642,7 +635,10 @@
 		scsi_in(sc)->resid = scsi_in(sc)->length;
 	}
 
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&conn->session->back_lock);
 	iscsi_complete_task(task, state);
+	spin_unlock_bh(&conn->session->back_lock);
 }
 
 static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -780,7 +776,10 @@
 	return task;
 
 free_task:
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&session->back_lock);
 	__iscsi_put_task(task);
+	spin_unlock_bh(&session->back_lock);
 	return NULL;
 }
 
@@ -791,10 +790,10 @@
 	struct iscsi_session *session = conn->session;
 	int err = 0;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
 		err = -EPERM;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	return err;
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -1013,13 +1012,13 @@
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "pdu (op 0x%x itt 0x%x) rejected "
 				  "due to DataDigest error.\n",
-				  rejected_pdu.itt, opcode);
+				  opcode, rejected_pdu.itt);
 		break;
 	case ISCSI_REASON_IMM_CMD_REJECT:
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "pdu (op 0x%x itt 0x%x) rejected. Too many "
 				  "immediate commands.\n",
-				  rejected_pdu.itt, opcode);
+				  opcode, rejected_pdu.itt);
 		/*
 		 * We only send one TMF at a time so if the target could not
 		 * handle it, then it should get fixed (RFC mandates that
@@ -1031,14 +1030,19 @@
 		if (opcode != ISCSI_OP_NOOP_OUT)
 			return 0;
 
-		 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
+		 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
 			/*
 			 * nop-out in response to target's nop-out rejected.
 			 * Just resend.
 			 */
+			/* In RX path we are under back lock */
+			spin_unlock(&conn->session->back_lock);
+			spin_lock(&conn->session->frwd_lock);
 			iscsi_send_nopout(conn,
 					  (struct iscsi_nopin*)&rejected_pdu);
-		else {
+			spin_unlock(&conn->session->frwd_lock);
+			spin_lock(&conn->session->back_lock);
+		} else {
 			struct iscsi_task *task;
 			/*
 			 * Our nop as ping got dropped. We know the target
@@ -1059,8 +1063,8 @@
 	default:
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "pdu (op 0x%x itt 0x%x) rejected. Reason "
-				  "code 0x%x\n", rejected_pdu.itt,
-				  rejected_pdu.opcode, reject->reason);
+				  "code 0x%x\n", rejected_pdu.opcode,
+				  rejected_pdu.itt, reject->reason);
 		break;
 	}
 	return rc;
@@ -1074,7 +1078,7 @@
  * This should be used for mgmt tasks like login and nops, or if
  * the LDD's itt space does not include the session age.
  *
- * The session lock must be held.
+ * The session back_lock must be held.
  */
 struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
 {
@@ -1103,7 +1107,7 @@
  * @datalen: len of data buffer
  *
  * Completes pdu processing by freeing any resources allocated at
- * queuecommand or send generic. session lock must be held and verify
+ * queuecommand or send generic. session back_lock must be held and verify
  * itt must have been called.
  */
 int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -1140,7 +1144,12 @@
 			if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
 				break;
 
+			/* In RX path we are under back lock */
+			spin_unlock(&session->back_lock);
+			spin_lock(&session->frwd_lock);
 			iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
+			spin_unlock(&session->frwd_lock);
+			spin_lock(&session->back_lock);
 			break;
 		case ISCSI_OP_REJECT:
 			rc = iscsi_handle_reject(conn, hdr, data, datalen);
@@ -1247,9 +1256,9 @@
 {
 	int rc;
 
-	spin_lock(&conn->session->lock);
+	spin_lock(&conn->session->back_lock);
 	rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
-	spin_unlock(&conn->session->lock);
+	spin_unlock(&conn->session->back_lock);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
@@ -1293,7 +1302,7 @@
  *
  * This should be used for cmd tasks.
  *
- * The session lock must be held.
+ * The session back_lock must be held.
  */
 struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
 {
@@ -1323,15 +1332,15 @@
 	struct iscsi_conn *conn;
 	struct device *dev;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	conn = session->leadconn;
 	if (session->state == ISCSI_STATE_TERMINATE || !conn) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return;
 	}
 
 	dev = get_device(&conn->cls_conn->dev);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	if (!dev)
 	        return;
 	/*
@@ -1351,15 +1360,15 @@
 {
 	struct iscsi_session *session = conn->session;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (session->state == ISCSI_STATE_FAILED) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		return;
 	}
 
 	if (conn->stop_stage == 0)
 		session->state = ISCSI_STATE_FAILED;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
@@ -1393,15 +1402,18 @@
 		return -ENODATA;
 
 	__iscsi_get_task(task);
-	spin_unlock_bh(&conn->session->lock);
+	spin_unlock_bh(&conn->session->frwd_lock);
 	rc = conn->session->tt->xmit_task(task);
-	spin_lock_bh(&conn->session->lock);
+	spin_lock_bh(&conn->session->frwd_lock);
 	if (!rc) {
 		/* done with this task */
 		task->last_xfer = jiffies;
 		conn->task = NULL;
 	}
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&conn->session->back_lock);
 	__iscsi_put_task(task);
+	spin_unlock_bh(&conn->session->back_lock);
 	return rc;
 }
 
@@ -1410,7 +1422,7 @@
  * @task: task to requeue
  *
  * LLDs that need to run a task from the session workqueue should call
- * this. The session lock must be held. This should only be called
+ * this. The session frwd_lock must be held. This should only be called
  * by software drivers.
  */
 void iscsi_requeue_task(struct iscsi_task *task)
@@ -1441,10 +1453,10 @@
 	struct iscsi_task *task;
 	int rc = 0;
 
-	spin_lock_bh(&conn->session->lock);
+	spin_lock_bh(&conn->session->frwd_lock);
 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
 		ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
-		spin_unlock_bh(&conn->session->lock);
+		spin_unlock_bh(&conn->session->frwd_lock);
 		return -ENODATA;
 	}
 
@@ -1465,7 +1477,10 @@
 					 struct iscsi_task, running);
 		list_del_init(&conn->task->running);
 		if (iscsi_prep_mgmt_task(conn, conn->task)) {
+			/* regular RX path uses back_lock */
+			spin_lock_bh(&conn->session->back_lock);
 			__iscsi_put_task(conn->task);
+			spin_unlock_bh(&conn->session->back_lock);
 			conn->task = NULL;
 			continue;
 		}
@@ -1527,11 +1542,11 @@
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
-	spin_unlock_bh(&conn->session->lock);
+	spin_unlock_bh(&conn->session->frwd_lock);
 	return -ENODATA;
 
 done:
-	spin_unlock_bh(&conn->session->lock);
+	spin_unlock_bh(&conn->session->frwd_lock);
 	return rc;
 }
 
@@ -1600,7 +1615,7 @@
 
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 
 	reason = iscsi_session_chkready(cls_session);
 	if (reason) {
@@ -1686,13 +1701,13 @@
 	}
 
 	session->queued_cmdsn++;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	return 0;
 
 prepd_reject:
 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 reject:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
 			  sc->cmnd[0], reason);
 	return SCSI_MLQUEUE_TARGET_BUSY;
@@ -1700,7 +1715,7 @@
 prepd_fault:
 	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 fault:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
 			  sc->cmnd[0], reason);
 	if (!scsi_bidi_cmnd(sc))
@@ -1748,14 +1763,14 @@
 	struct iscsi_conn *conn = (struct iscsi_conn *)data;
 	struct iscsi_session *session = conn->session;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->frwd_lock);
 	if (conn->tmf_state == TMF_QUEUED) {
 		conn->tmf_state = TMF_TIMEDOUT;
 		ISCSI_DBG_EH(session, "tmf timedout\n");
 		/* unblock eh_abort() */
 		wake_up(&conn->ehwait);
 	}
-	spin_unlock(&session->lock);
+	spin_unlock(&session->frwd_lock);
 }
 
 static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
@@ -1768,10 +1783,10 @@
 	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
 				      NULL, 0);
 	if (!task) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-		spin_lock_bh(&session->lock);
+		spin_lock_bh(&session->frwd_lock);
 		return -EPERM;
 	}
 	conn->tmfcmd_pdus_cnt++;
@@ -1781,7 +1796,7 @@
 	add_timer(&conn->tmf_timer);
 	ISCSI_DBG_EH(session, "tmf set timeout\n");
 
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	mutex_unlock(&session->eh_mutex);
 
 	/*
@@ -1800,7 +1815,7 @@
 	del_timer_sync(&conn->tmf_timer);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/* if the session drops it will clean up the task */
 	if (age != session->age ||
 	    session->state != ISCSI_STATE_LOGGED_IN)
@@ -1837,7 +1852,7 @@
  * iscsi_suspend_queue - suspend iscsi_queuecommand
  * @conn: iscsi conn to stop queueing IO on
  *
- * This grabs the session lock to make sure no one is in
+ * This grabs the session frwd_lock to make sure no one is in
  * xmit_task/queuecommand, and then sets suspend to prevent
  * new commands from being queued. This only needs to be called
  * by offload drivers that need to sync a path like ep disconnect
@@ -1846,9 +1861,9 @@
  */
 void iscsi_suspend_queue(struct iscsi_conn *conn)
 {
-	spin_lock_bh(&conn->session->lock);
+	spin_lock_bh(&conn->session->frwd_lock);
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-	spin_unlock_bh(&conn->session->lock);
+	spin_unlock_bh(&conn->session->frwd_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
 
@@ -1907,7 +1922,7 @@
 
 	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
 
-	spin_lock(&session->lock);
+	spin_lock(&session->frwd_lock);
 	task = (struct iscsi_task *)sc->SCp.ptr;
 	if (!task) {
 		/*
@@ -2021,7 +2036,7 @@
 done:
 	if (task)
 		task->last_timeout = jiffies;
-	spin_unlock(&session->lock);
+	spin_unlock(&session->frwd_lock);
 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
 		     "timer reset" : "nh");
 	return rc;
@@ -2033,7 +2048,7 @@
 	struct iscsi_session *session = conn->session;
 	unsigned long recv_timeout, next_timeout = 0, last_recv;
 
-	spin_lock(&session->lock);
+	spin_lock(&session->frwd_lock);
 	if (session->state != ISCSI_STATE_LOGGED_IN)
 		goto done;
 
@@ -2050,7 +2065,7 @@
 				  "last ping %lu, now %lu\n",
 				  conn->ping_timeout, conn->recv_timeout,
 				  last_recv, conn->last_ping, jiffies);
-		spin_unlock(&session->lock);
+		spin_unlock(&session->frwd_lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 		return;
 	}
@@ -2066,7 +2081,7 @@
 	ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
 	mod_timer(&conn->transport_timer, next_timeout);
 done:
-	spin_unlock(&session->lock);
+	spin_unlock(&session->frwd_lock);
 }
 
 static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
@@ -2096,7 +2111,7 @@
 	ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/*
 	 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
 	 * got the command.
@@ -2104,7 +2119,7 @@
 	if (!sc->SCp.ptr) {
 		ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
 				      "it completed.\n");
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		mutex_unlock(&session->eh_mutex);
 		return SUCCESS;
 	}
@@ -2115,7 +2130,7 @@
 	 */
 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
 	    sc->SCp.phase != session->age) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		mutex_unlock(&session->eh_mutex);
 		ISCSI_DBG_EH(session, "failing abort due to dropped "
 				  "session.\n");
@@ -2156,7 +2171,7 @@
 
 	switch (conn->tmf_state) {
 	case TMF_SUCCESS:
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		/*
 		 * stop tx side in case the target had sent an abort rsp but
 		 * the initiator was still writing out data.
@@ -2167,15 +2182,15 @@
 		 * good and have never sent us a successful tmf response
 		 * then sent more data for the cmd.
 		 */
-		spin_lock_bh(&session->lock);
+		spin_lock_bh(&session->frwd_lock);
 		fail_scsi_task(task, DID_ABORT);
 		conn->tmf_state = TMF_INITIAL;
 		memset(hdr, 0, sizeof(*hdr));
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_start_tx(conn);
 		goto success_unlocked;
 	case TMF_TIMEDOUT:
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto failed_unlocked;
 	case TMF_NOT_FOUND:
@@ -2194,7 +2209,7 @@
 	}
 
 success:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 success_unlocked:
 	ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
 		     sc, task->itt);
@@ -2202,7 +2217,7 @@
 	return SUCCESS;
 
 failed:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 failed_unlocked:
 	ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
 		     task ? task->itt : 0);
@@ -2235,7 +2250,7 @@
 	ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/*
 	 * Just check if we are not logged in. We cannot check for
 	 * the phase because the reset could come from an ioctl.
@@ -2262,7 +2277,7 @@
 	case TMF_SUCCESS:
 		break;
 	case TMF_TIMEDOUT:
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto done;
 	default:
@@ -2271,21 +2286,21 @@
 	}
 
 	rc = SUCCESS;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_suspend_tx(conn);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	memset(hdr, 0, sizeof(*hdr));
 	fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
 	conn->tmf_state = TMF_INITIAL;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_start_tx(conn);
 	goto done;
 
 unlock:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 done:
 	ISCSI_DBG_EH(session, "dev reset result = %s\n",
 		     rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2298,13 +2313,13 @@
 {
 	struct iscsi_session *session = cls_session->dd_data;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (session->state != ISCSI_STATE_LOGGED_IN) {
 		session->state = ISCSI_STATE_RECOVERY_FAILED;
 		if (session->leadconn)
 			wake_up(&session->leadconn->ehwait);
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
 
@@ -2326,19 +2341,19 @@
 	conn = session->leadconn;
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (session->state == ISCSI_STATE_TERMINATE) {
 failed:
 		ISCSI_DBG_EH(session,
 			     "failing session reset: Could not log back into "
 			     "%s, %s [age %d]\n", session->targetname,
 			     conn->persistent_address, session->age);
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		mutex_unlock(&session->eh_mutex);
 		return FAILED;
 	}
 
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	mutex_unlock(&session->eh_mutex);
 	/*
 	 * we drop the lock here but the leadconn cannot be destroyed while
@@ -2355,14 +2370,14 @@
 		flush_signals(current);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (session->state == ISCSI_STATE_LOGGED_IN) {
 		ISCSI_DBG_EH(session,
 			     "session reset succeeded for %s,%s\n",
 			     session->targetname, conn->persistent_address);
 	} else
 		goto failed;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	mutex_unlock(&session->eh_mutex);
 	return SUCCESS;
 }
@@ -2398,7 +2413,7 @@
 		     session->targetname);
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	/*
 	 * Just check if we are not logged in. We cannot check for
 	 * the phase because the reset could come from an ioctl.
@@ -2425,7 +2440,7 @@
 	case TMF_SUCCESS:
 		break;
 	case TMF_TIMEDOUT:
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto done;
 	default:
@@ -2434,21 +2449,21 @@
 	}
 
 	rc = SUCCESS;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_suspend_tx(conn);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	memset(hdr, 0, sizeof(*hdr));
 	fail_scsi_tasks(conn, -1, DID_ERROR);
 	conn->tmf_state = TMF_INITIAL;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_start_tx(conn);
 	goto done;
 
 unlock:
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 done:
 	ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
 		     rc == SUCCESS ? "SUCCESS" : "FAILED");
@@ -2746,8 +2761,10 @@
 	session->max_r2t = 1;
 	session->tt = iscsit;
 	session->dd_data = cls_session->dd_data + sizeof(*session);
+
 	mutex_init(&session->eh_mutex);
-	spin_lock_init(&session->lock);
+	spin_lock_init(&session->frwd_lock);
+	spin_lock_init(&session->back_lock);
 
 	/* initialize SCSI PDU commands pool */
 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
@@ -2861,14 +2878,14 @@
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
 	/* allocate login_task used for the login/text sequences */
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (!kfifo_out(&session->cmdpool.queue,
                          (void*)&conn->login_task,
 			 sizeof(void*))) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		goto login_task_alloc_fail;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	data = (char *) __get_free_pages(GFP_KERNEL,
 					 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
@@ -2905,7 +2922,7 @@
 
 	del_timer_sync(&conn->transport_timer);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
 	if (session->leadconn == conn) {
 		/*
@@ -2914,7 +2931,7 @@
 		session->state = ISCSI_STATE_TERMINATE;
 		wake_up(&conn->ehwait);
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	/*
 	 * Block until all in-progress commands for this connection
@@ -2941,16 +2958,19 @@
 	/* flush queued up work because we free the connection below */
 	iscsi_suspend_tx(conn);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	free_pages((unsigned long) conn->data,
 		   get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
 	kfree(conn->persistent_address);
 	kfree(conn->local_ipaddr);
+	/* regular RX path uses back_lock */
+	spin_lock_bh(&session->back_lock);
 	kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
 		    sizeof(void*));
+	spin_unlock_bh(&session->back_lock);
 	if (session->leadconn == conn)
 		session->leadconn = NULL;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_destroy_conn(cls_conn);
 }
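
For readers following the locking change: this series splits the old single session->lock into frwd_lock (the submission/TX side) and back_lock (the completion/RX side). Splitting is safe for the shared command kfifo because a kernel kfifo tolerates one concurrent reader and one concurrent writer; a minimal sketch of the idea, using hypothetical names rather than libiscsi's real structures:

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct demo_session {
	spinlock_t frwd_lock;		/* guards the queuing/TX side */
	spinlock_t back_lock;		/* guards the completion/RX side */
	struct kfifo cmdpool;		/* pool of free task pointers */
};

/* Submission path: dequeue a free task under frwd_lock only. */
static int demo_alloc_task(struct demo_session *s, void **task)
{
	int got;

	spin_lock_bh(&s->frwd_lock);
	got = kfifo_out(&s->cmdpool, (void *)task, sizeof(void *));
	spin_unlock_bh(&s->frwd_lock);
	return got ? 0 : -ENOMEM;
}

/* Completion path: return the task under back_lock, so the RX side
 * never contends with concurrent submissions for the same lock. */
static void demo_free_task(struct demo_session *s, void *task)
{
	spin_lock_bh(&s->back_lock);
	kfifo_in(&s->cmdpool, (void *)&task, sizeof(void *));
	spin_unlock_bh(&s->back_lock);
}

This mirrors why iscsi_conn_teardown above now returns login_task to the pool under back_lock: the regular RX path is the kfifo's other user.
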
@@ -2987,7 +3007,7 @@
 		conn->ping_timeout = 5;
 	}
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	conn->c_stage = ISCSI_CONN_STARTED;
 	session->state = ISCSI_STATE_LOGGED_IN;
 	session->queued_cmdsn = session->cmdsn;
@@ -3016,7 +3036,7 @@
 	default:
 		break;
 	}
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	iscsi_unblock_session(session->cls_session);
 	wake_up(&conn->ehwait);
@@ -3055,9 +3075,9 @@
 	int old_stop_stage;
 
 	mutex_lock(&session->eh_mutex);
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (conn->stop_stage == STOP_CONN_TERM) {
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&session->frwd_lock);
 		mutex_unlock(&session->eh_mutex);
 		return;
 	}
@@ -3074,14 +3094,14 @@
 
 	old_stop_stage = conn->stop_stage;
 	conn->stop_stage = flag;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	del_timer_sync(&conn->transport_timer);
 	iscsi_suspend_tx(conn);
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	conn->c_stage = ISCSI_CONN_STOPPED;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	/*
 	 * for connection level recovery we should not calculate
@@ -3102,11 +3122,11 @@
 	/*
 	 * flush queues.
 	 */
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
 	fail_mgmt_tasks(session, conn);
 	memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 	mutex_unlock(&session->eh_mutex);
 }
 
@@ -3133,10 +3153,10 @@
 	struct iscsi_session *session = cls_session->dd_data;
 	struct iscsi_conn *conn = cls_conn->dd_data;
 
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->frwd_lock);
 	if (is_leading)
 		session->leadconn = conn;
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->frwd_lock);
 
 	/*
 	 * Unblock xmitworker(), Login Phase will pass through.
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 1d58d53..60cb6dc 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -446,7 +446,7 @@
  * iscsi_tcp_cleanup_task - free tcp_task resources
  * @task: iscsi task
  *
- * must be called with session lock
+ * must be called with session back_lock
  */
 void iscsi_tcp_cleanup_task(struct iscsi_task *task)
 {
@@ -457,6 +457,7 @@
 	if (!task->sc)
 		return;
 
+	spin_lock_bh(&tcp_task->queue2pool);
 	/* flush task's r2t queues */
 	while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
 		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
@@ -470,6 +471,7 @@
 			    sizeof(void*));
 		tcp_task->r2t = NULL;
 	}
+	spin_unlock_bh(&tcp_task->queue2pool);
 }
 EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
 
@@ -529,6 +531,8 @@
 	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
 	struct iscsi_r2t_info *r2t;
 	int r2tsn = be32_to_cpu(rhdr->r2tsn);
+	u32 data_length;
+	u32 data_offset;
 	int rc;
 
 	if (tcp_conn->in.datalen) {
@@ -554,39 +558,40 @@
 		return 0;
 	}
 
-	rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
+	data_length = be32_to_cpu(rhdr->data_length);
+	if (data_length == 0) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "invalid R2T with zero data len\n");
+		return ISCSI_ERR_DATALEN;
+	}
+
+	if (data_length > session->max_burst)
+		ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
+			      "burst %u. Attempting to execute request.\n",
+			      data_length, session->max_burst);
+
+	data_offset = be32_to_cpu(rhdr->data_offset);
+	if (data_offset + data_length > scsi_out(task->sc)->length) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "invalid R2T with data len %u at offset %u "
+				  "and total length %d\n", data_length,
+				  data_offset, scsi_out(task->sc)->length);
+		return ISCSI_ERR_DATALEN;
+	}
+
+	spin_lock(&tcp_task->pool2queue);
+	rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
 	if (!rc) {
 		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
 				  "Target has sent more R2Ts than it "
 				  "negotiated for or driver has leaked.\n");
+		spin_unlock(&tcp_task->pool2queue);
 		return ISCSI_ERR_PROTO;
 	}
 
 	r2t->exp_statsn = rhdr->statsn;
-	r2t->data_length = be32_to_cpu(rhdr->data_length);
-	if (r2t->data_length == 0) {
-		iscsi_conn_printk(KERN_ERR, conn,
-				  "invalid R2T with zero data len\n");
-		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
-			    sizeof(void*));
-		return ISCSI_ERR_DATALEN;
-	}
-
-	if (r2t->data_length > session->max_burst)
-		ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "
-			      "burst %u. Attempting to execute request.\n",
-			      r2t->data_length, session->max_burst);
-
-	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
-	if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
-		iscsi_conn_printk(KERN_ERR, conn,
-				  "invalid R2T with data len %u at offset %u "
-				  "and total length %d\n", r2t->data_length,
-				  r2t->data_offset, scsi_out(task->sc)->length);
-		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
-			    sizeof(void*));
-		return ISCSI_ERR_DATALEN;
-	}
+	r2t->data_length = data_length;
+	r2t->data_offset = data_offset;
 
 	r2t->ttt = rhdr->ttt; /* no flip */
 	r2t->datasn = 0;
@@ -595,6 +600,7 @@
 	tcp_task->exp_datasn = r2tsn + 1;
 	kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
 	conn->r2t_pdus_cnt++;
+	spin_unlock(&tcp_task->pool2queue);
 
 	iscsi_requeue_task(task);
 	return 0;
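
The reordering above is worth calling out: the R2T header is now fully validated before an entry is taken from the r2t pool, so the error paths no longer have to push a half-initialized descriptor back, and the pool-to-queue move is serialized by the new per-task pool2queue lock instead of the session-wide lock. A condensed sketch of the same ordering, with hypothetical names:

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

struct demo_r2t {
	u32 data_length;
};

struct demo_task {
	spinlock_t pool2queue;		/* serializes pool -> queue moves */
	struct kfifo r2tpool;		/* free R2T descriptors */
	struct kfifo r2tqueue;		/* R2Ts waiting to be serviced */
};

static int demo_r2t_rsp(struct demo_task *t, __be32 wire_len)
{
	struct demo_r2t *r2t;
	u32 len = be32_to_cpu(wire_len);

	/* Validate the PDU first: rejecting here means the error path
	 * never has to hand a descriptor back to the pool. */
	if (!len)
		return -EPROTO;

	spin_lock(&t->pool2queue);
	if (!kfifo_out(&t->r2tpool, (void *)&r2t, sizeof(void *))) {
		spin_unlock(&t->pool2queue);
		return -EBUSY;
	}
	r2t->data_length = len;
	kfifo_in(&t->r2tqueue, (void *)&r2t, sizeof(void *));
	spin_unlock(&t->pool2queue);
	return 0;
}
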
@@ -667,14 +673,14 @@
 
 	switch(opcode) {
 	case ISCSI_OP_SCSI_DATA_IN:
-		spin_lock(&conn->session->lock);
+		spin_lock(&conn->session->back_lock);
 		task = iscsi_itt_to_ctask(conn, hdr->itt);
 		if (!task)
 			rc = ISCSI_ERR_BAD_ITT;
 		else
 			rc = iscsi_tcp_data_in(conn, task);
 		if (rc) {
-			spin_unlock(&conn->session->lock);
+			spin_unlock(&conn->session->back_lock);
 			break;
 		}
 
@@ -707,11 +713,11 @@
 						   tcp_conn->in.datalen,
 						   iscsi_tcp_process_data_in,
 						   rx_hash);
-			spin_unlock(&conn->session->lock);
+			spin_unlock(&conn->session->back_lock);
 			return rc;
 		}
 		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
-		spin_unlock(&conn->session->lock);
+		spin_unlock(&conn->session->back_lock);
 		break;
 	case ISCSI_OP_SCSI_CMD_RSP:
 		if (tcp_conn->in.datalen) {
@@ -721,18 +727,20 @@
 		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
 		break;
 	case ISCSI_OP_R2T:
-		spin_lock(&conn->session->lock);
+		spin_lock(&conn->session->back_lock);
 		task = iscsi_itt_to_ctask(conn, hdr->itt);
+		spin_unlock(&conn->session->back_lock);
 		if (!task)
 			rc = ISCSI_ERR_BAD_ITT;
 		else if (ahslen)
 			rc = ISCSI_ERR_AHSLEN;
 		else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
 			task->last_xfer = jiffies;
+			spin_lock(&conn->session->frwd_lock);
 			rc = iscsi_tcp_r2t_rsp(conn, task);
+			spin_unlock(&conn->session->frwd_lock);
 		} else
 			rc = ISCSI_ERR_PROTO;
-		spin_unlock(&conn->session->lock);
 		break;
 	case ISCSI_OP_LOGIN_RSP:
 	case ISCSI_OP_TEXT_RSP:
@@ -980,14 +988,13 @@
 
 static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
 {
-	struct iscsi_session *session = task->conn->session;
 	struct iscsi_tcp_task *tcp_task = task->dd_data;
 	struct iscsi_r2t_info *r2t = NULL;
 
 	if (iscsi_task_has_unsol_data(task))
 		r2t = &task->unsol_r2t;
 	else {
-		spin_lock_bh(&session->lock);
+		spin_lock_bh(&tcp_task->queue2pool);
 		if (tcp_task->r2t) {
 			r2t = tcp_task->r2t;
 			/* Continue with this R2T? */
@@ -1009,7 +1016,7 @@
 			else
 				r2t = tcp_task->r2t;
 		}
-		spin_unlock_bh(&session->lock);
+		spin_unlock_bh(&tcp_task->queue2pool);
 	}
 
 	return r2t;
@@ -1139,6 +1146,8 @@
 			iscsi_pool_free(&tcp_task->r2tpool);
 			goto r2t_alloc_fail;
 		}
+		spin_lock_init(&tcp_task->pool2queue);
+		spin_lock_init(&tcp_task->queue2pool);
 	}
 
 	return 0;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index da3aee1..25d0f127 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -862,7 +862,7 @@
 
 enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
 {
-	scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
+	scmd_dbg(cmd, "command %p timed out\n", cmd);
 
 	return BLK_EH_NOT_HANDLED;
 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4e1b75c..94a3caf 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -73,8 +73,6 @@
  */
 /* 1 Second */
 #define QUEUE_RAMP_DOWN_INTERVAL	(msecs_to_jiffies(1000 * 1))
-/* 5 minutes */
-#define QUEUE_RAMP_UP_INTERVAL		(msecs_to_jiffies(1000 * 300))
 
 /* Number of exchanges reserved for discovery to complete */
 #define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -722,6 +720,20 @@
 	uint32_t cfg_hba_queue_depth;
 	uint32_t cfg_enable_hba_reset;
 	uint32_t cfg_enable_hba_heartbeat;
+	uint32_t cfg_fof;
+	uint32_t cfg_EnableXLane;
+	uint8_t cfg_oas_tgt_wwpn[8];
+	uint8_t cfg_oas_vpt_wwpn[8];
+	uint32_t cfg_oas_lun_state;
+#define OAS_LUN_ENABLE	1
+#define OAS_LUN_DISABLE	0
+	uint32_t cfg_oas_lun_status;
+#define OAS_LUN_STATUS_EXISTS	0x01
+	uint32_t cfg_oas_flags;
+#define OAS_FIND_ANY_VPORT	0x01
+#define OAS_FIND_ANY_TARGET	0x02
+#define OAS_LUN_VALID	0x04
+	uint32_t cfg_XLanePriority;
 	uint32_t cfg_enable_bg;
 	uint32_t cfg_hostmem_hgp;
 	uint32_t cfg_log_verbose;
@@ -730,6 +742,7 @@
 	uint32_t cfg_request_firmware_upgrade;
 	uint32_t cfg_iocb_cnt;
 	uint32_t cfg_suppress_link_up;
+	uint32_t cfg_rrq_xri_bitmap_sz;
 #define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
 #define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
 #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
@@ -835,6 +848,7 @@
 	mempool_t *mbox_mem_pool;
 	mempool_t *nlp_mem_pool;
 	mempool_t *rrq_pool;
+	mempool_t *active_rrq_pool;
 
 	struct fc_host_statistics link_stats;
 	enum intr_type_t intr_type;
@@ -869,7 +883,6 @@
 	atomic_t num_cmd_success;
 	unsigned long last_rsrc_error_time;
 	unsigned long last_ramp_down_time;
-	unsigned long last_ramp_up_time;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	struct dentry *hba_debugfs_root;
 	atomic_t debugfs_vport_count;
@@ -971,6 +984,9 @@
 	atomic_t sdev_cnt;
 	uint8_t fips_spec_rev;
 	uint8_t fips_level;
+	spinlock_t devicelock;	/* lock for luns list */
+	mempool_t *device_data_mem_pool;
+	struct list_head luns;
 };
 
 static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 00656fc..8d5b6ce 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -529,6 +529,27 @@
 }
 
 /**
+ * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
+ *			    (OAS) is supported.
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			phba->sli4_hba.pc_sli4_params.oas_supported);
+}
+
+/**
  * lpfc_link_state_store - Transition the link_state on an HBA port
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -2041,9 +2062,53 @@
 static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
 		   lpfc_sriov_hw_max_virtfn_show, NULL);
 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
+static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
+		   NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+#define WWN_SZ 8
+/**
+ * lpfc_wwn_set - Convert string to the 8 byte WWN value.
+ * @buf: WWN string.
+ * @cnt: Length of string.
+ * @wwn: Array to receive converted wwn value.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain a valid wwn
+ * 0 success
+ **/
+static int
+lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
+{
+	unsigned int i, j;
 
+	/* Count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
+	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+		return -EINVAL;
+
+	memset(wwn, 0, WWN_SZ);
+
+	/* Validate and store the new name */
+	for (i = 0, j = 0; i < 16; i++) {
+		if ((*buf >= 'a') && (*buf <= 'f'))
+			j = ((j << 4) | ((*buf++ - 'a') + 10));
+		else if ((*buf >= 'A') && (*buf <= 'F'))
+			j = ((j << 4) | ((*buf++ - 'A') + 10));
+		else if ((*buf >= '0') && (*buf <= '9'))
+			j = ((j << 4) | (*buf++ - '0'));
+		else
+			return -EINVAL;
+		if (i % 2) {
+			wwn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+	return 0;
+}
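
A small illustrative caller (assuming the int return type used above): lpfc_wwn_set() accepts 16 hex digits, optionally prefixed with "x" or "0x", and the sysfs callers strip a trailing newline before passing the count in. The wwpn value below is an example only:

static int demo_parse_wwpn(void)
{
	char wwpn[WWN_SZ];

	/* "0x" + 16 hex digits = 18 characters */
	if (lpfc_wwn_set("0x10000000c9abcdef", 18, wwpn))
		return -EINVAL;
	/* wwpn[] now holds 10:00:00:00:c9:ab:cd:ef */
	return 0;
}
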
 /**
  * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
  * @dev: class device that is converted into a Scsi_host.
@@ -2132,9 +2197,9 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	struct completion online_compl;
-	int stat1=0, stat2=0;
-	unsigned int i, j, cnt=count;
-	u8 wwpn[8];
+	int stat1 = 0, stat2 = 0;
+	unsigned int cnt = count;
+	u8 wwpn[WWN_SZ];
 	int rc;
 
 	if (!phba->cfg_enable_hba_reset)
@@ -2149,29 +2214,19 @@
 	if (buf[cnt-1] == '\n')
 		cnt--;
 
-	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
-	    ((cnt == 17) && (*buf++ != 'x')) ||
-	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+	if (!phba->soft_wwn_enable)
 		return -EINVAL;
 
+	/* lock setting wwpn, wwnn down */
 	phba->soft_wwn_enable = 0;
 
-	memset(wwpn, 0, sizeof(wwpn));
-
-	/* Validate and store the new name */
-	for (i=0, j=0; i < 16; i++) {
-		int value;
-
-		value = hex_to_bin(*buf++);
-		if (value >= 0)
-			j = (j << 4) | value;
-		else
-			return -EINVAL;
-		if (i % 2) {
-			wwpn[i/2] = j & 0xff;
-			j = 0;
-		}
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc) {
+		/* not able to set wwpn, unlock it */
+		phba->soft_wwn_enable = 1;
+		return rc;
 	}
+
 	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
 	fc_host_port_name(shost) = phba->cfg_soft_wwpn;
 	if (phba->cfg_soft_wwnn)
@@ -2198,7 +2253,7 @@
 				"reinit adapter - %d\n", stat2);
 	return (stat1 || stat2) ? -EIO : count;
 }
-static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
 		   lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
 
 /**
@@ -2235,39 +2290,25 @@
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-	unsigned int i, j, cnt=count;
-	u8 wwnn[8];
+	unsigned int cnt = count;
+	u8 wwnn[WWN_SZ];
+	int rc;
 
 	/* count may include a LF at end of string */
 	if (buf[cnt-1] == '\n')
 		cnt--;
 
-	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
-	    ((cnt == 17) && (*buf++ != 'x')) ||
-	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+	if (!phba->soft_wwn_enable)
 		return -EINVAL;
 
-	/*
-	 * Allow wwnn to be set many times, as long as the enable is set.
-	 * However, once the wwpn is set, everything locks.
-	 */
-
-	memset(wwnn, 0, sizeof(wwnn));
-
-	/* Validate and store the new name */
-	for (i=0, j=0; i < 16; i++) {
-		int value;
-
-		value = hex_to_bin(*buf++);
-		if (value >= 0)
-			j = (j << 4) | value;
-		else
-			return -EINVAL;
-		if (i % 2) {
-			wwnn[i/2] = j & 0xff;
-			j = 0;
-		}
+	rc = lpfc_wwn_set(buf, cnt, wwnn);
+	if (rc) {
+		/* Allow wwnn to be set many times, as long as the enable
+		 * is set. However, once the wwpn is set, everything locks.
+		 */
+		return rc;
 	}
+
 	phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
 
 	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
@@ -2276,9 +2317,438 @@
 
 	return count;
 }
-static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
 		   lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
 
+/**
+ * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
+ *		      Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * value of count
+ **/
+static ssize_t
+lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			wwn_to_u64(phba->cfg_oas_tgt_wwpn));
+}
+
+/**
+ * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
+ *		      Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the wwpn is not properly formatted
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int cnt = count;
+	uint8_t wwpn[WWN_SZ];
+	int rc;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc)
+		return rc;
+
+	memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	if (wwn_to_u64(wwpn) == 0)
+		phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
+	else
+		phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
+	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
+		   lpfc_oas_tgt_show, lpfc_oas_tgt_store);
+
+/**
+ * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
+ *		      for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			wwn_to_u64(phba->cfg_oas_vpt_wwpn));
+}
+
+/**
+ * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
+ *		      for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the wwpn is not properly formatted
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int cnt = count;
+	uint8_t wwpn[WWN_SZ];
+	int rc;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc)
+		return rc;
+
+	memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	if (wwn_to_u64(wwpn) == 0)
+		phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
+	else
+		phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
+	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
+		   lpfc_oas_vpt_show, lpfc_oas_vpt_store);
+
+/**
+ * lpfc_oas_lun_state_show - Return the state (enabled or disabled) that
+ *			    will be applied to luns for Optimized Access
+ *			    Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+}
+
+/**
+ * lpfc_oas_lun_state_store - Store the state (enabled or disabled) that
+ *			    will be applied to luns for Optimized Access
+ *			    Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL the buffer does not contain a valid 0 or 1 state value
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	int val = 0;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	if ((val != 0) && (val != 1))
+		return -EINVAL;
+
+	phba->cfg_oas_lun_state = val;
+
+	return strlen(buf);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
+		   lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
+
+/**
+ * lpfc_oas_lun_status_show - Return the status of the Optimized Access
+ *                          Storage (OAS) lun returned by the
+ *                          lpfc_oas_lun_show function.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
+		return -EFAULT;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
+		   lpfc_oas_lun_status_show, NULL);
+
+
+/**
+ * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
+ *			   (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport whose lun is addressed.
+ * @tgt_wwpn: wwpn of the target whose lun is addressed.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the lun.
+ *
+ * Returns:
+ * SUCCESS : 0
+ * -EPERM OAS is not enabled or not supported by this port.
+ *
+ */
+static int
+lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+		       uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
+{
+
+	int rc = 0;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	if (oas_state) {
+		if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+					 (struct lpfc_name *)tgt_wwpn, lun))
+			rc = -ENOMEM;
+	} else {
+		lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+				     (struct lpfc_name *)tgt_wwpn, lun);
+	}
+	return rc;
+
+}
+
+/**
+ * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
+ *			  Access Storage (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
+ * @lun_status: status of the lun returned lun
+ *
+ * Returns the first or next lun enabled for OAS operations for the vport/target
+ * specified.  If a lun is found, its vport wwpn, target wwpn and status are
+ * returned.  If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
+ *
+ * Return:
+ * lun that is OAS enabled for the vport/target
+ * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
+ */
+static uint64_t
+lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+		      uint8_t tgt_wwpn[], uint32_t *lun_status)
+{
+	uint64_t found_lun;
+
+	if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
+		return NOT_OAS_ENABLED_LUN;
+	if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
+				   phba->sli4_hba.oas_next_vpt_wwpn,
+				   (struct lpfc_name *)
+				   phba->sli4_hba.oas_next_tgt_wwpn,
+				   &phba->sli4_hba.oas_next_lun,
+				   (struct lpfc_name *)vpt_wwpn,
+				   (struct lpfc_name *)tgt_wwpn,
+				   &found_lun, lun_status))
+		return found_lun;
+	else
+		return NOT_OAS_ENABLED_LUN;
+}
+
+/**
+ * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the oas_lun.
+ *
+ * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
+ * a lun for OAS operations.
+ *
+ * Return:
+ * SUCCESS: 0
+ * -ENOMEM: failed to enable a lun for OAS operations
+ * -EPERM: OAS is not enabled
+ */
+static ssize_t
+lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+			  uint8_t tgt_wwpn[], uint64_t lun,
+			  uint32_t oas_state)
+{
+
+	int rc;
+
+	rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
+					oas_state);
+	return rc;
+}
+
+/**
+ * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This routine returns a lun enabled for OAS each time the function
+ * is called.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	uint64_t oas_lun;
+	int len = 0;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
+			return -EFAULT;
+
+	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
+			return -EFAULT;
+
+	oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
+					phba->cfg_oas_tgt_wwpn,
+					&phba->cfg_oas_lun_status);
+	if (oas_lun != NOT_OAS_ENABLED_LUN)
+		phba->cfg_oas_flags |= OAS_LUN_VALID;
+
+	len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+
+	return len;
+}
+
+/**
+ * lpfc_oas_lun_store - Store the OAS state for the lun
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: size of the data buffer.
+ *
+ * This function sets the OAS state for the lun.  Before this function is
+ * called, the vport wwpn, target wwpn, and oas state need to be set.
+ *
+ * Returns:
+ * value of count on success.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ * -EINVAL: the buffer does not contain a valid lun value.
+ **/
+static ssize_t
+lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint64_t scsi_lun;
+	ssize_t rc;
+
+	if (!phba->cfg_EnableXLane)
+		return -EPERM;
+
+	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+		return -EFAULT;
+
+	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+		return -EFAULT;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
+		return -EINVAL;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
+			"with oas set to %d\n",
+			wwn_to_u64(phba->cfg_oas_vpt_wwpn),
+			wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
+			phba->cfg_oas_lun_state);
+
+	rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
+					   phba->cfg_oas_tgt_wwpn, scsi_lun,
+					   phba->cfg_oas_lun_state);
+
+	if (rc)
+		return rc;
+
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
+		   lpfc_oas_lun_show, lpfc_oas_lun_store);
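
Taken together, these attributes form a small sysfs protocol: select the vport and target by wwpn, choose the desired state, then write a lun to apply it (and read lpfc_xlane_lun / lpfc_xlane_lun_status to walk the enabled luns). A hypothetical user-space sketch of the enable sequence; the host number and wwpn values are examples only:

#include <stdio.h>

static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *host = "/sys/class/scsi_host/host0";
	char path[256];

	/* 1. select the vport and target whose lun is addressed */
	snprintf(path, sizeof(path), "%s/lpfc_xlane_vpt", host);
	sysfs_write(path, "0x2000000000000001\n");
	snprintf(path, sizeof(path), "%s/lpfc_xlane_tgt", host);
	sysfs_write(path, "0x1000000000000002\n");

	/* 2. choose the state: 1 enables OAS, 0 disables it */
	snprintf(path, sizeof(path), "%s/lpfc_xlane_lun_state", host);
	sysfs_write(path, "1\n");

	/* 3. writing the lun applies the chosen state */
	snprintf(path, sizeof(path), "%s/lpfc_xlane_lun", host);
	sysfs_write(path, "0x0\n");
	return 0;
}
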
 
 static int lpfc_poll = 0;
 module_param(lpfc_poll, int, S_IRUGO);
@@ -3818,7 +4288,7 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_vector_map_info *cpup;
-	int  idx, len = 0;
+	int  len = 0;
 
 	if ((phba->sli_rev != LPFC_SLI_REV4) ||
 	    (phba->intr_type != MSIX))
@@ -3846,23 +4316,39 @@
 		break;
 	}
 
-	cpup = phba->sli4_hba.cpu_map;
-	for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+	while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
+
+		/* the margin must fit this entry plus the truncation message */
 		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
 			len += snprintf(buf + len, PAGE_SIZE-len,
 					"CPU %02d io_chan %02d "
 					"physid %d coreid %d\n",
-					idx, cpup->channel_id, cpup->phys_id,
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->channel_id, cpup->phys_id,
 					cpup->core_id);
 		else
 			len += snprintf(buf + len, PAGE_SIZE-len,
 					"CPU %02d io_chan %02d "
 					"physid %d coreid %d IRQ %d\n",
-					idx, cpup->channel_id, cpup->phys_id,
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->channel_id, cpup->phys_id,
 					cpup->core_id, cpup->irq);
 
-		cpup++;
+		phba->sli4_hba.curr_disp_cpu++;
+
+		/* display max number of CPUs keeping some margin */
+		if (phba->sli4_hba.curr_disp_cpu <
+				phba->sli4_hba.num_present_cpu &&
+				(len >= (PAGE_SIZE - 64))) {
+			len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+			break;
+		}
 	}
+
+	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+		phba->sli4_hba.curr_disp_cpu = 0;
+
 	return len;
 }
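
The rework above turns the cpu_map attribute into a paged display: sli4_hba.curr_disp_cpu survives between reads, output stops early with a "more..." marker before the PAGE_SIZE buffer can overflow, and the cursor wraps to zero once every CPU has been shown, so repeated reads of the file page through the whole map. The pattern in isolation (a sketch only; the real cursor lives in the hba structure, not a static):

#include <linux/kernel.h>
#include <linux/mm.h>

static ssize_t demo_map_show(char *buf, int nr_entries)
{
	static int cursor;	/* persists across successive reads */
	int len = 0;

	while (cursor < nr_entries) {
		len += snprintf(buf + len, PAGE_SIZE - len,
				"entry %02d\n", cursor);
		cursor++;

		/* stop early, leaving margin for the marker */
		if (cursor < nr_entries && len >= PAGE_SIZE - 64) {
			len += snprintf(buf + len, PAGE_SIZE - len,
					"more...\n");
			break;
		}
	}
	if (cursor == nr_entries)
		cursor = 0;	/* next read starts from the top */
	return len;
}
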
 
@@ -4157,6 +4643,21 @@
 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
 
 /*
+# lpfc_EnableXLane: Enable Express Lane Feature
+#      0x0   Express Lane Feature disabled
+#      0x1   Express Lane Feature enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
+
+/*
+# lpfc_XLanePriority:  Define CS_CTL priority for Express Lane Feature
+#       0x0 - 0x7f  = CS_CTL field in FC header (high 7 bits)
+# Value range is [0x0,0x7f]. Default value is 0
+*/
+LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+
+/*
 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
 #       0  = BlockGuard disabled (default)
 #       1  = BlockGuard enabled
@@ -4317,6 +4818,13 @@
 	&dev_attr_lpfc_soft_wwn_enable,
 	&dev_attr_lpfc_enable_hba_reset,
 	&dev_attr_lpfc_enable_hba_heartbeat,
+	&dev_attr_lpfc_EnableXLane,
+	&dev_attr_lpfc_XLanePriority,
+	&dev_attr_lpfc_xlane_lun,
+	&dev_attr_lpfc_xlane_tgt,
+	&dev_attr_lpfc_xlane_vpt,
+	&dev_attr_lpfc_xlane_lun_state,
+	&dev_attr_lpfc_xlane_lun_status,
 	&dev_attr_lpfc_sg_seg_cnt,
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
@@ -4335,6 +4843,7 @@
 	&dev_attr_lpfc_dss,
 	&dev_attr_lpfc_sriov_hw_max_virtfn,
 	&dev_attr_protocol,
+	&dev_attr_lpfc_xlane_supported,
 	NULL,
 };
 
@@ -5296,11 +5805,20 @@
 	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+	lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		phba->cfg_EnableXLane = 0;
+	lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
+	memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
+	memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
+	phba->cfg_oas_lun_state = 0;
+	phba->cfg_oas_lun_status = 0;
+	phba->cfg_oas_flags = 0;
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		phba->cfg_poll = 0;
 	else
-	phba->cfg_poll = lpfc_poll;
+		phba->cfg_poll = lpfc_poll;
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 82134d20..ca2f4ea 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4153,6 +4153,7 @@
 		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
 			switch (opcode) {
 			case FCOE_OPCODE_READ_FCF:
+			case FCOE_OPCODE_GET_DPORT_RESULTS:
 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 						"2957 Handled SLI_CONFIG "
 						"subsys_fcoe, opcode:x%x\n",
@@ -4161,6 +4162,8 @@
 							nemb_mse, dmabuf);
 				break;
 			case FCOE_OPCODE_ADD_FCF:
+			case FCOE_OPCODE_SET_DPORT_MODE:
+			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 						"2958 Handled SLI_CONFIG "
 						"subsys_fcoe, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 67f7d0a..a94d4c9 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -231,6 +231,8 @@
 #define SLI_CONFIG_SUBSYS_FCOE		0x0C
 #define FCOE_OPCODE_READ_FCF		0x08
 #define FCOE_OPCODE_ADD_FCF		0x09
+#define FCOE_OPCODE_SET_DPORT_MODE	0x27
+#define FCOE_OPCODE_GET_DPORT_RESULTS	0x28
 };
 
 struct lpfc_sli_config_emb1_subsys {
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index cda076a..adda0bf 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -187,6 +187,11 @@
 void lpfc_offline(struct lpfc_hba *);
 void lpfc_reset_hba(struct lpfc_hba *);
 
+int lpfc_fof_queue_create(struct lpfc_hba *);
+int lpfc_fof_queue_setup(struct lpfc_hba *);
+int lpfc_fof_queue_destroy(struct lpfc_hba *);
+irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
+
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli_queue_setup(struct lpfc_hba *);
 
@@ -242,6 +247,7 @@
 void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
 void lpfc_mem_free(struct lpfc_hba *);
 void lpfc_mem_free_all(struct lpfc_hba *);
 void lpfc_stop_vport_timers(struct lpfc_vport *);
@@ -399,7 +405,6 @@
 void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
 void lpfc_rampdown_queue_depth(struct lpfc_hba *);
 void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
-void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
 void lpfc_scsi_dev_block(struct lpfc_hba *);
 
 void
@@ -471,3 +476,20 @@
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
 void lpfc_sli4_offline_eratt(struct lpfc_hba *);
+
+struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
+						struct lpfc_name *,
+						struct lpfc_name *,
+						uint64_t, bool);
+void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
+struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
+					struct list_head *list,
+					struct lpfc_name *,
+					struct lpfc_name *, uint64_t);
+bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+			 struct lpfc_name *, uint64_t);
+bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+			  struct lpfc_name *, uint64_t);
+bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+			    struct lpfc_name *, uint64_t *, struct lpfc_name *,
+			    struct lpfc_name *, uint64_t *, uint32_t *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b800cc9..828c08e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2280,6 +2280,104 @@
 		}
 	}
 
+	if (phba->cfg_fof) {
+		/* FOF EQ */
+		qp = phba->sli4_hba.fof_eq;
+		if (!qp)
+			goto out;
+
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\nFOF EQ info: "
+			"EQ-STAT[max:x%x noE:x%x "
+			"bs:x%x proc:x%llx]\n",
+			qp->q_cnt_1, qp->q_cnt_2,
+			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"EQID[%02d], "
+			"QE-CNT[%04d], QE-SIZE[%04d], "
+			"HOST-IDX[%04d], PORT-IDX[%04d]",
+			qp->queue_id,
+			qp->entry_count,
+			qp->entry_size,
+			qp->host_index,
+			qp->hba_index);
+
+		/* Reset max counter */
+		qp->EQ_max_eqe = 0;
+
+		len +=  snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+		if (len >= max_cnt)
+			goto too_big;
+	}
+
+	if (phba->cfg_EnableXLane) {
+
+		/* OAS CQ */
+		qp = phba->sli4_hba.oas_cq;
+		if (qp) {
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\tOAS CQ info: ");
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"AssocEQID[%02d]: "
+				"CQ STAT[max:x%x relw:x%x "
+				"xabt:x%x wq:x%llx]\n",
+				qp->assoc_qid,
+				qp->q_cnt_1, qp->q_cnt_2,
+				qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\tCQID[%02d], "
+				"QE-CNT[%04d], QE-SIZE[%04d], "
+				"HOST-IDX[%04d], PORT-IDX[%04d]",
+				qp->queue_id, qp->entry_count,
+				qp->entry_size, qp->host_index,
+				qp->hba_index);
+
+			/* Reset max counter */
+			qp->CQ_max_cqe = 0;
+
+			len +=  snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			if (len >= max_cnt)
+				goto too_big;
+		}
+
+		/* OAS WQ */
+		qp = phba->sli4_hba.oas_wq;
+		if (qp) {
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\t\tOAS WQ info: ");
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"AssocCQID[%02d]: "
+				"WQ-STAT[oflow:x%x posted:x%llx]\n",
+				qp->assoc_qid,
+				qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\t\tWQID[%02d], "
+				"QE-CNT[%04d], QE-SIZE[%04d], "
+				"HOST-IDX[%04d], PORT-IDX[%04d]",
+				qp->queue_id,
+				qp->entry_count,
+				qp->entry_size,
+				qp->host_index,
+				qp->hba_index);
+
+			len +=  snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			if (len >= max_cnt)
+				goto too_big;
+		}
+	}
+out:
 	spin_unlock_irq(&phba->hbalock);
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 
@@ -3927,6 +4025,7 @@
 	struct lpfc_hba   *phba = vport->phba;
 	char name[64];
 	uint32_t num, i;
+	bool pport_setup = false;
 
 	if (!lpfc_debugfs_enable)
 		return;
@@ -3947,6 +4046,7 @@
 	/* Setup funcX directory for specific HBA PCI function */
 	snprintf(name, sizeof(name), "fn%d", phba->brd_no);
 	if (!phba->hba_debugfs_root) {
+		pport_setup = true;
 		phba->hba_debugfs_root =
 			debugfs_create_dir(name, lpfc_debugfs_root);
 		if (!phba->hba_debugfs_root) {
@@ -4239,6 +4339,14 @@
 	}
 
 	/*
+	 * The following section is for additional directories/files for the
+	 * physical port.
+	 */
+
+	if (!pport_setup)
+		goto debug_failed;
+
+	/*
 	 * iDiag debugfs root entry points for SLI4 device only
 	 */
 	if (phba->sli_rev < LPFC_SLI_REV4)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index e409ba5..1a6fe52 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -116,7 +116,7 @@
 	atomic_t cmd_pending;
 	uint32_t cmd_qdepth;
 	unsigned long last_change_time;
-	struct lpfc_node_rrqs active_rrqs;
+	unsigned long *active_rrqs_xri_bitmap;
 	struct lpfc_scsicmd_bkt *lat_data;	/* Latency data */
 };
 struct lpfc_node_rrq {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 110445f..624fe0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1516,7 +1516,7 @@
 	uint32_t rc, keepDID = 0;
 	int  put_node;
 	int  put_rport;
-	struct lpfc_node_rrqs rrq;
+	unsigned long *active_rrqs_xri_bitmap = NULL;
 
 	/* Fabric nodes can have the same WWPN so we don't bother searching
 	 * by WWPN.  Just return the ndlp that was given to us.
@@ -1534,7 +1534,13 @@
 
 	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
 		return ndlp;
-	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
+						       GFP_KERNEL);
+		if (active_rrqs_xri_bitmap)
+			memset(active_rrqs_xri_bitmap, 0,
+			       phba->cfg_rrq_xri_bitmap_sz);
+	}
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 		 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
@@ -1543,41 +1549,58 @@
 	if (!new_ndlp) {
 		rc = memcmp(&ndlp->nlp_portname, name,
 			    sizeof(struct lpfc_name));
-		if (!rc)
+		if (!rc) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
 			return ndlp;
+		}
 		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
-		if (!new_ndlp)
+		if (!new_ndlp) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
 			return ndlp;
+		}
 		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
 	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
 		rc = memcmp(&ndlp->nlp_portname, name,
 			    sizeof(struct lpfc_name));
-		if (!rc)
+		if (!rc) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
 			return ndlp;
+		}
 		new_ndlp = lpfc_enable_node(vport, new_ndlp,
 						NLP_STE_UNUSED_NODE);
-		if (!new_ndlp)
+		if (!new_ndlp) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
 			return ndlp;
+		}
 		keepDID = new_ndlp->nlp_DID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&rrq.xri_bitmap,
-				&new_ndlp->active_rrqs.xri_bitmap,
-				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
+			memcpy(active_rrqs_xri_bitmap,
+			       new_ndlp->active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
 	} else {
 		keepDID = new_ndlp->nlp_DID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&rrq.xri_bitmap,
-				&new_ndlp->active_rrqs.xri_bitmap,
-				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(active_rrqs_xri_bitmap,
+			       new_ndlp->active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
 	}
 
 	lpfc_unreg_rpi(vport, new_ndlp);
 	new_ndlp->nlp_DID = ndlp->nlp_DID;
 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
 	if (phba->sli_rev == LPFC_SLI_REV4)
-		memcpy(new_ndlp->active_rrqs.xri_bitmap,
-			&ndlp->active_rrqs.xri_bitmap,
-			sizeof(ndlp->active_rrqs.xri_bitmap));
+		memcpy(new_ndlp->active_rrqs_xri_bitmap,
+		       ndlp->active_rrqs_xri_bitmap,
+		       phba->cfg_rrq_xri_bitmap_sz);
 
 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
 		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1619,10 +1642,11 @@
 
 		/* Two ndlps cannot have the same did on the nodelist */
 		ndlp->nlp_DID = keepDID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&ndlp->active_rrqs.xri_bitmap,
-				&rrq.xri_bitmap,
-				sizeof(ndlp->active_rrqs.xri_bitmap));
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(ndlp->active_rrqs_xri_bitmap,
+			       active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
 		lpfc_drop_node(vport, ndlp);
 	}
 	else {
@@ -1634,10 +1658,11 @@
 
 		/* Two ndlps cannot have the same did */
 		ndlp->nlp_DID = keepDID;
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			memcpy(&ndlp->active_rrqs.xri_bitmap,
-				&rrq.xri_bitmap,
-				sizeof(ndlp->active_rrqs.xri_bitmap));
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(ndlp->active_rrqs_xri_bitmap,
+			       active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
 
 		/* Since we are swapping the ndlp passed in with the new one
 		 * and the did has already been swapped, copy over state.
@@ -1668,6 +1693,10 @@
 				put_device(&rport->dev);
 		}
 	}
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    active_rrqs_xri_bitmap)
+		mempool_free(active_rrqs_xri_bitmap,
+			     phba->active_rrq_pool);
 	return new_ndlp;
 }
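
Since active_rrqs_xri_bitmap is now a variable-size mempool allocation (sized by cfg_rrq_xri_bitmap_sz) rather than an array embedded in the ndlp, the PLOGI-confirm path above stages the exchange through a temporary buffer drawn from the same pool. The core of that pattern, with hypothetical names:

#include <linux/mempool.h>
#include <linux/string.h>

/* Swap two bitmaps of sz bytes via a scratch buffer from the pool.
 * On allocation failure the bitmaps are simply left untouched. */
static void demo_swap_bitmaps(mempool_t *pool, size_t sz,
			      unsigned long *a, unsigned long *b)
{
	unsigned long *tmp = mempool_alloc(pool, GFP_KERNEL);

	if (!tmp)
		return;
	memcpy(tmp, a, sz);
	memcpy(a, b, sz);
	memcpy(b, tmp, sz);
	mempool_free(tmp, pool);
}
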
 
@@ -2772,6 +2801,7 @@
 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
 	 * trigger the release of node.
 	 */
+
 	lpfc_nlp_put(ndlp);
 	return 0;
 }
@@ -6193,11 +6223,11 @@
 
 	spin_lock_irqsave(&vport->work_port_lock, iflag);
 	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
-	if (!tmo_posted)
+	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
 		vport->work_port_events |= WORKER_ELS_TMO;
 	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-	if (!tmo_posted)
+	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
 		lpfc_worker_wake_up(phba);
 	return;
 }
@@ -6223,19 +6253,26 @@
 	uint32_t els_command = 0;
 	uint32_t timeout;
 	uint32_t remote_ID = 0xffffffff;
-	LIST_HEAD(txcmplq_completions);
 	LIST_HEAD(abort_list);
 
 
 	timeout = (uint32_t)(phba->fc_ratov << 1);
 
 	pring = &phba->sli.ring[LPFC_ELS_RING];
-
+	if ((phba->pport->load_flag & FC_UNLOADING))
+		return;
 	spin_lock_irq(&phba->hbalock);
-	list_splice_init(&pring->txcmplq, &txcmplq_completions);
-	spin_unlock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
 
-	list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
+	if ((phba->pport->load_flag & FC_UNLOADING)) {
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			spin_unlock(&pring->ring_lock);
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
 		cmd = &piocb->iocb;
 
 		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
@@ -6274,11 +6311,12 @@
 		}
 		list_add_tail(&piocb->dlist, &abort_list);
 	}
-	spin_lock_irq(&phba->hbalock);
-	list_splice(&txcmplq_completions, &pring->txcmplq);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
 
 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+		cmd = &piocb->iocb;
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 			 "0127 ELS timeout Data: x%x x%x x%x "
 			 "x%x\n", els_command,
@@ -6290,8 +6328,9 @@
 	}
 
 	if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
-		mod_timer(&vport->els_tmofunc,
-			  jiffies + msecs_to_jiffies(1000 * timeout));
+		if (!(phba->pport->load_flag & FC_UNLOADING))
+			mod_timer(&vport->els_tmofunc,
+				  jiffies + msecs_to_jiffies(1000 * timeout));
 }
 
 /**
@@ -6317,15 +6356,50 @@
 void
 lpfc_els_flush_cmd(struct lpfc_vport *vport)
 {
-	LIST_HEAD(completions);
+	LIST_HEAD(abort_list);
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	struct lpfc_iocbq *tmp_iocb, *piocb;
 	IOCB_t *cmd = NULL;
 
 	lpfc_fabric_abort_vport(vport);
+	/*
+	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate
+	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag
+	 * ultimately grabs the ring_lock, the driver must splice the list into
+	 * a working list and release the locks before calling the abort.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+			continue;
+
+		if (piocb->vport != vport)
+			continue;
+		list_add_tail(&piocb->dlist, &abort_list);
+	}
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+	/* Abort each iocb on the aborted list and remove the dlist links. */
+	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+		spin_lock_irq(&phba->hbalock);
+		list_del_init(&piocb->dlist);
+		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+		spin_unlock_irq(&phba->hbalock);
+	}
+	if (!list_empty(&abort_list))
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "3387 abort list for txq not empty\n");
+	INIT_LIST_HEAD(&abort_list);
 
 	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
 		cmd = &piocb->iocb;
 
@@ -6343,24 +6417,16 @@
 		if (piocb->vport != vport)
 			continue;
 
-		list_move_tail(&piocb->list, &completions);
+		list_del_init(&piocb->list);
+		list_add_tail(&piocb->list, &abort_list);
 	}
-
-	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
-		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
-			continue;
-		}
-
-		if (piocb->vport != vport)
-			continue;
-
-		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
-	}
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Cancel all the IOCBs from the completions list */
-	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-			      IOERR_SLI_ABORTED);
+	lpfc_sli_cancel_iocbs(phba, &abort_list,
+			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 
 	return;
 }
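
The comment at the top of lpfc_els_flush_cmd() explains the motivation: lpfc_sli_issue_abort_iotag() ultimately takes the ring_lock, so the iocbs must be collected onto a private list (via their dlist member) while the locks are held, and aborted only after the locks are dropped. The same collect-then-act shape in miniature (simplified: the real code re-takes the hbalock around each individual abort):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_io {
	struct list_head list;		/* ring membership */
	struct list_head dlist;		/* temporary abort-list linkage */
	int vport_id;
};

static void demo_flush_vport(spinlock_t *lock, struct list_head *ring,
			     int vport_id, void (*abort)(struct demo_io *))
{
	struct demo_io *io, *tmp;
	LIST_HEAD(abort_list);

	/* Phase 1: pick candidates while holding the lock. */
	spin_lock_irq(lock);
	list_for_each_entry(io, ring, list)
		if (io->vport_id == vport_id)
			list_add_tail(&io->dlist, &abort_list);
	spin_unlock_irq(lock);

	/* Phase 2: abort with the lock dropped; abort() is then free
	 * to take ring-level locks of its own. */
	list_for_each_entry_safe(io, tmp, &abort_list, dlist) {
		list_del_init(&io->dlist);
		abort(io);
	}
}
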
@@ -6385,35 +6451,9 @@
 void
 lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
 {
-	LIST_HEAD(completions);
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	struct lpfc_iocbq *tmp_iocb, *piocb;
-	IOCB_t *cmd = NULL;
-
-	lpfc_fabric_abort_hba(phba);
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
-		cmd = &piocb->iocb;
-		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
-			continue;
-		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
-		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
-		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
-		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
-		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
-			continue;
-		list_move_tail(&piocb->list, &completions);
-	}
-	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
-		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
-			continue;
-		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
-	}
-	spin_unlock_irq(&phba->hbalock);
-
-	/* Cancel all the IOCBs from the completions list */
-	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-			      IOERR_SLI_ABORTED);
+	struct lpfc_vport *vport;
+
+	list_for_each_entry(vport, &phba->port_list, listentry)
+		lpfc_els_flush_cmd(vport);
 
 	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 883ea2d..59b51c5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -674,8 +674,6 @@
 				lpfc_fdmi_timeout_handler(vport);
 			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
 				lpfc_ramp_down_queue_handler(phba);
-			if (work_port_events & WORKER_RAMP_UP_QUEUE)
-				lpfc_ramp_up_queue_handler(phba);
 			if (work_port_events & WORKER_DELAYED_DISC_TMO)
 				lpfc_delayed_disc_timeout_handler(vport);
 		}
@@ -2545,8 +2543,11 @@
 	if (!new_fcf_record) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
 				"2766 Mailbox command READ_FCF_RECORD "
-				"failed to retrieve a FCF record.\n");
-		goto error_out;
+				"failed to retrieve a FCF record. "
+				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
+				phba->fcf.fcf_flag);
+		lpfc_unregister_fcf_rescan(phba);
+		goto out;
 	}
 
 	/* Get the needed parameters from FCF record */
@@ -3973,7 +3974,10 @@
 		vport->fc_map_cnt += count;
 		break;
 	case NLP_STE_NPR_NODE:
-		vport->fc_npr_cnt += count;
+		if (vport->fc_npr_cnt == 0 && count == -1)
+			vport->fc_npr_cnt = 0;
+		else
+			vport->fc_npr_cnt += count;
 		break;
 	}
 	spin_unlock_irq(shost->host_lock);
@@ -4180,6 +4184,7 @@
 	struct lpfc_hba *phba = vport->phba;
 	uint32_t did;
 	unsigned long flags;
+	unsigned long *active_rrqs_xri_bitmap = NULL;
 
 	if (!ndlp)
 		return NULL;
@@ -4208,12 +4213,17 @@
 
 	/* Keep the original DID */
 	did = ndlp->nlp_DID;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
 
 	/* re-initialize ndlp except of ndlp linked list pointer */
 	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
 		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
 	lpfc_initialize_node(vport, ndlp, did);
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
+
 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
 		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
@@ -4799,9 +4809,10 @@
 				 ((uint32_t) ndlp->nlp_rpi & 0xff));
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
 					 "0929 FIND node DID "
-					 "Data: x%p x%x x%x x%x\n",
+					 "Data: x%p x%x x%x x%x %p\n",
 					 ndlp, ndlp->nlp_DID,
-					 ndlp->nlp_flag, data1);
+					 ndlp->nlp_flag, data1,
+					 ndlp->active_rrqs_xri_bitmap);
 			return ndlp;
 		}
 	}
@@ -5618,8 +5629,13 @@
 
 	lpfc_initialize_node(vport, ndlp, did);
 	INIT_LIST_HEAD(&ndlp->nlp_listp);
-	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
 		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+		ndlp->active_rrqs_xri_bitmap =
+				mempool_alloc(vport->phba->active_rrq_pool,
+					      GFP_KERNEL);
+	}
 
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
@@ -5664,6 +5680,9 @@
 	/* free ndlp memory for final ndlp release */
 	if (NLP_CHK_FREE_REQ(ndlp)) {
 		kfree(ndlp->lat_data);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			mempool_free(ndlp->active_rrqs_xri_bitmap,
+				     ndlp->phba->active_rrq_pool);
 		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
 	}
 }
@@ -6170,10 +6189,6 @@
 
 		memcpy(&conn_entry->conn_rec, &conn_rec[i],
 			sizeof(struct lpfc_fcf_conn_rec));
-		conn_entry->conn_rec.vlan_tag =
-			conn_entry->conn_rec.vlan_tag;
-		conn_entry->conn_rec.flags =
-			conn_entry->conn_rec.flags;
 		list_add_tail(&conn_entry->list,
 			&phba->fcf_conn_rec_list);
 	}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 6f927d3..3d9438c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -45,6 +45,7 @@
 #define LPFC_EXTRA_RING          1	/* ring 1 for other protocols */
 #define LPFC_ELS_RING            2	/* ring 2 for ELS commands */
 #define LPFC_FCP_NEXT_RING       3
+#define LPFC_FCP_OAS_RING        3
 
 #define SLI2_IOCB_CMD_R0_ENTRIES    172	/* SLI-2 FCP command ring entries */
 #define SLI2_IOCB_RSP_R0_ENTRIES    134	/* SLI-2 FCP response ring entries */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5464b11..fd79f7d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2616,6 +2616,9 @@
 #define cfg_phwq_SHIFT				15
 #define cfg_phwq_MASK				0x00000001
 #define cfg_phwq_WORD				word12
+#define cfg_oas_SHIFT				25
+#define cfg_oas_MASK				0x00000001
+#define cfg_oas_WORD				word12
 #define cfg_loopbk_scope_SHIFT			28
 #define cfg_loopbk_scope_MASK			0x0000000f
 #define cfg_loopbk_scope_WORD			word12
@@ -3322,6 +3325,9 @@
 #define wqe_ebde_cnt_SHIFT    0
 #define wqe_ebde_cnt_MASK     0x0000000f
 #define wqe_ebde_cnt_WORD     word10
+#define wqe_oas_SHIFT         6
+#define wqe_oas_MASK          0x00000001
+#define wqe_oas_WORD          word10
 #define wqe_lenloc_SHIFT      7
 #define wqe_lenloc_MASK       0x00000003
 #define wqe_lenloc_WORD       word10
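
The SHIFT/MASK/WORD triples added above follow the convention that lpfc's
bf_set()/bf_get() accessors key off: the macros paste the suffixes onto the
field name and mask/shift the value within the named word, which is how the
later hunks use bf_get(cfg_oas, ...) and bf_set(wqe_oas, ...).  A minimal
standalone sketch of the same convention (demo names only, not lpfc code):

	#include <stdint.h>
	#include <stdio.h>

	#define demo_oas_SHIFT	25
	#define demo_oas_MASK	0x00000001

	/* Place val into the field; roughly what bf_set() expands to. */
	static uint32_t demo_set(uint32_t word12, uint32_t val)
	{
		return (word12 & ~(demo_oas_MASK << demo_oas_SHIFT)) |
		       ((val & demo_oas_MASK) << demo_oas_SHIFT);
	}

	/* Extract the field; roughly what bf_get() expands to. */
	static uint32_t demo_get(uint32_t word12)
	{
		return (word12 >> demo_oas_SHIFT) & demo_oas_MASK;
	}

	int main(void)
	{
		uint32_t word12 = demo_set(0, 1);

		printf("oas=%u word12=0x%08x\n", demo_get(word12), word12);
		return 0;
	}
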
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 68c94cc..635eeb3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -80,6 +80,7 @@
 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
+static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1005,9 +1006,14 @@
 
 	phba = (struct lpfc_hba *)ptr;
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
-	phba->hba_flag |= HBA_RRQ_ACTIVE;
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		phba->hba_flag |= HBA_RRQ_ACTIVE;
+	else
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
-	lpfc_worker_wake_up(phba);
+
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		lpfc_worker_wake_up(phba);
 }
 
 /**
@@ -1468,7 +1474,8 @@
  * for handling possible port resource change.
  **/
 static int
-lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
+lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
+			    bool en_rn_msg)
 {
 	int rc;
 	uint32_t intr_mode;
@@ -1480,9 +1487,10 @@
 	rc = lpfc_sli4_pdev_status_reg_wait(phba);
 	if (!rc) {
 		/* need reset: attempt for port recovery */
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2887 Reset Needed: Attempting Port "
-				"Recovery...\n");
+		if (en_rn_msg)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2887 Reset Needed: Attempting Port "
+					"Recovery...\n");
 		lpfc_offline_prep(phba, mbx_action);
 		lpfc_offline(phba);
 		/* release interrupt for possible resource change */
@@ -1522,6 +1530,7 @@
 	uint32_t reg_err1, reg_err2;
 	uint32_t uerrlo_reg, uemasklo_reg;
 	uint32_t pci_rd_rc1, pci_rd_rc2;
+	bool en_rn_msg = true;
 	int rc;
 
 	/* If the pci channel is offline, ignore possible errors, since
@@ -1572,10 +1581,12 @@
 			break;
 		}
 		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
-		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"3143 Port Down: Firmware Restarted\n");
-		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+					"3143 Port Down: Firmware Update "
+					"Detected\n");
+			en_rn_msg = false;
+		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
 			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"3144 Port Down: Debug Dump\n");
@@ -1585,7 +1596,8 @@
 					"3145 Port Down: Provisioning\n");
 
 		/* Check port status register for function reset */
-		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
+		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
+				en_rn_msg);
 		if (rc == 0) {
 			/* don't report event on forced debug dump */
 			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
@@ -4856,6 +4868,7 @@
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
 	int longs;
+	int fof_vectors = 0;
 
 	/* Get all the module params for configuring this host */
 	lpfc_get_cfgparam(phba);
@@ -5061,6 +5074,9 @@
 	rc = lpfc_sli4_read_config(phba);
 	if (unlikely(rc))
 		goto out_free_bsmbx;
+	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
 
 	/* IF Type 0 ports get initialized now. */
 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -5118,6 +5134,12 @@
 		}
 	}
 	mempool_free(mboxq, phba->mbox_mem_pool);
+
+	/* Verify OAS is supported */
+	lpfc_sli4_oas_verify(phba);
+	if (phba->cfg_fof)
+		fof_vectors = 1;
+
 	/* Verify all the SLI4 queues */
 	rc = lpfc_sli4_queue_verify(phba);
 	if (rc)
@@ -5159,7 +5181,8 @@
 
 	phba->sli4_hba.fcp_eq_hdl =
 			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
-			    phba->cfg_fcp_io_channel), GFP_KERNEL);
+			    (fof_vectors + phba->cfg_fcp_io_channel)),
+			    GFP_KERNEL);
 	if (!phba->sli4_hba.fcp_eq_hdl) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2572 Failed allocate memory for "
@@ -5169,7 +5192,8 @@
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
-				      phba->cfg_fcp_io_channel), GFP_KERNEL);
+				  (fof_vectors +
+				   phba->cfg_fcp_io_channel)), GFP_KERNEL);
 	if (!phba->sli4_hba.msix_entries) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2573 Failed allocate memory for msi-x "
@@ -5267,6 +5291,7 @@
 	kfree(phba->sli4_hba.cpu_map);
 	phba->sli4_hba.num_present_cpu = 0;
 	phba->sli4_hba.num_online_cpu = 0;
+	phba->sli4_hba.curr_disp_cpu = 0;
 
 	/* Free memory allocated for msi-x interrupt vector entries */
 	kfree(phba->sli4_hba.msix_entries);
@@ -5390,6 +5415,10 @@
 	/* Initialize FCF connection rec list */
 	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
 
+	/* Initialize OAS configuration list */
+	spin_lock_init(&phba->devicelock);
+	INIT_LIST_HEAD(&phba->luns);
+
 	return 0;
 }
 
@@ -6816,6 +6845,7 @@
 	int cfg_fcp_io_channel;
 	uint32_t cpu;
 	uint32_t i = 0;
+	int fof_vectors = phba->cfg_fof ? 1 : 0;
 
 	/*
 	 * Sanity check for configured queue parameters against the run-time
@@ -6832,6 +6862,7 @@
 	}
 	phba->sli4_hba.num_online_cpu = i;
 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+	phba->sli4_hba.curr_disp_cpu = 0;
 
 	if (i < cfg_fcp_io_channel) {
 		lpfc_printf_log(phba,
@@ -6842,7 +6873,7 @@
 		cfg_fcp_io_channel = i;
 	}
 
-	if (cfg_fcp_io_channel >
+	if (cfg_fcp_io_channel + fof_vectors >
 	    phba->sli4_hba.max_cfg_param.max_eq) {
 		if (phba->sli4_hba.max_cfg_param.max_eq <
 		    LPFC_FCP_IO_CHAN_MIN) {
@@ -6859,7 +6890,8 @@
 				"available EQs: from %d to %d\n",
 				cfg_fcp_io_channel,
 				phba->sli4_hba.max_cfg_param.max_eq);
-		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
+		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
+			fof_vectors;
 	}
 
 	/* The actual number of FCP event queues adopted */
@@ -7070,6 +7102,9 @@
 	}
 	phba->sli4_hba.dat_rq = qdesc;
 
+	/* Create the Queues needed for Flash Optimized Fabric operations */
+	if (phba->cfg_fof)
+		lpfc_fof_queue_create(phba);
 	return 0;
 
 out_error:
@@ -7094,6 +7129,9 @@
 {
 	int idx;
 
+	if (phba->cfg_fof)
+		lpfc_fof_queue_destroy(phba);
+
 	if (phba->sli4_hba.hba_eq != NULL) {
 		/* Release HBA event queue */
 		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
@@ -7478,8 +7516,20 @@
 			phba->sli4_hba.hdr_rq->queue_id,
 			phba->sli4_hba.dat_rq->queue_id,
 			phba->sli4_hba.els_cq->queue_id);
+
+	if (phba->cfg_fof) {
+		rc = lpfc_fof_queue_setup(phba);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0549 Failed setup of FOF Queues: "
+					"rc = 0x%x\n", rc);
+			goto out_destroy_els_rq;
+		}
+	}
 	return 0;
 
+out_destroy_els_rq:
+	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
 out_destroy_els_wq:
 	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 out_destroy_mbx_wq:
@@ -7518,6 +7568,9 @@
 {
 	int fcp_qidx;
 
+	/* Unset the queues created for Flash Optimized Fabric operations */
+	if (phba->cfg_fof)
+		lpfc_fof_queue_destroy(phba);
 	/* Unset mailbox command work queue */
 	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
 	/* Unset ELS work queue */
@@ -8635,6 +8688,10 @@
 
 	/* Configure MSI-X capability structure */
 	vectors = phba->cfg_fcp_io_channel;
+	if (phba->cfg_fof) {
+		phba->sli4_hba.msix_entries[index].entry = index;
+		vectors++;
+	}
 enable_msix_vectors:
 	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
 			     vectors);
@@ -8664,7 +8721,15 @@
 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
 		atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
-		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+		if (phba->cfg_fof && (index == (vectors - 1)))
+			rc = request_irq(
+				phba->sli4_hba.msix_entries[index].vector,
+				 &lpfc_sli4_fof_intr_handler, IRQF_SHARED,
+				 (char *)&phba->sli4_hba.handler_name[index],
+				 &phba->sli4_hba.fcp_eq_hdl[index]);
+		else
+			rc = request_irq(
+				phba->sli4_hba.msix_entries[index].vector,
 				 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
 				 (char *)&phba->sli4_hba.handler_name[index],
 				 &phba->sli4_hba.fcp_eq_hdl[index]);
@@ -8676,6 +8741,9 @@
 		}
 	}
 
+	if (phba->cfg_fof)
+		vectors--;
+
 	if (vectors != phba->cfg_fcp_io_channel) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3238 Reducing IO channels to match number of "
@@ -8721,7 +8789,10 @@
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
 	}
-
+	if (phba->cfg_fof) {
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
 
@@ -8771,6 +8842,10 @@
 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
 	}
 
+	if (phba->cfg_fof) {
+		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+	}
 	return 0;
 }
 
@@ -8853,6 +8928,12 @@
 				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
 					fcp_eq_in_use, 1);
 			}
+			if (phba->cfg_fof) {
+				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+					fcp_eq_in_use, 1);
+			}
 		}
 	}
 	return intr_mode;
@@ -9163,6 +9244,7 @@
 		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
 	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
 	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
 	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
 	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
 	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
@@ -10796,6 +10878,169 @@
 	return;
 }
 
+/**
+ * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks whether OAS is supported by this adapter. If it is,
+ * the Flash Optimized Fabric flag (cfg_fof) is set.  Otherwise, the
+ * cfg_EnableXLane flag is cleared and the mempool created for OAS device
+ * data is destroyed.
+ *
+ **/
+void
+lpfc_sli4_oas_verify(struct lpfc_hba *phba)
+{
+
+	if (!phba->cfg_EnableXLane)
+		return;
+
+	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
+		phba->cfg_fof = 1;
+	} else {
+		phba->cfg_EnableXLane = 0;
+		if (phba->device_data_mem_pool)
+			mempool_destroy(phba->device_data_mem_pool);
+		phba->device_data_mem_pool = NULL;
+	}
+
+	return;
+}
+
+/**
+ * lpfc_fof_queue_setup - Set up all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the fof queues for the FC HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+int
+lpfc_fof_queue_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	int rc;
+
+	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
+	if (rc)
+		return -ENOMEM;
+
+	if (phba->cfg_EnableXLane) {
+
+		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
+				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
+		if (rc)
+			goto out_oas_cq;
+
+		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
+				    phba->sli4_hba.oas_cq, LPFC_FCP);
+		if (rc)
+			goto out_oas_wq;
+
+		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
+		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
+	}
+
+	return 0;
+
+out_oas_wq:
+	if (phba->cfg_EnableXLane)
+		lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+out_oas_cq:
+	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
+	return rc;
+}
+
+/**
+ * lpfc_fof_queue_create - Create all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the fof queues for the FC HBA
+ * operation. For each SLI4 queue type, parameters such as the queue entry
+ * count (queue depth) should eventually come from the module parameters;
+ * for now, constant values are used as placeholders.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_fof_queue_create(struct lpfc_hba *phba)
+{
+	struct lpfc_queue *qdesc;
+
+	/* Create FOF EQ */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+				      phba->sli4_hba.eq_ecount);
+	if (!qdesc)
+		goto out_error;
+
+	phba->sli4_hba.fof_eq = qdesc;
+
+	if (phba->cfg_EnableXLane) {
+
+		/* Create OAS CQ */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+						      phba->sli4_hba.cq_ecount);
+		if (!qdesc)
+			goto out_error;
+
+		phba->sli4_hba.oas_cq = qdesc;
+
+		/* Create OAS WQ */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+					      phba->sli4_hba.wq_ecount);
+		if (!qdesc)
+			goto out_error;
+
+		phba->sli4_hba.oas_wq = qdesc;
+
+	}
+	return 0;
+
+out_error:
+	lpfc_fof_queue_destroy(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_fof_queue_destroy - Destroy all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues used for the FC HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ **/
+int
+lpfc_fof_queue_destroy(struct lpfc_hba *phba)
+{
+	/* Release FOF Event queue */
+	if (phba->sli4_hba.fof_eq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
+		phba->sli4_hba.fof_eq = NULL;
+	}
+
+	/* Release OAS Completion queue */
+	if (phba->sli4_hba.oas_cq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
+		phba->sli4_hba.oas_cq = NULL;
+	}
+
+	/* Release OAS Work queue */
+	if (phba->sli4_hba.oas_wq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
+		phba->sli4_hba.oas_wq = NULL;
+	}
+	return 0;
+}
+
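
For orientation, the FOF queue helpers above split allocation from port setup
in the usual lpfc way.  A hedged call-order sketch of how this patch wires
them together (error handling elided; gating flags as in the hunks above):

	/* Sketch only: the call order this patch establishes, not new code. */
	if (phba->cfg_fof) {
		lpfc_fof_queue_create(phba);	/* allocate fof_eq (plus oas_cq
						 * and oas_wq with EnableXLane) */
		lpfc_fof_queue_setup(phba);	/* create the queues on the port */
		/* ... OAS I/O flows via oas_wq; lpfc_sli4_fof_intr_handler
		 * services fof_eq ... */
		lpfc_fof_queue_destroy(phba);	/* free queue memory on teardown */
	}
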
 static struct pci_device_id lpfc_id_table[] = {
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
 		PCI_ANY_ID, PCI_ANY_ID, },
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 812d0cd..ed419aa 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,10 +38,29 @@
 #include "lpfc_scsi.h"
 #include "lpfc.h"
 #include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
 
 #define LPFC_MBUF_POOL_SIZE     64      /* max elements in MBUF safety pool */
 #define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
+#define LPFC_DEVICE_DATA_POOL_SIZE 64   /* max elements in device data pool */
 
+int
+lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
+{
+	size_t bytes;
+	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+	if (max_xri <= 0)
+		return -ENOMEM;
+	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
+		  sizeof(unsigned long);
+	phba->cfg_rrq_xri_bitmap_sz = bytes;
+	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+							    bytes);
+	if (!phba->active_rrq_pool)
+		return -ENOMEM;
+	return 0;
+}
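
The byte count above is the standard round-up of max_xri bits to whole
unsigned longs (the same arithmetic the kernel's BITS_TO_LONGS() helper
expresses), so every per-node active_rrqs_xri_bitmap drawn from this pool
carries one bit per XRI.  A standalone illustration of the sizing:

	#include <limits.h>
	#include <stddef.h>
	#include <stdio.h>

	#define BITS_PER_ULONG	(CHAR_BIT * sizeof(unsigned long))

	int main(void)
	{
		int max_xri = 1000;	/* hypothetical XRI count */
		size_t bytes = ((BITS_PER_ULONG - 1 + max_xri) / BITS_PER_ULONG) *
			       sizeof(unsigned long);

		/* 1000 bits -> 16 longs -> 128 bytes on a 64-bit build */
		printf("%d XRIs -> %zu bitmap bytes\n", max_xri, bytes);
		return 0;
	}
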
 
 /**
  * lpfc_mem_alloc - create and allocate all PCI and memory pools
@@ -146,6 +165,16 @@
 		phba->lpfc_drb_pool = NULL;
 	}
 
+	if (phba->cfg_EnableXLane) {
+		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
+					LPFC_DEVICE_DATA_POOL_SIZE,
+					sizeof(struct lpfc_device_data));
+		if (!phba->device_data_mem_pool)
+			goto fail_free_hrb_pool;
+	} else {
+		phba->device_data_mem_pool = NULL;
+	}
+
 	return 0;
  fail_free_hrb_pool:
 	pci_pool_destroy(phba->lpfc_hrb_pool);
@@ -188,6 +217,7 @@
 {
 	int i;
 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+	struct lpfc_device_data *device_data;
 
 	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
@@ -209,6 +239,10 @@
 	/* Free NLP memory pool */
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
+	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
+		mempool_destroy(phba->active_rrq_pool);
+		phba->active_rrq_pool = NULL;
+	}
 
 	/* Free mbox memory pool */
 	mempool_destroy(phba->mbox_mem_pool);
@@ -227,6 +261,19 @@
 	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
 	phba->lpfc_scsi_dma_buf_pool = NULL;
 
+	/* Free Device Data memory pool */
+	if (phba->device_data_mem_pool) {
+		/* Ensure all objects have been returned to the pool */
+		while (!list_empty(&phba->luns)) {
+			device_data = list_first_entry(&phba->luns,
+						       struct lpfc_device_data,
+						       listentry);
+			list_del(&device_data->listentry);
+			mempool_free(device_data, phba->device_data_mem_pool);
+		}
+		mempool_destroy(phba->device_data_mem_pool);
+	}
+	phba->device_data_mem_pool = NULL;
 	return;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index abc3612..c342f6a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -203,8 +203,6 @@
 int
 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
-	LIST_HEAD(completions);
-	LIST_HEAD(txcmplq_completions);
 	LIST_HEAD(abort_list);
 	struct lpfc_sli  *psli = &phba->sli;
 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
@@ -216,32 +214,27 @@
 			 "Data: x%x x%x x%x\n",
 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
 			 ndlp->nlp_rpi);
-
+	/* Clean up all fabric IOs first. */
 	lpfc_fabric_abort_nport(ndlp);
 
-	/* First check the txq */
+	/*
+	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
+	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
+	 * txcmplq so that the abort operation completes them successfully.
+	 */
 	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-		/* Check to see if iocb matches the nport we are looking for */
-		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
-			/* It matches, so deque and call compl with anp error */
-			list_move_tail(&iocb->list, &completions);
-		}
-	}
-
-	/* Next check the txcmplq */
-	list_splice_init(&pring->txcmplq, &txcmplq_completions);
-	spin_unlock_irq(&phba->hbalock);
-
-	list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
-		/* Check to see if iocb matches the nport we are looking for */
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+		/* Add to abort_list on NDLP match. */
 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
 			list_add_tail(&iocb->dlist, &abort_list);
 	}
-	spin_lock_irq(&phba->hbalock);
-	list_splice(&txcmplq_completions, &pring->txcmplq);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
 
+	/* Abort the targeted IOs and remove them from the abort list. */
 	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
 			spin_lock_irq(&phba->hbalock);
 			list_del_init(&iocb->dlist);
@@ -249,9 +242,28 @@
 			spin_unlock_irq(&phba->hbalock);
 	}
 
+	INIT_LIST_HEAD(&abort_list);
+
+	/* Now process the txq */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+		/* Check to see if iocb matches the nport we are looking for */
+		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
+			list_del_init(&iocb->list);
+			list_add_tail(&iocb->list, &abort_list);
+		}
+	}
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+
 	/* Cancel all the IOCBs from the completions list */
-	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-			      IOERR_SLI_ABORTED);
+	lpfc_sli_cancel_iocbs(phba, &abort_list,
+			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 
 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
 	return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b2ede05..462453e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -68,6 +68,17 @@
 	__be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
+static struct lpfc_rport_data *
+lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
+
+	if (vport->phba->cfg_EnableXLane)
+		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
+	else
+		return (struct lpfc_rport_data *)sdev->hostdata;
+}
+
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 static void
@@ -304,9 +315,27 @@
 	unsigned long new_queue_depth, old_queue_depth;
 
 	old_queue_depth = sdev->queue_depth;
-	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		/* change request from sysfs, fall through */
+	case SCSI_QDEPTH_RAMP_UP:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		if (scsi_track_queue_full(sdev, qdepth) == 0)
+			return sdev->queue_depth;
+
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "0711 detected queue full - lun queue "
+				 "depth adjusted to %d.\n", sdev->queue_depth);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
 	new_queue_depth = sdev->queue_depth;
-	rdata = sdev->hostdata;
+	rdata = lpfc_rport_data_from_scsi_device(sdev);
 	if (rdata)
 		lpfc_send_sdev_queuedepth_change_event(phba, vport,
 						       rdata->pnode, sdev->lun,
@@ -377,50 +406,6 @@
 }
 
 /**
- * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
- * @phba: The Hba for which this call is being executed.
- *
- * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
- * post at most 1 event every 5 minute after last_ramp_up_time or
- * last_rsrc_error_time.  This routine wakes up worker thread of @phba
- * to process WORKER_RAM_DOWN_EVENT event.
- *
- * This routine should be called with no lock held.
- **/
-static inline void
-lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
-			uint32_t queue_depth)
-{
-	unsigned long flags;
-	struct lpfc_hba *phba = vport->phba;
-	uint32_t evt_posted;
-	atomic_inc(&phba->num_cmd_success);
-
-	if (vport->cfg_lun_queue_depth <= queue_depth)
-		return;
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (time_before(jiffies,
-			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
-	    time_before(jiffies,
-			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		return;
-	}
-	phba->last_ramp_up_time = jiffies;
-	spin_unlock_irqrestore(&phba->hbalock, flags);
-
-	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
-	if (!evt_posted)
-		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
-	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
-
-	if (!evt_posted)
-		lpfc_worker_wake_up(phba);
-	return;
-}
-
-/**
  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
  * @phba: The Hba for which this call is being executed.
  *
@@ -472,41 +457,6 @@
 }
 
 /**
- * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
- * @phba: The Hba for which this call is being executed.
- *
- * This routine is called to  process WORKER_RAMP_UP_QUEUE event for worker
- * thread.This routine increases queue depth for all scsi device on each vport
- * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
- * num_cmd_success to zero.
- **/
-void
-lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
-{
-	struct lpfc_vport **vports;
-	struct Scsi_Host  *shost;
-	struct scsi_device *sdev;
-	int i;
-
-	vports = lpfc_create_vport_work_array(phba);
-	if (vports != NULL)
-		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
-			shost = lpfc_shost_from_vport(vports[i]);
-			shost_for_each_device(sdev, shost) {
-				if (vports[i]->cfg_lun_queue_depth <=
-				    sdev->queue_depth)
-					continue;
-				lpfc_change_queue_depth(sdev,
-							sdev->queue_depth+1,
-							SCSI_QDEPTH_RAMP_UP);
-			}
-		}
-	lpfc_destroy_vport_work_array(phba, vports);
-	atomic_set(&phba->num_rsrc_err, 0);
-	atomic_set(&phba->num_cmd_success, 0);
-}
-
-/**
  * lpfc_scsi_dev_block - set all scsi hosts to block state
  * @phba: Pointer to HBA context object.
  *
@@ -1502,7 +1452,7 @@
 	}
 
 	/* Next check if we need to match the remote NPortID or WWPN */
-	rdata = sc->device->hostdata;
+	rdata = lpfc_rport_data_from_scsi_device(sc->device);
 	if (rdata && rdata->pnode) {
 		ndlp = rdata->pnode;
 
@@ -3507,6 +3457,14 @@
 	 * we need to set word 4 of IOCB here
 	 */
 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+
+	/*
+	 * If the OAS driver feature is enabled and the lun is enabled for
+	 * OAS, set the oas iocb related flags.
+	 */
+	if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
+		scsi_cmnd->device->hostdata)->oas_enabled)
+		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
 	return 0;
 }
 
@@ -4021,7 +3979,6 @@
 	struct lpfc_nodelist *pnode = rdata->pnode;
 	struct scsi_cmnd *cmd;
 	int result;
-	struct scsi_device *tmp_sdev;
 	int depth;
 	unsigned long flags;
 	struct lpfc_fast_path_event *fast_path_evt;
@@ -4266,32 +4223,6 @@
 		return;
 	}
 
-	if (!result)
-		lpfc_rampup_queue_depth(vport, queue_depth);
-
-	/*
-	 * Check for queue full.  If the lun is reporting queue full, then
-	 * back off the lun queue depth to prevent target overloads.
-	 */
-	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
-	    NLP_CHK_NODE_ACT(pnode)) {
-		shost_for_each_device(tmp_sdev, shost) {
-			if (tmp_sdev->id != scsi_id)
-				continue;
-			depth = scsi_track_queue_full(tmp_sdev,
-						      tmp_sdev->queue_depth-1);
-			if (depth <= 0)
-				continue;
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-					 "0711 detected queue full - lun queue "
-					 "depth adjusted to %d.\n", depth);
-			lpfc_send_sdev_queuedepth_change_event(phba, vport,
-							       pnode,
-							       tmp_sdev->lun,
-							       depth+1, depth);
-		}
-	}
-
 	spin_lock_irqsave(&phba->hbalock, flags);
 	lpfc_cmd->pCmd = NULL;
 	spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -4492,6 +4423,8 @@
 	}
 	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+	piocb->ulpPU = 0;
+	piocb->un.fcpi.fcpi_parm = 0;
 
 	/* ulpTimeout is only one byte */
 	if (lpfc_cmd->timeout > 0xff) {
@@ -4691,12 +4624,13 @@
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_rport_data *rdata;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_scsi_buf *lpfc_cmd;
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 	int err;
 
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	err = fc_remote_port_chkready(rport);
 	if (err) {
 		cmnd->result = err;
@@ -4782,6 +4716,24 @@
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err) {
 		atomic_dec(&ndlp->cmd_pending);
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "3376 FCP could not issue IOCB err %x "
+				 "FCP cmd x%x <%d/%d> "
+				 "sid: x%x did: x%x oxid: x%x "
+				 "Data: x%x x%x x%x x%x\n",
+				 err, cmnd->cmnd[0],
+				 cmnd->device ? cmnd->device->id : 0xffff,
+				 cmnd->device ? cmnd->device->lun : 0xffff,
+				 vport->fc_myDID, ndlp->nlp_DID,
+				 phba->sli_rev == LPFC_SLI_REV4 ?
+				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
+				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
+				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
+				 (uint32_t)
+				 (cmnd->request->timeout / 1000));
+
 		goto out_host_busy_free_buf;
 	}
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -5161,10 +5113,11 @@
 static int
 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
 {
-	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_rport_data *rdata;
 	struct lpfc_nodelist *pnode;
 	unsigned long later;
 
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	if (!rdata) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
@@ -5182,7 +5135,7 @@
 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
 			return SUCCESS;
 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-		rdata = cmnd->device->hostdata;
+		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 		if (!rdata)
 			return FAILED;
 		pnode = rdata->pnode;
@@ -5254,13 +5207,14 @@
 {
 	struct Scsi_Host  *shost = cmnd->device->host;
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_rport_data *rdata;
 	struct lpfc_nodelist *pnode;
 	unsigned tgt_id = cmnd->device->id;
 	unsigned int lun_id = cmnd->device->lun;
 	struct lpfc_scsi_event_header scsi_event;
 	int status;
 
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	if (!rdata) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			"0798 Device Reset rport failure: rdata x%p\n", rdata);
@@ -5323,13 +5277,14 @@
 {
 	struct Scsi_Host  *shost = cmnd->device->host;
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_rport_data *rdata;
 	struct lpfc_nodelist *pnode;
 	unsigned tgt_id = cmnd->device->id;
 	unsigned int lun_id = cmnd->device->lun;
 	struct lpfc_scsi_event_header scsi_event;
 	int status;
 
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 	if (!rdata) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			"0799 Target Reset rport failure: rdata x%p\n", rdata);
@@ -5529,11 +5484,45 @@
 	uint32_t num_to_alloc = 0;
 	int num_allocated = 0;
 	uint32_t sdev_cnt;
+	struct lpfc_device_data *device_data;
+	unsigned long flags;
+	struct lpfc_name target_wwpn;
 
 	if (!rport || fc_remote_port_chkready(rport))
 		return -ENXIO;
 
-	sdev->hostdata = rport->dd_data;
+	if (phba->cfg_EnableXLane) {
+
+		/*
+		 * Check to see if the device data structure for the lun
+		 * exists.  If not, create one.
+		 */
+
+		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
+		spin_lock_irqsave(&phba->devicelock, flags);
+		device_data = __lpfc_get_device_data(phba,
+						     &phba->luns,
+						     &vport->fc_portname,
+						     &target_wwpn,
+						     sdev->lun);
+		if (!device_data) {
+			spin_unlock_irqrestore(&phba->devicelock, flags);
+			device_data = lpfc_create_device_data(phba,
+							&vport->fc_portname,
+							&target_wwpn,
+							sdev->lun, true);
+			if (!device_data)
+				return -ENOMEM;
+			spin_lock_irqsave(&phba->devicelock, flags);
+			list_add_tail(&device_data->listentry, &phba->luns);
+		}
+		device_data->rport_data = rport->dd_data;
+		device_data->available = true;
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		sdev->hostdata = device_data;
+	} else {
+		sdev->hostdata = rport->dd_data;
+	}
 	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
 
 	/*
@@ -5623,11 +5612,344 @@
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
+	unsigned long flags;
+	struct lpfc_device_data *device_data = sdev->hostdata;
+
 	atomic_dec(&phba->sdev_cnt);
+	if ((phba->cfg_EnableXLane) && (device_data)) {
+		spin_lock_irqsave(&phba->devicelock, flags);
+		device_data->available = false;
+		if (!device_data->oas_enabled)
+			lpfc_delete_device_data(phba, device_data);
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+	}
 	sdev->hostdata = NULL;
 	return;
 }
 
+/**
+ * lpfc_create_device_data - creates and initializes device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ * @atomic_create: Flag to indicate if memory should be allocated using the
+ *		  GFP_ATOMIC flag or not.
+ *
+ * This routine creates a device data structure which contains identifying
+ * information for the device (host wwpn, target wwpn, lun), the state of
+ * OAS, whether or not the corresponding lun is available to the system,
+ * and a pointer to the rport data.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+			struct lpfc_name *target_wwpn, uint64_t lun,
+			bool atomic_create)
+{
+
+	struct lpfc_device_data *lun_info;
+	int memory_flags;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
+	    !(phba->cfg_EnableXLane))
+		return NULL;
+
+	/* Attempt to create the device data to contain lun info */
+
+	if (atomic_create)
+		memory_flags = GFP_ATOMIC;
+	else
+		memory_flags = GFP_KERNEL;
+	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
+	if (!lun_info)
+		return NULL;
+	INIT_LIST_HEAD(&lun_info->listentry);
+	lun_info->rport_data  = NULL;
+	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
+	       sizeof(struct lpfc_name));
+	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
+	       sizeof(struct lpfc_name));
+	lun_info->device_id.lun = lun;
+	lun_info->oas_enabled = false;
+	lun_info->available = false;
+	return lun_info;
+}
+
+/**
+ * lpfc_delete_device_data - frees a device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @lun_info: Pointer to device data structure to free.
+ *
+ * This routine frees the previously allocated device data structure passed.
+ *
+ **/
+void
+lpfc_delete_device_data(struct lpfc_hba *phba,
+			struct lpfc_device_data *lun_info)
+{
+
+	if (unlikely(!phba) || !lun_info  ||
+	    !(phba->cfg_EnableXLane))
+		return;
+
+	if (!list_empty(&lun_info->listentry))
+		list_del(&lun_info->listentry);
+	mempool_free(lun_info, phba->device_data_mem_pool);
+	return;
+}
+
+/**
+ * __lpfc_get_device_data - returns the device data for the specified lun
+ * @phba: Pointer to host bus adapter structure.
+ * @list: Point to list to search.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ *
+ * This routine searches the list passed for the specified lun's device data.
+ * This function does not take any locks; it is the responsibility of the
+ * caller to ensure the proper lock is held before calling the function.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
+		       struct lpfc_name *vport_wwpn,
+		       struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+	struct lpfc_device_data *lun_info;
+
+	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
+	    !phba->cfg_EnableXLane)
+		return NULL;
+
+	/* Search the list for an entry matching vport, target and lun. */
+
+	list_for_each_entry(lun_info, list, listentry) {
+		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+			    sizeof(struct lpfc_name)) == 0) &&
+		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+			    sizeof(struct lpfc_name)) == 0) &&
+		    (lun_info->device_id.lun == lun))
+			return lun_info;
+	}
+
+	return NULL;
+}
+
+/**
+ * lpfc_find_next_oas_lun - searches for the next oas lun
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @starting_lun: Pointer to the lun to start searching for
+ * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
+ * @found_target_wwpn: Pointer to the found lun's target wwpn information
+ * @found_lun: Pointer to the found lun.
+ * @found_lun_status: Pointer to status of the found lun.
+ *
+ * This routine searches the luns list for the specified lun
+ * or the first lun for the vport/target.  If the vport wwpn contains
+ * a zero value then a specific vport is not specified. In this case
+ * any vport which contains the lun will be considered a match.  If the
+ * target wwpn contains a zero value then a specific target is not specified.
+ * In this case any target which contains the lun will be considered a
+ * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
+ * are returned.  The function will also return the next lun if available.
+ * If the next lun is not found, the starting_lun parameter will be set to
+ * NO_MORE_OAS_LUN.
+ *
+ * Return codes:
+ *   true - A matching lun was found
+ *   false - No matching lun was found, or an error occurred
+ **/
+bool
+lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
+		       struct lpfc_name *found_vport_wwpn,
+		       struct lpfc_name *found_target_wwpn,
+		       uint64_t *found_lun,
+		       uint32_t *found_lun_status)
+{
+
+	unsigned long flags;
+	struct lpfc_device_data *lun_info;
+	struct lpfc_device_id *device_id;
+	uint64_t lun;
+	bool found = false;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+	    !starting_lun || !found_vport_wwpn ||
+	    !found_target_wwpn || !found_lun || !found_lun_status ||
+	    (*starting_lun == NO_MORE_OAS_LUN) ||
+	    !phba->cfg_EnableXLane)
+		return false;
+
+	lun = *starting_lun;
+	*found_lun = NO_MORE_OAS_LUN;
+	*starting_lun = NO_MORE_OAS_LUN;
+
+	/* Search for the lun, or the lun closest in value */
+
+	spin_lock_irqsave(&phba->devicelock, flags);
+	list_for_each_entry(lun_info, &phba->luns, listentry) {
+		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
+		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+			    sizeof(struct lpfc_name)) == 0)) &&
+		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
+		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+			    sizeof(struct lpfc_name)) == 0)) &&
+		    (lun_info->oas_enabled)) {
+			device_id = &lun_info->device_id;
+			if ((!found) &&
+			    ((lun == FIND_FIRST_OAS_LUN) ||
+			     (device_id->lun == lun))) {
+				*found_lun = device_id->lun;
+				memcpy(found_vport_wwpn,
+				       &device_id->vport_wwpn,
+				       sizeof(struct lpfc_name));
+				memcpy(found_target_wwpn,
+				       &device_id->target_wwpn,
+				       sizeof(struct lpfc_name));
+				if (lun_info->available)
+					*found_lun_status =
+						OAS_LUN_STATUS_EXISTS;
+				else
+					*found_lun_status = 0;
+				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
+					memset(vport_wwpn, 0x0,
+					       sizeof(struct lpfc_name));
+				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
+					memset(target_wwpn, 0x0,
+					       sizeof(struct lpfc_name));
+				found = true;
+			} else if (found) {
+				*starting_lun = device_id->lun;
+				memcpy(vport_wwpn, &device_id->vport_wwpn,
+				       sizeof(struct lpfc_name));
+				memcpy(target_wwpn, &device_id->target_wwpn,
+				       sizeof(struct lpfc_name));
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&phba->devicelock, flags);
+	return found;
+}
+
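
The in/out contract of lpfc_find_next_oas_lun() is easiest to see from a
caller's loop.  A hedged sketch, assuming driver context (phba in scope and
the usual kernel headers); zeroed wwpns mean "match any", per the kernel-doc
above.  This is illustrative, not code from the patch:

	struct lpfc_name vwwpn, twwpn, fvwwpn, ftwwpn;
	uint64_t lun = FIND_FIRST_OAS_LUN;
	uint64_t found_lun;
	uint32_t status;

	memset(&vwwpn, 0, sizeof(vwwpn));	/* zero wwpn: any vport  */
	memset(&twwpn, 0, sizeof(twwpn));	/* zero wwpn: any target */

	/* Each call reports one OAS-enabled lun and advances *starting_lun
	 * to the next one, leaving NO_MORE_OAS_LUN after the last entry. */
	while (lpfc_find_next_oas_lun(phba, &vwwpn, &twwpn, &lun,
				      &fvwwpn, &ftwwpn, &found_lun, &status)) {
		/* consume found_lun / status here */
		if (lun == NO_MORE_OAS_LUN)
			break;
	}
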
+/**
+ * lpfc_enable_oas_lun - enables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine enables a lun for OAS operations.  The routine does so by
+ * doing the following :
+ *
+ *   1) Checks to see if the device data for the lun has been created.
+ *   2) If found, sets the OAS enabled flag if not set and returns.
+ *   3) Otherwise, creates a device data structure.
+ *   4) If successfully created, marks the device data as belonging to an OAS
+ *   lun, marks the lun as not available, and adds it to the list of luns.
+ *
+ * Return codes:
+ *   false - Error
+ *   true - Success
+ **/
+bool
+lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+		    struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+	struct lpfc_device_data *lun_info;
+	unsigned long flags;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+	    !phba->cfg_EnableXLane)
+		return false;
+
+	spin_lock_irqsave(&phba->devicelock, flags);
+
+	/* Check to see if the device data for the lun has been created */
+	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
+					  target_wwpn, lun);
+	if (lun_info) {
+		if (!lun_info->oas_enabled)
+			lun_info->oas_enabled = true;
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		return true;
+	}
+
+	/* Create an lun info structure and add to list of luns */
+	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
+					   false);
+	if (lun_info) {
+		lun_info->oas_enabled = true;
+		lun_info->available = false;
+		list_add_tail(&lun_info->listentry, &phba->luns);
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		return true;
+	}
+	spin_unlock_irqrestore(&phba->devicelock, flags);
+	return false;
+}
+
+/**
+ * lpfc_disable_oas_lun - disables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ *
+ * This routine disables a lun for OAS operations.  The routine does so by
+ * doing the following :
+ *
+ *   1) Checks to see if the device data for the lun has been created.
+ *   2) If present, clears the flag indicating this lun is for OAS.
+ *   3) If the lun is not available to the system, the device data is
+ *   freed.
+ *
+ * Return codes:
+ *   false - Error
+ *   true - Success
+ **/
+bool
+lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+		     struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+	struct lpfc_device_data *lun_info;
+	unsigned long flags;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+	    !phba->cfg_EnableXLane)
+		return false;
+
+	spin_lock_irqsave(&phba->devicelock, flags);
+
+	/* Look up the device data for the lun, if any. */
+	lun_info = __lpfc_get_device_data(phba,
+					  &phba->luns, vport_wwpn,
+					  target_wwpn, lun);
+	if (lun_info) {
+		lun_info->oas_enabled = false;
+		if (!lun_info->available)
+			lpfc_delete_device_data(phba, lun_info);
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		return true;
+	}
+
+	spin_unlock_irqrestore(&phba->devicelock, flags);
+	return false;
+}
 
 struct scsi_host_template lpfc_template = {
 	.module			= THIS_MODULE,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 852ff7d..0120bfc 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -41,6 +41,20 @@
 	struct lpfc_nodelist *pnode;	/* Pointer to the node structure. */
 };
 
+struct lpfc_device_id {
+	struct lpfc_name vport_wwpn;
+	struct lpfc_name target_wwpn;
+	uint64_t lun;
+};
+
+struct lpfc_device_data {
+	struct list_head listentry;
+	struct lpfc_rport_data *rport_data;
+	struct lpfc_device_id device_id;
+	bool oas_enabled;
+	bool available;
+};
+
 struct fcp_rsp {
 	uint32_t rspRsvd1;	/* FC Word 0, byte 0:3 */
 	uint32_t rspRsvd2;	/* FC Word 1, byte 0:3 */
@@ -166,3 +180,7 @@
 #define LPFC_SCSI_DMA_EXT_SIZE 264
 #define LPFC_BPL_SIZE          1024
 #define MDAC_DIRECT_CMD                  0x22
+
+#define FIND_FIRST_OAS_LUN		 0
+#define NO_MORE_OAS_LUN			-1
+#define NOT_OAS_ENABLED_LUN		NO_MORE_OAS_LUN
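
Since luns travel through the OAS interfaces as uint64_t, the -1 sentinel
above compares as all-ones.  A standalone check of that assumption:

	#include <stdint.h>
	#include <stdio.h>

	#define NO_MORE_OAS_LUN	(-1)

	int main(void)
	{
		uint64_t lun = (uint64_t)NO_MORE_OAS_LUN;

		/* prints 1: the sentinel is 0xFFFFFFFFFFFFFFFF as a lun */
		printf("%d\n", lun == UINT64_MAX);
		return 0;
	}
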
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8f580fd..6bb51f8e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -635,7 +635,7 @@
 	if (!ndlp)
 		goto out;
 
-	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
+	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
 		rrq->send_rrq = 0;
 		rrq->xritag = 0;
 		rrq->rrq_stop_time = 0;
@@ -678,7 +678,8 @@
 			next_time = rrq->rrq_stop_time;
 	}
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	if (!list_empty(&phba->active_rrq_list))
+	if ((!list_empty(&phba->active_rrq_list)) &&
+	    (!(phba->pport->load_flag & FC_UNLOADING)))
 		mod_timer(&phba->rrq_tmr, next_time);
 	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
 		list_del(&rrq->list);
@@ -792,7 +793,9 @@
 		list_del(&rrq->list);
 		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 	}
-	if (!list_empty(&phba->active_rrq_list))
+	if ((!list_empty(&phba->active_rrq_list)) &&
+	    (!(phba->pport->load_flag & FC_UNLOADING)))
 		mod_timer(&phba->rrq_tmr, next_time);
 }
 
@@ -813,7 +816,9 @@
 {
 	if (!ndlp)
 		return 0;
-	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+	if (!ndlp->active_rrqs_xri_bitmap)
+		return 0;
+	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
 			return 1;
 	else
 		return 0;
@@ -863,7 +868,10 @@
 	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
 		goto out;
 
-	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+	if (!ndlp->active_rrqs_xri_bitmap)
+		goto out;
+
+	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
 		goto out;
 
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -1318,7 +1326,8 @@
 
 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
-	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
+	   (!(piocb->vport->load_flag & FC_UNLOADING))) {
 		if (!piocb->vport)
 			BUG();
 		else
@@ -4971,12 +4980,19 @@
 					     LPFC_QUEUE_REARM);
 		} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
 	}
+
+	if (phba->cfg_EnableXLane)
+		lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
 	if (phba->sli4_hba.hba_eq) {
 		for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
 		     fcp_eqidx++)
 			lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
 					     LPFC_QUEUE_REARM);
 	}
+
+	if (phba->cfg_fof)
+		lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
 }
 
 /**
@@ -8032,7 +8048,8 @@
 	struct lpfc_vector_map_info *cpup;
 	int chann, cpu;
 
-	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU &&
+	    phba->cfg_fcp_io_channel > 1) {
 		cpu = smp_processor_id();
 		if (cpu < phba->sli4_hba.num_present_cpu) {
 			cpup = phba->sli4_hba.cpu_map;
@@ -8250,6 +8267,14 @@
 		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
 		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+		if (iocbq->iocb_flag & LPFC_IO_OAS) {
+			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+			if (phba->cfg_XLanePriority) {
+				bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+				       (phba->cfg_XLanePriority << 1));
+			}
+		}
 		break;
 	case CMD_FCP_IREAD64_CR:
 		/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8271,6 +8296,14 @@
 		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
 		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+		if (iocbq->iocb_flag & LPFC_IO_OAS) {
+			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+			if (phba->cfg_XLanePriority) {
+				bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+				       (phba->cfg_XLanePriority << 1));
+			}
+		}
 		break;
 	case CMD_FCP_ICMND64_CR:
 		/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8291,6 +8324,14 @@
 		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
 		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
 		       iocbq->iocb.ulpFCP2Rcvy);
+		if (iocbq->iocb_flag & LPFC_IO_OAS) {
+			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+			if (phba->cfg_XLanePriority) {
+				bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+				       (phba->cfg_XLanePriority << 1));
+			}
+		}
 		break;
 	case CMD_GEN_REQUEST64_CR:
 		/* For this command calculate the xmit length of the
@@ -8523,6 +8564,7 @@
 {
 	struct lpfc_sglq *sglq;
 	union lpfc_wqe wqe;
+	struct lpfc_queue *wq;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
 
 	if (piocb->sli4_xritag == NO_XRI) {
@@ -8575,11 +8617,14 @@
 		return IOCB_ERROR;
 
 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
-		(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-		if (unlikely(!phba->sli4_hba.fcp_wq))
-			return IOCB_ERROR;
-		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
-				     &wqe))
+	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+		if (!phba->cfg_EnableXLane ||
+		    !(piocb->iocb_flag & LPFC_IO_OAS)) {
+			wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
+		} else {
+			wq = phba->sli4_hba.oas_wq;
+		}
+		if (lpfc_sli4_wq_put(wq, &wqe))
 			return IOCB_ERROR;
 	} else {
 		if (unlikely(!phba->sli4_hba.els_wq))
@@ -8669,12 +8714,20 @@
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		if (piocb->iocb_flag &  LPFC_IO_FCP) {
-			if (unlikely(!phba->sli4_hba.fcp_wq))
-				return IOCB_ERROR;
-			idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
-			piocb->fcp_wqidx = idx;
-			ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-
+			if (!phba->cfg_EnableXLane ||
+			    !(piocb->iocb_flag & LPFC_IO_OAS)) {
+				if (unlikely(!phba->sli4_hba.fcp_wq))
+					return IOCB_ERROR;
+				idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+				piocb->fcp_wqidx = idx;
+				ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+			} else {
+				if (unlikely(!phba->sli4_hba.oas_wq))
+					return IOCB_ERROR;
+				idx = 0;
+				piocb->fcp_wqidx = 0;
+				ring_number = LPFC_FCP_OAS_RING;
+			}
 			pring = &phba->sli.ring[ring_number];
 			spin_lock_irqsave(&pring->ring_lock, iflags);
 			rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
@@ -12132,6 +12185,175 @@
 	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 }
 
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ *			     entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the Flash Optimized
+ * Fabric event queue.  It checks the MajorCode and MinorCode to determine
+ * whether this is a completion event on a completion queue; if not, an
+ * error is logged and the routine returns.  Otherwise, it finds the
+ * corresponding completion queue, processes all the entries on it, rearms
+ * it, and then returns.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+	struct lpfc_queue *cq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	uint16_t cqid;
+	int ecount = 0;
+
+	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"9147 Not a valid completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
+		return;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+	/* Next check for OAS */
+	cq = phba->sli4_hba.oas_cq;
+	if (unlikely(!cq)) {
+		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"9148 OAS completion queue "
+					"does not exist\n");
+		return;
+	}
+
+	if (unlikely(cqid != cq->queue_id)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"9149 Miss-matched fast-path compl "
+				"queue id: eqcqid=%d, fcpcqid=%d\n",
+				cqid, cq->queue_id);
+		return;
+	}
+
+	/* Process all the entries to the OAS CQ */
+	while ((cqe = lpfc_sli4_cq_get(cq))) {
+		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+		if (!(++ecount % cq->entry_repost))
+			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Track the max number of CQEs processed in 1 EQ */
+	if (ecount > cq->CQ_max_cqe)
+		cq->CQ_max_cqe = ecount;
+
+	/* Catch the no cq entry condition */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"9153 No entry from fast-path completion "
+				"queue fcpcqid=%d\n", cq->queue_id);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there are works to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
+ * IOCB ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. Flash Optimized Fabric ring events are handled in
+ * interrupt context. This function is called without any lock held.
+ * It takes the hbalock to access and update SLI data structures. Note that
+ * the EQ and CQ are mapped one-to-one, so the EQ index is equal to the
+ * CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+	struct lpfc_queue *eq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+	uint32_t eqidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+	phba = fcp_eq_hdl->phba;
+	eqidx = fcp_eq_hdl->idx;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	eq = phba->sli4_hba.fof_eq;
+	if (unlikely(!eq))
+		return IRQ_NONE;
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		eq->EQ_badstate++;
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, eq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the events on the FCP fast-path EQ
+	 */
+	while ((eqe = lpfc_sli4_eq_get(eq))) {
+		lpfc_sli4_fof_handle_eqe(phba, eqe);
+		if (!(++ecount % eq->entry_repost))
+			lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
+		eq->EQ_processed++;
+	}
+
+	/* Track the max number of EQEs processed in 1 intr */
+	if (ecount > eq->EQ_max_eqe)
+		eq->EQ_max_eqe = ecount;
+
+	if (unlikely(ecount == 0)) {
+		eq->EQ_no_entry++;
+
+		if (phba->intr_type == MSIX)
+			/* MSI-X vectors are not shared, so still claim the EQE-less interrupt */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"9145 MSI-X interrupt with no EQE\n");
+		else {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"9146 ISR interrupt with no EQE\n");
+			/* The line interrupt may be shared; with no EQE it was not ours */
+			return IRQ_NONE;
+		}
+	}
+	/* Always clear and re-arm the fast-path EQ */
+	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+	return IRQ_HANDLED;
+}
+
 /**
  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
  * @irq: Interrupt number.
@@ -12287,6 +12509,13 @@
 			hba_handled |= true;
 	}
 
+	if (phba->cfg_fof) {
+		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+					&phba->sli4_hba.fcp_eq_hdl[0]);
+		if (hba_irq_rc == IRQ_HANDLED)
+			hba_handled |= true;
+	}
+
 	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
 } /* lpfc_sli4_intr_handler */
 
@@ -16544,7 +16773,7 @@
 {
 	LIST_HEAD(completions);
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	struct lpfc_iocbq *piocbq = 0;
+	struct lpfc_iocbq *piocbq = NULL;
 	unsigned long iflags = 0;
 	char *fail_msg = NULL;
 	struct lpfc_sglq *sglq;
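
The OAS completion path above follows the usual SLI-4 drain/repost rhythm: completion entries are popped off the CQ one at a time, consumed slots are handed back to firmware every entry_repost entries without re-arming, and the interrupt is re-armed only once the queue is empty. A minimal standalone sketch of that rhythm, with illustrative stand-in names rather than the lpfc API:

    /*
     * Drain a completion ring, releasing slots back to "firmware"
     * every ENTRY_REPOST entries without re-arming, then re-arm once.
     */
    #include <stdio.h>
    #include <stdbool.h>

    #define ENTRY_REPOST 8

    static int queue_get(int *remaining)    /* stand-in for lpfc_sli4_cq_get() */
    {
        return (*remaining)-- > 0;
    }

    static void queue_release(bool rearm)   /* stand-in for lpfc_sli4_cq_release() */
    {
        printf("release, rearm=%d\n", rearm);
    }

    int main(void)
    {
        int remaining = 20, ecount = 0;

        while (queue_get(&remaining)) {
            /* a real handler would process the entry here */
            if (!(++ecount % ENTRY_REPOST))
                queue_release(false);       /* LPFC_QUEUE_NOARM */
        }
        queue_release(true);                /* LPFC_QUEUE_REARM */
        printf("processed %d entries\n", ecount);
        return 0;
    }
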
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 6b0f247..6f04080 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -78,6 +78,8 @@
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
 
+#define LPFC_IO_OAS		0x10000 /* OAS FCP IO */
+
 	uint32_t drvrTimeout;	/* driver timeout in seconds */
 	uint32_t fcp_wqidx;	/* index to FCP work queue */
 	struct lpfc_vport *vport;/* virtual port pointer */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 298c8cd..9b8cda8 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -39,6 +39,10 @@
 #define LPFC_FCP_IO_CHAN_MIN       1
 #define LPFC_FCP_IO_CHAN_MAX       16
 
+/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+
+#define LPFC_FOF_IO_CHAN_NUM       1
+
 /*
  * Provide the default FCF Record attributes used by the driver
  * when nonFIP mode is configured and there is no other default
@@ -399,6 +403,7 @@
 	uint32_t if_page_sz;
 	uint32_t rq_db_window;
 	uint32_t loopbk_scope;
+	uint32_t oas_supported;
 	uint32_t eq_pages_max;
 	uint32_t eqe_size;
 	uint32_t cq_pages_max;
@@ -439,6 +444,8 @@
 	uint8_t lnk_no;
 };
 
+#define LPFC_SLI4_HANDLER_CNT		(LPFC_FCP_IO_CHAN_MAX+ \
+					 LPFC_FOF_IO_CHAN_NUM)
 #define LPFC_SLI4_HANDLER_NAME_SZ	16
 
 /* Used for IRQ vector to CPU mapping */
@@ -507,7 +514,7 @@
 	struct lpfc_register sli_intf;
 	struct lpfc_pc_sli4_params pc_sli4_params;
 	struct msix_entry *msix_entries;
-	uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
+	uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
 	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
 
 	/* Pointers to the constructed SLI4 queues */
@@ -527,6 +534,17 @@
 	uint32_t ulp0_mode;	/* ULP0 protocol mode */
 	uint32_t ulp1_mode;	/* ULP1 protocol mode */
 
+	struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
+
+	/* Optimized Access Storage specific queues/structures */
+
+	struct lpfc_queue *oas_cq; /* OAS completion queue */
+	struct lpfc_queue *oas_wq; /* OAS Work queue */
+	struct lpfc_sli_ring *oas_ring;
+	uint64_t oas_next_lun;
+	uint8_t oas_next_tgt_wwpn[8];
+	uint8_t oas_next_vpt_wwpn[8];
+
 	/* Setup information for various queue parameters */
 	int eq_esize;
 	int eq_ecount;
@@ -589,6 +607,7 @@
 	struct lpfc_vector_map_info *cpu_map;
 	uint16_t num_online_cpu;
 	uint16_t num_present_cpu;
+	uint16_t curr_disp_cpu;
 };
 
 enum lpfc_sge_type {
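
One detail worth spelling out from the lpfc_sli4.h hunk above: handler_name used to be dimensioned by LPFC_FCP_IO_CHAN_MAX alone, so giving the new FOF vector its own named handler needed the array to grow by LPFC_FOF_IO_CHAN_NUM. A tiny standalone illustration of the new sizing (the "lpfc:fof" name here is hypothetical):

    #include <stdio.h>

    #define LPFC_FCP_IO_CHAN_MAX      16
    #define LPFC_FOF_IO_CHAN_NUM      1
    #define LPFC_SLI4_HANDLER_CNT     (LPFC_FCP_IO_CHAN_MAX + LPFC_FOF_IO_CHAN_NUM)
    #define LPFC_SLI4_HANDLER_NAME_SZ 16

    int main(void)
    {
        char handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];

        /* the extra slot gives the FOF vector a legal name entry */
        snprintf(handler_name[LPFC_SLI4_HANDLER_CNT - 1],
                 LPFC_SLI4_HANDLER_NAME_SZ, "lpfc:fof");
        printf("%s in slot %d of %d\n",
               handler_name[LPFC_SLI4_HANDLER_CNT - 1],
               LPFC_SLI4_HANDLER_CNT - 1, LPFC_SLI4_HANDLER_CNT);
        return 0;
    }
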
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e3094c4..e32cbec 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.43"
+#define LPFC_DRIVER_VERSION "8.3.45"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 816db12..b777051 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -531,13 +531,6 @@
 	int	target = 0;
 	int	ldrv_num = 0;   /* logical drive number */
 
-
-	/*
-	 * filter the internal and ioctl commands
-	 */
-	if((cmd->cmnd[0] == MEGA_INTERNAL_CMD))
-		return (scb_t *)cmd->host_scribble;
-
 	/*
 	 * We know what channels our logical drives are on - mega_find_card()
 	 */
@@ -1439,19 +1432,22 @@
 
 		cmdid = completed[i];
 
-		if( cmdid == CMDID_INT_CMDS ) { /* internal command */
+		/*
+		 * Only free SCBs for the commands coming down from the
+		 * mid-layer, not for those that were issued internally.
+		 *
+		 * For an internal command, restore the status returned by
+		 * the firmware so that the user can interpret it.
+		 */
+		if (cmdid == CMDID_INT_CMDS) {
 			scb = &adapter->int_scb;
-			cmd = scb->cmd;
-			mbox = (mbox_t *)scb->raw_mbox;
 
-			/*
-			 * Internal command interface do not fire the extended
-			 * passthru or 64-bit passthru
-			 */
-			pthru = scb->pthru;
+			list_del_init(&scb->list);
+			scb->state = SCB_FREE;
 
-		}
-		else {
+			adapter->int_status = status;
+			complete(&adapter->int_waitq);
+		} else {
 			scb = &adapter->scb_list[cmdid];
 
 			/*
@@ -1640,25 +1636,7 @@
 				cmd->result |= (DID_BAD_TARGET << 16)|status;
 		}
 
-		/*
-		 * Only free SCBs for the commands coming down from the
-		 * mid-layer, not for which were issued internally
-		 *
-		 * For internal command, restore the status returned by the
-		 * firmware so that user can interpret it.
-		 */
-		if( cmdid == CMDID_INT_CMDS ) { /* internal command */
-			cmd->result = status;
-
-			/*
-			 * Remove the internal command from the pending list
-			 */
-			list_del_init(&scb->list);
-			scb->state = SCB_FREE;
-		}
-		else {
-			mega_free_scb(adapter, scb);
-		}
+		mega_free_scb(adapter, scb);
 
 		/* Add Scsi_Command to end of completed queue */
 		list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
@@ -4133,23 +4111,15 @@
  * The last argument is the address of the passthru structure if the command
  * to be fired is a passthru command
  *
- * lockscope specifies whether the caller has already acquired the lock. Of
- * course, the caller must know which lock we are talking about.
- *
  * Note: parameter 'pthru' is null for non-passthru commands.
  */
 static int
 mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
 {
-	Scsi_Cmnd	*scmd;
-	struct	scsi_device *sdev;
+	unsigned long flags;
 	scb_t	*scb;
 	int	rval;
 
-	scmd = scsi_allocate_command(GFP_KERNEL);
-	if (!scmd)
-		return -ENOMEM;
-
 	/*
 	 * The internal commands share one command id and hence are
 	 * serialized. This is so because we want to reserve maximum number of
@@ -4160,73 +4130,45 @@
 	scb = &adapter->int_scb;
 	memset(scb, 0, sizeof(scb_t));
 
-	sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
-	scmd->device = sdev;
-
-	memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
-	scmd->cmnd = adapter->int_cdb;
-	scmd->device->host = adapter->host;
-	scmd->host_scribble = (void *)scb;
-	scmd->cmnd[0] = MEGA_INTERNAL_CMD;
-
-	scb->state |= SCB_ACTIVE;
-	scb->cmd = scmd;
+	scb->idx = CMDID_INT_CMDS;
+	scb->state |= SCB_ACTIVE | SCB_PENDQ;
 
 	memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
 
 	/*
 	 * Is it a passthru command
 	 */
-	if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
-
+	if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
 		scb->pthru = pthru;
-	}
 
-	scb->idx = CMDID_INT_CMDS;
-
-	megaraid_queue_lck(scmd, mega_internal_done);
+	spin_lock_irqsave(&adapter->lock, flags);
+	list_add_tail(&scb->list, &adapter->pending_list);
+	/*
+	 * Check if the HBA is in a quiescent state, e.g., during a
+	 * delete logical drive operation. If it is, don't run
+	 * the pending_list.
+	 */
+	if (atomic_read(&adapter->quiescent) == 0)
+		mega_runpendq(adapter);
+	spin_unlock_irqrestore(&adapter->lock, flags);
 
 	wait_for_completion(&adapter->int_waitq);
 
-	rval = scmd->result;
-	mc->status = scmd->result;
-	kfree(sdev);
+	mc->status = rval = adapter->int_status;
 
 	/*
 	 * Print a debug message for all failed commands. Applications can use
 	 * this information.
 	 */
-	if( scmd->result && trace_level ) {
+	if (rval && trace_level) {
 		printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
-			mc->cmd, mc->opcode, mc->subopcode, scmd->result);
+			mc->cmd, mc->opcode, mc->subopcode, rval);
 	}
 
 	mutex_unlock(&adapter->int_mtx);
-
-	scsi_free_command(GFP_KERNEL, scmd);
-
 	return rval;
 }
 
-
-/**
- * mega_internal_done()
- * @scmd - internal scsi command
- *
- * Callback routine for internal commands.
- */
-static void
-mega_internal_done(Scsi_Cmnd *scmd)
-{
-	adapter_t	*adapter;
-
-	adapter = (adapter_t *)scmd->device->host->hostdata;
-
-	complete(&adapter->int_waitq);
-
-}
-
-
 static struct scsi_host_template megaraid_template = {
 	.module				= THIS_MODULE,
 	.name				= "MegaRAID",
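
The megaraid.c rework above retires the fake scsi_cmnd/scsi_device plumbing (and its mega_internal_done() callback) in favour of a plain completion: the ISR path stores the firmware status in adapter->int_status and signals adapter->int_waitq, while mega_internal_command() just queues the SCB and sleeps. A userspace analogue of that handshake, with a semaphore standing in for the kernel completion and all names illustrative:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static sem_t int_waitq;   /* plays the role of adapter->int_waitq */
    static int int_status;    /* plays the role of adapter->int_status */

    static void *isr_thread(void *arg)
    {
        (void)arg;
        int_status = 0;       /* "firmware" status for the internal cmd */
        sem_post(&int_waitq); /* complete(&adapter->int_waitq) */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        sem_init(&int_waitq, 0, 0);
        pthread_create(&t, NULL, isr_thread, NULL);

        sem_wait(&int_waitq); /* wait_for_completion(&adapter->int_waitq) */
        printf("internal cmd status: %d\n", int_status);

        pthread_join(t, NULL);
        return 0;
    }

(Build with -pthread.)
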
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 4d0ce4e..508d65e 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -853,10 +853,10 @@
 
 	u8	sglen;	/* f/w supported scatter-gather list length */
 
-	unsigned char int_cdb[MAX_COMMAND_SIZE];
 	scb_t			int_scb;
 	struct mutex		int_mtx;	/* To synchronize the internal
 						commands */
+	int			int_status;	/* status of internal cmd */
 	struct completion	int_waitq;	/* wait queue for internal
 						 cmds */
 
@@ -1004,7 +1004,6 @@
 static int mega_do_del_logdrv(adapter_t *, int);
 static void mega_get_max_sgl(adapter_t *);
 static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *);
-static void mega_internal_done(Scsi_Cmnd *);
 static int mega_support_cluster(adapter_t *);
 #endif
 
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index dfffd0f..a706927 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -486,6 +486,8 @@
 
 	pthru32->dataxferaddr	= kioc->buf_paddr;
 	if (kioc->data_dir & UIOC_WR) {
+		if (pthru32->dataxferlen > kioc->xferlen)
+			return -EINVAL;
 		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
 						pthru32->dataxferlen)) {
 			return (-EFAULT);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 34452ea..32166c2 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"06.700.06.00-rc1"
-#define MEGASAS_RELDATE				"Aug. 31, 2013"
-#define MEGASAS_EXT_VERSION			"Sat. Aug. 31 17:00:00 PDT 2013"
+#define MEGASAS_VERSION				"06.803.01.00-rc1"
+#define MEGASAS_RELDATE				"Mar. 10, 2014"
+#define MEGASAS_EXT_VERSION			"Mon. Mar. 10 17:00:00 PDT 2014"
 
 /*
  * Device IDs
@@ -48,6 +48,7 @@
 #define	PCI_DEVICE_ID_LSI_SAS0073SKINNY		0x0073
 #define	PCI_DEVICE_ID_LSI_SAS0071SKINNY		0x0071
 #define	PCI_DEVICE_ID_LSI_FUSION		0x005b
+#define PCI_DEVICE_ID_LSI_PLASMA		0x002f
 #define PCI_DEVICE_ID_LSI_INVADER		0x005d
 #define PCI_DEVICE_ID_LSI_FURY			0x005f
 
@@ -559,7 +560,8 @@
 		u8 PCIE:1;
 		u8 iSCSI:1;
 		u8 SAS_3G:1;
-		u8 reserved_0:4;
+		u8 SRIOV:1;
+		u8 reserved_0:3;
 		u8 reserved_1[6];
 		u8 port_count;
 		u64 port_addr[8];
@@ -839,7 +841,12 @@
 
 	struct {                                /*7A4h */
 #if   defined(__BIG_ENDIAN_BITFIELD)
-		u32     reserved:11;
+		u32     reserved:5;
+		u32	activePassive:2;
+		u32	supportConfigAutoBalance:1;
+		u32	mpio:1;
+		u32	supportDataLDonSSCArray:1;
+		u32	supportPointInTimeProgress:1;
 		u32     supportUnevenSpans:1;
 		u32     dedicatedHotSparesLimited:1;
 		u32     headlessMode:1;
@@ -886,7 +893,12 @@
 
 
 		u32     supportUnevenSpans:1;
-		u32     reserved:11;
+		u32	supportPointInTimeProgress:1;
+		u32	supportDataLDonSSCArray:1;
+		u32	mpio:1;
+		u32	supportConfigAutoBalance:1;
+		u32	activePassive:2;
+		u32     reserved:5;
 #endif
 	} adapterOperations2;
 
@@ -914,8 +926,14 @@
 	} cluster;
 
 	char clusterId[16];                     /*7D4h */
+	struct {
+		u8  maxVFsSupported;            /*0x7E4*/
+		u8  numVFsEnabled;              /*0x7E5*/
+		u8  requestorId;                /*0x7E6 0:PF, 1:VF1, 2:VF2*/
+		u8  reserved;                   /*0x7E7*/
+	} iov;
 
-	u8          pad[0x800-0x7E4];           /*7E4 */
+	u8          pad[0x800-0x7E8];           /*0x7E8 pad to 2k */
 } __packed;
 
 /*
@@ -986,7 +1004,9 @@
 
 #define MFI_OB_INTR_STATUS_MASK			0x00000002
 #define MFI_POLL_TIMEOUT_SECS			60
-
+#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF	(5 * HZ)
+#define MEGASAS_OCR_SETTLE_TIME_VF		(1000 * 30)
+#define MEGASAS_ROUTINE_WAIT_TIME_VF		300
 #define MFI_REPLY_1078_MESSAGE_INTERRUPT	0x80000000
 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT	0x00000001
 #define MFI_GEN2_ENABLE_INTERRUPT_MASK		(0x00000001 | 0x00000004)
@@ -1347,9 +1367,15 @@
 union megasas_evt_class_locale {
 
 	struct {
+#ifndef __BIG_ENDIAN_BITFIELD
 		u16 locale;
 		u8 reserved;
 		s8 class;
+#else
+		s8 class;
+		u8 reserved;
+		u16 locale;
+#endif
 	} __attribute__ ((packed)) members;
 
 	u32 word;
@@ -1523,6 +1549,12 @@
 	dma_addr_t producer_h;
 	u32 *consumer;
 	dma_addr_t consumer_h;
+	struct MR_LD_VF_AFFILIATION *vf_affiliation;
+	dma_addr_t vf_affiliation_h;
+	struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
+	dma_addr_t vf_affiliation_111_h;
+	struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
+	dma_addr_t hb_host_mem_h;
 
 	u32 *reply_queue;
 	dma_addr_t reply_queue_h;
@@ -1598,10 +1630,73 @@
 	unsigned long bar;
 	long reset_flags;
 	struct mutex reset_mutex;
+	struct timer_list sriov_heartbeat_timer;
+	char skip_heartbeat_timer_del;
+	u8 requestorId;
+	u64 initiator_sas_address;
+	u64 ld_sas_address[64];
+	char PlasmaFW111;
+	char mpio;
 	int throttlequeuedepth;
 	u8 mask_interrupts;
 	u8 is_imr;
 };
+struct MR_LD_VF_MAP {
+	u32 size;
+	union MR_LD_REF ref;
+	u8 ldVfCount;
+	u8 reserved[6];
+	u8 policy[1];
+};
+
+struct MR_LD_VF_AFFILIATION {
+	u32 size;
+	u8 ldCount;
+	u8 vfCount;
+	u8 thisVf;
+	u8 reserved[9];
+	struct MR_LD_VF_MAP map[1];
+};
+
+/* Plasma 1.11 FW backward compatibility structures */
+#define IOV_111_OFFSET 0x7CE
+#define MAX_VIRTUAL_FUNCTIONS 8
+
+struct IOV_111 {
+	u8 maxVFsSupported;
+	u8 numVFsEnabled;
+	u8 requestorId;
+	u8 reserved[5];
+};
+
+struct MR_LD_VF_MAP_111 {
+	u8 targetId;
+	u8 reserved[3];
+	u8 policy[MAX_VIRTUAL_FUNCTIONS];
+};
+
+struct MR_LD_VF_AFFILIATION_111 {
+	u8 vdCount;
+	u8 vfCount;
+	u8 thisVf;
+	u8 reserved[5];
+	struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
+};
+
+struct MR_CTRL_HB_HOST_MEM {
+	struct {
+		u32 fwCounter;	/* Firmware heartbeat counter */
+		struct {
+			u32 debugmode:1; /* 1=Firmware is in debug mode.
+					    Heartbeat will not be updated. */
+			u32 reserved:31;
+		} debug;
+		u32 reserved_fw[6];
+		u32 driverCounter; /* Driver heartbeat counter.  0x20 */
+		u32 reserved_driver[7];
+	} HB;
+	u8 pad[0x400-0x40];
+};
 
 enum {
 	MEGASAS_HBA_OPERATIONAL			= 0,
@@ -1609,6 +1704,7 @@
 	MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS	= 2,
 	MEGASAS_ADPRESET_SM_OPERATIONAL		= 3,
 	MEGASAS_HW_CRITICAL_ERROR		= 4,
+	MEGASAS_ADPRESET_SM_POLLING		= 5,
 	MEGASAS_ADPRESET_INPROG_SIGN		= 0xDEADDEAD,
 };
 
@@ -1728,7 +1824,7 @@
 		    struct IO_REQUEST_INFO *io_info,
 		    struct RAID_CONTEXT *pRAID_Context,
 		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
 struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
 u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
 u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
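
MR_CTRL_HB_HOST_MEM above carries the SR-IOV heartbeat contract used by the megaraid_sas_base.c changes further down: firmware advances fwCounter in shared host memory, and on every timer tick the VF driver checks whether the counter moved since the last tick, echoes it into driverCounter, and treats a stalled counter as a dead controller in need of recovery. A minimal model of one tick (standalone illustration, not driver code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct hb { uint32_t fwCounter; uint32_t driverCounter; };

    static bool heartbeat_tick(struct hb *hb)
    {
        if (hb->fwCounter != hb->driverCounter) {
            hb->driverCounter = hb->fwCounter; /* ack firmware progress */
            return true;                       /* re-arm the timer */
        }
        return false;                          /* firmware stalled: recover */
    }

    int main(void)
    {
        struct hb hb = { .fwCounter = 1, .driverCounter = 0 };

        printf("alive: %d\n", heartbeat_tick(&hb)); /* 1: counter moved */
        printf("alive: %d\n", heartbeat_tick(&hb)); /* 0: firmware stalled */
        return 0;
    }
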
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3b7ad10..d84d02c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : 06.700.06.00-rc1
+ *  Version : 06.803.01.00-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -75,6 +75,10 @@
 module_param(msix_vectors, int, S_IRUGO);
 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
 
+static int allow_vf_ioctls;
+module_param(allow_vf_ioctls, int, S_IRUGO);
+MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
+
 static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
 module_param(throttlequeuedepth, int, S_IRUGO);
 MODULE_PARM_DESC(throttlequeuedepth,
@@ -122,6 +126,8 @@
 	/* xscale IOP */
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
 	/* Fusion */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
+	/* Plasma */
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
 	/* Invader */
 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
@@ -132,7 +138,7 @@
 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
 
 static int megasas_mgmt_majorno;
-static struct megasas_mgmt_info megasas_mgmt_info;
+struct megasas_mgmt_info megasas_mgmt_info;
 static struct fasync_struct *megasas_async_queue;
 static DEFINE_MUTEX(megasas_async_queue_mutex);
 
@@ -171,10 +177,15 @@
 int
 megasas_sync_map_info(struct megasas_instance *instance);
 int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+	int seconds);
 void megasas_reset_reply_desc(struct megasas_instance *instance);
-int megasas_reset_fusion(struct Scsi_Host *shost);
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
 void megasas_fusion_ocr_wq(struct work_struct *work);
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+					 int initial);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+			     struct scsi_cmnd *scmd);
 
 void
 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -224,6 +235,7 @@
 	cmd->scmd = NULL;
 	cmd->frame_count = 0;
 	if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+	    (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
 	    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
 	    (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
 	    (reset_devices))
@@ -877,6 +889,7 @@
 int
 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
+	int seconds;
 
 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
 
@@ -891,13 +904,18 @@
 	/*
 	 * Wait for cmd_status to change
 	 */
-	return wait_and_poll(instance, cmd);
+	if (instance->requestorId)
+		seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
+	else
+		seconds = MFI_POLL_TIMEOUT_SECS;
+	return wait_and_poll(instance, cmd, seconds);
 }
 
 /**
  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
  * @instance:			Adapter soft state
  * @cmd:			Command to be issued
+ * @timeout:			Timeout in seconds
  *
  * This function waits on an event for the command to be returned from ISR.
  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
@@ -905,13 +923,20 @@
  */
 static int
 megasas_issue_blocked_cmd(struct megasas_instance *instance,
-			  struct megasas_cmd *cmd)
+			  struct megasas_cmd *cmd, int timeout)
 {
+	int ret = 0;
 	cmd->cmd_status = ENODATA;
 
 	instance->instancet->issue_dcmd(instance, cmd);
-
-	wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
+	if (timeout) {
+		ret = wait_event_timeout(instance->int_cmd_wait_q,
+				cmd->cmd_status != ENODATA, timeout * HZ);
+		if (!ret)
+			return 1;
+	} else
+		wait_event(instance->int_cmd_wait_q,
+				cmd->cmd_status != ENODATA);
 
 	return 0;
 }
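
megasas_issue_blocked_cmd() now takes a timeout so that the VF, flush, and shutdown paths cannot sleep forever on a wedged controller: a nonzero timeout goes through wait_event_timeout() and the function returns 1 on expiry, while zero preserves the old unbounded wait_event(). A userspace sketch of the same two-mode wait built on a condition variable (illustrative only; -1 plays the role of ENODATA):

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int cmd_status = -1;  /* -1: not yet completed */

    static int wait_for_cmd(int timeout)
    {
        int rc = 0;

        pthread_mutex_lock(&lock);
        if (timeout) {
            struct timespec ts;

            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += timeout;
            while (cmd_status == -1 && rc == 0)
                rc = pthread_cond_timedwait(&cond, &lock, &ts);
        } else {
            while (cmd_status == -1)
                pthread_cond_wait(&cond, &lock);
        }
        pthread_mutex_unlock(&lock);
        return rc ? 1 : 0;  /* 1 on timeout, as in the driver */
    }

    int main(void)
    {
        /* nobody completes the command, so a 1s bounded wait returns 1 */
        printf("rc=%d\n", wait_for_cmd(1));
        return 0;
    }
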
@@ -920,18 +945,20 @@
  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
  * @instance:				Adapter soft state
  * @cmd_to_abort:			Previously issued cmd to be aborted
+ * @timeout:				Timeout in seconds
  *
 * MFI firmware can abort previously issued AEN command (automatic event
  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
  * cmd and waits for return status.
  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
  */
 static int
 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
-				struct megasas_cmd *cmd_to_abort)
+				struct megasas_cmd *cmd_to_abort, int timeout)
 {
 	struct megasas_cmd *cmd;
 	struct megasas_abort_frame *abort_fr;
+	int ret = 0;
 
 	cmd = megasas_get_cmd(instance);
 
@@ -957,10 +984,18 @@
 
 	instance->instancet->issue_dcmd(instance, cmd);
 
-	/*
-	 * Wait for this cmd to complete
-	 */
-	wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
+	if (timeout) {
+		ret = wait_event_timeout(instance->abort_cmd_wait_q,
+				cmd->cmd_status != ENODATA, timeout * HZ);
+		if (!ret) {
+			dev_err(&instance->pdev->dev, "Command timed out "
+				"from %s\n", __func__);
+			return 1;
+		}
+	} else
+		wait_event(instance->abort_cmd_wait_q,
+				cmd->cmd_status != ENODATA);
+
 	cmd->sync_cmd = 0;
 
 	megasas_return_cmd(instance, cmd);
@@ -1514,9 +1549,23 @@
 
 	spin_lock_irqsave(&instance->hba_lock, flags);
 
+	/* Check for an mpio path and adjust behavior */
+	if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+		if (megasas_check_mpio_paths(instance, scmd) ==
+		    (DID_RESET << 16)) {
+			spin_unlock_irqrestore(&instance->hba_lock, flags);
+			return SCSI_MLQUEUE_HOST_BUSY;
+		} else {
+			spin_unlock_irqrestore(&instance->hba_lock, flags);
+			scmd->result = DID_NO_CONNECT << 16;
+			done(scmd);
+			return 0;
+		}
+	}
+
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
-		scmd->result = DID_ERROR << 16;
+		scmd->result = DID_NO_CONNECT << 16;
 		done(scmd);
 		return 0;
 	}
@@ -1641,9 +1690,14 @@
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+		/* Flush */
+		readl(&instance->reg_set->doorbell);
+		if (instance->mpio && instance->requestorId)
+			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
 	} else {
 		writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
 	}
@@ -1730,6 +1784,25 @@
 	megasas_check_and_restore_queue_depth(instance);
 }
 
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance:		Adapter soft state
+ * @timer:		timer object to be initialized
+ * @fn:			timer function
+ * @interval:		time interval between timer function calls
+ *
+ */
+void megasas_start_timer(struct megasas_instance *instance,
+			struct timer_list *timer,
+			void *fn, unsigned long interval)
+{
+	init_timer(timer);
+	timer->expires = jiffies + interval;
+	timer->data = (unsigned long)instance;
+	timer->function = fn;
+	add_timer(timer);
+}
+
 static void
 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
 
@@ -1752,6 +1825,295 @@
 	process_fw_state_change_wq(&instance->work_init);
 }
 
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+	int initial)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
+	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+	dma_addr_t new_affiliation_h;
+	dma_addr_t new_affiliation_111_h;
+	int ld, retval = 0;
+	u8 thisVf;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_"
+		       "affiliation: Failed to get cmd for scsi%d.\n",
+			instance->host->host_no);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	if (!instance->vf_affiliation && !instance->vf_affiliation_111) {
+		printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+		       "affiliation for scsi%d.\n", instance->host->host_no);
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	if (initial)
+		if (instance->PlasmaFW111)
+			memset(instance->vf_affiliation_111, 0,
+			       sizeof(struct MR_LD_VF_AFFILIATION_111));
+		else
+			memset(instance->vf_affiliation, 0,
+			       (MAX_LOGICAL_DRIVES + 1) *
+			       sizeof(struct MR_LD_VF_AFFILIATION));
+	else {
+		if (instance->PlasmaFW111)
+			new_affiliation_111 =
+				pci_alloc_consistent(instance->pdev,
+						     sizeof(struct MR_LD_VF_AFFILIATION_111),
+						     &new_affiliation_111_h);
+		else
+			new_affiliation =
+				pci_alloc_consistent(instance->pdev,
+						     (MAX_LOGICAL_DRIVES + 1) *
+						     sizeof(struct MR_LD_VF_AFFILIATION),
+						     &new_affiliation_h);
+		if (!new_affiliation && !new_affiliation_111) {
+			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+			       "memory for new affiliation for scsi%d.\n",
+				instance->host->host_no);
+			megasas_return_cmd(instance, cmd);
+			return -ENOMEM;
+		}
+		if (instance->PlasmaFW111)
+			memset(new_affiliation_111, 0,
+			       sizeof(struct MR_LD_VF_AFFILIATION_111));
+		else
+			memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+			       sizeof(struct MR_LD_VF_AFFILIATION));
+	}
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_BOTH;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	if (instance->PlasmaFW111) {
+		dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+		dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
+	} else {
+		dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+			sizeof(struct MR_LD_VF_AFFILIATION);
+		dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+	}
+
+	if (initial) {
+		if (instance->PlasmaFW111)
+			dcmd->sgl.sge32[0].phys_addr =
+			  instance->vf_affiliation_111_h;
+		else
+			dcmd->sgl.sge32[0].phys_addr =
+			  instance->vf_affiliation_h;
+	} else {
+		if (instance->PlasmaFW111)
+			dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+		else
+			dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+	}
+	if (instance->PlasmaFW111)
+		dcmd->sgl.sge32[0].length =
+		  sizeof(struct MR_LD_VF_AFFILIATION_111);
+	else
+		dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+			sizeof(struct MR_LD_VF_AFFILIATION);
+
+	printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+	       "scsi%d\n", instance->host->host_no);
+
+	megasas_issue_blocked_cmd(instance, cmd, 0);
+
+	if (dcmd->cmd_status) {
+		printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+		       " failed with status 0x%x for scsi%d.\n",
+		       dcmd->cmd_status, instance->host->host_no);
+		retval = 1; /* Do a scan if we couldn't get affiliation */
+		goto out;
+	}
+
+	if (!initial) {
+		if (instance->PlasmaFW111) {
+			if (!new_affiliation_111->vdCount) {
+				printk(KERN_WARNING "megasas: SR-IOV: Got new "
+				       "LD/VF affiliation for passive path "
+				       "for scsi%d.\n",
+					instance->host->host_no);
+				retval = 1;
+				goto out;
+			}
+			thisVf = new_affiliation_111->thisVf;
+			for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+				if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
+					printk(KERN_WARNING "megasas: SR-IOV: "
+					       "Got new LD/VF affiliation "
+					       "for scsi%d.\n",
+						instance->host->host_no);
+					memcpy(instance->vf_affiliation_111,
+					       new_affiliation_111,
+					       sizeof(struct MR_LD_VF_AFFILIATION_111));
+					retval = 1;
+					goto out;
+				}
+		} else {
+			if (!new_affiliation->ldCount) {
+				printk(KERN_WARNING "megasas: SR-IOV: Got new "
+				       "LD/VF affiliation for passive "
+				       "path for scsi%d.\n",
+				       instance->host->host_no);
+				retval = 1;
+				goto out;
+			}
+			newmap = new_affiliation->map;
+			savedmap = instance->vf_affiliation->map;
+			thisVf = new_affiliation->thisVf;
+			for (ld = 0 ; ld < new_affiliation->ldCount; ld++) {
+				if (savedmap->policy[thisVf] !=
+				    newmap->policy[thisVf]) {
+					printk(KERN_WARNING "megasas: SR-IOV: "
+					       "Got new LD/VF affiliation "
+					       "for scsi%d.\n",
+						instance->host->host_no);
+					memcpy(instance->vf_affiliation,
+					       new_affiliation,
+					       new_affiliation->size);
+					retval = 1;
+					goto out;
+				}
+				savedmap = (struct MR_LD_VF_MAP *)
+					((unsigned char *)savedmap +
+					 savedmap->size);
+				newmap = (struct MR_LD_VF_MAP *)
+					((unsigned char *)newmap +
+					 newmap->size);
+			}
+		}
+	}
+out:
+	if (new_affiliation_111)
+		pci_free_consistent(instance->pdev,
+				    sizeof(struct MR_LD_VF_AFFILIATION_111),
+				    new_affiliation_111,
+				    new_affiliation_111_h);
+	if (new_affiliation)
+		pci_free_consistent(instance->pdev,
+				    (MAX_LOGICAL_DRIVES + 1) *
+				    sizeof(struct MR_LD_VF_AFFILIATION),
+				    new_affiliation, new_affiliation_h);
+	megasas_return_cmd(instance, cmd);
+
+	return retval;
+}
+
+/* This function will tell FW to start the SR-IOV heartbeat */
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+					 int initial)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	int retval = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
+		       "Failed to get cmd for scsi%d.\n",
+		       instance->host->host_no);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	if (initial) {
+		instance->hb_host_mem =
+			pci_alloc_consistent(instance->pdev,
+					     sizeof(struct MR_CTRL_HB_HOST_MEM),
+					     &instance->hb_host_mem_h);
+		if (!instance->hb_host_mem) {
+			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
+			       " memory for heartbeat host memory for "
+			       "scsi%d.\n", instance->host->host_no);
+			retval = -ENOMEM;
+			goto out;
+		}
+		memset(instance->hb_host_mem, 0,
+		       sizeof(struct MR_CTRL_HB_HOST_MEM));
+	}
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM);
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_BOTH;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM);
+	dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC;
+	dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h;
+	dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM);
+
+	printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+	       instance->host->host_no);
+
+	if (!megasas_issue_polled(instance, cmd)) {
+		retval = 0;
+	} else {
+		printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+		       "_MEM_ALLOC DCMD timed out for scsi%d\n",
+		       instance->host->host_no);
+		retval = 1;
+		goto out;
+	}
+
+	if (dcmd->cmd_status) {
+		printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+		       "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n",
+		       dcmd->cmd_status,
+		       instance->host->host_no);
+		retval = 1;
+		goto out;
+	}
+
+out:
+	megasas_return_cmd(instance, cmd);
+
+	return retval;
+}
+
+/* Handler for SR-IOV heartbeat */
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+{
+	struct megasas_instance *instance =
+		(struct megasas_instance *)instance_addr;
+
+	if (instance->hb_host_mem->HB.fwCounter !=
+	    instance->hb_host_mem->HB.driverCounter) {
+		instance->hb_host_mem->HB.driverCounter =
+			instance->hb_host_mem->HB.fwCounter;
+		mod_timer(&instance->sriov_heartbeat_timer,
+			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+	} else {
+		printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+		       "completed for scsi%d\n", instance->host->host_no);
+		schedule_work(&instance->work_init);
+	}
+}
+
 /**
  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
  * @instance:				Adapter soft state
@@ -2014,9 +2376,10 @@
 	 * First wait for all commands to complete
 	 */
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
-		ret = megasas_reset_fusion(scmd->device->host);
+		ret = megasas_reset_fusion(scmd->device->host, 1);
 	else
 		ret = megasas_generic_reset(scmd);
 
@@ -2731,6 +3094,8 @@
 				(instance->pdev->device ==
 				PCI_DEVICE_ID_LSI_FUSION) ||
 				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_PLASMA) ||
+				(instance->pdev->device ==
 				PCI_DEVICE_ID_LSI_INVADER) ||
 				(instance->pdev->device ==
 				PCI_DEVICE_ID_LSI_FURY)) {
@@ -2755,6 +3120,8 @@
 			    (instance->pdev->device ==
 			     PCI_DEVICE_ID_LSI_FUSION) ||
 			    (instance->pdev->device ==
+			     PCI_DEVICE_ID_LSI_PLASMA) ||
+			    (instance->pdev->device ==
 			     PCI_DEVICE_ID_LSI_INVADER) ||
 			    (instance->pdev->device ==
 			     PCI_DEVICE_ID_LSI_FURY)) {
@@ -2780,6 +3147,8 @@
 				(instance->pdev->device
 					== PCI_DEVICE_ID_LSI_FUSION) ||
 				(instance->pdev->device
+					== PCI_DEVICE_ID_LSI_PLASMA) ||
+				(instance->pdev->device
 					== PCI_DEVICE_ID_LSI_INVADER) ||
 				(instance->pdev->device
 					== PCI_DEVICE_ID_LSI_FURY)) {
@@ -2788,6 +3157,8 @@
 				if ((instance->pdev->device ==
 					PCI_DEVICE_ID_LSI_FUSION) ||
 					(instance->pdev->device ==
+					PCI_DEVICE_ID_LSI_PLASMA) ||
+					(instance->pdev->device ==
 					PCI_DEVICE_ID_LSI_INVADER) ||
 					(instance->pdev->device ==
 					PCI_DEVICE_ID_LSI_FURY)) {
@@ -3014,6 +3385,7 @@
 		cmd->frame->io.context = cpu_to_le32(cmd->index);
 		cmd->frame->io.pad_0 = 0;
 		if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+		    (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
 		    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
 			(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
 		    (reset_devices))
@@ -3620,6 +3992,7 @@
 	struct megasas_ctrl_info *ctrl_info;
 	unsigned long bar_list;
 	int i, loop, fw_msix_count = 0;
+	struct IOV_111 *iovPtr;
 
 	/* Find first memory bar */
 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
@@ -3642,6 +4015,7 @@
 
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
+	case PCI_DEVICE_ID_LSI_PLASMA:
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 		instance->instancet = &megasas_instance_template_fusion;
@@ -3696,7 +4070,8 @@
 		scratch_pad_2 = readl
 			(&instance->reg_set->outbound_scratch_pad_2);
 		/* Check max MSI-X vectors */
-		if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) {
+		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+		    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) {
 			instance->msix_vectors = (scratch_pad_2
 				& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
 			fw_msix_count = instance->msix_vectors;
@@ -3763,7 +4138,10 @@
 
 	memset(instance->pd_list, 0 ,
 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
-	megasas_get_pd_list(instance);
+	if (megasas_get_pd_list(instance) < 0) {
+		printk(KERN_ERR "megasas: failed to get PD list\n");
+		goto fail_init_adapter;
+	}
 
 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
 	if (megasas_ld_list_query(instance,
@@ -3807,6 +4185,7 @@
 		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
 		/* adapterOperations2 are converted into CPU arch*/
 		le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+		instance->mpio = ctrl_info->adapterOperations2.mpio;
 		instance->UnevenSpanSupport =
 			ctrl_info->adapterOperations2.supportUnevenSpans;
 		if (instance->UnevenSpanSupport) {
@@ -3819,6 +4198,20 @@
 				fusion->fast_path_io = 0;
 
 		}
+		if (ctrl_info->host_interface.SRIOV) {
+			if (!ctrl_info->adapterOperations2.activePassive)
+				instance->PlasmaFW111 = 1;
+
+			if (!instance->PlasmaFW111)
+				instance->requestorId =
+					ctrl_info->iov.requestorId;
+			else {
+				iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+				instance->requestorId = iovPtr->requestorId;
+			}
+			printk(KERN_WARNING "megaraid_sas: I am VF "
+			       "requestorId %d\n", instance->requestorId);
+		}
 	}
 	instance->max_sectors_per_req = instance->max_num_sge *
 						PAGE_SIZE / 512;
@@ -3851,6 +4244,17 @@
 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
 		(unsigned long)instance);
 
+	/* Launch SR-IOV heartbeat timer */
+	if (instance->requestorId) {
+		if (!megasas_sriov_start_heartbeat(instance, 1))
+			megasas_start_timer(instance,
+					    &instance->sriov_heartbeat_timer,
+					    megasas_sriov_heartbeat_handler,
+					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+		else
+			instance->skip_heartbeat_timer_del = 1;
+	}
+
 	return 0;
 
 fail_init_adapter:
@@ -3933,16 +4337,19 @@
 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
 
-	megasas_issue_blocked_cmd(instance, cmd);
-
-	/*
-	 * Copy the data back into callers buffer
-	 */
-	eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
-	eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
-	eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
-	eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
-	eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+	if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out "
+			"from %s\n", __func__);
+	else {
+		/*
+		 * Copy the data back into callers buffer
+		 */
+		eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+		eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+		eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+		eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+		eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+	}
 
 	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
 			    el_info, el_info_h);
@@ -4018,7 +4425,7 @@
 			instance->aen_cmd->abort_aen = 1;
 			ret_val = megasas_issue_blocked_abort_cmd(instance,
 								  instance->
-								  aen_cmd);
+								  aen_cmd, 30);
 
 			if (ret_val) {
 				printk(KERN_DEBUG "megasas: Failed to abort "
@@ -4160,6 +4567,7 @@
 
 	/* Fusion only supports host reset */
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 		host->hostt->eh_device_reset_handler = NULL;
@@ -4197,6 +4605,19 @@
 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
 			goto fail_set_dma_mask;
 	}
+	/*
+	 * Ensure that all data structures are allocated in 32-bit
+	 * memory.
+	 */
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+		/* Try 32bit DMA mask and 32 bit Consistent dma mask */
+		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+			&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+			dev_info(&pdev->dev, "set 32bit DMA mask "
+				"and 32 bit consistent mask\n");
+		else
+			goto fail_set_dma_mask;
+	}
 
 	return 0;
 
@@ -4212,7 +4633,7 @@
 static int megasas_probe_one(struct pci_dev *pdev,
 			     const struct pci_device_id *id)
 {
-	int rval, pos, i, j;
+	int rval, pos, i, j, cpu;
 	struct Scsi_Host *host;
 	struct megasas_instance *instance;
 	u16 control = 0;
@@ -4272,6 +4693,7 @@
 
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
+	case PCI_DEVICE_ID_LSI_PLASMA:
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 	{
@@ -4368,6 +4790,7 @@
 	instance->UnevenSpanSupport = 0;
 
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
@@ -4380,12 +4803,33 @@
 	if (megasas_init_fw(instance))
 		goto fail_init_mfi;
 
+	if (instance->requestorId) {
+		if (instance->PlasmaFW111) {
+			instance->vf_affiliation_111 =
+				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
+						     &instance->vf_affiliation_111_h);
+			if (!instance->vf_affiliation_111)
+				printk(KERN_WARNING "megasas: Can't allocate "
+				       "memory for VF affiliation buffer\n");
+		} else {
+			instance->vf_affiliation =
+				pci_alloc_consistent(pdev,
+						     (MAX_LOGICAL_DRIVES + 1) *
+						     sizeof(struct MR_LD_VF_AFFILIATION),
+						     &instance->vf_affiliation_h);
+			if (!instance->vf_affiliation)
+				printk(KERN_WARNING "megasas: Can't allocate "
+				       "memory for VF affiliation buffer\n");
+		}
+	}
+
 retry_irq_register:
 	/*
 	 * Register IRQ
 	 */
 	if (instance->msix_vectors) {
-		for (i = 0 ; i < instance->msix_vectors; i++) {
+		cpu = cpumask_first(cpu_online_mask);
+		for (i = 0; i < instance->msix_vectors; i++) {
 			instance->irq_context[i].instance = instance;
 			instance->irq_context[i].MSIxIndex = i;
 			if (request_irq(instance->msixentry[i].vector,
@@ -4394,14 +4838,22 @@
 					&instance->irq_context[i])) {
 				printk(KERN_DEBUG "megasas: Failed to "
 				       "register IRQ for vector %d.\n", i);
-				for (j = 0 ; j < i ; j++)
+				for (j = 0; j < i; j++) {
+					irq_set_affinity_hint(
+						instance->msixentry[j].vector, NULL);
 					free_irq(
 						instance->msixentry[j].vector,
 						&instance->irq_context[j]);
+				}
 				/* Retry irq register for IO_APIC */
 				instance->msix_vectors = 0;
 				goto retry_irq_register;
 			}
+			if (irq_set_affinity_hint(instance->msixentry[i].vector,
+				get_cpu_mask(cpu)))
+				dev_err(&instance->pdev->dev, "Error setting "
+					"affinity hint for cpu %d\n", cpu);
+			cpu = cpumask_next(cpu, cpu_online_mask);
 		}
 	} else {
 		instance->irq_context[0].instance = instance;
@@ -4455,13 +4907,17 @@
 
 	instance->instancet->disable_intr(instance);
 	if (instance->msix_vectors)
-		for (i = 0 ; i < instance->msix_vectors; i++)
+		for (i = 0; i < instance->msix_vectors; i++) {
+			irq_set_affinity_hint(
+				instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
+		}
 	else
 		free_irq(instance->pdev->irq, &instance->irq_context[0]);
 fail_irq:
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		megasas_release_fusion(instance);
@@ -4522,7 +4978,9 @@
 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
 
-	megasas_issue_blocked_cmd(instance, cmd);
+	if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out"
+			" from %s\n", __func__);
 
 	megasas_return_cmd(instance, cmd);
 
@@ -4549,10 +5007,11 @@
 		return;
 
 	if (instance->aen_cmd)
-		megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
+		megasas_issue_blocked_abort_cmd(instance,
+			instance->aen_cmd, 30);
 	if (instance->map_update_cmd)
 		megasas_issue_blocked_abort_cmd(instance,
-						instance->map_update_cmd);
+			instance->map_update_cmd, 30);
 	dcmd = &cmd->frame->dcmd;
 
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4566,7 +5025,9 @@
 	dcmd->data_xfer_len = 0;
 	dcmd->opcode = cpu_to_le32(opcode);
 
-	megasas_issue_blocked_cmd(instance, cmd);
+	if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out "
+			"from %s\n", __func__);
 
 	megasas_return_cmd(instance, cmd);
 
@@ -4590,6 +5051,10 @@
 	host = instance->host;
 	instance->unload = 1;
 
+	/* Shutdown SR-IOV heartbeat timer */
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
+
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
 
@@ -4606,9 +5071,12 @@
 	instance->instancet->disable_intr(instance);
 
 	if (instance->msix_vectors)
-		for (i = 0 ; i < instance->msix_vectors; i++)
+		for (i = 0; i < instance->msix_vectors; i++) {
+			irq_set_affinity_hint(
+				instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
+		}
 	else
 		free_irq(instance->pdev->irq, &instance->irq_context[0]);
 	if (instance->msix_vectors)
@@ -4629,7 +5097,7 @@
 static int
 megasas_resume(struct pci_dev *pdev)
 {
-	int rval, i, j;
+	int rval, i, j, cpu;
 	struct Scsi_Host *host;
 	struct megasas_instance *instance;
 
@@ -4673,6 +5141,7 @@
 
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
+	case PCI_DEVICE_ID_LSI_PLASMA:
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 	{
@@ -4701,6 +5170,7 @@
 	 * Register IRQ
 	 */
 	if (instance->msix_vectors) {
+		cpu = cpumask_first(cpu_online_mask);
 		for (i = 0 ; i < instance->msix_vectors; i++) {
 			instance->irq_context[i].instance = instance;
 			instance->irq_context[i].MSIxIndex = i;
@@ -4710,12 +5180,21 @@
 					&instance->irq_context[i])) {
 				printk(KERN_DEBUG "megasas: Failed to "
 				       "register IRQ for vector %d.\n", i);
-				for (j = 0 ; j < i ; j++)
+				for (j = 0; j < i; j++) {
+					irq_set_affinity_hint(
+						instance->msixentry[j].vector, NULL);
 					free_irq(
 						instance->msixentry[j].vector,
 						&instance->irq_context[j]);
+				}
 				goto fail_irq;
 			}
+
+			if (irq_set_affinity_hint(instance->msixentry[i].vector,
+				get_cpu_mask(cpu)))
+				dev_err(&instance->pdev->dev, "Error setting "
+					"affinity hint for cpu %d\n", cpu);
+			cpu = cpumask_next(cpu, cpu_online_mask);
 		}
 	} else {
 		instance->irq_context[0].instance = instance;
@@ -4728,6 +5207,17 @@
 		}
 	}
 
+	/* Re-launch SR-IOV heartbeat timer */
+	if (instance->requestorId) {
+		if (!megasas_sriov_start_heartbeat(instance, 0))
+			megasas_start_timer(instance,
+					    &instance->sriov_heartbeat_timer,
+					    megasas_sriov_heartbeat_handler,
+					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+		else
+			instance->skip_heartbeat_timer_del = 1;
+	}
+
 	instance->instancet->enable_intr(instance);
 	instance->unload = 0;
 
@@ -4782,6 +5272,10 @@
 	host = instance->host;
 	fusion = instance->ctrl_context;
 
+	/* Shutdown SR-IOV heartbeat timer */
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
+
 	scsi_remove_host(instance->host);
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4793,6 +5287,9 @@
 		instance->ev = NULL;
 	}
 
+	/* cancel all wait events */
+	wake_up_all(&instance->int_cmd_wait_q);
+
 	tasklet_kill(&instance->isr_tasklet);
 
 	/*
@@ -4811,9 +5308,12 @@
 	instance->instancet->disable_intr(instance);
 
 	if (instance->msix_vectors)
-		for (i = 0 ; i < instance->msix_vectors; i++)
+		for (i = 0; i < instance->msix_vectors; i++) {
+			irq_set_affinity_hint(
+				instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
+		}
 	else
 		free_irq(instance->pdev->irq, &instance->irq_context[0]);
 	if (instance->msix_vectors)
@@ -4821,6 +5321,7 @@
 
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
+	case PCI_DEVICE_ID_LSI_PLASMA:
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 		megasas_release_fusion(instance);
@@ -4847,6 +5348,24 @@
 	if (instance->evt_detail)
 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
 				instance->evt_detail, instance->evt_detail_h);
+
+	if (instance->vf_affiliation)
+		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+				    sizeof(struct MR_LD_VF_AFFILIATION),
+				    instance->vf_affiliation,
+				    instance->vf_affiliation_h);
+
+	if (instance->vf_affiliation_111)
+		pci_free_consistent(pdev,
+				    sizeof(struct MR_LD_VF_AFFILIATION_111),
+				    instance->vf_affiliation_111,
+				    instance->vf_affiliation_111_h);
+
+	if (instance->hb_host_mem)
+		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+				    instance->hb_host_mem,
+				    instance->hb_host_mem_h);
+
 	scsi_host_put(host);
 
 	pci_disable_device(pdev);
@@ -4868,9 +5387,12 @@
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
 	instance->instancet->disable_intr(instance);
 	if (instance->msix_vectors)
-		for (i = 0 ; i < instance->msix_vectors; i++)
+		for (i = 0; i < instance->msix_vectors; i++) {
+			irq_set_affinity_hint(
+				instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
+		}
 	else
 		free_irq(instance->pdev->irq, &instance->irq_context[0]);
 	if (instance->msix_vectors)
@@ -5045,7 +5567,7 @@
 	 * cmd to the SCSI mid-layer
 	 */
 	cmd->sync_cmd = 1;
-	megasas_issue_blocked_cmd(instance, cmd);
+	megasas_issue_blocked_cmd(instance, cmd, 0);
 	cmd->sync_cmd = 0;
 
 	/*
@@ -5132,6 +5654,16 @@
 		goto out_kfree_ioc;
 	}
 
+	/* Adjust ioctl wait time for VF mode */
+	if (instance->requestorId)
+		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+	/* Block ioctls in VF mode */
+	if (instance->requestorId && !allow_vf_ioctls) {
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
 		printk(KERN_ERR "Controller in crit error\n");
 		error = -ENODEV;
@@ -5441,7 +5973,7 @@
 	u16     pd_index = 0;
 	u16	ld_index = 0;
 	int     i, j, doscan = 0;
-	u32 seq_num;
+	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
 	int error;
 
 	if (!instance) {
@@ -5449,6 +5981,23 @@
 		kfree(ev);
 		return;
 	}
+
+	/* Adjust event workqueue thread wait time for VF mode */
+	if (instance->requestorId)
+		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+	/* Don't run the event workqueue thread if OCR is running */
+	for (i = 0; i < wait_time; i++) {
+		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
+			break;
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: %s waiting for "
+			       "controller reset to finish for scsi%d\n",
+			       __func__, instance->host->host_no);
+		}
+		msleep(1000);
+	}
+
 	instance->ev = NULL;
 	host = instance->host;
 	if (instance->evt_detail) {
@@ -5515,65 +6064,64 @@
 		case MR_EVT_LD_OFFLINE:
 		case MR_EVT_CFG_CLEARED:
 		case MR_EVT_LD_DELETED:
-			if (megasas_ld_list_query(instance,
-					MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-				megasas_get_ld_list(instance);
-			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-				for (j = 0;
-				j < MEGASAS_MAX_DEV_PER_CHANNEL;
-				j++) {
+			if (!instance->requestorId ||
+			    (instance->requestorId &&
+			     megasas_get_ld_vf_affiliation(instance, 0))) {
+				if (megasas_ld_list_query(instance,
+							  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+					megasas_get_ld_list(instance);
+				for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+					for (j = 0;
+					     j < MEGASAS_MAX_DEV_PER_CHANNEL;
+					     j++) {
 
-				ld_index =
-				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+						ld_index =
+							(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
-				sdev1 = scsi_device_lookup(host,
-					MEGASAS_MAX_PD_CHANNELS + i,
-					j,
-					0);
+						sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
 
-				if (instance->ld_ids[ld_index] != 0xff) {
-					if (sdev1) {
-						scsi_device_put(sdev1);
-					}
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
-					}
-				}
-				}
-			}
-			doscan = 0;
-			break;
-		case MR_EVT_LD_CREATED:
-			if (megasas_ld_list_query(instance,
-					MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-				megasas_get_ld_list(instance);
-			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-				for (j = 0;
-					j < MEGASAS_MAX_DEV_PER_CHANNEL;
-					j++) {
-					ld_index =
-					(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-					sdev1 = scsi_device_lookup(host,
-						MEGASAS_MAX_PD_CHANNELS + i,
-						j, 0);
-
-					if (instance->ld_ids[ld_index] !=
-								0xff) {
-						if (!sdev1) {
-							scsi_add_device(host,
-						MEGASAS_MAX_PD_CHANNELS + i,
-								j, 0);
+						if (instance->ld_ids[ld_index]
+						    != 0xff) {
+							if (sdev1)
+								scsi_device_put(sdev1);
+						} else {
+							if (sdev1) {
+								scsi_remove_device(sdev1);
+								scsi_device_put(sdev1);
+							}
 						}
 					}
-					if (sdev1) {
-						scsi_device_put(sdev1);
+				}
+				doscan = 0;
+			}
+			break;
+		case MR_EVT_LD_CREATED:
+			if (!instance->requestorId ||
+			    (instance->requestorId &&
+			     megasas_get_ld_vf_affiliation(instance, 0))) {
+				if (megasas_ld_list_query(instance,
+							  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+					megasas_get_ld_list(instance);
+				for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+					for (j = 0;
+					     j < MEGASAS_MAX_DEV_PER_CHANNEL;
+					     j++) {
+						ld_index =
+							(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+						sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+						if (instance->ld_ids[ld_index]
+						    != 0xff) {
+							if (!sdev1)
+								scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+						}
+						if (sdev1)
+							scsi_device_put(sdev1);
 					}
 				}
+				doscan = 0;
 			}
-			doscan = 0;
 			break;
 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
 		case MR_EVT_FOREIGN_CFG_IMPORTED:
@@ -5591,50 +6139,55 @@
 	}
 
 	if (doscan) {
-		printk(KERN_INFO "scanning ...\n");
-		megasas_get_pd_list(instance);
-		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
-			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
-				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
-				sdev1 = scsi_device_lookup(host, i, j, 0);
-				if (instance->pd_list[pd_index].driveState ==
-							MR_PD_STATE_SYSTEM) {
-					if (!sdev1) {
-						scsi_add_device(host, i, j, 0);
-					}
-					if (sdev1)
-						scsi_device_put(sdev1);
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
+		printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
+		       instance->host->host_no);
+		if (megasas_get_pd_list(instance) == 0) {
+			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+					pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+					sdev1 = scsi_device_lookup(host, i, j, 0);
+					if (instance->pd_list[pd_index].driveState ==
+					    MR_PD_STATE_SYSTEM) {
+						if (!sdev1) {
+							scsi_add_device(host, i, j, 0);
+						}
+						if (sdev1)
+							scsi_device_put(sdev1);
+					} else {
+						if (sdev1) {
+							scsi_remove_device(sdev1);
+							scsi_device_put(sdev1);
+						}
 					}
 				}
 			}
 		}
 
-		if (megasas_ld_list_query(instance,
-					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-			megasas_get_ld_list(instance);
-		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
-				ld_index =
-				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+		if (!instance->requestorId ||
+		    (instance->requestorId &&
+		     megasas_get_ld_vf_affiliation(instance, 0))) {
+			if (megasas_ld_list_query(instance,
+						  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+				megasas_get_ld_list(instance);
+			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
+				     j++) {
+					ld_index =
+						(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
-				sdev1 = scsi_device_lookup(host,
-					MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-				if (instance->ld_ids[ld_index] != 0xff) {
-					if (!sdev1) {
-						scsi_add_device(host,
-						MEGASAS_MAX_PD_CHANNELS + i,
-								j, 0);
+					sdev1 = scsi_device_lookup(host,
+								   MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+					if (instance->ld_ids[ld_index]
+					    != 0xff) {
+						if (!sdev1)
+							scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+						else
+							scsi_device_put(sdev1);
 					} else {
-						scsi_device_put(sdev1);
-					}
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
+						if (sdev1) {
+							scsi_remove_device(sdev1);
+							scsi_device_put(sdev1);
+						}
 					}
 				}
 			}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e24b6eb..081bfff 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -143,12 +143,12 @@
 
 u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
 {
-	return map->raidMap.ldSpanMap[ld].ldRaid.targetId;
+	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
 }
 
-u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
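+/* ldTgtIdToLd[] stores one-byte LD numbers, so no endian swap is needed. */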
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
 {
-	return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]);
+	return map->raidMap.ldTgtIdToLd[ldTgtId];
 }
 
 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
@@ -975,7 +975,10 @@
 			regSize += stripSize;
 	}
 
-	pRAID_Context->timeoutValue     = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec);
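+	/* Prefer the per-LD fast-path timeout; fall back to the global
+	 * PD I/O timeout when the LD does not provide one. */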
+	pRAID_Context->timeoutValue =
+		cpu_to_le16(raid->fpIoTimeoutForLd ?
+			    raid->fpIoTimeoutForLd :
+			    map->raidMap.fpPdIoTimeoutSec);
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
 		pRAID_Context->regLockFlags = (isRead) ?
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f655592..2260041 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -62,7 +62,8 @@
 		     struct megasas_cmd *cmd, u8 alt_status);
 int megasas_is_ldio(struct scsi_cmnd *cmd);
 int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+	      int seconds);
 
 void
 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
@@ -81,6 +82,13 @@
 void megaraid_sas_kill_hba(struct megasas_instance *instance);
 
 extern u32 megasas_dbg_lvl;
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+				  int initial);
+void megasas_start_timer(struct megasas_instance *instance,
+			struct timer_list *timer,
+			 void *fn, unsigned long interval);
+extern struct megasas_mgmt_info megasas_mgmt_info;
 extern int resetwaittime;
 
 /**
@@ -549,12 +557,13 @@
  * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
  */
 int
-wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd)
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+	int seconds)
 {
 	int i;
 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
 
-	u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
+	u32 msecs = seconds * 1000;
 
 	/*
 	 * Wait for cmd_status to change
@@ -585,7 +594,7 @@
 	struct megasas_cmd *cmd;
 	u8 ret;
 	struct fusion_context *fusion;
-	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
 	int i;
 	struct megasas_header *frame_hdr;
 
@@ -644,18 +653,18 @@
 	/* Convert capability to LE32 */
 	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
 
-	init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle);
+	init_frame->queue_info_new_phys_addr_hi =
+		cpu_to_le32(upper_32_bits(ioc_init_handle));
+	init_frame->queue_info_new_phys_addr_lo =
+		cpu_to_le32(lower_32_bits(ioc_init_handle));
 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
 
-	req_desc =
-	  (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
-
-	req_desc->Words = 0;
-	req_desc->MFAIo.RequestFlags =
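+	/* Build the MFA request descriptor on the stack instead of
+	 * overlaying it on the shared request-frame area. */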
+	req_desc.Words = 0;
+	req_desc.MFAIo.RequestFlags =
 		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
 		 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-	cpu_to_le32s((u32 *)&req_desc->MFAIo);
-	req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr);
+	cpu_to_le32s((u32 *)&req_desc.MFAIo);
+	req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
 
 	/*
 	 * disable the intr before firing the init frame
@@ -669,10 +678,10 @@
 			break;
 	}
 
-	instance->instancet->fire_cmd(instance, req_desc->u.low,
-				      req_desc->u.high, instance->reg_set);
+	instance->instancet->fire_cmd(instance, req_desc.u.low,
+				      req_desc.u.high, instance->reg_set);
 
-	wait_and_poll(instance, cmd);
+	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
 
 	frame_hdr = &cmd->frame->hdr;
 	if (frame_hdr->cmd_status != 0) {
@@ -723,7 +732,7 @@
 
 	if (!fusion) {
 		megasas_return_cmd(instance, cmd);
-		return 1;
+		return -ENXIO;
 	}
 
 	dcmd = &cmd->frame->dcmd;
@@ -1604,13 +1613,15 @@
 			MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 			(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
-			io_request->IoFlags |=
-				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+			io_request->IoFlags |= cpu_to_le16(
+				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
 			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 		cmd->request_desc->SCSIIO.DevHandle =
 			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
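+		/* Spread fast-path completions across MSI-X vectors by
+		 * hashing on the submitting CPU. */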
+		cmd->request_desc->SCSIIO.MSIxIndex =
+			instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
 		/*
 		 * If the command is for the tape device, set the
 		 * FP timeout to the os layer timeout value.
@@ -1770,7 +1781,8 @@
 
 	if (index >= instance->max_fw_cmds) {
 		printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
-		       "descriptor\n", index);
+		       "descriptor for scsi%d\n", index,
+			instance->host->host_no);
 		return NULL;
 	}
 	fusion = instance->ctrl_context;
@@ -2038,8 +2050,11 @@
 		/* If we didn't complete any commands, check for FW fault */
 		fw_state = instance->instancet->read_fw_status_reg(
 			instance->reg_set) & MFI_STATE_MASK;
-		if (fw_state == MFI_STATE_FAULT)
+		if (fw_state == MFI_STATE_FAULT) {
+			printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
+			       "for scsi%d\n", instance->host->host_no);
 			schedule_work(&instance->work_init);
+		}
 	}
 
 	return IRQ_HANDLED;
@@ -2210,9 +2225,10 @@
 }
 
 /* This function waits for outstanding commands on fusion to complete */
-int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
+int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+					int iotimeout, int *convert)
 {
-	int i, outstanding, retval = 0;
+	int i, outstanding, retval = 0, hb_seconds_missed = 0;
 	u32 fw_state;
 
 	for (i = 0; i < resetwaittime; i++) {
@@ -2221,10 +2237,40 @@
 			instance->reg_set) & MFI_STATE_MASK;
 		if (fw_state == MFI_STATE_FAULT) {
 			printk(KERN_WARNING "megasas: Found FW in FAULT state,"
-			       " will reset adapter.\n");
+			       " will reset adapter scsi%d.\n",
+				instance->host->host_no);
 			retval = 1;
 			goto out;
 		}
+		/* If SR-IOV VF mode & heartbeat timeout, don't wait */
+		if (instance->requestorId && !iotimeout) {
+			retval = 1;
+			goto out;
+		}
+
+		/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
+		if (instance->requestorId && iotimeout) {
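+			/* The FW bumps fwCounter every heartbeat; while it
+			 * keeps moving, resync driverCounter and keep
+			 * waiting.  If it stalls for a whole heartbeat
+			 * interval, ask the caller to treat this as a
+			 * heartbeat failure. */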
+			if (instance->hb_host_mem->HB.fwCounter !=
+			    instance->hb_host_mem->HB.driverCounter) {
+				instance->hb_host_mem->HB.driverCounter =
+					instance->hb_host_mem->HB.fwCounter;
+				hb_seconds_missed = 0;
+			} else {
+				hb_seconds_missed++;
+				if (hb_seconds_missed ==
+				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
+					printk(KERN_WARNING "megasas: SR-IOV:"
+					       " Heartbeat never completed "
+					       " while polling during I/O "
+					       " timeout handling for "
+					       "scsi%d.\n",
+					       instance->host->host_no);
+					       *convert = 1;
+					       retval = 1;
+					       goto out;
+				}
+			}
+		}
 
 		outstanding = atomic_read(&instance->fw_outstanding);
 		if (!outstanding)
@@ -2232,7 +2278,8 @@
 
 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
 			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
-			       "commands to complete\n", i, outstanding);
+			       "commands to complete for scsi%d\n", i,
+			       outstanding, instance->host->host_no);
 			megasas_complete_cmd_dpc_fusion(
 				(unsigned long)instance);
 		}
@@ -2241,7 +2288,8 @@
 
 	if (atomic_read(&instance->fw_outstanding)) {
 		printk("megaraid_sas: pending commands remain after waiting, "
-		       "will reset adapter.\n");
+		       "will reset adapter scsi%d.\n",
+		       instance->host->host_no);
 		retval = 1;
 	}
 out:
@@ -2263,10 +2311,34 @@
 		reply_desc->Words = ULLONG_MAX;
 }
 
-/* Core fusion reset function */
-int megasas_reset_fusion(struct Scsi_Host *shost)
+/* Check for a second path that is currently UP */
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+	struct scsi_cmnd *scmd)
 {
-	int retval = SUCCESS, i, j, retry = 0;
+	int i, j, retval = (DID_RESET << 16);
+
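+	/* Default to DID_RESET so the command is retried; if another UP
+	 * path exports the same LD, return DID_NO_CONNECT so multipath
+	 * fails over instead of retrying here. */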
+	if (instance->mpio && instance->requestorId) {
+		for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
+			for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
+				if (megasas_mgmt_info.instance[i] &&
+				    (megasas_mgmt_info.instance[i] != instance) &&
+				    megasas_mgmt_info.instance[i]->mpio &&
+				    megasas_mgmt_info.instance[i]->requestorId
+				    &&
+				    (megasas_mgmt_info.instance[i]->ld_ids[j]
+				     == scmd->device->id)) {
+					    retval = (DID_NO_CONNECT << 16);
+					    goto out;
+				}
+	}
+out:
+	return retval;
+}
+
+/* Core fusion reset function */
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+{
+	int retval = SUCCESS, i, j, retry = 0, convert = 0;
 	struct megasas_instance *instance;
 	struct megasas_cmd_fusion *cmd_fusion;
 	struct fusion_context *fusion;
@@ -2277,28 +2349,39 @@
 	instance = (struct megasas_instance *)shost->hostdata;
 	fusion = instance->ctrl_context;
 
+	mutex_lock(&instance->reset_mutex);
+
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
 		printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
-		       "returning FAILED.\n");
+		       "returning FAILED for scsi%d.\n",
+			instance->host->host_no);
 		return FAILED;
 	}
 
-	mutex_lock(&instance->reset_mutex);
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
 	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
-	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+	instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
 	instance->instancet->disable_intr(instance);
 	msleep(1000);
 
 	/* First try waiting for commands to complete */
-	if (megasas_wait_for_outstanding_fusion(instance)) {
+	if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+						&convert)) {
+		instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
 		printk(KERN_WARNING "megaraid_sas: resetting fusion "
-		       "adapter.\n");
+		       "adapter scsi%d.\n", instance->host->host_no);
+		if (convert)
+			iotimeout = 0;
+
 		/* Now return commands back to the OS */
 		for (i = 0 ; i < instance->max_fw_cmds; i++) {
 			cmd_fusion = fusion->cmd_list[i];
 			if (cmd_fusion->scmd) {
 				scsi_dma_unmap(cmd_fusion->scmd);
-				cmd_fusion->scmd->result = (DID_RESET << 16);
+				cmd_fusion->scmd->result =
+					megasas_check_mpio_paths(instance,
+								 cmd_fusion->scmd);
 				cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
 				megasas_return_cmd_fusion(instance, cmd_fusion);
 				atomic_dec(&instance->fw_outstanding);
@@ -2313,13 +2396,67 @@
 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
 			/* Reset not supported, kill adapter */
 			printk(KERN_WARNING "megaraid_sas: Reset not supported"
-			       ", killing adapter.\n");
+			       ", killing adapter scsi%d.\n",
+				instance->host->host_no);
 			megaraid_sas_kill_hba(instance);
+			instance->skip_heartbeat_timer_del = 1;
 			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
 			retval = FAILED;
 			goto out;
 		}
 
+		/* Let SR-IOV VF & PF sync up if there was a HB failure */
+		if (instance->requestorId && !iotimeout) {
+			msleep(MEGASAS_OCR_SETTLE_TIME_VF);
+			/* Look for a late HB update after VF settle time */
+			if (abs_state == MFI_STATE_OPERATIONAL &&
+			    (instance->hb_host_mem->HB.fwCounter !=
+			     instance->hb_host_mem->HB.driverCounter)) {
+				instance->hb_host_mem->HB.driverCounter =
+					instance->hb_host_mem->HB.fwCounter;
+				printk(KERN_WARNING "megasas: SR-IOV:"
+				       " Late FW heartbeat update for "
+				       "scsi%d.\n",
+				       instance->host->host_no);
+			} else {
+				/* In VF mode, first poll for FW ready */
+				for (i = 0;
+				     i < (MEGASAS_RESET_WAIT_TIME * 1000);
+				     i += 20) {
+					status_reg =
+						instance->instancet->
+						read_fw_status_reg(
+							instance->reg_set);
+					abs_state = status_reg &
+						MFI_STATE_MASK;
+					if (abs_state == MFI_STATE_READY) {
+						printk(KERN_WARNING "megasas"
+						       ": SR-IOV: FW was found"
+						       "to be in ready state "
+						       "for scsi%d.\n",
+						       instance->host->host_no);
+						break;
+					}
+					msleep(20);
+				}
+				if (abs_state != MFI_STATE_READY) {
+					printk(KERN_WARNING "megasas: SR-IOV: "
+					       "FW not in ready state after %d"
+					       " seconds for scsi%d, status_reg = "
+					       "0x%x.\n",
+					       MEGASAS_RESET_WAIT_TIME,
+					       instance->host->host_no,
+					       status_reg);
+					megaraid_sas_kill_hba(instance);
+					instance->skip_heartbeat_timer_del = 1;
+					instance->adprecovery =
+						MEGASAS_HW_CRITICAL_ERROR;
+					retval = FAILED;
+					goto out;
+				}
+			}
+		}
+
 		/* Now try to reset the chip */
 		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
 			writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
@@ -2346,7 +2483,9 @@
 				readl(&instance->reg_set->fusion_host_diag);
 				if (retry++ == 100) {
 					printk(KERN_WARNING "megaraid_sas: "
-					       "Host diag unlock failed!\n");
+					       "Host diag unlock failed! "
+					       "for scsi%d\n",
+						instance->host->host_no);
 					break;
 				}
 			}
@@ -2368,7 +2507,8 @@
 				if (retry++ == 1000) {
 					printk(KERN_WARNING "megaraid_sas: "
 					       "Diag reset adapter never "
-					       "cleared!\n");
+					       "cleared for scsi%d!\n",
+						instance->host->host_no);
 					break;
 				}
 			}
@@ -2390,29 +2530,29 @@
 			if (abs_state <= MFI_STATE_FW_INIT) {
 				printk(KERN_WARNING "megaraid_sas: firmware "
 				       "state < MFI_STATE_FW_INIT, state = "
-				       "0x%x\n", abs_state);
+				       "0x%x for scsi%d\n", abs_state,
+					instance->host->host_no);
 				continue;
 			}
 
 			/* Wait for FW to become ready */
 			if (megasas_transition_to_ready(instance, 1)) {
 				printk(KERN_WARNING "megaraid_sas: Failed to "
-				       "transition controller to ready.\n");
+				       "transition controller to ready "
+				       "for scsi%d.\n",
+				       instance->host->host_no);
 				continue;
 			}
 
 			megasas_reset_reply_desc(instance);
 			if (megasas_ioc_init_fusion(instance)) {
 				printk(KERN_WARNING "megaraid_sas: "
-				       "megasas_ioc_init_fusion() failed!\n");
+				       "megasas_ioc_init_fusion() failed!"
+				       " for scsi%d\n",
+				       instance->host->host_no);
 				continue;
 			}
 
-			clear_bit(MEGASAS_FUSION_IN_RESET,
-				  &instance->reset_flags);
-			instance->instancet->enable_intr(instance);
-			instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
-
 			/* Re-fire management commands */
 			for (j = 0 ; j < instance->max_fw_cmds; j++) {
 				cmd_fusion = fusion->cmd_list[j];
@@ -2422,7 +2562,7 @@
 					instance->
 					cmd_list[cmd_fusion->sync_cmd_idx];
 					if (cmd_mfi->frame->dcmd.opcode ==
-					    MR_DCMD_LD_MAP_GET_INFO) {
+					    cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
 						megasas_return_cmd(instance,
 								   cmd_mfi);
 						megasas_return_cmd_fusion(
@@ -2433,11 +2573,15 @@
 							instance,
 							cmd_mfi->context.smid
 							-1);
-						if (!req_desc)
+						if (!req_desc) {
 							printk(KERN_WARNING
 							       "req_desc NULL"
-							       "\n");
-						else {
+							       " for scsi%d\n",
+								instance->host->host_no);
+							/* Return leaked MPT
+							   frame */
+							megasas_return_cmd_fusion(instance, cmd_fusion);
+						} else {
 							instance->instancet->
 							fire_cmd(instance,
 								 req_desc->
@@ -2451,6 +2595,11 @@
 				}
 			}
 
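+			/* Re-enable interrupts and leave the reset state
+			 * only after the management commands above have
+			 * been re-fired. */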
+			clear_bit(MEGASAS_FUSION_IN_RESET,
+				  &instance->reset_flags);
+			instance->instancet->enable_intr(instance);
+			instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+
 			/* Reset load balance info */
 			memset(fusion->load_balance_info, 0,
 			       sizeof(struct LD_LOAD_BALANCE_INFO)
@@ -2459,18 +2608,39 @@
 			if (!megasas_get_map_info(instance))
 				megasas_sync_map_info(instance);
 
+			/* Restart SR-IOV heartbeat */
+			if (instance->requestorId) {
+				if (!megasas_sriov_start_heartbeat(instance, 0))
+					megasas_start_timer(instance,
+							    &instance->sriov_heartbeat_timer,
+							    megasas_sriov_heartbeat_handler,
+							    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+				else
+					instance->skip_heartbeat_timer_del = 1;
+			}
+
 			/* Adapter reset completed successfully */
 			printk(KERN_WARNING "megaraid_sas: Reset "
-			       "successful.\n");
+			       "successful for scsi%d.\n",
+				instance->host->host_no);
 			retval = SUCCESS;
 			goto out;
 		}
 		/* Reset failed, kill the adapter */
 		printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
-		       "adapter.\n");
+		       "adapter scsi%d.\n", instance->host->host_no);
 		megaraid_sas_kill_hba(instance);
+		instance->skip_heartbeat_timer_del = 1;
+		instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
 		retval = FAILED;
 	} else {
+		/* For VF: Restart HB timer if we didn't OCR */
+		if (instance->requestorId) {
+			megasas_start_timer(instance,
+					    &instance->sriov_heartbeat_timer,
+					    megasas_sriov_heartbeat_handler,
+					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+		}
 		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
 		instance->instancet->enable_intr(instance);
 		instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
@@ -2487,7 +2657,7 @@
 	struct megasas_instance *instance =
 		container_of(work, struct megasas_instance, work_init);
 
-	megasas_reset_fusion(instance->host);
+	megasas_reset_fusion(instance->host, 0);
 }
 
 struct megasas_instance_template megasas_instance_template_fusion = {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 35a5139..e76af54 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -485,6 +485,9 @@
 #define MAX_PHYSICAL_DEVICES 256
 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
 #define MR_DCMD_LD_MAP_GET_INFO             0x0300e101
+#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC  0x010e8485   /* SR-IOV HB alloc */
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111   0x03200200
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS       0x03150200
 
 struct MR_DEV_HANDLE_INFO {
 	u16     curDevHdl;
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 62f1a60..0d78a4d 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -453,7 +453,7 @@
 	    instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
 
 	if (instance->irq != SCSI_IRQ_NONE) 
-	    if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED,
+	    if (request_irq(instance->irq, pas16_intr, 0,
 			    "pas16", instance)) {
 		printk("scsi%d : IRQ%d not free, interrupts disabled\n", 
 		    instance->host_no, instance->irq);
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index a04b4ff..28b4e81 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -323,24 +323,17 @@
 	int offset;
 	char *str = buf;
 	int start = 0;
-#define IB_MEMMAP(c)		\
-		(*(u32 *)((u8 *)pm8001_ha->		\
-		memoryMap.region[IB].virt_ptr +		\
+#define IB_MEMMAP(c)	\
+		(*(u32 *)((u8 *)pm8001_ha->	\
+		memoryMap.region[IB].virt_ptr +	\
 		pm8001_ha->evtlog_ib_offset + (c)))
 
 	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
-		if (pm8001_ha->chip_id != chip_8001)
-			str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
-		else
-			str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+		str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
 		start = start + 4;
 	}
 	pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
-	if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
-		&& (pm8001_ha->chip_id != chip_8001))
-		pm8001_ha->evtlog_ib_offset = 0;
-	if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
-		&& (pm8001_ha->chip_id == chip_8001))
+	if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
 		pm8001_ha->evtlog_ib_offset = 0;
 
 	return str - buf;
@@ -363,24 +356,17 @@
 	int offset;
 	char *str = buf;
 	int start = 0;
-#define OB_MEMMAP(c)		\
-		(*(u32 *)((u8 *)pm8001_ha->		\
-		memoryMap.region[OB].virt_ptr +		\
+#define OB_MEMMAP(c)	\
+		(*(u32 *)((u8 *)pm8001_ha->	\
+		memoryMap.region[OB].virt_ptr +	\
 		pm8001_ha->evtlog_ob_offset + (c)))
 
 	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
-		if (pm8001_ha->chip_id != chip_8001)
-			str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
-		else
-			str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+		str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
 		start = start + 4;
 	}
 	pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
-	if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
-			&& (pm8001_ha->chip_id != chip_8001))
-		pm8001_ha->evtlog_ob_offset = 0;
-	if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
-			&& (pm8001_ha->chip_id == chip_8001))
+	if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
 		pm8001_ha->evtlog_ob_offset = 0;
 
 	return str - buf;
@@ -466,7 +452,7 @@
 static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
-	u32 count;
+	ssize_t count;
 
 	count = pm80xx_get_fatal_dump(cdev, attr, buf);
 	return count;
@@ -484,7 +470,7 @@
 static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
 	struct device_attribute *attr, char *buf)
 {
-	u32 count;
+	ssize_t count;
 
 	count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
 	return count;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 0a1296a..a97be01 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -644,7 +644,7 @@
 	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
 	/* 8081 controllers need BAR shift to access MPI space
 	* as this is shared with BIOS data */
-	if (deviceid == 0x8081) {
+	if (deviceid == 0x8081 || deviceid == 0x0042) {
 		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
 			PM8001_FAIL_DBG(pm8001_ha,
 				pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -673,7 +673,7 @@
 	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
 		update_outbnd_queue_table(pm8001_ha, i);
 	/* 8081 controllers do not require these operations */
-	if (deviceid != 0x8081) {
+	if (deviceid != 0x8081 && deviceid != 0x0042) {
 		mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
 		/* 7->130ms, 34->500ms, 119->1.5s */
 		mpi_set_open_retry_interval_reg(pm8001_ha, 119);
@@ -701,7 +701,7 @@
 	u32 gst_len_mpistate;
 	u16 deviceid;
 	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
-	if (deviceid == 0x8081) {
+	if (deviceid == 0x8081 || deviceid == 0x0042) {
 		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
 			PM8001_FAIL_DBG(pm8001_ha,
 				pm8001_printk("Shift Bar4 to 0x%x failed\n",
@@ -2502,11 +2502,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*in order to force CPU ordering*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2522,11 +2518,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2550,11 +2542,7 @@
 				IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/* ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2617,11 +2605,7 @@
 				    IO_DS_NON_OPERATIONAL);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2641,11 +2625,7 @@
 				    IO_DS_IN_ERROR);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2674,20 +2654,9 @@
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else if (t->uldd_task) {
+	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* ditto */
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
-	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/*ditto*/
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 	}
 }
 
@@ -2796,11 +2765,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_COMPLETE;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2909,20 +2874,9 @@
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, event, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else if (t->uldd_task) {
+	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* ditto */
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
-	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/*ditto*/
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 	}
 }
 
@@ -4467,23 +4421,11 @@
 					" stat 0x%x but aborted by upper layer "
 					"\n", task, ts->resp, ts->stat));
 				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-			} else if (task->uldd_task) {
+			} else {
 				spin_unlock_irqrestore(&task->task_state_lock,
 							flags);
-				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-				mb();/* ditto */
-				spin_unlock_irq(&pm8001_ha->lock);
-				task->task_done(task);
-				spin_lock_irq(&pm8001_ha->lock);
-				return 0;
-			} else if (!task->uldd_task) {
-				spin_unlock_irqrestore(&task->task_state_lock,
-							flags);
-				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-				mb();/*ditto*/
-				spin_unlock_irq(&pm8001_ha->lock);
-				task->task_done(task);
-				spin_lock_irq(&pm8001_ha->lock);
+				pm8001_ccb_task_free_done(pm8001_ha, task,
+								ccb, tag);
 				return 0;
 			}
 		}
@@ -5020,7 +4962,7 @@
 	/* check max is 1 Mbytes */
 	if ((length > 0x100000) || (gsm_dump_offset & 3) ||
 		((gsm_dump_offset + length) > 0x1000000))
-			return 1;
+			return -EINVAL;
 
 	if (pm8001_ha->chip_id == chip_8001)
 		bar = 2;
@@ -5048,12 +4990,12 @@
 				gsm_base = GSM_BASE;
 				if (-1 == pm8001_bar4_shift(pm8001_ha,
 						(gsm_base + shift_value)))
-					return 1;
+					return -EIO;
 			} else {
 				gsm_base = 0;
 				if (-1 == pm80xx_bar4_shift(pm8001_ha,
 						(gsm_base + shift_value)))
-					return 1;
+					return -EIO;
 			}
 			gsm_dump_offset = (gsm_dump_offset + offset) &
 						0xFFFF0000;
@@ -5072,13 +5014,8 @@
 		direct_data += sprintf(direct_data, "%08x ", value);
 	}
 	/* Shift back to BAR4 original address */
-	if (pm8001_ha->chip_id == chip_8001) {
-		if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
-			return 1;
-	} else {
-		if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
-			return 1;
-	}
+	if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
+		return -EIO;
 	pm8001_ha->fatal_forensic_shift_offset += 1024;
 
 	if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 73a120d..c4f31b21 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -625,7 +625,7 @@
 	pm8001_ha->nvmd_completion = &completion;
 
 	if (pm8001_ha->chip_id == chip_8001) {
-		if (deviceid == 0x8081) {
+		if (deviceid == 0x8081 || deviceid == 0x0042) {
 			payload.minor_function = 4;
 			payload.length = 4096;
 		} else {
@@ -646,6 +646,9 @@
 			if (deviceid == 0x8081)
 				pm8001_ha->sas_addr[j] =
 					payload.func_specific[0x704 + i];
+			else if (deviceid == 0x0042)
+				pm8001_ha->sas_addr[j] =
+					payload.func_specific[0x010 + i];
 		} else
 			pm8001_ha->sas_addr[j] =
 					payload.func_specific[0x804 + i];
@@ -713,11 +716,9 @@
 	/* SPCv controllers supports 64 msi-x */
 	if (pm8001_ha->chip_id == chip_8001) {
 		number_of_intr = 1;
-		flag |= IRQF_DISABLED;
 	} else {
 		number_of_intr = PM8001_MAX_MSIX_VEC;
 		flag &= ~IRQF_SHARED;
-		flag |= IRQF_DISABLED;
 	}
 
 	max_entry = sizeof(pm8001_ha->msix_entries) /
@@ -1072,10 +1073,7 @@
  */
 static struct pci_device_id pm8001_pci_table[] = {
 	{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
-	{
-		PCI_DEVICE(0x117c, 0x0042),
-		.driver_data = chip_8001
-	},
+	{ PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
 	/* Support for SPC/SPCv/SPCve controllers */
 	{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
 	{ PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f50ac44..8a44bc9 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -434,6 +434,7 @@
 		ccb->n_elem = n_elem;
 		ccb->ccb_tag = tag;
 		ccb->task = t;
+		ccb->device = pm8001_dev;
 		switch (t->task_proto) {
 		case SAS_PROTOCOL_SMP:
 			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
@@ -865,13 +866,11 @@
 static void pm8001_dev_gone_notify(struct domain_device *dev)
 {
 	unsigned long flags = 0;
-	u32 tag;
 	struct pm8001_hba_info *pm8001_ha;
 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
 
 	pm8001_ha = pm8001_find_ha_by_dev(dev);
 	spin_lock_irqsave(&pm8001_ha->lock, flags);
-	pm8001_tag_alloc(pm8001_ha, &tag);
 	if (pm8001_dev) {
 		u32 device_id = pm8001_dev->device_id;
 
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6c5fd5e..1ee06f2 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -708,5 +708,17 @@
 /* ctl shared API */
 extern struct device_attribute *pm8001_host_attrs[];
 
+static inline void
+pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
+			struct sas_task *task, struct pm8001_ccb_info *ccb,
+			u32 ccb_idx)
+{
+	pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
+	smp_mb(); /* in order to force CPU ordering */
+	spin_unlock(&pm8001_ha->lock);
+	task->task_done(task);
+	spin_lock(&pm8001_ha->lock);
+}
+
 #endif
 
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index c950dc5..d70587f 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -91,7 +91,6 @@
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 	void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
-	u32 status = 1;
 	u32 accum_len , reg_val, index, *temp;
 	unsigned long start;
 	u8 *direct_data;
@@ -111,13 +110,10 @@
 		direct_data = (u8 *)fatal_error_data;
 		pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
 		pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
-		pm8001_ha->forensic_info.data_buf.direct_offset = 0;
 		pm8001_ha->forensic_info.data_buf.read_len = 0;
 
 		pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
-	}
 
-	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
 		/* start to get data */
 		/* Program the MEMBASE II Shifting Register with 0x00.*/
 		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
@@ -126,6 +122,7 @@
 		pm8001_ha->forensic_fatal_step = 0;
 		pm8001_ha->fatal_bar_loc = 0;
 	}
+
 	/* Read until accum_len is retrieved */
 	accum_len = pm8001_mr32(fatal_table_address,
 				MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
@@ -135,7 +132,7 @@
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("Possible PCI issue 0x%x not expected\n",
 				accum_len));
-		return status;
+		return -EIO;
 	}
 	if (accum_len == 0 || accum_len >= 0x100000) {
 		pm8001_ha->forensic_info.data_buf.direct_data +=
@@ -178,7 +175,6 @@
 			pm8001_ha->forensic_fatal_step = 1;
 			pm8001_ha->fatal_forensic_shift_offset = 0;
 			pm8001_ha->forensic_last_offset	= 0;
-			status = 0;
 			return (char *)pm8001_ha->
 				forensic_info.data_buf.direct_data -
 				(char *)buf;
@@ -194,7 +190,6 @@
 					forensic_info.data_buf.direct_data,
 					"%08x ", *(temp + index));
 			}
-			status = 0;
 			return (char *)pm8001_ha->
 				forensic_info.data_buf.direct_data -
 				(char *)buf;
@@ -214,7 +209,6 @@
 		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
 			pm8001_ha->fatal_forensic_shift_offset);
 		pm8001_ha->fatal_bar_loc = 0;
-		status = 0;
 		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
 			(char *)buf;
 	}
@@ -239,7 +233,7 @@
 			PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
 			" = 0x%x\n", reg_val));
-			return -1;
+			return -EIO;
 		}
 
 		/* Read the next 64K of the debug data. */
@@ -259,7 +253,6 @@
 			pm8001_ha->forensic_info.data_buf.direct_len =  0;
 			pm8001_ha->forensic_info.data_buf.direct_offset = 0;
 			pm8001_ha->forensic_info.data_buf.read_len = 0;
-			status = 0;
 		}
 	}
 
@@ -2175,11 +2168,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*in order to force CPU ordering*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2195,11 +2184,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2221,11 +2206,7 @@
 				IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/* ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2288,11 +2269,7 @@
 					IO_DS_NON_OPERATIONAL);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2312,11 +2289,7 @@
 					IO_DS_IN_ERROR);
 			ts->resp = SAS_TASK_UNDELIVERED;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2345,20 +2318,9 @@
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else if (t->uldd_task) {
+	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* ditto */
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
-	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/*ditto*/
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 	}
 }
 
@@ -2470,11 +2432,7 @@
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 			ts->resp = SAS_TASK_COMPLETE;
 			ts->stat = SAS_QUEUE_FULL;
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-			mb();/*ditto*/
-			spin_unlock_irq(&pm8001_ha->lock);
-			t->task_done(t);
-			spin_lock_irq(&pm8001_ha->lock);
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 			return;
 		}
 		break;
@@ -2596,20 +2554,9 @@
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, event, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else if (t->uldd_task) {
+	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* ditto */
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
-	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/*ditto*/
-		spin_unlock_irq(&pm8001_ha->lock);
-		t->task_done(t);
-		spin_lock_irq(&pm8001_ha->lock);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
 	}
 }
 
@@ -4304,23 +4251,11 @@
 					"\n", task, ts->resp, ts->stat));
 				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
 				return 0;
-			} else if (task->uldd_task) {
+			} else {
 				spin_unlock_irqrestore(&task->task_state_lock,
 							flags);
-				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-				mb();/* ditto */
-				spin_unlock_irq(&pm8001_ha->lock);
-				task->task_done(task);
-				spin_lock_irq(&pm8001_ha->lock);
-				return 0;
-			} else if (!task->uldd_task) {
-				spin_unlock_irqrestore(&task->task_state_lock,
-							flags);
-				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
-				mb();/*ditto*/
-				spin_unlock_irq(&pm8001_ha->lock);
-				task->task_done(task);
-				spin_lock_irq(&pm8001_ha->lock);
+				pm8001_ccb_task_free_done(pm8001_ha, task,
+								ccb, tag);
 				return 0;
 			}
 		}
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index ff0fc7c..44def6b 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
 		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-        qla_nx.o qla_mr.o qla_nx2.o qla_target.o
+		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4a0d7c9..07befcf 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -147,6 +147,92 @@
 };
 
 static ssize_t
+qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj,
+			   struct bin_attribute *bin_attr,
+			   char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->fw_dump_template || !ha->fw_dump_template_len)
+		return 0;
+
+	ql_dbg(ql_dbg_user, vha, 0x70e2,
+	    "chunk <- off=%llx count=%zx\n", off, count);
+	return memory_read_from_buffer(buf, count, &off,
+	    ha->fw_dump_template, ha->fw_dump_template_len);
+}
+
+static ssize_t
+qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj,
+			    struct bin_attribute *bin_attr,
+			    char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t size;
+
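+	/* A write at offset 0 (re)starts a template download: release
+	 * any previous dump and template before sizing the new one. */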
+	if (off == 0) {
+		if (ha->fw_dump)
+			vfree(ha->fw_dump);
+		if (ha->fw_dump_template)
+			vfree(ha->fw_dump_template);
+
+		ha->fw_dump = NULL;
+		ha->fw_dump_len = 0;
+		ha->fw_dump_template = NULL;
+		ha->fw_dump_template_len = 0;
+
+		size = qla27xx_fwdt_template_size(buf);
+		ql_dbg(ql_dbg_user, vha, 0x70d1,
+		    "-> allocating fwdt (%x bytes)...\n", size);
+		ha->fw_dump_template = vmalloc(size);
+		if (!ha->fw_dump_template) {
+			ql_log(ql_log_warn, vha, 0x70d2,
+			    "Failed allocate fwdt (%x bytes).\n", size);
+			return -ENOMEM;
+		}
+		ha->fw_dump_template_len = size;
+	}
+
+	if (off + count > ha->fw_dump_template_len) {
+		count = ha->fw_dump_template_len - off;
+		ql_dbg(ql_dbg_user, vha, 0x70d3,
+		    "chunk -> truncating to %zx bytes.\n", count);
+	}
+
+	ql_dbg(ql_dbg_user, vha, 0x70d4,
+	    "chunk -> off=%llx count=%zx\n", off, count);
+	memcpy(ha->fw_dump_template + off, buf, count);
+
+	if (off + count == ha->fw_dump_template_len) {
+		size = qla27xx_fwdt_calculate_dump_size(vha);
+		ql_dbg(ql_dbg_user, vha, 0x70d5,
+		    "-> allocating fwdump (%x bytes)...\n", size);
+		ha->fw_dump = vmalloc(size);
+		if (!ha->fw_dump) {
+			ql_log(ql_log_warn, vha, 0x70d6,
+			    "Failed allocate fwdump (%x bytes).\n", size);
+			return -ENOMEM;
+		}
+		ha->fw_dump_len = size;
+	}
+
+	return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_template_attr = {
+	.attr = {
+		.name = "fw_dump_template",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 0,
+	.read = qla2x00_sysfs_read_fw_dump_template,
+	.write = qla2x00_sysfs_write_fw_dump_template,
+};
+
+static ssize_t
 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
 			 struct bin_attribute *bin_attr,
 			 char *buf, loff_t off, size_t count)
@@ -241,12 +327,17 @@
 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
+	ssize_t rval = 0;
 
 	if (ha->optrom_state != QLA_SREADING)
 		return 0;
 
-	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
-					ha->optrom_region_size);
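+	/* optrom_mutex serializes sysfs access to optrom_buffer against
+	 * the BSG paths that reallocate it. */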
+	mutex_lock(&ha->optrom_mutex);
+	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+	    ha->optrom_region_size);
+	mutex_unlock(&ha->optrom_mutex);
+
+	return rval;
 }
 
 static ssize_t
@@ -265,7 +356,9 @@
 	if (off + count > ha->optrom_region_size)
 		count = ha->optrom_region_size - off;
 
+	mutex_lock(&ha->optrom_mutex);
 	memcpy(&ha->optrom_buffer[off], buf, count);
+	mutex_unlock(&ha->optrom_mutex);
 
 	return count;
 }
@@ -288,10 +381,10 @@
 	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
-
 	uint32_t start = 0;
 	uint32_t size = ha->optrom_size;
 	int val, valid;
+	ssize_t rval = count;
 
 	if (off)
 		return -EINVAL;
@@ -304,12 +397,14 @@
 	if (start > ha->optrom_size)
 		return -EINVAL;
 
+	mutex_lock(&ha->optrom_mutex);
 	switch (val) {
 	case 0:
 		if (ha->optrom_state != QLA_SREADING &&
-		    ha->optrom_state != QLA_SWRITING)
-			return -EINVAL;
-
+		    ha->optrom_state != QLA_SWRITING) {
+			rval = -EINVAL;
+			goto out;
+		}
 		ha->optrom_state = QLA_SWAITING;
 
 		ql_dbg(ql_dbg_user, vha, 0x7061,
@@ -320,8 +415,10 @@
 		ha->optrom_buffer = NULL;
 		break;
 	case 1:
-		if (ha->optrom_state != QLA_SWAITING)
-			return -EINVAL;
+		if (ha->optrom_state != QLA_SWAITING) {
+			rval = -EINVAL;
+			goto out;
+		}
 
 		ha->optrom_region_start = start;
 		ha->optrom_region_size = start + size > ha->optrom_size ?
@@ -335,13 +432,15 @@
 			    "(%x).\n", ha->optrom_region_size);
 
 			ha->optrom_state = QLA_SWAITING;
-			return -ENOMEM;
+			rval = -ENOMEM;
+			goto out;
 		}
 
 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 			ql_log(ql_log_warn, vha, 0x7063,
 			    "HBA not online, failing NVRAM update.\n");
-			return -EAGAIN;
+			rval = -EAGAIN;
+			goto out;
 		}
 
 		ql_dbg(ql_dbg_user, vha, 0x7064,
@@ -353,8 +452,10 @@
 		    ha->optrom_region_start, ha->optrom_region_size);
 		break;
 	case 2:
-		if (ha->optrom_state != QLA_SWAITING)
-			return -EINVAL;
+		if (ha->optrom_state != QLA_SWAITING) {
+			rval = -EINVAL;
+			goto out;
+		}
 
 		/*
 		 * We need to be more restrictive on which FLASH regions are
@@ -388,7 +489,8 @@
 		if (!valid) {
 			ql_log(ql_log_warn, vha, 0x7065,
 			    "Invalid start region 0x%x/0x%x.\n", start, size);
-			return -EINVAL;
+			rval = -EINVAL;
+			goto out;
 		}
 
 		ha->optrom_region_start = start;
@@ -403,7 +505,8 @@
 			    "(%x)\n", ha->optrom_region_size);
 
 			ha->optrom_state = QLA_SWAITING;
-			return -ENOMEM;
+			rval = -ENOMEM;
+			goto out;
 		}
 
 		ql_dbg(ql_dbg_user, vha, 0x7067,
@@ -413,13 +516,16 @@
 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
 		break;
 	case 3:
-		if (ha->optrom_state != QLA_SWRITING)
-			return -EINVAL;
+		if (ha->optrom_state != QLA_SWRITING) {
+			rval = -EINVAL;
+			goto out;
+		}
 
 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
 			ql_log(ql_log_warn, vha, 0x7068,
 			    "HBA not online, failing flash update.\n");
-			return -EAGAIN;
+			rval = -EAGAIN;
+			goto out;
 		}
 
 		ql_dbg(ql_dbg_user, vha, 0x7069,
@@ -430,9 +536,12 @@
 		    ha->optrom_region_start, ha->optrom_region_size);
 		break;
 	default:
-		return -EINVAL;
+		rval = -EINVAL;
 	}
-	return count;
+
+out:
+	mutex_unlock(&ha->optrom_mutex);
+	return rval;
 }
 
 static struct bin_attribute sysfs_optrom_ctl_attr = {
@@ -822,6 +931,7 @@
 	int is4GBp_only;
 } bin_file_entries[] = {
 	{ "fw_dump", &sysfs_fw_dump_attr, },
+	{ "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },
 	{ "nvram", &sysfs_nvram_attr, },
 	{ "optrom", &sysfs_optrom_attr, },
 	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
@@ -847,6 +957,8 @@
 			continue;
 		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
 			continue;
+		if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+			continue;
 
 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
 		    iter->attr);
@@ -1187,7 +1299,7 @@
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1391,6 +1503,37 @@
 	return scnprintf(buf, PAGE_SIZE, "%d\n", size);
 }
 
+static ssize_t
+qla2x00_allow_cna_fw_dump_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	if (!IS_P3P_TYPE(vha->hw))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+	else
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+		    vha->hw->allow_cna_fw_dump ? "true" : "false");
+}
+
+static ssize_t
+qla2x00_allow_cna_fw_dump_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int val = 0;
+
+	if (!IS_P3P_TYPE(vha->hw))
+		return -EINVAL;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	vha->hw->allow_cna_fw_dump = val != 0;
+
+	return strlen(buf);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1432,6 +1575,9 @@
 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
+static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
+		   qla2x00_allow_cna_fw_dump_show,
+		   qla2x00_allow_cna_fw_dump_store);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -1464,6 +1610,7 @@
 	&dev_attr_diag_requests,
 	&dev_attr_diag_megabytes,
 	&dev_attr_fw_dump_size,
+	&dev_attr_allow_cna_fw_dump,
 	NULL,
 };
 
@@ -1509,6 +1656,9 @@
 	case PORT_SPEED_16GB:
 		speed = FC_PORTSPEED_16GBIT;
 		break;
+	case PORT_SPEED_32GB:
+		speed = FC_PORTSPEED_32GBIT;
+		break;
 	}
 	fc_host_speed(shost) = speed;
 }
@@ -2160,6 +2310,9 @@
 	else if (IS_QLAFX00(ha))
 		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
 		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+	else if (IS_QLA27XX(ha))
+		speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
+		    FC_PORTSPEED_8GBIT;
 	else
 		speed = FC_PORTSPEED_1GBIT;
 	fc_host_supported_speeds(vha->host) = speed;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f15d03e..71ff340 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1437,9 +1437,12 @@
 	if (ha->flags.nic_core_reset_hdlr_active)
 		return -EBUSY;
 
+	mutex_lock(&ha->optrom_mutex);
 	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
-	if (rval)
+	if (rval) {
+		mutex_unlock(&ha->optrom_mutex);
 		return rval;
+	}
 
 	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
 	    ha->optrom_region_start, ha->optrom_region_size);
@@ -1453,6 +1456,7 @@
 	vfree(ha->optrom_buffer);
 	ha->optrom_buffer = NULL;
 	ha->optrom_state = QLA_SWAITING;
+	mutex_unlock(&ha->optrom_mutex);
 	bsg_job->job_done(bsg_job);
 	return rval;
 }
@@ -1465,9 +1469,12 @@
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
 
+	mutex_lock(&ha->optrom_mutex);
 	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
-	if (rval)
+	if (rval) {
+		mutex_unlock(&ha->optrom_mutex);
 		return rval;
+	}
 
 	/* Set the isp82xx_no_md_cap not to capture minidump */
 	ha->flags.isp82xx_no_md_cap = 1;
@@ -1483,6 +1490,7 @@
 	vfree(ha->optrom_buffer);
 	ha->optrom_buffer = NULL;
 	ha->optrom_state = QLA_SWAITING;
+	mutex_unlock(&ha->optrom_mutex);
 	bsg_job->job_done(bsg_job);
 	return rval;
 }
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6103f5..97255f7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,13 +11,15 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x015b       | 0x4b,0xba,0xfa |
- * |                              |                    | 0x0x015a	|
- * | Mailbox commands             |       0x1187       | 0x111a-0x111b  |
- * |                              |                    | 0x1155-0x1158  |
- * |                              |                    | 0x1018-0x1019  |
+ * | Module Init and Probe        |       0x017d       | 0x004b,0x0141	|
+ * |                              |                    | 0x0144,0x0146	|
+ * |                              |                    | 0x015b-0x0160	|
+ * |                              |                    | 0x016e-0x0170	|
+ * | Mailbox commands             |       0x1187       | 0x1018-0x1019	|
+ * |                              |                    | 0x10ca         |
  * |                              |                    | 0x1115-0x1116  |
- * |                              |                    | 0x10ca		|
+ * |                              |                    | 0x111a-0x111b	|
+ * |                              |                    | 0x1155-0x1158  |
  * | Device Discovery             |       0x2095       | 0x2020-0x2022, |
  * |                              |                    | 0x2011-0x2012, |
  * |                              |                    | 0x2016         |
@@ -32,18 +34,17 @@
  * |                              |                    | 0x5047,0x5052  |
  * |                              |                    | 0x5084,0x5075	|
  * |                              |                    | 0x503d,0x5044  |
+ * |                              |                    | 0x507b		|
  * | Timer Routines               |       0x6012       |                |
- * | User Space Interactions      |       0x70e1       | 0x7018,0x702e, |
- * |                              |                    | 0x7020,0x7024, |
- * |                              |                    | 0x7039,0x7045, |
- * |                              |                    | 0x7073-0x7075, |
- * |                              |                    | 0x707b,0x708c, |
- * |                              |                    | 0x70a5,0x70a6, |
- * |                              |                    | 0x70a8,0x70ab, |
- * |                              |                    | 0x70ad-0x70ae, |
- * |                              |                    | 0x70d1-0x70db, |
- * |                              |                    | 0x7047,0x703b	|
- * |                              |                    | 0x70de-0x70df, |
+ * | User Space Interactions      |       0x70e2       | 0x7018,0x702e  |
+ * |				  |		       | 0x7020,0x7024  |
+ * |                              |                    | 0x7039,0x7045  |
+ * |                              |                    | 0x7073-0x7075  |
+ * |                              |                    | 0x70a5-0x70a6  |
+ * |                              |                    | 0x70a8,0x70ab  |
+ * |                              |                    | 0x70ad-0x70ae  |
+ * |                              |                    | 0x70d7-0x70db  |
+ * |                              |                    | 0x70de-0x70df  |
  * | Task Management              |       0x803d       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x9011       |		|
@@ -59,7 +60,11 @@
  * |                              |                    | 0xb13c-0xb140  |
  * |                              |                    | 0xb149		|
  * | MultiQ                       |       0xc00c       |		|
- * | Misc                         |       0xd010       |		|
+ * | Misc                         |       0xd2ff       | 0xd017-0xd019	|
+ * |                              |                    | 0xd020		|
+ * |                              |                    | 0xd02e-0xd0ff	|
+ * |                              |                    | 0xd101-0xd1fe	|
+ * |                              |                    | 0xd212-0xd2fe	|
  * | Target Mode		  |	  0xe070       | 0xe021		|
  * | Target Mode Management	  |	  0xf072       | 0xf002-0xf003	|
  * |                              |                    | 0xf046-0xf049  |
@@ -104,7 +109,87 @@
 	return ptr + (rsp->length * sizeof(response_t));
 }
 
-static int
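+/*
+ * Dump MPI RAM on ISP27xx via the MBC_LOAD_DUMP_MPI_RAM mailbox
+ * command, staging each chunk through the gid_list DMA buffer.
+ */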
+int
+qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+	uint32_t ram_dwords, void **nxt)
+{
+	int rval;
+	uint32_t cnt, stat, timer, dwords, idx;
+	uint16_t mb0, mb1;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	dma_addr_t dump_dma = ha->gid_list_dma;
+	uint32_t *dump = (uint32_t *)ha->gid_list;
+
+	rval = QLA_SUCCESS;
+	mb0 = 0;
+
+	WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+	dwords = qla2x00_gid_list_size(ha) / 4;
+	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+	    cnt += dwords, addr += dwords) {
+		if (cnt + dwords > ram_dwords)
+			dwords = ram_dwords - cnt;
+
+		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+
+		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+
+		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+
+		WRT_REG_WORD(&reg->mailbox9, 0);
+		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
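+		/* Poll inline for the mailbox completion interrupt. */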
+		ha->flags.mbox_int = 0;
+		for (timer = 6000000; timer; timer--) {
+			/* Check for pending interrupts. */
+			stat = RD_REG_DWORD(&reg->host_status);
+			if (stat & HSRX_RISC_INT) {
+				stat &= 0xff;
+
+				if (stat == 0x1 || stat == 0x2 ||
+				    stat == 0x10 || stat == 0x11) {
+					set_bit(MBX_INTERRUPT,
+					    &ha->mbx_cmd_flags);
+
+					mb0 = RD_REG_WORD(&reg->mailbox0);
+					mb1 = RD_REG_WORD(&reg->mailbox1);
+
+					WRT_REG_DWORD(&reg->hccr,
+					    HCCRX_CLR_RISC_INT);
+					RD_REG_DWORD(&reg->hccr);
+					break;
+				}
+
+				/* Clear this intr; it wasn't a mailbox intr */
+				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+				RD_REG_DWORD(&reg->hccr);
+			}
+			udelay(5);
+		}
+		ha->flags.mbox_int = 1;
+
+		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+			rval = mb0 & MBS_MASK;
+			for (idx = 0; idx < dwords; idx++)
+				ram[cnt + idx] = IS_QLA27XX(ha) ?
+				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
+		} else {
+			rval = QLA_FUNCTION_FAILED;
+		}
+	}
+
+	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+	return rval;
+}
+
+int
 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
     uint32_t ram_dwords, void **nxt)
 {
@@ -139,6 +224,7 @@
 		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
 		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
 
+		ha->flags.mbox_int = 0;
 		for (timer = 6000000; timer; timer--) {
 			/* Check for pending interrupts. */
 			stat = RD_REG_DWORD(&reg->host_status);
@@ -164,11 +250,13 @@
 			}
 			udelay(5);
 		}
+		ha->flags.mbox_int = 1;
 
 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
 			rval = mb0 & MBS_MASK;
 			for (idx = 0; idx < dwords; idx++)
-				ram[cnt + idx] = swab32(dump[idx]);
+				ram[cnt + idx] = IS_QLA27XX(ha) ?
+				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
 		} else {
 			rval = QLA_FUNCTION_FAILED;
 		}
@@ -208,7 +296,7 @@
 	return buf;
 }
 
-static inline int
+int
 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
 {
 	int rval = QLA_SUCCESS;
@@ -227,7 +315,7 @@
 	return rval;
 }
 
-static int
+int
 qla24xx_soft_reset(struct qla_hw_data *ha)
 {
 	int rval = QLA_SUCCESS;
@@ -537,7 +625,7 @@
 	struct qla2xxx_mq_chain *mq = ptr;
 	device_reg_t __iomem *reg;
 
-	if (!ha->mqenable || IS_QLA83XX(ha))
+	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		return ptr;
 
 	mq = ptr;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 35e20b4..cc96104 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,3 +348,10 @@
 #define ql_dbg_tgt	0x00004000 /* Target mode */
 #define ql_dbg_tgt_mgt	0x00002000 /* Target mode management */
 #define ql_dbg_tgt_tmr	0x00001000 /* Target mode task management */
+
+extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+	uint32_t, void **);
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+	uint32_t, void **);
+extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
+extern int qla24xx_soft_reset(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 266724b..6a10613 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -654,7 +654,7 @@
 		struct device_reg_25xxmq isp25mq;
 		struct device_reg_82xx isp82;
 		struct device_reg_fx00 ispfx00;
-} device_reg_t;
+} __iomem device_reg_t;
 
 #define ISP_REQ_Q_IN(ha, reg) \
 	(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
@@ -808,7 +808,7 @@
 					   Notification */
 #define MBA_FW_POLL_STATE	0x8600  /* Firmware in poll diagnostic state */
 #define MBA_FW_RESET_FCT	0x8502	/* Firmware reset factory defaults */
-
+#define MBA_FW_INIT_INPROGRESS	0x8500	/* Firmware boot in progress */
 /* 83XX FCoE specific */
 #define MBA_IDC_AEN		0x8200  /* FCoE: NIC Core state change AEN */
 
@@ -938,6 +938,7 @@
  */
 #define MBC_WRITE_SERDES		0x3	/* Write serdes word. */
 #define MBC_READ_SERDES			0x4	/* Read serdes word. */
+#define MBC_LOAD_DUMP_MPI_RAM		0x5	/* Load/Dump MPI RAM. */
 #define MBC_SERDES_PARAMS		0x10	/* Serdes Tx Parameters. */
 #define MBC_GET_IOCB_STATUS		0x12	/* Get IOCB status command. */
 #define MBC_PORT_PARAMS			0x1A	/* Port iDMA Parameters. */
@@ -1197,30 +1198,6 @@
 	uint8_t  reserved_3[26];
 } init_cb_t;
 
-
-struct init_cb_fx {
-	uint16_t	version;
-	uint16_t	reserved_1[13];
-	__le16		request_q_outpointer;
-	__le16		response_q_inpointer;
-	uint16_t	reserved_2[2];
-	__le16		response_q_length;
-	__le16		request_q_length;
-	uint16_t	reserved_3[2];
-	__le32		request_q_address[2];
-	__le32		response_q_address[2];
-	uint16_t	reserved_4[4];
-	uint8_t		response_q_msivec;
-	uint8_t		reserved_5[19];
-	uint16_t	interrupt_delay_timer;
-	uint16_t	reserved_6;
-	uint32_t	fwoptions1;
-	uint32_t	fwoptions2;
-	uint32_t	fwoptions3;
-	uint8_t		reserved_7[24];
-};
-
-
 /*
  * Get Link Status mailbox command return buffer.
  */
@@ -2172,6 +2149,7 @@
 #define FDMI_PORT_SPEED_4GB		0x8
 #define FDMI_PORT_SPEED_8GB		0x10
 #define FDMI_PORT_SPEED_16GB		0x20
+#define FDMI_PORT_SPEED_32GB		0x40
 #define FDMI_PORT_SPEED_UNKNOWN		0x8000
 
 struct ct_fdmi_port_attr {
@@ -2680,7 +2658,7 @@
 #define QLA_MQ_SIZE 32
 #define QLA_MAX_QUEUES 256
 #define ISP_QUE_REG(ha, id) \
-	((ha->mqenable || IS_QLA83XX(ha)) ? \
+	((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
 	 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
 	 ((void __iomem *)ha->iobase))
 #define QLA_REQ_QUE_ID(tag) \
@@ -2818,7 +2796,6 @@
 		uint32_t	fac_supported		:1;
 
 		uint32_t	chip_reset_done		:1;
-		uint32_t	port0			:1;
 		uint32_t	running_gold_fw		:1;
 		uint32_t	eeh_busy		:1;
 		uint32_t	cpu_affinity_enabled	:1;
@@ -2849,7 +2826,7 @@
 	spinlock_t	hardware_lock ____cacheline_aligned;
 	int		bars;
 	int		mem_only;
-	device_reg_t __iomem *iobase;           /* Base I/O address */
+	device_reg_t *iobase;           /* Base I/O address */
 	resource_size_t pio_address;
 
 #define MIN_IOBASE_LEN          0x100
@@ -2868,8 +2845,8 @@
 	uint32_t		rsp_que_off;
 
 	/* Multi queue data structs */
-	device_reg_t __iomem *mqiobase;
-	device_reg_t __iomem *msixbase;
+	device_reg_t *mqiobase;
+	device_reg_t *msixbase;
 	uint16_t        msix_count;
 	uint8_t         mqenable;
 	struct req_que **req_q_map;
@@ -2905,6 +2882,7 @@
 #define PORT_SPEED_4GB  0x03
 #define PORT_SPEED_8GB  0x04
 #define PORT_SPEED_16GB 0x05
+#define PORT_SPEED_32GB 0x06
 #define PORT_SPEED_10GB	0x13
 	uint16_t	link_data_rate;         /* F/W operating speed */
 
@@ -2928,6 +2906,7 @@
 #define PCI_DEVICE_ID_QLOGIC_ISP8001	0x8001
 #define PCI_DEVICE_ID_QLOGIC_ISP8031	0x8031
 #define PCI_DEVICE_ID_QLOGIC_ISP2031	0x2031
+#define PCI_DEVICE_ID_QLOGIC_ISP2071	0x2071
 	uint32_t	device_type;
 #define DT_ISP2100                      BIT_0
 #define DT_ISP2200                      BIT_1
@@ -2948,7 +2927,8 @@
 #define DT_ISP8031			BIT_16
 #define DT_ISPFX00			BIT_17
 #define DT_ISP8044			BIT_18
-#define DT_ISP_LAST			(DT_ISP8044 << 1)
+#define DT_ISP2071			BIT_19
+#define DT_ISP_LAST			(DT_ISP2071 << 1)
 
 #define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
@@ -2978,6 +2958,7 @@
 #define IS_QLA2031(ha)	(DT_MASK(ha) & DT_ISP2031)
 #define IS_QLA8031(ha)	(DT_MASK(ha) & DT_ISP8031)
 #define IS_QLAFX00(ha)	(DT_MASK(ha) & DT_ISPFX00)
+#define IS_QLA2071(ha)	(DT_MASK(ha) & DT_ISP2071)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2986,6 +2967,7 @@
 #define IS_QLA25XX(ha)  (IS_QLA2532(ha))
 #define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))
 #define IS_QLA84XX(ha)  (IS_QLA8432(ha))
+#define IS_QLA27XX(ha)  (IS_QLA2071(ha))
 #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 				IS_QLA84XX(ha))
 #define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -2994,11 +2976,13 @@
 #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
 				IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
 				IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
-				IS_QLA8044(ha))
+				IS_QLA8044(ha) || IS_QLA27XX(ha))
 #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
 #define IS_NOPOLLING_TYPE(ha)	(IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
-#define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha))
-#define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha))
+#define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
+#define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
 #define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
 
 #define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)
@@ -3008,7 +2992,8 @@
 #define IS_OEM_001(ha)          ((ha)->device_type & DT_OEM_001)
 #define HAS_EXTENDED_IDS(ha)    ((ha)->device_type & DT_EXTENDED_IDS)
 #define IS_CT6_SUPPORTED(ha)	((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha)	((ha)->mqenable || IS_QLA83XX(ha))
+#define IS_MQUE_CAPABLE(ha)	((ha)->mqenable || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
 #define IS_BIDI_CAPABLE(ha)	((IS_QLA25XX(ha) || IS_QLA2031(ha)))
 /* Bit 21 of fw_attributes decides the MCTP capabilities */
 #define IS_MCTP_CAPABLE(ha)	(IS_QLA2031(ha) && \
@@ -3133,6 +3118,9 @@
 	uint16_t	fw_xcb_count;
 	uint16_t	fw_iocb_count;
 
+	uint32_t	fw_shared_ram_start;
+	uint32_t	fw_shared_ram_end;
+
 	uint16_t	fw_options[16];         /* slots: 1,2,3,10,11 */
 	uint8_t		fw_seriallink_options[4];
 	uint16_t	fw_seriallink_options24[4];
@@ -3141,6 +3129,9 @@
 	uint32_t	mpi_capabilities;
 	uint8_t		phy_version[3];
 
+	/* Firmware dump template */
+	void		*fw_dump_template;
+	uint32_t	fw_dump_template_len;
 	/* Firmware dump information. */
 	struct qla2xxx_fw_dump *fw_dump;
 	uint32_t	fw_dump_len;
@@ -3183,6 +3174,7 @@
 #define QLA_SWRITING	2
 	uint32_t	optrom_region_start;
 	uint32_t	optrom_region_size;
+	struct mutex	optrom_mutex;
 
 /* PCI expansion ROM image information. */
 #define ROM_CODE_TYPE_BIOS	0
@@ -3309,6 +3301,7 @@
 	struct mr_data_fx00 mr;
 
 	struct qlt_hw_data tgt;
+	int	allow_cna_fw_dump;
 };
 
 /*
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 792a292..32ab809 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,8 @@
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha))
 		goto out;
 	if (!ha->fce)
 		goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 610d3aa9..3a7353e 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1378,6 +1378,10 @@
 #define FLT_REG_NVRAM_0		0x15
 #define FLT_REG_VPD_1		0x16
 #define FLT_REG_NVRAM_1		0x17
+#define FLT_REG_VPD_2		0xD4
+#define FLT_REG_NVRAM_2		0xD5
+#define FLT_REG_VPD_3		0xD6
+#define FLT_REG_NVRAM_3		0xD7
 #define FLT_REG_FDT		0x1a
 #define FLT_REG_FLT		0x1c
 #define FLT_REG_HW_EVENT_0	0x1d
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1f42662..e665e81 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -330,6 +330,7 @@
     dma_addr_t);
 
 extern int qla24xx_abort_command(srb_t *);
+extern int qla24xx_async_abort_command(srb_t *);
 extern int
 qla24xx_abort_target(struct fc_port *, unsigned int, int);
 extern int
@@ -511,6 +512,16 @@
 extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla8044_fw_dump(scsi_qla_host_t *, int);
+
+extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
+extern int qla27xx_fwdt_template_valid(void *);
+extern ulong qla27xx_fwdt_template_size(void *);
+extern const void *qla27xx_fwdt_template_default(void);
+extern ulong qla27xx_fwdt_template_default_size(void);
+
 extern void qla2x00_dump_regs(scsi_qla_host_t *);
 extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
 extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
@@ -594,7 +605,6 @@
 extern irqreturn_t qlafx00_intr_handler(int, void *);
 extern void qlafx00_enable_intrs(struct qla_hw_data *);
 extern void qlafx00_disable_intrs(struct qla_hw_data *);
-extern int qlafx00_abort_command(srb_t *);
 extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
 extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
 extern int qlafx00_start_scsi(srb_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index cd47f1b..e377f9d2 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1532,6 +1532,10 @@
 	if (IS_CNA_CAPABLE(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_10GB);
+	else if (IS_QLA27XX(ha))
+		eiter->a.sup_speed = __constant_cpu_to_be32(
+		    FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB);
 	else if (IS_QLA25XX(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
@@ -1580,6 +1584,10 @@
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
 		break;
+	case PORT_SPEED_32GB:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
+		break;
 	default:
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
@@ -1889,6 +1897,9 @@
 			case BIT_10:
 				list[i].fp_speed = PORT_SPEED_16GB;
 				break;
+			case BIT_8:
+				list[i].fp_speed = PORT_SPEED_32GB;
+				break;
 			}
 
 			ql_dbg(ql_dbg_disc, vha, 0x205b,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e7e5f4f..38aeb54 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -271,56 +271,46 @@
 }
 
 static void
-qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
+qla2x00_tmf_iocb_timeout(void *data)
+{
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+	tmf->u.tmf.comp_status = CS_TIMEOUT;
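+	/* Flag the TMF as timed out and wake up the waiter. */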
+	complete(&tmf->u.tmf.comp);
+}
+
+static void
+qla2x00_tmf_sp_done(void *data, void *ptr, int res)
 {
 	srb_t *sp = (srb_t *)ptr;
-	struct srb_iocb *iocb = &sp->u.iocb_cmd;
-	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
-	uint32_t flags;
-	uint16_t lun;
-	int rval;
-
-	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
-		flags = iocb->u.tmf.flags;
-		lun = (uint16_t)iocb->u.tmf.lun;
-
-		/* Issue Marker IOCB */
-		rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
-			vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
-			flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
-
-		if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
-			ql_dbg(ql_dbg_taskm, vha, 0x8030,
-			    "TM IOCB failed (%x).\n", rval);
-		}
-	}
-	sp->free(sp->fcport->vha, sp);
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+	complete(&tmf->u.tmf.comp);
 }
 
 int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 	uint32_t tag)
 {
 	struct scsi_qla_host *vha = fcport->vha;
+	struct srb_iocb *tm_iocb;
 	srb_t *sp;
-	struct srb_iocb *tcf;
-	int rval;
+	int rval = QLA_FUNCTION_FAILED;
 
-	rval = QLA_FUNCTION_FAILED;
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
+	tm_iocb = &sp->u.iocb_cmd;
 	sp->type = SRB_TM_CMD;
 	sp->name = "tmf";
-	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
-	tcf = &sp->u.iocb_cmd;
-	tcf->u.tmf.flags = tm_flags;
-	tcf->u.tmf.lun = lun;
-	tcf->u.tmf.data = tag;
-	tcf->timeout = qla2x00_async_iocb_timeout;
-	sp->done = qla2x00_async_tm_cmd_done;
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+	tm_iocb->u.tmf.flags = flags;
+	tm_iocb->u.tmf.lun = lun;
+	tm_iocb->u.tmf.data = tag;
+	sp->done = qla2x00_tmf_sp_done;
+	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+	init_completion(&tm_iocb->u.tmf.comp);
 
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
@@ -330,14 +320,121 @@
 	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
 	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
-	return rval;
+
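+	/* Wait for qla2x00_tmf_sp_done() or the timeout handler to fire. */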
+	wait_for_completion(&tm_iocb->u.tmf.comp);
+
+	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
+		ql_dbg(ql_dbg_taskm, vha, 0x8030,
+		    "TM IOCB failed (%x).\n", rval);
+	}
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+		flags = tm_iocb->u.tmf.flags;
+		lun = (uint16_t)tm_iocb->u.tmf.lun;
+
+		/* Issue Marker IOCB */
+		qla2x00_marker(vha, vha->hw->req_q_map[0],
+		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+	}
 
 done_free_sp:
-	sp->free(fcport->vha, sp);
+	sp->free(vha, sp);
 done:
 	return rval;
 }
 
+static void
+qla24xx_abort_iocb_timeout(void *data)
+{
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+	abt->u.abt.comp_status = CS_TIMEOUT;
+	complete(&abt->u.abt.comp);
+}
+
+static void
+qla24xx_abort_sp_done(void *data, void *ptr, int res)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+	complete(&abt->u.abt.comp);
+}
+
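+/*
+ * qla24xx_async_abort_cmd() - Issue an Abort IOCB for cmd_sp and wait
+ * synchronously for its completion or timeout.
+ */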
+static int
+qla24xx_async_abort_cmd(srb_t *cmd_sp)
+{
+	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+	fc_port_t *fcport = cmd_sp->fcport;
+	struct srb_iocb *abt_iocb;
+	srb_t *sp;
+	int rval = QLA_FUNCTION_FAILED;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	abt_iocb = &sp->u.iocb_cmd;
+	sp->type = SRB_ABT_CMD;
+	sp->name = "abort";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+	sp->done = qla24xx_abort_sp_done;
+	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+	init_completion(&abt_iocb->u.abt.comp);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_async, vha, 0x507c,
+	    "Abort command issued - hdl=%x, target_id=%x\n",
+	    cmd_sp->handle, fcport->tgt_id);
+
+	wait_for_completion(&abt_iocb->u.abt.comp);
+
+	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+	sp->free(vha, sp);
+done:
+	return rval;
+}
+
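+/*
+ * qla24xx_async_abort_command() - Verify sp is still outstanding, then
+ * abort it via FXDISC_ABORT_IOCTL (FX00 driver-internal commands) or
+ * an Abort IOCB.
+ */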
+int
+qla24xx_async_abort_command(srb_t *sp)
+{
+	unsigned long   flags = 0;
+
+	uint32_t	handle;
+	fc_port_t	*fcport = sp->fcport;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = vha->req;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+		if (req->outstanding_cmds[handle] == sp)
+			break;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (handle == req->num_outstanding_cmds) {
+		/* Command not found. */
+		return QLA_FUNCTION_FAILED;
+	}
+	if (sp->type == SRB_FXIOCB_DCMD)
+		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+		    FXDISC_ABORT_IOCTL);
+
+	return qla24xx_async_abort_cmd(sp);
+}
+
 void
 qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
@@ -1379,7 +1476,12 @@
 	}
 
 	ha->fw_dumped = 0;
-	fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+	req_q_size = rsp_q_size = 0;
+
+	if (IS_QLA27XX(ha))
+		goto try_fce;
+
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		fixed_size = sizeof(struct qla2100_fw_dump);
 	} else if (IS_QLA23XX(ha)) {
@@ -1395,6 +1497,7 @@
 			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
 		else
 			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
 		    sizeof(uint32_t);
 		if (ha->mqenable) {
@@ -1412,9 +1515,16 @@
 		if (ha->tgt.atio_ring)
 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
 		/* Allocate memory for Fibre Channel Event Buffer. */
-		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+		    !IS_QLA27XX(ha))
 			goto try_eft;
 
+try_fce:
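+	/* Free any stale FCE buffer left over from a prior initialization. */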
+		if (ha->fce)
+			dma_free_coherent(&ha->pdev->dev,
+			    FCE_SIZE, ha->fce, ha->fce_dma);
+
+		/* Allocate memory for Fibre Channel Event Buffer. */
 		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
 		    GFP_KERNEL);
 		if (!tc) {
@@ -1442,7 +1552,12 @@
 		ha->flags.fce_enabled = 1;
 		ha->fce_dma = tc_dma;
 		ha->fce = tc;
+
 try_eft:
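+	/* Free any stale EFT buffer left over from a prior initialization. */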
+		if (ha->eft)
+			dma_free_coherent(&ha->pdev->dev,
+			    EFT_SIZE, ha->eft, ha->eft_dma);
+
 		/* Allocate memory for Extended Trace Buffer. */
 		tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
 		    GFP_KERNEL);
@@ -1469,15 +1584,28 @@
 		ha->eft_dma = tc_dma;
 		ha->eft = tc;
 	}
+
 cont_alloc:
+	if (IS_QLA27XX(ha)) {
+		if (!ha->fw_dump_template) {
+			ql_log(ql_log_warn, vha, 0x00ba,
+			    "Failed missing fwdump template\n");
+			return;
+		}
+		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
+		ql_dbg(ql_dbg_init, vha, 0x00fa,
+		    "-> allocating fwdump (%x bytes)...\n", dump_size);
+		goto allocate;
+	}
+
 	req_q_size = req->length * sizeof(request_t);
 	rsp_q_size = rsp->length * sizeof(response_t);
-
 	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
 	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
 	ha->chain_offset = dump_size;
 	dump_size += mq_size + fce_size;
 
+allocate:
 	ha->fw_dump = vmalloc(dump_size);
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0x00c4,
@@ -1499,10 +1627,13 @@
 		}
 		return;
 	}
+	ha->fw_dump_len = dump_size;
 	ql_dbg(ql_dbg_init, vha, 0x00c5,
 	    "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
 
-	ha->fw_dump_len = dump_size;
+	if (IS_QLA27XX(ha))
+		return;
+
 	ha->fw_dump->signature[0] = 'Q';
 	ha->fw_dump->signature[1] = 'L';
 	ha->fw_dump->signature[2] = 'G';
@@ -1718,9 +1849,6 @@
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
 
-	if (IS_QLA83XX(ha))
-		goto skip_fac_check;
-
 	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
 		uint32_t size;
 
@@ -1733,8 +1861,8 @@
 			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
 			    ha->fw_major_version, ha->fw_minor_version,
 			    ha->fw_subminor_version);
-skip_fac_check:
-			if (IS_QLA83XX(ha)) {
+
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 				ha->flags.fac_supported = 0;
 				rval = QLA_SUCCESS;
 			}
@@ -1933,7 +2061,7 @@
 	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
 	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
 
-	if (ha->mqenable || IS_QLA83XX(ha)) {
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
 		icb->rid = __constant_cpu_to_le16(rid);
 		if (ha->flags.msix_enabled) {
@@ -4792,13 +4920,14 @@
 	nv = ha->nvram;
 
 	/* Determine NVRAM starting address. */
-	if (ha->flags.port0) {
+	if (ha->port_no == 0) {
 		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
 		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
 	} else {
 		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
 		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
 	}
+
 	ha->nvram_size = sizeof(struct nvram_24xx);
 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
 
@@ -4842,7 +4971,7 @@
 		nv->exchange_count = __constant_cpu_to_le16(0);
 		nv->hard_address = __constant_cpu_to_le16(124);
 		nv->port_name[0] = 0x21;
-		nv->port_name[1] = 0x00 + ha->port_no;
+		nv->port_name[1] = 0x00 + ha->port_no + 1;
 		nv->port_name[2] = 0x00;
 		nv->port_name[3] = 0xe0;
 		nv->port_name[4] = 0x8b;
@@ -5117,6 +5246,99 @@
 		segments--;
 	}
 
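+	/*
+	 * ISP27xx: the firmware dump template follows the firmware image
+	 * in flash.  Read and validate it; fall back to the driver's
+	 * built-in default template on any failure.
+	 */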
+	if (!IS_QLA27XX(ha))
+		return rval;
+
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	ql_dbg(ql_dbg_init, vha, 0x0161,
+	    "Loading fwdump template from %x\n", faddr);
+	qla24xx_read_flash_data(vha, dcode, faddr, 7);
+	risc_size = be32_to_cpu(dcode[2]);
+	ql_dbg(ql_dbg_init, vha, 0x0162,
+	    "-> array size %x dwords\n", risc_size);
+	if (risc_size == 0 || risc_size == ~0)
+		goto default_template;
+
+	dlen = (risc_size - 8) * sizeof(*dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0163,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x0164,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto default_template;
+	}
+
+	faddr += 7;
+	risc_size -= 8;
+	dcode = ha->fw_dump_template;
+	qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = le32_to_cpu(dcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(dcode)) {
+		ql_log(ql_log_warn, vha, 0x0165,
+		    "Failed fwdump template validate\n");
+		goto default_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0166,
+	    "-> template size %x bytes\n", dlen);
+	if (dlen > risc_size * sizeof(*dcode)) {
+		ql_log(ql_log_warn, vha, 0x0167,
+		    "Failed fwdump template exceeds array by %x bytes\n",
+		    (uint32_t)(dlen - risc_size * sizeof(*dcode)));
+		goto default_template;
+	}
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+default_template:
+	ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	dlen = qla27xx_fwdt_template_default_size();
+	ql_dbg(ql_dbg_init, vha, 0x0169,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x016a,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto failed_template;
+	}
+
+	dcode = ha->fw_dump_template;
+	risc_size = dlen / sizeof(*dcode);
+	memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = be32_to_cpu(dcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+		ql_log(ql_log_warn, vha, 0x016b,
+		    "Failed fwdump template validate\n");
+		goto failed_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+	ql_dbg(ql_dbg_init, vha, 0x016c,
+	    "-> template size %x bytes\n", dlen);
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+failed_template:
+	ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
 	return rval;
 }
 
@@ -5231,7 +5453,8 @@
 	uint32_t risc_size;
 	uint32_t i;
 	struct fw_blob *blob;
-	uint32_t *fwcode, fwclen;
+	const uint32_t *fwcode;
+	uint32_t fwclen;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
 
@@ -5263,7 +5486,7 @@
 		ql_log(ql_log_fatal, vha, 0x0093,
 		    "Unable to verify integrity of firmware image (%Zd).\n",
 		    blob->fw->size);
-		goto fail_fw_integrity;
+		return QLA_FUNCTION_FAILED;
 	}
 	for (i = 0; i < 4; i++)
 		dcode[i] = be32_to_cpu(fwcode[i + 4]);
@@ -5277,7 +5500,7 @@
 		ql_log(ql_log_fatal, vha, 0x0095,
 		    "Firmware data: %08x %08x %08x %08x.\n",
 		    dcode[0], dcode[1], dcode[2], dcode[3]);
-		goto fail_fw_integrity;
+		return QLA_FUNCTION_FAILED;
 	}
 
 	while (segments && rval == QLA_SUCCESS) {
@@ -5291,8 +5514,7 @@
 			ql_log(ql_log_fatal, vha, 0x0096,
 			    "Unable to verify integrity of firmware image "
 			    "(%Zd).\n", blob->fw->size);
-
-			goto fail_fw_integrity;
+			return QLA_FUNCTION_FAILED;
 		}
 
 		fragment = 0;
@@ -5326,10 +5548,100 @@
 		/* Next segment. */
 		segments--;
 	}
+
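+	/*
+	 * ISP27xx: the firmware dump template follows the firmware image
+	 * in the request_firmware() blob; validate it or fall back to
+	 * the built-in default, as in the flash path above.
+	 */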
+	if (!IS_QLA27XX(ha))
+		return rval;
+
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	ql_dbg(ql_dbg_init, vha, 0x171,
+	    "Loading fwdump template from %x\n",
+	    (uint32_t)((void *)fwcode - (void *)blob->fw->data));
+	risc_size = be32_to_cpu(fwcode[2]);
+	ql_dbg(ql_dbg_init, vha, 0x172,
+	    "-> array size %x dwords\n", risc_size);
+	if (risc_size == 0 || risc_size == ~0)
+		goto default_template;
+
+	dlen = (risc_size - 8) * sizeof(*fwcode);
+	ql_dbg(ql_dbg_init, vha, 0x0173,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x0174,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto default_template;
+	}
+
+	fwcode += 7;
+	risc_size -= 8;
+	dcode = ha->fw_dump_template;
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = le32_to_cpu(fwcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(dcode)) {
+		ql_log(ql_log_warn, vha, 0x0175,
+		    "Failed fwdump template validate\n");
+		goto default_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0176,
+	    "-> template size %x bytes\n", dlen);
+	if (dlen > risc_size * sizeof(*fwcode)) {
+		ql_log(ql_log_warn, vha, 0x0177,
+		    "Failed fwdump template exceeds array by %x bytes\n",
+		    (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
+		goto default_template;
+	}
+	ha->fw_dump_template_len = dlen;
 	return rval;
 
-fail_fw_integrity:
-	return QLA_FUNCTION_FAILED;
+default_template:
+	ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	dlen = qla27xx_fwdt_template_default_size();
+	ql_dbg(ql_dbg_init, vha, 0x0179,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x017a,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto failed_template;
+	}
+
+	dcode = ha->fw_dump_template;
+	risc_size = dlen / sizeof(*fwcode);
+	fwcode = qla27xx_fwdt_template_default();
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = be32_to_cpu(fwcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+		ql_log(ql_log_warn, vha, 0x017b,
+		    "Failed fwdump template validate\n");
+		goto failed_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+	ql_dbg(ql_dbg_init, vha, 0x017c,
+	    "-> template size %x bytes\n", dlen);
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+failed_template:
+	ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+	return rval;
 }
 
 int
@@ -5605,7 +5917,7 @@
 		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
 		nv->exchange_count = __constant_cpu_to_le16(0);
 		nv->port_name[0] = 0x21;
-		nv->port_name[1] = 0x00 + ha->port_no;
+		nv->port_name[1] = 0x00 + ha->port_no + 1;
 		nv->port_name[2] = 0x00;
 		nv->port_name[3] = 0xe0;
 		nv->port_name[4] = 0x8b;
@@ -5639,7 +5951,7 @@
 		nv->enode_mac[2] = 0xDD;
 		nv->enode_mac[3] = 0x04;
 		nv->enode_mac[4] = 0x05;
-		nv->enode_mac[5] = 0x06 + ha->port_no;
+		nv->enode_mac[5] = 0x06 + ha->port_no + 1;
 
 		rval = 1;
 	}
@@ -5677,7 +5989,7 @@
 		icb->enode_mac[2] = 0xDD;
 		icb->enode_mac[3] = 0x04;
 		icb->enode_mac[4] = 0x05;
-		icb->enode_mac[5] = 0x06 + ha->port_no;
+		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
 	}
 
 	/* Use extended-initialization control block. */
@@ -5780,7 +6092,7 @@
 		ha->login_retry_count = ql2xloginretrycount;
 
 	/* if not running MSI-X we need handshaking on interrupts */
-	if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
+	if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
 		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
 
 	/* Enable ZIO. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 46b9307..e607568 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -488,7 +488,7 @@
 			req->ring_ptr++;
 
 		/* Set chip new ring index. */
-		if (ha->mqenable || IS_QLA83XX(ha)) {
+		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
 			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
 		} else if (IS_QLAFX00(ha)) {
@@ -524,7 +524,6 @@
 {
 	mrk_entry_t *mrk;
 	struct mrk_entry_24xx *mrk24 = NULL;
-	struct mrk_entry_fx00 *mrkfx = NULL;
 
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
@@ -541,15 +540,7 @@
 	mrk->entry_type = MARKER_TYPE;
 	mrk->modifier = type;
 	if (type != MK_SYNC_ALL) {
-		if (IS_QLAFX00(ha)) {
-			mrkfx = (struct mrk_entry_fx00 *) mrk;
-			mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
-			mrkfx->handle_hi = 0;
-			mrkfx->tgt_id = cpu_to_le16(loop_id);
-			mrkfx->lun[1] = LSB(lun);
-			mrkfx->lun[2] = MSB(lun);
-			host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
-		} else if (IS_FWI2_CAPABLE(ha)) {
+		if (IS_FWI2_CAPABLE(ha)) {
 			mrk24 = (struct mrk_entry_24xx *) mrk;
 			mrk24->nport_handle = cpu_to_le16(loop_id);
 			mrk24->lun[1] = LSB(lun);
@@ -1823,7 +1814,7 @@
 
 	/* Check for room in outstanding command list. */
 	handle = req->current_outstanding_cmd;
-	for (index = 1; req->num_outstanding_cmds; index++) {
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
 		handle++;
 		if (handle == req->num_outstanding_cmds)
 			handle = 1;
@@ -1848,7 +1839,7 @@
 skip_cmd_array:
 	/* Check for room on request queue. */
 	if (req->cnt < req_cnt) {
-		if (ha->mqenable || IS_QLA83XX(ha))
+		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
 		else if (IS_P3P_TYPE(ha))
 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2594,6 +2585,29 @@
 	return QLA_FUNCTION_FAILED;
 }
 
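+/*
+ * qla24xx_abort_iocb() - Construct an Abort IOCB for sp; the handle to
+ * abort was stashed in sp->u.iocb_cmd by qla24xx_async_abort_cmd().
+ */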
+void
+qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
+{
+	struct srb_iocb *aio = &sp->u.iocb_cmd;
+	scsi_qla_host_t *vha = sp->fcport->vha;
+	struct req_que *req = vha->req;
+
+	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
+	abt_iocb->entry_type = ABORT_IOCB_TYPE;
+	abt_iocb->entry_count = 1;
+	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	abt_iocb->handle_to_abort =
+	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	abt_iocb->vp_index = vha->vp_idx;
+	abt_iocb->req_que_no = cpu_to_le16(req->id);
+	/* Send the command to the firmware */
+	wmb();
+}
+
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -2647,7 +2661,9 @@
 		qlafx00_fxdisc_iocb(sp, pkt);
 		break;
 	case SRB_ABT_CMD:
-		qlafx00_abort_iocb(sp, pkt);
+		if (IS_QLAFX00(ha))
+			qlafx00_abort_iocb(sp, pkt);
+		else
+			qla24xx_abort_iocb(sp, pkt);
 		break;
 	default:
 		break;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0a1dcb4..95314ef 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -356,15 +356,16 @@
 const char *
 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
 {
-	static const char * const link_speeds[] = {
-		"1", "2", "?", "4", "8", "16", "10"
+	static const char *const link_speeds[] = {
+		"1", "2", "?", "4", "8", "16", "32", "10"
 	};
+#define	QLA_LAST_SPEED	7
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return link_speeds[0];
 	else if (speed == 0x13)
-		return link_speeds[6];
-	else if (speed < 6)
+		return link_speeds[QLA_LAST_SPEED];
+	else if (speed < QLA_LAST_SPEED)
 		return link_speeds[speed];
 	else
 		return link_speeds[LS_UNKNOWN];
@@ -649,7 +650,7 @@
 		break;
 
 	case MBA_SYSTEM_ERR:		/* System Error */
-		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
+		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
 			RD_REG_WORD(&reg24->mailbox7) : 0;
 		ql_log(ql_log_warn, vha, 0x5003,
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
@@ -666,7 +667,7 @@
 				vha->device_flags |= DFLG_DEV_FAILED;
 			} else {
 				/* Check to see if MPI timeout occurred */
-				if ((mbx & MBX_3) && (ha->flags.port0))
+				if ((mbx & MBX_3) && (ha->port_no == 0))
 					set_bit(MPI_RESET_NEEDED,
 					    &vha->dpc_flags);
 
@@ -1497,8 +1498,7 @@
 }
 
 static void
-qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
-    struct tsk_mgmt_entry *tsk)
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 {
 	const char func[] = "TMF-IOCB";
 	const char *type;
@@ -1506,7 +1506,6 @@
 	srb_t *sp;
 	struct srb_iocb *iocb;
 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
-	int error = 1;
 
 	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
 	if (!sp)
@@ -1515,37 +1514,35 @@
 	iocb = &sp->u.iocb_cmd;
 	type = sp->name;
 	fcport = sp->fcport;
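+	/* Assume success; the error paths below override this. */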
+	iocb->u.tmf.data = QLA_SUCCESS;
 
 	if (sts->entry_status) {
 		ql_log(ql_log_warn, fcport->vha, 0x5038,
 		    "Async-%s error - hdl=%x entry-status(%x).\n",
 		    type, sp->handle, sts->entry_status);
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
 	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
 		ql_log(ql_log_warn, fcport->vha, 0x5039,
 		    "Async-%s error - hdl=%x completion status(%x).\n",
 		    type, sp->handle, sts->comp_status);
-	} else if (!(le16_to_cpu(sts->scsi_status) &
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+	} else if ((le16_to_cpu(sts->scsi_status) &
 	    SS_RESPONSE_INFO_LEN_VALID)) {
-		ql_log(ql_log_warn, fcport->vha, 0x503a,
-		    "Async-%s error - hdl=%x no response info(%x).\n",
-		    type, sp->handle, sts->scsi_status);
-	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
-		ql_log(ql_log_warn, fcport->vha, 0x503b,
-		    "Async-%s error - hdl=%x not enough response(%d).\n",
-		    type, sp->handle, sts->rsp_data_len);
-	} else if (sts->data[3]) {
-		ql_log(ql_log_warn, fcport->vha, 0x503c,
-		    "Async-%s error - hdl=%x response(%x).\n",
-		    type, sp->handle, sts->data[3]);
-	} else {
-		error = 0;
+		if (le32_to_cpu(sts->rsp_data_len) < 4) {
+			ql_log(ql_log_warn, fcport->vha, 0x503b,
+			    "Async-%s error - hdl=%x not enough response(%d).\n",
+			    type, sp->handle, sts->rsp_data_len);
+		} else if (sts->data[3]) {
+			ql_log(ql_log_warn, fcport->vha, 0x503c,
+			    "Async-%s error - hdl=%x response(%x).\n",
+			    type, sp->handle, sts->data[3]);
+			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+		}
 	}
 
-	if (error) {
-		iocb->u.tmf.data = error;
+	if (iocb->u.tmf.data != QLA_SUCCESS)
 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
 		    (uint8_t *)sts, sizeof(*sts));
-	}
 
 	sp->done(vha, sp, 0);
 }
@@ -2025,6 +2022,12 @@
 		return;
 	}
 
+	/* Task Management completion. */
+	if (sp->type == SRB_TM_CMD) {
+		qla24xx_tm_iocb_entry(vha, req, pkt);
+		return;
+	}
+
 	/* Fast path completion. */
 	if (comp_status == CS_COMPLETE && scsi_status == 0) {
 		qla2x00_process_completed_request(vha, req, handle);
@@ -2425,6 +2428,23 @@
 	}
 }
 
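+/*
+ * qla24xx_abort_iocb_entry() - Handle an Abort IOCB completion; the
+ * firmware returns the completion status in the nport_handle field.
+ */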
+static void
+qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+	struct abort_entry_24xx *pkt)
+{
+	const char func[] = "ABT_IOCB";
+	srb_t *sp;
+	struct srb_iocb *abt;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	abt = &sp->u.iocb_cmd;
+	abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+	sp->done(vha, sp, 0);
+}
+
 /**
  * qla24xx_process_response_queue() - Process response queue entries.
  * @ha: SCSI driver HA context
@@ -2474,10 +2494,6 @@
 			qla24xx_logio_entry(vha, rsp->req,
 			    (struct logio_entry_24xx *)pkt);
 			break;
-		case TSK_MGMT_IOCB_TYPE:
-			qla24xx_tm_iocb_entry(vha, rsp->req,
-			    (struct tsk_mgmt_entry *)pkt);
-			break;
                 case CT_IOCB_TYPE:
 			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
 			break;
@@ -2497,6 +2513,10 @@
 			 * from falling into default case
 			 */
 			break;
+		case ABORT_IOCB_TYPE:
+			qla24xx_abort_iocb_entry(vha, rsp->req,
+			    (struct abort_entry_24xx *)pkt);
+			break;
 		default:
 			/* Type Not Supported. */
 			ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2525,7 +2545,8 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha))
 		return;
 
 	rval = QLA_SUCCESS;
@@ -2979,7 +3000,7 @@
 	}
 
 	/* Enable MSI-X vector for response queue update for queue 0 */
-	if (IS_QLA83XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		if (ha->msixbase && ha->mqiobase &&
 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
 			ha->mqenable = 1;
@@ -3003,12 +3024,13 @@
 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
 	int ret = QLA_FUNCTION_FAILED;
-	device_reg_t __iomem *reg = ha->iobase;
+	device_reg_t *reg = ha->iobase;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
 	/* If possible, enable MSI-X. */
 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-		!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
+	    !IS_QLA27XX(ha))
 		goto skip_msi;
 
 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -3043,7 +3065,8 @@
 	    "Falling back-to MSI mode -%d.\n", ret);
 
 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
+	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
+	    !IS_QLA27XX(ha))
 		goto skip_msi;
 
 	ret = pci_enable_msi(ha->pdev);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b94511a..2528709 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -35,7 +35,7 @@
 {
 	int		rval;
 	unsigned long    flags = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 	uint8_t		abort_active;
 	uint8_t		io_lock_on;
 	uint16_t	command = 0;
@@ -468,7 +468,8 @@
 		mcp->mb[1] = MSW(risc_addr);
 		mcp->mb[2] = LSW(risc_addr);
 		mcp->mb[3] = 0;
-		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
+		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+		    IS_QLA27XX(ha)) {
 			struct nvram_81xx *nv = ha->nvram;
 			mcp->mb[4] = (nv->enhanced_features &
 			    EXTENDED_BB_CREDITS);
@@ -539,6 +540,8 @@
 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
 	if (IS_FWI2_CAPABLE(ha))
 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
+	if (IS_QLA27XX(ha))
+		mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
 	mcp->flags = 0;
 	mcp->tov = MBX_TOV_SECONDS;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -574,6 +577,10 @@
 		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
 		    __func__, mcp->mb[17], mcp->mb[16]);
 	}
+	if (IS_QLA27XX(ha)) {
+		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
+		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
+	}
 
 failed:
 	if (rval != QLA_SUCCESS) {
@@ -1225,7 +1232,7 @@
 	}
 	/* 1 and 2 should normally be captured. */
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
-	if (IS_QLA83XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		/* mb3 is additional info about the installed SFP. */
 		mcp->in_mb  |= MBX_3;
 	mcp->buf_size = size;
@@ -2349,7 +2356,7 @@
 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
+	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
 		mcp->in_mb |= MBX_12;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
@@ -2590,6 +2597,9 @@
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
 	    "Entered %s.\n", __func__);
 
+	if (ql2xasynctmfenable)
+		return qla24xx_async_abort_command(sp);
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
 		if (req->outstanding_cmds[handle] == sp)
@@ -3032,7 +3042,7 @@
 	    "Entered %s.\n", __func__);
 
 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
-	    !IS_QLA83XX(vha->hw))
+	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -3662,7 +3672,7 @@
 	mcp->mb[12] = req->qos;
 	mcp->mb[11] = req->vp_idx;
 	mcp->mb[13] = req->rid;
-	if (IS_QLA83XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		mcp->mb[15] = 0;
 
 	mcp->mb[4] = req->id;
@@ -3676,9 +3686,9 @@
 	mcp->flags = MBX_DMA_OUT;
 	mcp->tov = MBX_TOV_SECONDS * 2;
 
-	if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
+	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		mcp->in_mb |= MBX_1;
-	if (IS_QLA83XX(ha)) {
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		mcp->out_mb |= MBX_15;
 		/* debug q create issue in SR-IOV */
 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -3687,7 +3697,7 @@
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (!(req->options & BIT_0)) {
 		WRT_REG_DWORD(req->req_q_in, 0);
-		if (!IS_QLA83XX(ha))
+		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
 			WRT_REG_DWORD(req->req_q_out, 0);
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3725,7 +3735,7 @@
 	mcp->mb[5] = rsp->length;
 	mcp->mb[14] = rsp->msix->entry;
 	mcp->mb[13] = rsp->rid;
-	if (IS_QLA83XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		mcp->mb[15] = 0;
 
 	mcp->mb[4] = rsp->id;
@@ -3742,7 +3752,7 @@
 	if (IS_QLA81XX(ha)) {
 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
 		mcp->in_mb |= MBX_1;
-	} else if (IS_QLA83XX(ha)) {
+	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
 		mcp->in_mb |= MBX_1;
 		/* debug q create issue in SR-IOV */
@@ -3809,7 +3819,8 @@
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
 	    "Entered %s.\n", __func__);
 
-	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -3840,7 +3851,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
@@ -3874,7 +3886,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
@@ -4545,7 +4558,7 @@
 	mcp->mb[1] = 0;
 	mcp->out_mb = MBX_1|MBX_0;
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
-	if (IS_QLA83XX(ha))
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		mcp->in_mb |= MBX_3;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
@@ -4574,7 +4587,8 @@
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
 	    "Entered %s.\n", __func__);
 
-	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
+	    !IS_QLA27XX(ha))
 		return QLA_FUNCTION_FAILED;
 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
 	mcp->out_mb = MBX_0;
@@ -5070,7 +5084,7 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA83XX(ha))
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
@@ -5145,7 +5159,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long retry_max_time = jiffies + (2 * HZ);
 
-	if (!IS_QLA83XX(ha))
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a72df70..f0a8522 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -630,7 +630,7 @@
 	struct req_que *req = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	uint16_t que_id = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 	uint32_t cnt;
 
 	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
@@ -754,7 +754,7 @@
 	struct rsp_que *rsp = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	uint16_t que_id = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 
 	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
 	if (rsp == NULL) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index ba6f8b1..0aaf6a9 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -40,7 +40,7 @@
 {
 	int		rval;
 	unsigned long    flags = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 	uint8_t		abort_active;
 	uint8_t		io_lock_on;
 	uint16_t	command = 0;
@@ -631,20 +631,6 @@
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
-	struct init_cb_fx *icb;
-	struct req_que *req = ha->req_q_map[0];
-	struct rsp_que *rsp = ha->rsp_q_map[0];
-
-	/* Setup ring parameters in initialization control block. */
-	icb = (struct init_cb_fx *)ha->init_cb;
-	icb->request_q_outpointer = __constant_cpu_to_le16(0);
-	icb->response_q_inpointer = __constant_cpu_to_le16(0);
-	icb->request_q_length = cpu_to_le16(req->length);
-	icb->response_q_length = cpu_to_le16(rsp->length);
-	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
-	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
-	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
-	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
 	WRT_REG_DWORD(&reg->req_q_in, 0);
 	WRT_REG_DWORD(&reg->req_q_out, 0);
@@ -699,78 +685,16 @@
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
-static void
-qlafx00_tmf_iocb_timeout(void *data)
-{
-	srb_t *sp = (srb_t *)data;
-	struct srb_iocb *tmf = &sp->u.iocb_cmd;
-
-	tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
-	complete(&tmf->u.tmf.comp);
-}
-
-static void
-qlafx00_tmf_sp_done(void *data, void *ptr, int res)
-{
-	srb_t *sp = (srb_t *)ptr;
-	struct srb_iocb *tmf = &sp->u.iocb_cmd;
-
-	complete(&tmf->u.tmf.comp);
-}
-
-static int
-qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
-		     uint32_t lun, uint32_t tag)
-{
-	scsi_qla_host_t *vha = fcport->vha;
-	struct srb_iocb *tm_iocb;
-	srb_t *sp;
-	int rval = QLA_FUNCTION_FAILED;
-
-	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
-	if (!sp)
-		goto done;
-
-	tm_iocb = &sp->u.iocb_cmd;
-	sp->type = SRB_TM_CMD;
-	sp->name = "tmf";
-	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
-	tm_iocb->u.tmf.flags = flags;
-	tm_iocb->u.tmf.lun = lun;
-	tm_iocb->u.tmf.data = tag;
-	sp->done = qlafx00_tmf_sp_done;
-	tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
-	init_completion(&tm_iocb->u.tmf.comp);
-
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
-	ql_dbg(ql_dbg_async, vha, 0x507b,
-	    "Task management command issued target_id=%x\n",
-	    fcport->tgt_id);
-
-	wait_for_completion(&tm_iocb->u.tmf.comp);
-
-	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
-
-done_free_sp:
-	sp->free(vha, sp);
-done:
-	return rval;
-}
-
 int
 qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
 {
-	return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+	return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
 }
 
 int
 qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
 {
-	return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+	return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
 }
 
 int
@@ -997,6 +921,9 @@
 			break;
 
 		default:
+			if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
+				break;
+
 			/* If fw is apparently not ready. In order to continue,
 			 * we might need to issue Mbox cmd, but the problem is
 			 * that the DoorBell vector values that come with the
@@ -2014,7 +1941,8 @@
 		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
 	} else if (fx_type == FXDISC_ABORT_IOCTL)
 		fdisc->u.fxiocb.result =
-		    (fdisc->u.fxiocb.result == cpu_to_le32(0x68)) ?
+		    (fdisc->u.fxiocb.result ==
+			cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
 		    cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
 
 	rval = le32_to_cpu(fdisc->u.fxiocb.result);
@@ -2034,94 +1962,6 @@
 	return rval;
 }
 
-static void
-qlafx00_abort_iocb_timeout(void *data)
-{
-	srb_t *sp = (srb_t *)data;
-	struct srb_iocb *abt = &sp->u.iocb_cmd;
-
-	abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
-	complete(&abt->u.abt.comp);
-}
-
-static void
-qlafx00_abort_sp_done(void *data, void *ptr, int res)
-{
-	srb_t *sp = (srb_t *)ptr;
-	struct srb_iocb *abt = &sp->u.iocb_cmd;
-
-	complete(&abt->u.abt.comp);
-}
-
-static int
-qlafx00_async_abt_cmd(srb_t *cmd_sp)
-{
-	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
-	fc_port_t *fcport = cmd_sp->fcport;
-	struct srb_iocb *abt_iocb;
-	srb_t *sp;
-	int rval = QLA_FUNCTION_FAILED;
-
-	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
-	if (!sp)
-		goto done;
-
-	abt_iocb = &sp->u.iocb_cmd;
-	sp->type = SRB_ABT_CMD;
-	sp->name = "abort";
-	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
-	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
-	sp->done = qlafx00_abort_sp_done;
-	abt_iocb->timeout = qlafx00_abort_iocb_timeout;
-	init_completion(&abt_iocb->u.abt.comp);
-
-	rval = qla2x00_start_sp(sp);
-	if (rval != QLA_SUCCESS)
-		goto done_free_sp;
-
-	ql_dbg(ql_dbg_async, vha, 0x507c,
-	    "Abort command issued - hdl=%x, target_id=%x\n",
-	    cmd_sp->handle, fcport->tgt_id);
-
-	wait_for_completion(&abt_iocb->u.abt.comp);
-
-	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
-	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
-
-done_free_sp:
-	sp->free(vha, sp);
-done:
-	return rval;
-}
-
-int
-qlafx00_abort_command(srb_t *sp)
-{
-	unsigned long   flags = 0;
-
-	uint32_t	handle;
-	fc_port_t	*fcport = sp->fcport;
-	struct scsi_qla_host *vha = fcport->vha;
-	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = vha->req;
-
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
-		if (req->outstanding_cmds[handle] == sp)
-			break;
-	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
-		/* Command not found. */
-		return QLA_FUNCTION_FAILED;
-	}
-	if (sp->type == SRB_FXIOCB_DCMD)
-		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
-		    FXDISC_ABORT_IOCTL);
-
-	return qlafx00_async_abt_cmd(sp);
-}
-
 /*
  * qlafx00_initialize_adapter
  *      Initialize board.
@@ -2150,7 +1990,6 @@
 	vha->device_flags = DFLG_NO_CABLE;
 	vha->dpc_flags = 0;
 	vha->flags.management_server_logged_in = 0;
-	vha->marker_needed = 0;
 	ha->isp_abort_cnt = 0;
 	ha->beacon_blink_led = 0;
 
@@ -2354,8 +2193,7 @@
 		fstatus.ioctl_flags = pkt->fw_iotcl_flags;
 		fstatus.ioctl_data = pkt->dataword_r;
 		fstatus.adapid = pkt->adapid;
-		fstatus.adapid_hi = pkt->adapid_hi;
-		fstatus.reserved_2 = pkt->reserved_1;
+		fstatus.reserved_2 = pkt->dataword_r_extra;
 		fstatus.res_count = pkt->residuallen;
 		fstatus.status = pkt->status;
 		fstatus.seq_number = pkt->seq_no;
@@ -2804,7 +2642,7 @@
 	srb_t *sp;
 	struct qla_hw_data *ha = vha->hw;
 	const char func[] = "ERROR-IOCB";
-	uint16_t que = MSW(pkt->handle);
+	uint16_t que = 0;
 	struct req_que *req = NULL;
 	int res = DID_ERROR << 16;
 
@@ -2833,16 +2671,22 @@
 {
 	struct sts_entry_fx00 *pkt;
 	response_t *lptr;
+	uint16_t lreq_q_in = 0;
+	uint16_t lreq_q_out = 0;
 
-	while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
-	    RESPONSE_PROCESSED) {
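+	/*
+	 * Walk the ring up to the hardware in-pointer instead of polling
+	 * each entry's signature word.
+	 */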
+	lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+	lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out);
+
+	while (lreq_q_in != lreq_q_out) {
 		lptr = rsp->ring_ptr;
 		memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
 		    sizeof(rsp->rsp_pkt));
 		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
 
 		rsp->ring_index++;
+		lreq_q_out++;
 		if (rsp->ring_index == rsp->length) {
+			lreq_q_out = 0;
 			rsp->ring_index = 0;
 			rsp->ring_ptr = rsp->ring;
 		} else {
@@ -2854,7 +2698,6 @@
 			qlafx00_error_entry(vha, rsp,
 			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
 			    pkt->entry_type);
-			goto next_iter;
 			continue;
 		}
 
@@ -2888,10 +2731,6 @@
 			    pkt->entry_type, pkt->entry_status);
 			break;
 		}
-next_iter:
-		WRT_REG_DWORD((void __iomem *)&lptr->signature,
-		    RESPONSE_PROCESSED);
-		wmb();
 	}
 
 	/* Adjust ring index */
@@ -2926,9 +2765,9 @@
 		break;
 
 	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
-		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
-		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
-		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+		ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
+		ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
+		ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
 		ql_dbg(ql_dbg_async, vha, 0x5077,
 		    "Asynchronous port Update received "
 		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2985,7 +2824,7 @@
 qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
 {
 	uint16_t	cnt;
-	uint16_t __iomem *wptr;
+	uint32_t __iomem *wptr;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
 
@@ -2995,10 +2834,10 @@
 	/* Load return mailbox registers. */
 	ha->flags.mbox_int = 1;
 	ha->mailbox_out32[0] = mb0;
-	wptr = (uint16_t __iomem *)&reg->mailbox17;
+	wptr = (uint32_t __iomem *)&reg->mailbox17;
 
 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
-		ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
+		ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
 		wptr++;
 	}
 }
@@ -3025,6 +2864,7 @@
 	struct rsp_que *rsp;
 	unsigned long	flags;
 	uint32_t clr_intr = 0;
+	uint32_t intr_stat = 0;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -3046,34 +2886,26 @@
 		stat = QLAFX00_RD_INTR_REG(ha);
 		if (qla2x00_check_reg_for_disconnect(vha, stat))
 			break;
-		if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
+		intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
+		if (!intr_stat)
 			break;
 
-		switch (stat & QLAFX00_HST_INT_STS_BITS) {
-		case QLAFX00_INTR_MB_CMPLT:
-		case QLAFX00_INTR_MB_RSP_CMPLT:
-		case QLAFX00_INTR_MB_ASYNC_CMPLT:
-		case QLAFX00_INTR_ALL_CMPLT:
+		if (stat & QLAFX00_INTR_MB_CMPLT) {
 			mb[0] = RD_REG_WORD(&reg->mailbox16);
 			qlafx00_mbx_completion(vha, mb[0]);
 			status |= MBX_INTERRUPT;
 			clr_intr |= QLAFX00_INTR_MB_CMPLT;
-			break;
-		case QLAFX00_INTR_ASYNC_CMPLT:
-		case QLAFX00_INTR_RSP_ASYNC_CMPLT:
+		}
+		if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
 			ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
 			qlafx00_async_event(vha);
 			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
-			break;
-		case QLAFX00_INTR_RSP_CMPLT:
+		}
+		if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
 			qlafx00_process_response_queue(vha, rsp);
 			clr_intr |= QLAFX00_INTR_RSP_CMPLT;
-			break;
-		default:
-			ql_dbg(ql_dbg_async, vha, 0x507a,
-			    "Unrecognized interrupt type (%d).\n", stat);
-			break;
 		}
+
 		QLAFX00_CLR_INTR_REG(ha, clr_intr);
 		QLAFX00_RD_INTR_REG(ha);
 	}
@@ -3223,17 +3055,6 @@
 	/* So we know we haven't pci_map'ed anything yet */
 	tot_dsds = 0;
 
-	/* Forcing marker needed for now */
-	vha->marker_needed = 0;
-
-	/* Send marker if required */
-	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
-		    QLA_SUCCESS)
-			return QLA_FUNCTION_FAILED;
-		vha->marker_needed = 0;
-	}
-
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
@@ -3284,7 +3105,9 @@
 	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
 
 	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
-	lcmd_pkt.handle_hi = 0;
+	lcmd_pkt.reserved_0 = 0;
+	lcmd_pkt.port_path_ctrl = 0;
+	lcmd_pkt.reserved_1 = 0;
 	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
 	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
 
@@ -3364,8 +3187,7 @@
 	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
 	tm_iocb.entry_count = 1;
 	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
-	tm_iocb.handle_hi = 0;
-	tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+	tm_iocb.reserved_0 = 0;
 	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
 	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
 	if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
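
The qla_mr.c interrupt rework above replaces a switch on the combined
status value with independent per-bit tests, so that several completion
sources raised in a single interrupt are all serviced before the
register is cleared.  The pattern in miniature -- a sketch with
illustrative names, not the driver's own:

    #include <stdint.h>

    #define INTR_MB_CMPLT     0x1
    #define INTR_RSP_CMPLT    0x2
    #define INTR_ASYNC_CMPLT  0x4

    /* Service every source whose bit is set; return the mask of
     * serviced bits so the caller clears exactly those and no more. */
    static uint32_t service_interrupts(uint32_t stat)
    {
            uint32_t handled = 0;

            if (stat & INTR_MB_CMPLT)
                    handled |= INTR_MB_CMPLT;    /* mailbox completion */
            if (stat & INTR_ASYNC_CMPLT)
                    handled |= INTR_ASYNC_CMPLT; /* async event */
            if (stat & INTR_RSP_CMPLT)
                    handled |= INTR_RSP_CMPLT;   /* response queue */
            return handled;
    }

Under the old switch, every bit combination needed its own case (hence
defines like QLAFX00_INTR_ALL_CMPLT for 0x7); with independent tests
those combined values are dead, which is why the qla_mr.h hunk below
drops them.
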
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 6cd7072..e529dfa 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -22,13 +22,16 @@
 	uint8_t entry_status;		/* Entry Status. */
 
 	uint32_t handle;		/* System handle. */
-	uint32_t handle_hi;
+	uint8_t reserved_0;
+	uint8_t port_path_ctrl;
+	uint16_t reserved_1;
 
 	__le16 tgt_idx;		/* Target Idx. */
 	uint16_t timeout;		/* Command timeout. */
 
 	__le16 dseg_count;		/* Data segment count. */
-	uint16_t scsi_rsp_dsd_len;
+	uint8_t	scsi_rsp_dsd_len;
+	uint8_t reserved_2;
 
 	struct scsi_lun lun;		/* LUN (LE). */
 
@@ -47,30 +50,6 @@
 	uint32_t dseg_0_len;		/* Data segment 0 length. */
 };
 
-/*
- * ISP queue - marker entry structure definition.
- */
-struct mrk_entry_fx00 {
-	uint8_t entry_type;		/* Entry type. */
-	uint8_t entry_count;		/* Entry count. */
-	uint8_t handle_count;		/* Handle count. */
-	uint8_t entry_status;		/* Entry Status. */
-
-	uint32_t handle;		/* System handle. */
-	uint32_t handle_hi;		/* System handle. */
-
-	uint16_t tgt_id;		/* Target ID. */
-
-	uint8_t modifier;		/* Modifier (7-0). */
-	uint8_t reserved_1;
-
-	uint8_t reserved_2[5];
-
-	uint8_t lun[8];			/* FCP LUN (BE). */
-	uint8_t reserved_3[36];
-};
-
-
 #define	STATUS_TYPE_FX00	0x01		/* Status entry. */
 struct sts_entry_fx00 {
 	uint8_t entry_type;		/* Entry type. */
@@ -79,7 +58,7 @@
 	uint8_t entry_status;		/* Entry Status. */
 
 	uint32_t handle;		/* System handle. */
-	uint32_t handle_hi;		/* System handle. */
+	uint32_t reserved_3;		/* System handle. */
 
 	__le16 comp_status;		/* Completion status. */
 	uint16_t reserved_0;		/* OX_ID used by the firmware. */
@@ -102,7 +81,7 @@
 
 struct multi_sts_entry_fx00 {
 	uint8_t entry_type;		/* Entry type. */
-	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_count;		/* Entry count. */
 	uint8_t handle_count;
 	uint8_t entry_status;
 
@@ -118,15 +97,13 @@
 
 	__le32 handle;		/* System handle. */
 
-	uint32_t handle_hi;		/* System handle. */
+	uint32_t reserved_0;
 
 	__le16 tgt_id;		/* Target Idx. */
 
 	uint16_t reserved_1;
-
-	uint16_t delay;			/* Activity delay in seconds. */
-
-	__le16 timeout;		/* Command timeout. */
+	uint16_t reserved_3;
+	uint16_t reserved_4;
 
 	struct scsi_lun lun;		/* LUN (LE). */
 
@@ -144,13 +121,13 @@
 	uint8_t entry_status;		/* Entry Status. */
 
 	__le32 handle;		/* System handle. */
-	__le32 handle_hi;		/* System handle. */
+	__le32 reserved_0;
 
 	__le16 tgt_id_sts;		/* Completion status. */
 	__le16 options;
 
 	__le32 abort_handle;		/* System handle. */
-	__le32 abort_handle_hi;	/* System handle. */
+	__le32 reserved_2;
 
 	__le16 req_que_no;
 	uint8_t reserved_1[38];
@@ -171,8 +148,7 @@
 
 	__le32 dataword_r;		/* Data word returned */
 	uint32_t adapid;		/* Adapter ID */
-	uint32_t adapid_hi;		/* Adapter ID high */
-	uint32_t reserved_1;
+	uint32_t dataword_r_extra;
 
 	__le32 seq_no;
 	uint8_t reserved_2[20];
@@ -360,11 +336,7 @@
 
 #define QLAFX00_INTR_MB_CMPLT		0x1
 #define QLAFX00_INTR_RSP_CMPLT		0x2
-#define QLAFX00_INTR_MB_RSP_CMPLT	0x3
 #define QLAFX00_INTR_ASYNC_CMPLT	0x4
-#define QLAFX00_INTR_MB_ASYNC_CMPLT	0x5
-#define QLAFX00_INTR_RSP_ASYNC_CMPLT	0x6
-#define QLAFX00_INTR_ALL_CMPLT		0x7
 
 #define QLAFX00_MBA_SYSTEM_ERR		0x8002
 #define QLAFX00_MBA_TEMP_OVER		0x8005
@@ -548,4 +520,7 @@
 /* Max conncurrent IOs that can be queued */
 #define QLAFX00_MAX_CANQUEUE		1024
 
+/* IOCTL IOCB abort success */
+#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS	0x68
+
 #endif
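
The qla_mr.h edits above re-slice fields in place -- handle_hi becomes
reserved bytes, scsi_rsp_dsd_len narrows to a byte plus padding --
without moving anything that follows, so every IOCB keeps its on-wire
size.  That invariant can be pinned down at compile time; a sketch with
a hypothetical layout, not the real one:

    #include <stdint.h>

    struct __attribute__((packed)) demo_iocb {
            uint8_t  entry_type;
            uint8_t  entry_count;
            uint8_t  handle_count;
            uint8_t  entry_status;
            uint32_t handle;
            uint8_t  reserved_0;     /* was one 32-bit handle_hi ...  */
            uint8_t  port_path_ctrl;
            uint16_t reserved_1;     /* ... now three fields, 4 bytes */
    };

    _Static_assert(sizeof(struct demo_iocb) == 12,
                   "re-sliced IOCB must keep its wire size");
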
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 1e6ba4a..5511e24 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1664,10 +1664,10 @@
 	/* Mapping of IO base pointer */
 	if (IS_QLA8044(ha)) {
 		ha->iobase =
-		    (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase);
+		    (device_reg_t *)((uint8_t *)ha->nx_pcibase);
 	} else if (IS_QLA82XX(ha)) {
 		ha->iobase =
-		    (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
+		    (device_reg_t *)((uint8_t *)ha->nx_pcibase +
 			0xbc000 + (ha->pdev->devfn << 11));
 	}
 
@@ -4502,3 +4502,20 @@
 	qla82xx_idc_unlock(ha);
 	return rval;
 }
+
+void
+qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->allow_cna_fw_dump)
+		return;
+
+	scsi_block_requests(vha->host);
+	ha->flags.isp82xx_no_md_cap = 1;
+	qla82xx_idc_lock(ha);
+	qla82xx_set_reset_owner(vha);
+	qla82xx_idc_unlock(ha);
+	qla2x00_wait_for_chip_reset(vha);
+	scsi_unblock_requests(vha->host);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index f60989d..86cf108 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1578,8 +1578,8 @@
 		do {
 			if (time_after_eq(jiffies, dev_init_timeout)) {
 				ql_log(ql_log_info, vha, 0xb0c4,
-				    "%s: Non Reset owner DEV INIT "
-				    "TIMEOUT!\n", __func__);
+				    "%s: Non Reset owner: Reset Ack Timeout!\n",
+				    __func__);
 				break;
 			}
 
@@ -2014,8 +2014,6 @@
 
 	/* don't poll if reset is going on or FW hang in quiescent state */
 	if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
-	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
-	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||
 	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
 		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
 
@@ -3715,3 +3713,19 @@
 	return rval;
 }
 
+void
+qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->allow_cna_fw_dump)
+		return;
+
+	scsi_block_requests(vha->host);
+	ha->flags.isp82xx_no_md_cap = 1;
+	qla8044_idc_lock(ha);
+	qla82xx_set_reset_owner(vha);
+	qla8044_idc_unlock(ha);
+	qla2x00_wait_for_chip_reset(vha);
+	scsi_unblock_requests(vha->host);
+}
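
The new qla82xx_fw_dump() and qla8044_fw_dump() above share one shape:
instead of reading registers directly, they quiesce I/O, claim reset
ownership under the IDC lock, and let the chip-reset path capture the
dump.  The ordering is the point -- block new requests before forcing
the reset, wait for the reset to finish, only then unblock.  A sketch
of that bracketing, with invented stub names:

    /* All types and helpers here are illustrative stubs. */
    struct adapter { int allow_fw_dump; };

    static void block_new_io(struct adapter *a)          { (void)a; }
    static void unblock_io(struct adapter *a)            { (void)a; }
    static void idc_lock(struct adapter *a)              { (void)a; }
    static void idc_unlock(struct adapter *a)            { (void)a; }
    static void claim_reset_ownership(struct adapter *a) { (void)a; }
    static void wait_for_chip_reset(struct adapter *a)   { (void)a; }

    static void trigger_fw_dump(struct adapter *a)
    {
            if (!a->allow_fw_dump)
                    return;

            block_new_io(a);          /* no new commands during dump */
            idc_lock(a);
            claim_reset_ownership(a); /* reset path collects the dump */
            idc_unlock(a);
            wait_for_chip_reset(a);   /* dump complete after this */
            unblock_io(a);
    }
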
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 89a5300..19e99cc 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -120,15 +120,17 @@
 int ql2xenabledif = 2;
 module_param(ql2xenabledif, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xenabledif,
-		" Enable T10-CRC-DIF "
-		" Default is 0 - No DIF Support. 1 - Enable it"
-		", 2 - Enable DIF for all types, except Type 0.");
+		" Enable T10-CRC-DIF:\n"
+		" Default is 2.\n"
+		"  0 -- No DIF Support\n"
+		"  1 -- Enable DIF for all types\n"
+		"  2 -- Enable DIF for all types, except Type 0.\n");
 
 int ql2xenablehba_err_chk = 2;
 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xenablehba_err_chk,
 		" Enable T10-CRC-DIF Error isolation by HBA:\n"
-		" Default is 1.\n"
+		" Default is 2.\n"
 		"  0 -- Error isolation disabled\n"
 		"  1 -- Error isolation enabled only for DIX Type 0\n"
 		"  2 -- Error isolation enabled for all Types\n");
@@ -1975,7 +1977,7 @@
 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
 	.read_nvram		= qla24xx_read_nvram_data,
 	.write_nvram		= qla24xx_write_nvram_data,
-	.fw_dump		= qla24xx_fw_dump,
+	.fw_dump		= qla82xx_fw_dump,
 	.beacon_on		= qla82xx_beacon_on,
 	.beacon_off		= qla82xx_beacon_off,
 	.beacon_blink		= NULL,
@@ -2013,11 +2015,11 @@
 	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
 	.read_nvram		= NULL,
 	.write_nvram		= NULL,
-	.fw_dump		= qla24xx_fw_dump,
+	.fw_dump		= qla8044_fw_dump,
 	.beacon_on		= qla82xx_beacon_on,
 	.beacon_off		= qla82xx_beacon_off,
 	.beacon_blink		= NULL,
-	.read_optrom		= qla82xx_read_optrom_data,
+	.read_optrom		= qla8044_read_optrom_data,
 	.write_optrom		= qla8044_write_optrom_data,
 	.get_flash_version	= qla82xx_get_flash_version,
 	.start_scsi             = qla82xx_start_scsi,
@@ -2078,7 +2080,7 @@
 	.intr_handler		= qlafx00_intr_handler,
 	.enable_intrs		= qlafx00_enable_intrs,
 	.disable_intrs		= qlafx00_disable_intrs,
-	.abort_command		= qlafx00_abort_command,
+	.abort_command		= qla24xx_async_abort_command,
 	.target_reset		= qlafx00_abort_target,
 	.lun_reset		= qlafx00_lun_reset,
 	.fabric_login		= NULL,
@@ -2102,6 +2104,44 @@
 	.initialize_adapter	= qlafx00_initialize_adapter,
 };
 
+static struct isp_operations qla27xx_isp_ops = {
+	.pci_config		= qla25xx_pci_config,
+	.reset_chip		= qla24xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla24xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla81xx_nvram_config,
+	.update_fw_options	= qla81xx_update_fw_options,
+	.load_risc		= qla81xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla24xx_intr_handler,
+	.enable_intrs		= qla24xx_enable_intrs,
+	.disable_intrs		= qla24xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= NULL,
+	.write_nvram		= NULL,
+	.fw_dump		= qla27xx_fwdump,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= qla83xx_beacon_blink,
+	.read_optrom		= qla25xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_dif_start_scsi,
+	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config		= qla83xx_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
 static inline void
 qla2x00_set_isp_flags(struct qla_hw_data *ha)
 {
@@ -2223,21 +2263,29 @@
 	case PCI_DEVICE_ID_QLOGIC_ISPF001:
 		ha->device_type |= DT_ISPFX00;
 		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2071:
+		ha->device_type |= DT_ISP2071;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
 	}
 
 	if (IS_QLA82XX(ha))
-		ha->port_no = !(ha->portnum & 1);
-	else
+		ha->port_no = ha->portnum & 1;
+	else {
 		/* Get adapter physical port no from interrupt pin register. */
 		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+		if (IS_QLA27XX(ha))
+			ha->port_no--;
+		else
+			ha->port_no = !(ha->port_no & 1);
+	}
 
-	if (ha->port_no & 1)
-		ha->flags.port0 = 1;
-	else
-		ha->flags.port0 = 0;
 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
 	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
-	    ha->device_type, ha->flags.port0, ha->fw_srisc_address);
+	    ha->device_type, ha->port_no, ha->fw_srisc_address);
 }
 
 static void
@@ -2297,7 +2345,8 @@
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
-	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) {
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) {
 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 		mem_only = 1;
 		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2334,13 +2383,14 @@
 	spin_lock_init(&ha->hardware_lock);
 	spin_lock_init(&ha->vport_slock);
 	mutex_init(&ha->selflogin_lock);
+	mutex_init(&ha->optrom_mutex);
 
 	/* Set ISP-type information. */
 	qla2x00_set_isp_flags(ha);
 
 	/* Set EEH reset type to fundamental if required by hba */
 	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
-	    IS_QLA83XX(ha))
+	    IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		pdev->needs_freset = 1;
 
 	ha->prev_topology = 0;
@@ -2488,7 +2538,6 @@
 		ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
 		req_length = REQUEST_ENTRY_CNT_FX00;
 		rsp_length = RESPONSE_ENTRY_CNT_FX00;
-		ha->init_cb_size = sizeof(struct init_cb_fx);
 		ha->isp_ops = &qlafx00_isp_ops;
 		ha->port_down_retry_count = 30; /* default value */
 		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
@@ -2497,6 +2546,22 @@
 		ha->mr.fw_hbt_en = 1;
 		ha->mr.host_info_resend = false;
 		ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
+	} else if (IS_QLA27XX(ha)) {
+		ha->portnum = PCI_FUNC(ha->pdev->devfn);
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_24XX;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_83XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+		ha->isp_ops = &qla27xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
 	}
 
 	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2536,7 +2601,7 @@
 	    ha->flags.enable_64bit_addressing ? "enable" :
 	    "disable");
 	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
-	if (!ret) {
+	if (ret) {
 		ql_log_pci(ql_log_fatal, pdev, 0x0031,
 		    "Failed to allocate memory for adapter, aborting.\n");
 
@@ -2561,10 +2626,6 @@
 
 	host = base_vha->host;
 	base_vha->req = req;
-	if (IS_QLAFX00(ha))
-		host->can_queue = QLAFX00_MAX_CANQUEUE;
-	else
-		host->can_queue = req->length + 128;
 	if (IS_QLA2XXX_MIDTYPE(ha))
 		base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
 	else
@@ -2587,11 +2648,6 @@
 		if (!IS_QLA82XX(ha))
 			host->sg_tablesize = QLA_SG_ALL;
 	}
-	ql_dbg(ql_dbg_init, base_vha, 0x0032,
-	    "can_queue=%d, req=%p, "
-	    "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
-	    host->can_queue, base_vha->req,
-	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 	host->max_id = ha->max_fibre_devices;
 	host->cmd_per_lun = 3;
 	host->unique_id = host->host_no;
@@ -2646,7 +2702,7 @@
 	req->req_q_out = &ha->iobase->isp24.req_q_out;
 	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
 	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
-	if (ha->mqenable || IS_QLA83XX(ha)) {
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
 		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
 		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2707,6 +2763,16 @@
 		goto probe_failed;
 	}
 
+	if (IS_QLAFX00(ha))
+		host->can_queue = QLAFX00_MAX_CANQUEUE;
+	else
+		host->can_queue = req->num_outstanding_cmds - 10;
+
+	ql_dbg(ql_dbg_init, base_vha, 0x0032,
+	    "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
+	    host->can_queue, base_vha->req,
+	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+
 	if (ha->mqenable) {
 		if (qla25xx_setup_mode(base_vha)) {
 			ql_log(ql_log_warn, base_vha, 0x00ec,
@@ -2887,9 +2953,9 @@
 iospace_config_failed:
 	if (IS_P3P_TYPE(ha)) {
 		if (!ha->nx_pcibase)
-			iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+			iounmap((device_reg_t *)ha->nx_pcibase);
 		if (!ql2xdbwr)
-			iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
 	} else {
 		if (ha->iobase)
 			iounmap(ha->iobase);
@@ -3020,9 +3086,9 @@
 {
 	if (IS_QLA82XX(ha)) {
 
-		iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+		iounmap((device_reg_t *)ha->nx_pcibase);
 		if (!ql2xdbwr)
-			iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
 	} else {
 		if (ha->iobase)
 			iounmap(ha->iobase);
@@ -3033,7 +3099,7 @@
 		if (ha->mqiobase)
 			iounmap(ha->mqiobase);
 
-		if (IS_QLA83XX(ha) && ha->msixbase)
+		if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
 			iounmap(ha->msixbase);
 	}
 }
@@ -3447,7 +3513,7 @@
 		ha->npiv_info = NULL;
 
 	/* Get consistent memory allocated for EX-INIT-CB. */
-	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) {
+	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
 		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
 		    &ha->ex_init_cb_dma);
 		if (!ha->ex_init_cb)
@@ -3478,10 +3544,10 @@
 	else {
 		qla2x00_set_reserved_loop_ids(ha);
 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
-		    "loop_id_map=%p. \n", ha->loop_id_map);
+		    "loop_id_map=%p.\n", ha->loop_id_map);
 	}
 
-	return 1;
+	return 0;
 
 fail_async_pd:
 	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
@@ -3562,22 +3628,28 @@
 qla2x00_free_fw_dump(struct qla_hw_data *ha)
 {
 	if (ha->fce)
-		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
-		    ha->fce_dma);
+		dma_free_coherent(&ha->pdev->dev,
+		    FCE_SIZE, ha->fce, ha->fce_dma);
 
-	if (ha->fw_dump) {
-		if (ha->eft)
-			dma_free_coherent(&ha->pdev->dev,
-			    ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
+	if (ha->eft)
+		dma_free_coherent(&ha->pdev->dev,
+		    EFT_SIZE, ha->eft, ha->eft_dma);
+
+	if (ha->fw_dump)
 		vfree(ha->fw_dump);
-	}
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+
 	ha->fce = NULL;
 	ha->fce_dma = 0;
 	ha->eft = NULL;
 	ha->eft_dma = 0;
-	ha->fw_dump = NULL;
 	ha->fw_dumped = 0;
 	ha->fw_dump_reading = 0;
+	ha->fw_dump = NULL;
+	ha->fw_dump_len = 0;
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
 }
 
 /*
@@ -5242,7 +5314,7 @@
 
 /* Firmware interface routines. */
 
-#define FW_BLOBS	10
+#define FW_BLOBS	11
 #define FW_ISP21XX	0
 #define FW_ISP22XX	1
 #define FW_ISP2300	2
@@ -5253,6 +5325,7 @@
 #define FW_ISP82XX	7
 #define FW_ISP2031	8
 #define FW_ISP8031	9
+#define FW_ISP2071	10
 
 #define FW_FILE_ISP21XX	"ql2100_fw.bin"
 #define FW_FILE_ISP22XX	"ql2200_fw.bin"
@@ -5264,6 +5337,8 @@
 #define FW_FILE_ISP82XX	"ql8200_fw.bin"
 #define FW_FILE_ISP2031	"ql2600_fw.bin"
 #define FW_FILE_ISP8031	"ql8300_fw.bin"
+#define FW_FILE_ISP2071	"ql2700_fw.bin"
+
 
 static DEFINE_MUTEX(qla_fw_lock);
 
@@ -5278,6 +5353,7 @@
 	{ .name = FW_FILE_ISP82XX, },
 	{ .name = FW_FILE_ISP2031, },
 	{ .name = FW_FILE_ISP8031, },
+	{ .name = FW_FILE_ISP2071, },
 };
 
 struct fw_blob *
@@ -5306,6 +5382,8 @@
 		blob = &qla_fw_blobs[FW_ISP2031];
 	} else if (IS_QLA8031(ha)) {
 		blob = &qla_fw_blobs[FW_ISP8031];
+	} else if (IS_QLA2071(ha)) {
+		blob = &qla_fw_blobs[FW_ISP2071];
 	} else {
 		return NULL;
 	}
@@ -5635,6 +5713,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
 	{ 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
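
Wiring the new ISP2071 into qla_os.c touches three tables at once --
the PCI ID list, the firmware blob list, and a new qla27xx_isp_ops
vector -- plus the probe-time sizing block.  The dispatch idea in
miniature, a sketch with invented names and the device ID assumed to
be 0x2071:

    #include <stdint.h>

    /* Per-chip behavior as a table of function pointers, chosen once
     * at probe time from the PCI device ID.  Illustrative only. */
    struct chip_ops {
            const char *fw_file;
            void (*fw_dump)(void);
    };

    static void dump_generic(void)  { /* ... */ }
    static void dump_new_chip(void) { /* ... */ }

    static const struct chip_ops ops_generic  = { "old_fw.bin", dump_generic };
    static const struct chip_ops ops_new_chip = { "new_fw.bin", dump_new_chip };

    static const struct chip_ops *probe_ops(uint16_t device_id)
    {
            return device_id == 0x2071 ? &ops_new_chip : &ops_generic;
    }

Note also that host->can_queue is now set only after the firmware
reports req->num_outstanding_cmds, so the midlayer queue depth tracks
what the adapter actually negotiated rather than a guess made before
initialization.
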
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bd56cde..f28123e 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -568,7 +568,7 @@
 	else if (IS_P3P_TYPE(ha)) {
 		*start = FA_FLASH_LAYOUT_ADDR_82;
 		goto end;
-	} else if (IS_QLA83XX(ha)) {
+	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		*start = FA_FLASH_LAYOUT_ADDR_83;
 		goto end;
 	}
@@ -682,7 +682,7 @@
 	/* Assign FCP prio region since older adapters may not have FLT, or
 	   FCP prio region in it's FLT.
 	 */
-	ha->flt_region_fcp_prio = ha->flags.port0 ?
+	ha->flt_region_fcp_prio = (ha->port_no == 0) ?
 	    fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
 
 	ha->flt_region_flt = flt_addr;
@@ -743,47 +743,71 @@
 			ha->flt_region_vpd_nvram = start;
 			if (IS_P3P_TYPE(ha))
 				break;
-			if (ha->flags.port0)
+			if (ha->port_no == 0)
 				ha->flt_region_vpd = start;
 			break;
 		case FLT_REG_VPD_1:
 			if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
 				break;
-			if (!ha->flags.port0)
+			if (ha->port_no == 1)
+				ha->flt_region_vpd = start;
+			break;
+		case FLT_REG_VPD_2:
+			if (!IS_QLA27XX(ha))
+				break;
+			if (ha->port_no == 2)
+				ha->flt_region_vpd = start;
+			break;
+		case FLT_REG_VPD_3:
+			if (!IS_QLA27XX(ha))
+				break;
+			if (ha->port_no == 3)
 				ha->flt_region_vpd = start;
 			break;
 		case FLT_REG_NVRAM_0:
 			if (IS_QLA8031(ha))
 				break;
-			if (ha->flags.port0)
+			if (ha->port_no == 0)
 				ha->flt_region_nvram = start;
 			break;
 		case FLT_REG_NVRAM_1:
 			if (IS_QLA8031(ha))
 				break;
-			if (!ha->flags.port0)
+			if (ha->port_no == 1)
+				ha->flt_region_nvram = start;
+			break;
+		case FLT_REG_NVRAM_2:
+			if (!IS_QLA27XX(ha))
+				break;
+			if (ha->port_no == 2)
+				ha->flt_region_nvram = start;
+			break;
+		case FLT_REG_NVRAM_3:
+			if (!IS_QLA27XX(ha))
+				break;
+			if (ha->port_no == 3)
 				ha->flt_region_nvram = start;
 			break;
 		case FLT_REG_FDT:
 			ha->flt_region_fdt = start;
 			break;
 		case FLT_REG_NPIV_CONF_0:
-			if (ha->flags.port0)
+			if (ha->port_no == 0)
 				ha->flt_region_npiv_conf = start;
 			break;
 		case FLT_REG_NPIV_CONF_1:
-			if (!ha->flags.port0)
+			if (ha->port_no == 1)
 				ha->flt_region_npiv_conf = start;
 			break;
 		case FLT_REG_GOLD_FW:
 			ha->flt_region_gold_fw = start;
 			break;
 		case FLT_REG_FCP_PRIO_0:
-			if (ha->flags.port0)
+			if (ha->port_no == 0)
 				ha->flt_region_fcp_prio = start;
 			break;
 		case FLT_REG_FCP_PRIO_1:
-			if (!ha->flags.port0)
+			if (ha->port_no == 1)
 				ha->flt_region_fcp_prio = start;
 			break;
 		case FLT_REG_BOOT_CODE_82XX:
@@ -813,13 +837,13 @@
 		case FLT_REG_FCOE_NVRAM_0:
 			if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
 				break;
-			if (ha->flags.port0)
+			if (ha->port_no == 0)
 				ha->flt_region_nvram = start;
 			break;
 		case FLT_REG_FCOE_NVRAM_1:
 			if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
 				break;
-			if (!ha->flags.port0)
+			if (ha->port_no == 1)
 				ha->flt_region_nvram = start;
 			break;
 		}
@@ -832,12 +856,12 @@
 	ha->flt_region_fw = def_fw[def];
 	ha->flt_region_boot = def_boot[def];
 	ha->flt_region_vpd_nvram = def_vpd_nvram[def];
-	ha->flt_region_vpd = ha->flags.port0 ?
+	ha->flt_region_vpd = (ha->port_no == 0) ?
 	    def_vpd0[def] : def_vpd1[def];
-	ha->flt_region_nvram = ha->flags.port0 ?
+	ha->flt_region_nvram = (ha->port_no == 0) ?
 	    def_nvram0[def] : def_nvram1[def];
 	ha->flt_region_fdt = def_fdt[def];
-	ha->flt_region_npiv_conf = ha->flags.port0 ?
+	ha->flt_region_npiv_conf = (ha->port_no == 0) ?
 	    def_npiv_conf0[def] : def_npiv_conf1[def];
 done:
 	ql_dbg(ql_dbg_init, vha, 0x004a,
@@ -989,7 +1013,7 @@
 	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
-	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
 		return QLA_SUCCESS;
 
 	ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -1192,7 +1216,8 @@
 	struct qla_hw_data *ha = vha->hw;
 
 	/* Prepare burst-capable write on supported ISPs. */
-	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+	    IS_QLA27XX(ha)) &&
 	    !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
 		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
 		    &optrom_dma, GFP_KERNEL);
@@ -1675,7 +1700,7 @@
 	if (!IS_QLA83XX(ha))
 		goto out;
 
-	if (ha->flags.port0)
+	if (ha->port_no == 0)
 		led_select_value = QLA83XX_LED_PORT0;
 	else
 		led_select_value = QLA83XX_LED_PORT1;
@@ -2332,7 +2357,7 @@
 				 */
 				rest_addr = 0xffff;
 				sec_mask = 0x10000;
-				break;   
+				break;
 			}
 			/*
 			 * ST m29w010b part - 16kb sector size
@@ -2558,7 +2583,7 @@
 	uint32_t faddr, left, burst;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
+	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))
 		goto try_fast;
 	if (offset & 0xfff)
 		goto slow_read;
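
The qla_sup.c changes above generalize flash-region selection from a
two-valued port0 flag to a numeric port_no, so a four-port ISP27xx can
claim the new FLT_REG_VPD_2/3 and FLT_REG_NVRAM_2/3 regions.  Each
per-port region is taken only when its trailing index matches this
function's port; a compact sketch, assuming (unlike the real FLT codes)
consecutive region IDs:

    #include <stdint.h>

    enum { REG_NVRAM_0 = 10, REG_NVRAM_1, REG_NVRAM_2, REG_NVRAM_3 };

    /* Claim a region only if its index matches our port number. */
    static int claim_nvram_region(int reg, uint8_t port_no,
                                  uint32_t start, uint32_t *nvram_start)
    {
            if (reg < REG_NVRAM_0 || reg > REG_NVRAM_3)
                    return 0;
            if ((uint8_t)(reg - REG_NVRAM_0) != port_no)
                    return 0;       /* some other port's copy */
            *nvram_start = start;
            return 1;
    }

This is also why ha->flags.port0 goes away in favor of ha->port_no: a
flag can distinguish two ports, an index can distinguish four.
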
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
new file mode 100644
index 0000000..a804e9b
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -0,0 +1,909 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_tmpl.h"
+
+/* note default template is in big endian */
+static const uint32_t ql27xx_fwdt_default_template[] = {
+	0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
+	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x04010000, 0x14000000, 0x00000000,
+	0x02000000, 0x44000000, 0x09010000, 0x10000000,
+	0x00000000, 0x02000000, 0x01010000, 0x1c000000,
+	0x00000000, 0x02000000, 0x00600000, 0x00000000,
+	0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
+	0x02000000, 0x00600000, 0x00000000, 0xcc000000,
+	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+	0x10600000, 0x00000000, 0xd4000000, 0x01010000,
+	0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
+	0x00000060, 0xf0000000, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00700000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x10700000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x40700000, 0x041000c0,
+	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+	0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
+	0x18000000, 0x00000000, 0x02000000, 0x007c0000,
+	0x040300c4, 0x00010000, 0x18000000, 0x00000000,
+	0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
+	0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
+	0x00000000, 0xc0000000, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x007c0000, 0x04200000,
+	0x0b010000, 0x18000000, 0x00000000, 0x02000000,
+	0x0c000000, 0x00000000, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x0a000000, 0x04200080, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00300000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x10300000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x20300000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
+	0x00000000, 0x02000000, 0x06010000, 0x1c000000,
+	0x00000000, 0x02000000, 0x01000000, 0x00000200,
+	0xff230200, 0x06010000, 0x1c000000, 0x00000000,
+	0x02000000, 0x02000000, 0x00001000, 0x00000000,
+	0x07010000, 0x18000000, 0x00000000, 0x02000000,
+	0x00000000, 0x01000000, 0x07010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00000000, 0x02000000,
+	0x07010000, 0x18000000, 0x00000000, 0x02000000,
+	0x00000000, 0x03000000, 0x0d010000, 0x14000000,
+	0x00000000, 0x02000000, 0x00000000, 0xff000000,
+	0x10000000, 0x00000000, 0x00000080,
+};
+
+static inline void __iomem *
+qla27xx_isp_reg(struct scsi_qla_host *vha)
+{
+	return &vha->hw->iobase->isp24;
+}
+
+static inline void
+qla27xx_insert16(uint16_t value, void *buf, ulong *len)
+{
+	if (buf) {
+		buf += *len;
+		*(__le16 *)buf = cpu_to_le16(value);
+	}
+	*len += sizeof(value);
+}
+
+static inline void
+qla27xx_insert32(uint32_t value, void *buf, ulong *len)
+{
+	if (buf) {
+		buf += *len;
+		*(__le32 *)buf = cpu_to_le32(value);
+	}
+	*len += sizeof(value);
+}
+
+static inline void
+qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
+{
+	ulong cnt = size;
+
+	if (buf && mem) {
+		buf += *len;
+		while (cnt >= sizeof(uint32_t)) {
+			*(__le32 *)buf = cpu_to_le32p(mem);
+			buf += sizeof(uint32_t);
+			mem += sizeof(uint32_t);
+			cnt -= sizeof(uint32_t);
+		}
+		if (cnt)
+			memcpy(buf, mem, cnt);
+	}
+	*len += size;
+}
+
+static inline void
+qla27xx_read8(void *window, void *buf, ulong *len)
+{
+	uint8_t value = ~0;
+
+	if (buf) {
+		value = RD_REG_BYTE((__iomem void *)window);
+		ql_dbg(ql_dbg_misc, NULL, 0xd011,
+		    "%s: -> %x\n", __func__, value);
+	}
+	qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read16(void *window, void *buf, ulong *len)
+{
+	uint16_t value = ~0;
+
+	if (buf) {
+		value = RD_REG_WORD((__iomem void *)window);
+		ql_dbg(ql_dbg_misc, NULL, 0xd012,
+		    "%s: -> %x\n", __func__, value);
+	}
+	qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read32(void *window, void *buf, ulong *len)
+{
+	uint32_t value = ~0;
+
+	if (buf) {
+		value = RD_REG_DWORD((__iomem void *)window);
+		ql_dbg(ql_dbg_misc, NULL, 0xd013,
+		    "%s: -> %x\n", __func__, value);
+	}
+	qla27xx_insert32(value, buf, len);
+}
+
+static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
+{
+	return
+	    (width == 1) ? qla27xx_read8 :
+	    (width == 2) ? qla27xx_read16 :
+			   qla27xx_read32;
+}
+
+static inline void
+qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
+	uint offset, void *buf, ulong *len)
+{
+	void *window = (void *)reg + offset;
+
+	if (buf) {
+		ql_dbg(ql_dbg_misc, NULL, 0xd014,
+		    "%s: @%x\n", __func__, offset);
+	}
+	qla27xx_insert32(offset, buf, len);
+	qla27xx_read32(window, buf, len);
+}
+
+static inline void
+qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
+	uint offset, uint32_t data, void *buf)
+{
+	__iomem void *window = reg + offset;
+
+	if (buf) {
+		ql_dbg(ql_dbg_misc, NULL, 0xd015,
+		    "%s: @%x <- %x\n", __func__, offset, data);
+		WRT_REG_DWORD(window, data);
+	}
+}
+
+static inline void
+qla27xx_read_window(__iomem struct device_reg_24xx *reg,
+	uint32_t base, uint offset, uint count, uint width, void *buf,
+	ulong *len)
+{
+	void *window = (void *)reg + offset;
+	void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
+
+	if (buf) {
+		ql_dbg(ql_dbg_misc, NULL, 0xd016,
+		    "%s: base=%x offset=%x count=%x width=%x\n",
+		    __func__, base, offset, count, width);
+	}
+	qla27xx_write_reg(reg, IOBASE_ADDR, base, buf);
+	while (count--) {
+		qla27xx_insert32(base, buf, len);
+		readn(window, buf, len);
+		window += width;
+		base += width;
+	}
+}
+
+static inline void
+qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
+{
+	if (buf)
+		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
+}
+
+static int
+qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd100,
+	    "%s: nop [%lx]\n", __func__, *len);
+	qla27xx_skip_entry(ent, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
+	    "%s: end [%lx]\n", __func__, *len);
+	qla27xx_skip_entry(ent, buf);
+
+	/* terminate */
+	return true;
+}
+
+static int
+qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd200,
+	    "%s: rdio t1 [%lx]\n", __func__, *len);
+	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
+	    ent->t256.reg_count, ent->t256.reg_width, buf, len);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd201,
+	    "%s: wrio t1 [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
+	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd202,
+	    "%s: rdio t2 [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
+	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
+	    ent->t258.reg_count, ent->t258.reg_width, buf, len);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd203,
+	    "%s: wrio t2 [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
+	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
+	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd204,
+	    "%s: rdpci [%lx]\n", __func__, *len);
+	qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd205,
+	    "%s: wrpci [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong dwords;
+	ulong start;
+	ulong end;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd206,
+	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
+	start = ent->t262.start_addr;
+	end = ent->t262.end_addr;
+
+	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
+		;
+	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
+		end = vha->hw->fw_memory_size;
+		if (buf)
+			ent->t262.end_addr = end;
+	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
+		start = vha->hw->fw_shared_ram_start;
+		end = vha->hw->fw_shared_ram_end;
+		if (buf) {
+			ent->t262.start_addr = start;
+			ent->t262.end_addr = end;
+		}
+	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
+		ql_dbg(ql_dbg_misc, vha, 0xd021,
+		    "%s: unsupported ddr ram\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd022,
+		    "%s: unknown area %u\n", __func__, ent->t262.ram_area);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	}
+
+	if (end < start) {
+		ql_dbg(ql_dbg_misc, vha, 0xd023,
+		    "%s: bad range (start=%x end=%x)\n", __func__,
+		    ent->t262.end_addr, ent->t262.start_addr);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	}
+
+	dwords = end - start + 1;
+	if (buf) {
+		ql_dbg(ql_dbg_misc, vha, 0xd024,
+		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
+		buf += *len;
+		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+	}
+	*len += dwords * sizeof(uint32_t);
+done:
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	uint count = 0;
+	uint i;
+	uint length;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd207,
+	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
+	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+		for (i = 0; i < vha->hw->max_req_queues; i++) {
+			struct req_que *req = vha->hw->req_q_map[i];
+			if (req || !buf) {
+				length = req ?
+				    req->length : REQUEST_ENTRY_CNT_24XX;
+				qla27xx_insert16(i, buf, len);
+				qla27xx_insert16(length, buf, len);
+				qla27xx_insertbuf(req ? req->ring : NULL,
+				    length * sizeof(*req->ring), buf, len);
+				count++;
+			}
+		}
+	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+			if (rsp || !buf) {
+				length = rsp ?
+				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
+				qla27xx_insert16(i, buf, len);
+				qla27xx_insert16(length, buf, len);
+				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
+				    length * sizeof(*rsp->ring), buf, len);
+				count++;
+			}
+		}
+	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+		ql_dbg(ql_dbg_misc, vha, 0xd025,
+		    "%s: unsupported atio queue\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd026,
+		    "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	}
+
+	if (buf)
+		ent->t263.num_queues = count;
+done:
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd208,
+	    "%s: getfce [%lx]\n", __func__, *len);
+	if (vha->hw->fce) {
+		if (buf) {
+			ent->t264.fce_trace_size = FCE_SIZE;
+			ent->t264.write_pointer = vha->hw->fce_wr;
+			ent->t264.base_pointer = vha->hw->fce_dma;
+			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
+			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
+			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
+			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
+			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
+			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
+		}
+		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd027,
+		    "%s: missing fce\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd209,
+	    "%s: pause risc [%lx]\n", __func__, *len);
+	if (buf)
+		qla24xx_pause_risc(reg);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd20a,
+	    "%s: reset risc [%lx]\n", __func__, *len);
+	if (buf)
+		qla24xx_soft_reset(vha->hw);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd20b,
+	    "%s: dis intr [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd20c,
+	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
+	if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+		if (vha->hw->eft) {
+			if (buf) {
+				ent->t268.buf_size = EFT_SIZE;
+				ent->t268.start_addr = vha->hw->eft_dma;
+			}
+			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
+		} else {
+			ql_dbg(ql_dbg_misc, vha, 0xd028,
+			    "%s: missing eft\n", __func__);
+			qla27xx_skip_entry(ent, buf);
+		}
+	} else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
+		ql_dbg(ql_dbg_misc, vha, 0xd029,
+		    "%s: unsupported exchange offload buffer\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+	} else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
+		ql_dbg(ql_dbg_misc, vha, 0xd02a,
+		    "%s: unsupported extended login buffer\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd02b,
+		    "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
+		qla27xx_skip_entry(ent, buf);
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd20d,
+	    "%s: scratch [%lx]\n", __func__, *len);
+	qla27xx_insert32(0xaaaaaaaa, buf, len);
+	qla27xx_insert32(0xbbbbbbbb, buf, len);
+	qla27xx_insert32(0xcccccccc, buf, len);
+	qla27xx_insert32(0xdddddddd, buf, len);
+	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
+	if (buf)
+		ent->t269.scratch_size = 5 * sizeof(uint32_t);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	void *window = (void *)reg + 0xc4;
+	ulong dwords = ent->t270.count;
+	ulong addr = ent->t270.addr;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd20e,
+	    "%s: rdremreg [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+	while (dwords--) {
+		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
+		qla27xx_read_reg(reg, 0xc4, buf, len);
+		qla27xx_insert32(addr, buf, len);
+		qla27xx_read32(window, buf, len);
+		addr++;
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	ulong addr = ent->t271.addr;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd20f,
+	    "%s: wrremreg [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+	qla27xx_read_reg(reg, 0xc4, buf, len);
+	qla27xx_insert32(addr, buf, len);
+	qla27xx_write_reg(reg, 0xc0, addr, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong dwords = ent->t272.count;
+	ulong start = ent->t272.addr;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd210,
+	    "%s: rdremram [%lx]\n", __func__, *len);
+	if (buf) {
+		ql_dbg(ql_dbg_misc, vha, 0xd02c,
+		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
+		buf += *len;
+		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
+	}
+	*len += dwords * sizeof(uint32_t);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong dwords = ent->t273.count;
+	ulong addr = ent->t273.addr;
+	uint32_t value;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd211,
+	    "%s: pcicfg [%lx]\n", __func__, *len);
+	while (dwords--) {
+		value = ~0;
+		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
+			ql_dbg(ql_dbg_misc, vha, 0xd02d,
+			    "%s: failed pcicfg read at %lx\n", __func__, addr);
+		qla27xx_insert32(addr, buf, len);
+		qla27xx_insert32(value, buf, len);
+		addr += 4;
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
+	    "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
+	qla27xx_skip_entry(ent, buf);
+
+	return false;
+}
+
+struct qla27xx_fwdt_entry_call {
+	int type;
+	int (*call)(
+	    struct scsi_qla_host *,
+	    struct qla27xx_fwdt_entry *,
+	    void *,
+	    ulong *);
+};
+
+static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
+	{ ENTRY_TYPE_NOP		, qla27xx_fwdt_entry_t0    } ,
+	{ ENTRY_TYPE_TMP_END		, qla27xx_fwdt_entry_t255  } ,
+	{ ENTRY_TYPE_RD_IOB_T1		, qla27xx_fwdt_entry_t256  } ,
+	{ ENTRY_TYPE_WR_IOB_T1		, qla27xx_fwdt_entry_t257  } ,
+	{ ENTRY_TYPE_RD_IOB_T2		, qla27xx_fwdt_entry_t258  } ,
+	{ ENTRY_TYPE_WR_IOB_T2		, qla27xx_fwdt_entry_t259  } ,
+	{ ENTRY_TYPE_RD_PCI		, qla27xx_fwdt_entry_t260  } ,
+	{ ENTRY_TYPE_WR_PCI		, qla27xx_fwdt_entry_t261  } ,
+	{ ENTRY_TYPE_RD_RAM		, qla27xx_fwdt_entry_t262  } ,
+	{ ENTRY_TYPE_GET_QUEUE		, qla27xx_fwdt_entry_t263  } ,
+	{ ENTRY_TYPE_GET_FCE		, qla27xx_fwdt_entry_t264  } ,
+	{ ENTRY_TYPE_PSE_RISC		, qla27xx_fwdt_entry_t265  } ,
+	{ ENTRY_TYPE_RST_RISC		, qla27xx_fwdt_entry_t266  } ,
+	{ ENTRY_TYPE_DIS_INTR		, qla27xx_fwdt_entry_t267  } ,
+	{ ENTRY_TYPE_GET_HBUF		, qla27xx_fwdt_entry_t268  } ,
+	{ ENTRY_TYPE_SCRATCH		, qla27xx_fwdt_entry_t269  } ,
+	{ ENTRY_TYPE_RDREMREG		, qla27xx_fwdt_entry_t270  } ,
+	{ ENTRY_TYPE_WRREMREG		, qla27xx_fwdt_entry_t271  } ,
+	{ ENTRY_TYPE_RDREMRAM		, qla27xx_fwdt_entry_t272  } ,
+	{ ENTRY_TYPE_PCICFG		, qla27xx_fwdt_entry_t273  } ,
+	{ -1				, qla27xx_fwdt_entry_other }
+};
+
+static inline int (*qla27xx_find_entry(int type))
+	(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
+{
+	struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
+
+	while (list->type != -1 && list->type != type)
+		list++;
+
+	return list->call;
+}
+
+static inline void *
+qla27xx_next_entry(void *p)
+{
+	struct qla27xx_fwdt_entry *ent = p;
+
+	return p + ent->hdr.entry_size;
+}
+
+static void
+qla27xx_walk_template(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
+{
+	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
+	ulong count = tmp->entry_count;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd01a,
+	    "%s: entry count %lx\n", __func__, count);
+	while (count--) {
+		if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
+			break;
+		ent = qla27xx_next_entry(ent);
+	}
+	ql_dbg(ql_dbg_misc, vha, 0xd01b,
+	    "%s: len=%lx\n", __func__, *len);
+}
+
+static void
+qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
+{
+	tmp->capture_timestamp = jiffies;
+}
+
+static void
+qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
+{
+	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
+	int rval = 0;
+
+	rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+	    v+0, v+1, v+2, v+3, v+4, v+5);
+
+	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
+	tmp->driver_info[1] = v[5] << 8 | v[4];
+	tmp->driver_info[2] = 0x12345678;
+}
+
+static void
+qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
+	struct scsi_qla_host *vha)
+{
+	tmp->firmware_version[0] = vha->hw->fw_major_version;
+	tmp->firmware_version[1] = vha->hw->fw_minor_version;
+	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
+	tmp->firmware_version[3] =
+	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
+	tmp->firmware_version[4] =
+	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
+}
+
+static void
+ql27xx_edit_template(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_template *tmp)
+{
+	qla27xx_time_stamp(tmp);
+	qla27xx_driver_info(tmp);
+	qla27xx_firmware_info(tmp, vha);
+}
+
+static inline uint32_t
+qla27xx_template_checksum(void *p, ulong size)
+{
+	uint32_t *buf = p;
+	uint64_t sum = 0;
+
+	size /= sizeof(*buf);
+
+	while (size--)
+		sum += *buf++;
+
+	sum = (sum & 0xffffffff) + (sum >> 32);
+
+	return ~sum;
+}
+
+static inline int
+qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
+{
+	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
+}
+
+static inline int
+qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
+{
+	return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
+}
+
+static void
+qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
+{
+	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+	ulong len;
+
+	if (qla27xx_fwdt_template_valid(tmp)) {
+		len = tmp->template_size;
+		tmp = memcpy(vha->hw->fw_dump, tmp, len);
+		ql27xx_edit_template(vha, tmp);
+		qla27xx_walk_template(vha, tmp, tmp, &len);
+		vha->hw->fw_dump_len = len;
+		vha->hw->fw_dumped = 1;
+	}
+}
+
+ulong
+qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
+{
+	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+	ulong len = 0;
+
+	if (qla27xx_fwdt_template_valid(tmp)) {
+		len = tmp->template_size;
+		qla27xx_walk_template(vha, tmp, NULL, &len);
+	}
+
+	return len;
+}
+
+ulong
+qla27xx_fwdt_template_size(void *p)
+{
+	struct qla27xx_fwdt_template *tmp = p;
+
+	return tmp->template_size;
+}
+
+ulong
+qla27xx_fwdt_template_default_size(void)
+{
+	return sizeof(ql27xx_fwdt_default_template);
+}
+
+const void *
+qla27xx_fwdt_template_default(void)
+{
+	return ql27xx_fwdt_default_template;
+}
+
+int
+qla27xx_fwdt_template_valid(void *p)
+{
+	struct qla27xx_fwdt_template *tmp = p;
+
+	if (!qla27xx_verify_template_header(tmp)) {
+		ql_log(ql_log_warn, NULL, 0xd01c,
+		    "%s: template type %x\n", __func__, tmp->template_type);
+		return false;
+	}
+
+	if (!qla27xx_verify_template_checksum(tmp)) {
+		ql_log(ql_log_warn, NULL, 0xd01d,
+		    "%s: failed template checksum\n", __func__);
+		return false;
+	}
+
+	return true;
+}
+
+void
+qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	ulong flags = 0;
+
+	if (!hardware_locked)
+		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+
+	if (!vha->hw->fw_dump)
+		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
+	else if (!vha->hw->fw_dump_template)
+		ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
+	else
+		qla27xx_execute_fwdt_template(vha);
+
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
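
The template walker above is built to run twice over the same entries.
Every emit helper (qla27xx_insert32(), qla27xx_insertbuf(), the read
wrappers) touches hardware and memory only when buf is non-NULL, but
always advances *len; so a first pass with buf == NULL sizes the dump
(qla27xx_fwdt_calculate_dump_size()) and a second pass with a real
buffer captures it.  Where the passes can differ (t263 counts queues
that may not exist when merely sizing), the dry run errs on the large
side, so the allocation is an upper bound.  The trick in isolation, as
a standalone sketch:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Dry-run emit: with buf == NULL only the length advances;
     * with a real buffer the bytes land at offset *len. */
    static void emit32(uint32_t value, void *buf, size_t *len)
    {
            if (buf)
                    memcpy((char *)buf + *len, &value, sizeof(value));
            *len += sizeof(value);
    }

The checksum is one's-complement style: qla27xx_template_checksum()
folds a 64-bit sum of every 32-bit word into 32 bits and returns its
complement, so verification reduces to "the complement of the fold is
zero" -- in effect, the stored template_checksum is chosen so the whole
image folds to 0xffffffff, the same idea as the IP header checksum.
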
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
new file mode 100644
index 0000000..c9d2fff
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -0,0 +1,205 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_DMP27_H__
+#define	__QLA_DMP27_H__
+
+#define IOBASE_ADDR	offsetof(struct device_reg_24xx, iobase_addr)
+
+struct __packed qla27xx_fwdt_template {
+	uint32_t template_type;
+	uint32_t entry_offset;
+	uint32_t template_size;
+	uint32_t reserved_1;
+
+	uint32_t entry_count;
+	uint32_t template_version;
+	uint32_t capture_timestamp;
+	uint32_t template_checksum;
+
+	uint32_t reserved_2;
+	uint32_t driver_info[3];
+
+	uint32_t saved_state[16];
+
+	uint32_t reserved_3[8];
+	uint32_t firmware_version[5];
+};
+
+#define TEMPLATE_TYPE_FWDUMP		99
+
+#define ENTRY_TYPE_NOP			0
+#define ENTRY_TYPE_TMP_END		255
+#define ENTRY_TYPE_RD_IOB_T1		256
+#define ENTRY_TYPE_WR_IOB_T1		257
+#define ENTRY_TYPE_RD_IOB_T2		258
+#define ENTRY_TYPE_WR_IOB_T2		259
+#define ENTRY_TYPE_RD_PCI		260
+#define ENTRY_TYPE_WR_PCI		261
+#define ENTRY_TYPE_RD_RAM		262
+#define ENTRY_TYPE_GET_QUEUE		263
+#define ENTRY_TYPE_GET_FCE		264
+#define ENTRY_TYPE_PSE_RISC		265
+#define ENTRY_TYPE_RST_RISC		266
+#define ENTRY_TYPE_DIS_INTR		267
+#define ENTRY_TYPE_GET_HBUF		268
+#define ENTRY_TYPE_SCRATCH		269
+#define ENTRY_TYPE_RDREMREG		270
+#define ENTRY_TYPE_WRREMREG		271
+#define ENTRY_TYPE_RDREMRAM		272
+#define ENTRY_TYPE_PCICFG		273
+
+#define CAPTURE_FLAG_PHYS_ONLY		BIT_0
+#define CAPTURE_FLAG_PHYS_VIRT		BIT_1
+
+#define DRIVER_FLAG_SKIP_ENTRY		BIT_7
+
+struct __packed qla27xx_fwdt_entry {
+	struct __packed {
+		uint32_t entry_type;
+		uint32_t entry_size;
+		uint32_t reserved_1;
+
+		uint8_t  capture_flags;
+		uint8_t  reserved_2[2];
+		uint8_t  driver_flags;
+	} hdr;
+	union __packed {
+		struct __packed {
+		} t0;
+
+		struct __packed {
+		} t255;
+
+		struct __packed {
+			uint32_t base_addr;
+			uint8_t  reg_width;
+			uint16_t reg_count;
+			uint8_t  pci_offset;
+		} t256;
+
+		struct __packed {
+			uint32_t base_addr;
+			uint32_t write_data;
+			uint8_t  pci_offset;
+			uint8_t  reserved[3];
+		} t257;
+
+		struct __packed {
+			uint32_t base_addr;
+			uint8_t  reg_width;
+			uint16_t reg_count;
+			uint8_t  pci_offset;
+			uint8_t  banksel_offset;
+			uint8_t  reserved[3];
+			uint32_t bank;
+		} t258;
+
+		struct __packed {
+			uint32_t base_addr;
+			uint32_t write_data;
+			uint8_t  reserved[2];
+			uint8_t  pci_offset;
+			uint8_t  banksel_offset;
+			uint32_t bank;
+		} t259;
+
+		struct __packed {
+			uint8_t pci_addr;
+			uint8_t reserved[3];
+		} t260;
+
+		struct __packed {
+			uint8_t pci_addr;
+			uint8_t reserved[3];
+			uint32_t write_data;
+		} t261;
+
+		struct __packed {
+			uint8_t  ram_area;
+			uint8_t  reserved[3];
+			uint32_t start_addr;
+			uint32_t end_addr;
+		} t262;
+
+		struct __packed {
+			uint32_t num_queues;
+			uint8_t  queue_type;
+			uint8_t  reserved[3];
+		} t263;
+
+		struct __packed {
+			uint32_t fce_trace_size;
+			uint64_t write_pointer;
+			uint64_t base_pointer;
+			uint32_t fce_enable_mb0;
+			uint32_t fce_enable_mb2;
+			uint32_t fce_enable_mb3;
+			uint32_t fce_enable_mb4;
+			uint32_t fce_enable_mb5;
+			uint32_t fce_enable_mb6;
+		} t264;
+
+		struct __packed {
+		} t265;
+
+		struct __packed {
+		} t266;
+
+		struct __packed {
+			uint8_t  pci_offset;
+			uint8_t  reserved[3];
+			uint32_t data;
+		} t267;
+
+		struct __packed {
+			uint8_t  buf_type;
+			uint8_t  reserved[3];
+			uint32_t buf_size;
+			uint64_t start_addr;
+		} t268;
+
+		struct __packed {
+			uint32_t scratch_size;
+		} t269;
+
+		struct __packed {
+			uint32_t addr;
+			uint32_t count;
+		} t270;
+
+		struct __packed {
+			uint32_t addr;
+			uint32_t data;
+		} t271;
+
+		struct __packed {
+			uint32_t addr;
+			uint32_t count;
+		} t272;
+
+		struct __packed {
+			uint32_t addr;
+			uint32_t count;
+		} t273;
+	};
+};
+
+#define T262_RAM_AREA_CRITICAL_RAM	1
+#define T262_RAM_AREA_EXTERNAL_RAM	2
+#define T262_RAM_AREA_SHARED_RAM	3
+#define T262_RAM_AREA_DDR_RAM		4
+
+#define T263_QUEUE_TYPE_REQ		1
+#define T263_QUEUE_TYPE_RSP		2
+#define T263_QUEUE_TYPE_ATIO		3
+
+#define T268_BUF_TYPE_EXTD_TRACE	1
+#define T268_BUF_TYPE_EXCH_BUFOFF	2
+#define T268_BUF_TYPE_EXTD_LOGIN	3
+
+#endif
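
The template above is self-describing: the first entry sits entry_offset bytes into the blob, hdr.entry_size gives each entry's full size (header included -- an assumption here), and an ENTRY_TYPE_TMP_END entry terminates the list early. A minimal walker sketch using only the definitions from this header; the function name is illustrative, the in-tree consumer being qla27xx_execute_fwdt_template():

	static void example_walk_fwdt(struct qla27xx_fwdt_template *tmp)
	{
		struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
		uint32_t count = tmp->entry_count;

		while (count--) {
			if (ent->hdr.entry_type == ENTRY_TYPE_TMP_END)
				break;
			if (!(ent->hdr.driver_flags & DRIVER_FLAG_SKIP_ENTRY)) {
				/* capture the region this entry (t256..t273)
				 * describes */
			}
			/* assumes entry_size covers the header too */
			ent = (void *)ent + ent->hdr.entry_size;
		}
	}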
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 31d1953..e36b947 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.06.00.12-k"
+#define QLA2XXX_VERSION      "8.07.00.02-k"
 
 #define QLA_DRIVER_MAJOR_VER	8
-#define QLA_DRIVER_MINOR_VER	6
+#define QLA_DRIVER_MINOR_VER	7
 #define QLA_DRIVER_PATCH_VER	0
 #define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 9192848..2eba353 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1304,12 +1304,24 @@
 static int qla4_83xx_restart(struct scsi_qla_host *ha)
 {
 	int ret_val = QLA_SUCCESS;
+	uint32_t idc_ctrl;
 
 	qla4_83xx_process_stop_seq(ha);
 
-	/* Collect minidump*/
-	if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags))
+	/*
+	 * Collect minidump.
+	 * If IDC_CTRL BIT1 is set, clear it when going to the INIT state
+	 * and don't collect a minidump
+	 */
+	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+				 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+		ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
+			   __func__);
+	} else {
 		qla4_8xxx_get_minidump(ha);
+	}
 
 	qla4_83xx_process_init_seq(ha);
 
@@ -1664,3 +1676,23 @@
 	__qla4_83xx_disable_pause(ha);
 	ha->isp_ops->idc_unlock(ha);
 }
+
+/**
+ * qla4_83xx_is_detached - Check if we are marked invisible.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4_83xx_is_detached(struct scsi_qla_host *ha)
+{
+	uint32_t drv_active;
+
+	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+	if (test_bit(AF_INIT_DONE, &ha->flags) &&
+	    !(drv_active & (1 << ha->func_num))) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
+				  __func__, drv_active));
+		return QLA_SUCCESS;
+	}
+
+	return QLA_ERROR;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index 04a0027..9f92cbf 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -517,7 +517,7 @@
 						(ha->idc_extend_tmo * HZ))) {
 				ha->notify_idc_comp = 0;
 				ha->notify_link_up_comp = 0;
-				ql4_printk(KERN_WARNING, ha, "%s: IDC Complete notification not received",
+				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
 					   __func__);
 				status = QLA_ERROR;
 				goto exit_wait;
@@ -538,7 +538,7 @@
 		if (!wait_for_completion_timeout(&ha->link_up_comp,
 						 (IDC_COMP_TOV * HZ))) {
 			ha->notify_link_up_comp = 0;
-			ql4_printk(KERN_WARNING, ha, "%s: LINK UP notification not received",
+			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
 				   __func__);
 			status = QLA_ERROR;
 			goto exit_wait;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index aa67bb9..73a5022 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -194,7 +194,7 @@
 #define ADAPTER_INIT_TOV		30
 #define ADAPTER_RESET_TOV		180
 #define EXTEND_CMD_TOV			60
-#define WAIT_CMD_TOV			30
+#define WAIT_CMD_TOV			5
 #define EH_WAIT_CMD_TOV			120
 #define FIRMWARE_UP_TOV			60
 #define RESET_FIRMWARE_TOV		30
@@ -297,6 +297,8 @@
 
 	/* Driver Re-login  */
 	unsigned long flags;		  /* DDB Flags */
+#define DDB_CONN_CLOSE_FAILURE		0 /* 0x00000001 */
+
 	uint16_t default_relogin_timeout; /*  Max time to wait for
 					   *  relogin to complete */
 	atomic_t retry_relogin_timer;	  /* Min Time between relogins
@@ -580,7 +582,6 @@
 #define AF_82XX_FW_DUMPED		24 /* 0x01000000 */
 #define AF_8XXX_RST_OWNER		25 /* 0x02000000 */
 #define AF_82XX_DUMP_READING		26 /* 0x04000000 */
-#define AF_83XX_NO_FW_DUMP		27 /* 0x08000000 */
 #define AF_83XX_IOCB_INTR_ON		28 /* 0x10000000 */
 #define AF_83XX_MBOX_INTR_ON		29 /* 0x20000000 */
 
@@ -595,10 +596,10 @@
 #define DPC_AEN				9 /* 0x00000200 */
 #define DPC_GET_DHCP_IP_ADDR		15 /* 0x00008000 */
 #define DPC_LINK_CHANGED		18 /* 0x00040000 */
-#define DPC_RESET_ACTIVE		20 /* 0x00040000 */
-#define DPC_HA_UNRECOVERABLE		21 /* 0x00080000 ISP-82xx only*/
-#define DPC_HA_NEED_QUIESCENT		22 /* 0x00100000 ISP-82xx only*/
-#define DPC_POST_IDC_ACK		23 /* 0x00200000 */
+#define DPC_RESET_ACTIVE		20 /* 0x00100000 */
+#define DPC_HA_UNRECOVERABLE		21 /* 0x00200000 ISP-82xx only*/
+#define DPC_HA_NEED_QUIESCENT		22 /* 0x00400000 ISP-82xx only*/
+#define DPC_POST_IDC_ACK		23 /* 0x00800000 */
 #define DPC_RESTORE_ACB			24 /* 0x01000000 */
 
 	struct Scsi_Host *host; /* pointer to host data */
@@ -768,6 +769,7 @@
 	uint32_t fw_dump_capture_mask;
 	void *fw_dump_tmplt_hdr;
 	uint32_t fw_dump_tmplt_size;
+	uint32_t fw_dump_skip_size;
 
 	struct completion mbx_intr_comp;
 
@@ -910,7 +912,8 @@
 static inline int is_aer_supported(struct scsi_qla_host *ha)
 {
 	return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
-		(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324));
+		(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324) ||
+		(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042));
 }
 
 static inline int adapter_up(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 8d4092b..209853c 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -390,6 +390,7 @@
 #define MBOX_CMD_CLEAR_DATABASE_ENTRY		0x0031
 #define MBOX_CMD_CONN_OPEN			0x0074
 #define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT		0x0056
+#define DDB_NOT_LOGGED_IN			0x09
 #define LOGOUT_OPTION_CLOSE_SESSION		0x0002
 #define LOGOUT_OPTION_RELOGIN			0x0004
 #define LOGOUT_OPTION_FREE_DDB			0x0008
@@ -505,9 +506,9 @@
 #define MBOX_ASTS_RESPONSE_QUEUE_FULL		0x8028
 #define MBOX_ASTS_IP_ADDR_STATE_CHANGED		0x8029
 #define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED	0x802A
-#define MBOX_ASTS_IPV6_PREFIX_EXPIRED		0x802B
-#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED	0x802C
-#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED	0x802D
+#define MBOX_ASTS_IPV6_LINK_MTU_CHANGE		0x802B
+#define MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED	0x802C
+#define MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED	0x802D
 #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD		0x802E
 #define MBOX_ASTS_INITIALIZATION_FAILED		0x8031
 #define MBOX_ASTS_SYSTEM_WARNING_EVENT		0x8036
@@ -528,14 +529,14 @@
 #define ACB_CONFIG_DISABLE		0x00
 #define ACB_CONFIG_SET			0x01
 
-/* ACB State Defines */
-#define ACB_STATE_UNCONFIGURED	0x00
-#define ACB_STATE_INVALID	0x01
-#define ACB_STATE_ACQUIRING	0x02
-#define ACB_STATE_TENTATIVE	0x03
-#define ACB_STATE_DEPRICATED	0x04
-#define ACB_STATE_VALID		0x05
-#define ACB_STATE_DISABLING	0x06
+/* ACB/IP Address State Defines */
+#define IP_ADDRSTATE_UNCONFIGURED	0
+#define IP_ADDRSTATE_INVALID		1
+#define IP_ADDRSTATE_ACQUIRING		2
+#define IP_ADDRSTATE_TENTATIVE		3
+#define IP_ADDRSTATE_DEPRICATED		4
+#define IP_ADDRSTATE_PREFERRED		5
+#define IP_ADDRSTATE_DISABLING		6
 
 /* FLASH offsets */
 #define FLASH_SEGMENT_IFCB	0x04000000
@@ -698,14 +699,6 @@
 	uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
 	uint8_t ipv6_addr0_state;	/* 223 */
 	uint8_t ipv6_addr1_state;	/* 224 */
-#define IP_ADDRSTATE_UNCONFIGURED	0
-#define IP_ADDRSTATE_INVALID		1
-#define IP_ADDRSTATE_ACQUIRING		2
-#define IP_ADDRSTATE_TENTATIVE		3
-#define IP_ADDRSTATE_DEPRICATED		4
-#define IP_ADDRSTATE_PREFERRED		5
-#define IP_ADDRSTATE_DISABLING		6
-
 	uint8_t ipv6_dflt_rtr_state;    /* 225 */
 #define IPV6_RTRSTATE_UNKNOWN                   0
 #define IPV6_RTRSTATE_MANUAL                    1
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index d67c50e..b1a19cd 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -279,6 +279,8 @@
 uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
 int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
 int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
+int qla4_83xx_is_detached(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 7456eeb..28fbece 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -959,13 +959,8 @@
 		qla4xxx_build_ddb_list(ha, is_reset);
 
 	set_bit(AF_ONLINE, &ha->flags);
-exit_init_hba:
-	if (is_qla80XX(ha) && (status == QLA_ERROR)) {
-		/* Since interrupts are registered in start_firmware for
-		 * 80XX, release them here if initialize_adapter fails */
-		qla4xxx_free_irqs(ha);
-	}
 
+exit_init_hba:
 	DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
 	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
 	return status;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index a3c8bc7..b1925d1 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -385,9 +385,9 @@
 
 	cls_conn = ddb_entry->conn;
 	conn = cls_conn->dd_data;
-	spin_lock(&conn->session->lock);
+	spin_lock(&conn->session->back_lock);
 	task = iscsi_itt_to_task(conn, itt);
-	spin_unlock(&conn->session->lock);
+	spin_unlock(&conn->session->back_lock);
 
 	if (task == NULL) {
 		ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
@@ -635,6 +635,18 @@
 	}
 }
 
+static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
+					   uint32_t *mbox_sts)
+{
+	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
+	       &mbox_sts[2], sizeof(uint32_t));
+	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1],
+	       &mbox_sts[3], sizeof(uint32_t));
+	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2],
+	       &mbox_sts[4], sizeof(uint32_t));
+	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3],
+	       &mbox_sts[5], sizeof(uint32_t));
+}
 
 /**
  * qla4xxx_isr_decode_mailbox - decodes mailbox status
@@ -781,27 +793,44 @@
 						    mbox_sts[3]);
 			/* mbox_sts[2] = Old ACB state
 			 * mbox_sts[3] = new ACB state */
-			if ((mbox_sts[3] == ACB_STATE_VALID) &&
-			    ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
-			    (mbox_sts[2] == ACB_STATE_ACQUIRING))) {
+			if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) &&
+			    ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) ||
+			     (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {
 				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
-			} else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
-				   (mbox_sts[2] == ACB_STATE_VALID)) {
+			} else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) &&
+				   (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {
 				if (is_qla80XX(ha))
 					set_bit(DPC_RESET_HA_FW_CONTEXT,
 						&ha->dpc_flags);
 				else
 					set_bit(DPC_RESET_HA, &ha->dpc_flags);
-			} else if (mbox_sts[3] == ACB_STATE_DISABLING) {
+			} else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {
 				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
 					   ha->host_no, __func__);
-			} else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) {
+			} else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {
 				complete(&ha->disable_acb_comp);
 				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
 					   ha->host_no, __func__);
 			}
 			break;
 
+		case MBOX_ASTS_IPV6_LINK_MTU_CHANGE:
+		case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED:
+		case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED:
+			/* No action */
+			DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n",
+					  ha->host_no, mbox_status));
+			break;
+
+		case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x, IPv6 ERROR, "
+					  "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x, mbox_sts[5]=%08x\n",
+					  ha->host_no, mbox_sts[0], mbox_sts[1],
+					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+					  mbox_sts[5]));
+			break;
+
 		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
 		case MBOX_ASTS_DNS:
 			/* No action */
@@ -939,6 +968,7 @@
 			DEBUG2(ql4_printk(KERN_INFO, ha,
 					  "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
 					  ha->host_no, mbox_sts[0]));
+			qla4xxx_default_router_changed(ha, mbox_sts);
 			break;
 
 		case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
@@ -1022,7 +1052,8 @@
     uint32_t intr_status)
 {
 	/* Process response queue interrupt. */
-	if (intr_status & HSRX_RISC_IOCB_INT)
+	if ((intr_status & HSRX_RISC_IOCB_INT) &&
+	    test_bit(AF_INIT_DONE, &ha->flags))
 		qla4xxx_process_response_queue(ha);
 
 	/* Process mailbox/asynch event interrupt.*/
@@ -1399,6 +1430,7 @@
 {
 	struct scsi_qla_host *ha = dev_id;
 	unsigned long flags;
+	int intr_status;
 	uint32_t ival = 0;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1412,8 +1444,15 @@
 		qla4xxx_process_response_queue(ha);
 		writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
 	} else {
-		qla4xxx_process_response_queue(ha);
-		writel(0, &ha->qla4_82xx_reg->host_int);
+		intr_status = readl(&ha->qla4_82xx_reg->host_status);
+		if (intr_status & HSRX_RISC_IOCB_INT) {
+			qla4xxx_process_response_queue(ha);
+			writel(0, &ha->qla4_82xx_reg->host_int);
+		} else {
+			ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n",
+				   __func__);
+			goto exit_msix_rsp_q;
+		}
 	}
 	ha->isr_count++;
 exit_msix_rsp_q:
@@ -1488,6 +1527,7 @@
 int qla4xxx_request_irqs(struct scsi_qla_host *ha)
 {
 	int ret;
+	int rval = QLA_ERROR;
 
 	if (is_qla40XX(ha))
 		goto try_intx;
@@ -1568,9 +1608,10 @@
 	set_bit(AF_IRQ_ATTACHED, &ha->flags);
 	ha->host->irq = ha->pdev->irq;
 	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
-	    __func__, ha->pdev->irq);
+		   __func__, ha->pdev->irq);
+	rval = QLA_SUCCESS;
 irq_not_attached:
-	return ret;
+	return rval;
 }
 
 void qla4xxx_free_irqs(struct scsi_qla_host *ha)
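
Note on qla4xxx_default_router_changed() above: s6_addr32[0..3] and mbox_sts[2..5] are both contiguous 32-bit arrays, so the four copies could equally be one 16-byte copy; the per-word form just makes the mailbox-register-to-address-word mapping explicit:

	/* equivalent one-shot form, for illustration only */
	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
	       &mbox_sts[2], 4 * sizeof(uint32_t));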
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 9ae8ca3..0a6b782 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -212,9 +212,8 @@
 			    ha->host_no, __func__));
 			goto mbox_exit;
 		}
-		DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
-			      " Scheduling Adapter Reset\n", ha->host_no,
-			      mbx_cmd[0]));
+		ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n",
+			   ha->host_no, mbx_cmd[0]);
 		ha->mailbox_timeout_count++;
 		mbx_sts[0] = (-1);
 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -251,15 +250,16 @@
 		break;
 
 	case MBOX_STS_BUSY:
-		DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
-			       ha->host_no, __func__, mbx_cmd[0]));
+		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
+			   ha->host_no, __func__, mbx_cmd[0]);
 		ha->mailbox_timeout_count++;
 		break;
 
 	default:
-		DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, "
-			      "sts = %08X ****\n", ha->host_no, __func__,
-			      mbx_cmd[0], mbx_sts[0]));
+		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n",
+			   ha->host_no, __func__, mbx_cmd[0], mbx_sts[0],
+			   mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4],
+			   mbx_sts[5], mbx_sts[6], mbx_sts[7]);
 		break;
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -383,7 +383,6 @@
 	mbox_cmd[2] = LSDW(init_fw_cb_dma);
 	mbox_cmd[3] = MSDW(init_fw_cb_dma);
 	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
-	mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
 
 	if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
 	    QLA_SUCCESS) {
@@ -648,9 +647,6 @@
 		goto exit_init_fw_cb;
 	}
 
-	/* Initialize request and response queues. */
-	qla4xxx_init_rings(ha);
-
 	/* Fill in the request and response queue information. */
 	init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
 	init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
@@ -1002,6 +998,10 @@
 				  "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
 				  "failed sts %04X %04X", __func__,
 				  mbox_sts[0], mbox_sts[1]));
+		if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) &&
+		    (mbox_sts[1] == DDB_NOT_LOGGED_IN)) {
+			set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
+		}
 	}
 
 	return status;
@@ -1918,6 +1918,7 @@
 				  mbox_sts[0], mbox_sts[1], mbox_sts[2]));
 	} else {
 		if (is_qla8042(ha) &&
+		    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&
 		    (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
 			/*
 			 * Disable ACB mailbox command takes time to complete
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d001202..63328c8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2383,6 +2383,11 @@
 			  "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
 			  ha->host_no, index, entry_hdr->entry_type,
 			  entry_hdr->d_ctrl.entry_capture_mask));
+	/* If the driver encounters a new entry type that it cannot process,
+	 * it should just skip the entry and adjust the total buffer size by
+	 * subtracting the skipped bytes from it.
+	 */
+	ha->fw_dump_skip_size += entry_hdr->entry_capture_size;
 }
 
 /* ISP83xx functions to process new minidump entries... */
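
The fw_dump_skip_size accounting added above feeds the completeness check later in this file. A worked example with assumed numbers: for a template that declares a 0x1000-byte dump containing one unprocessable 0x100-byte entry, capture should finish with

	data_collected        == 0x0f00
	ha->fw_dump_skip_size == 0x0100

so (data_collected + ha->fw_dump_skip_size) == ha->fw_dump_size still holds and no mismatch is reported.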
@@ -2590,6 +2595,7 @@
 	uint64_t now;
 	uint32_t timestamp;
 
+	ha->fw_dump_skip_size = 0;
 	if (!ha->fw_dump) {
 		ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
 			   __func__, ha->host_no);
@@ -2761,7 +2767,7 @@
 				 entry_hdr->entry_size);
 	}
 
-	if (data_collected != ha->fw_dump_size) {
+	if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) {
 		ql4_printk(KERN_INFO, ha,
 			   "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
 			   data_collected, ha->fw_dump_size);
@@ -2820,63 +2826,35 @@
 int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
 {
 	int rval = QLA_ERROR;
-	int i, timeout;
-	uint32_t old_count, count, idc_ctrl;
-	int need_reset = 0, peg_stuck = 1;
+	int i;
+	uint32_t old_count, count;
+	int need_reset = 0;
 
 	need_reset = ha->isp_ops->need_reset(ha);
-	old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
-
-	for (i = 0; i < 10; i++) {
-		timeout = msleep_interruptible(200);
-		if (timeout) {
-			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
-					    QLA8XXX_DEV_FAILED);
-			return rval;
-		}
-
-		count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
-		if (count != old_count)
-			peg_stuck = 0;
-	}
 
 	if (need_reset) {
 		/* We are trying to perform a recovery here. */
-		if (peg_stuck)
+		if (test_bit(AF_FW_RECOVERY, &ha->flags))
 			ha->isp_ops->rom_lock_recovery(ha);
-		goto dev_initialize;
 	} else  {
-		/* Start of day for this ha context. */
-		if (peg_stuck) {
-			/* Either we are the first or recovery in progress. */
-			ha->isp_ops->rom_lock_recovery(ha);
-			goto dev_initialize;
-		} else {
-			/* Firmware already running. */
-			rval = QLA_SUCCESS;
-			goto dev_ready;
+		old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+		for (i = 0; i < 10; i++) {
+			msleep(200);
+			count = qla4_8xxx_rd_direct(ha,
+						    QLA8XXX_PEG_ALIVE_COUNTER);
+			if (count != old_count) {
+				rval = QLA_SUCCESS;
+				goto dev_ready;
+			}
 		}
+		ha->isp_ops->rom_lock_recovery(ha);
 	}
 
-dev_initialize:
 	/* set to DEV_INITIALIZING */
 	ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
 	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
 			    QLA8XXX_DEV_INITIALIZING);
 
-	/*
-	 * For ISP8324 and ISP8042, if IDC_CTRL GRACEFUL_RESET_BIT1 is set,
-	 * reset it after device goes to INIT state.
-	 */
-	if (is_qla8032(ha) || is_qla8042(ha)) {
-		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
-		if (idc_ctrl & GRACEFUL_RESET_BIT1) {
-			qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
-					 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
-			set_bit(AF_83XX_NO_FW_DUMP, &ha->flags);
-		}
-	}
-
 	ha->isp_ops->idc_unlock(ha);
 
 	if (is_qla8022(ha))
@@ -3209,6 +3187,10 @@
 
 	retval = qla4_8xxx_device_state_handler(ha);
 
+	/* Initialize request and response queues. */
+	if (retval == QLA_SUCCESS)
+		qla4xxx_init_rings(ha);
+
 	if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
 		retval = qla4xxx_request_irqs(ha);
 
@@ -3836,3 +3818,24 @@
 msix_out:
 	return ret;
 }
+
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha)
+{
+	int status = QLA_SUCCESS;
+
+	/* Don't retry adapter initialization if IRQ allocation failed */
+	if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n",
+			   __func__);
+		status = QLA_ERROR;
+		goto exit_init_adapter_failure;
+	}
+
+	/* Since interrupts are registered in start_firmware for
+	 * 8xxx adapters, release them here when initialize_adapter
+	 * fails so that the retry can register them again */
+	qla4xxx_free_irqs(ha);
+
+exit_init_adapter_failure:
+	return status;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c21adc3..459b9f7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1670,16 +1670,13 @@
 	struct sockaddr_in *addr;
 	struct sockaddr_in6 *addr6;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	if (!shost) {
 		ret = -ENXIO;
-		printk(KERN_ERR "%s: shost is NULL\n",
-		       __func__);
+		pr_err("%s: shost is NULL\n", __func__);
 		return ERR_PTR(ret);
 	}
 
 	ha = iscsi_host_priv(shost);
-
 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
 	if (!ep) {
 		ret = -ENOMEM;
@@ -1699,6 +1696,9 @@
 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
 				  (char *)&addr6->sin6_addr));
+	} else {
+		ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
+			   __func__);
 	}
 
 	qla_ep->host = shost;
@@ -1712,9 +1712,9 @@
 	struct scsi_qla_host *ha;
 	int ret = 0;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	qla_ep = ep->dd_data;
 	ha = to_qla_host(qla_ep->host);
+	DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
 
 	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
 		ret = 1;
@@ -1724,7 +1724,13 @@
 
 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
 {
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+	struct qla_endpoint *qla_ep;
+	struct scsi_qla_host *ha;
+
+	qla_ep = ep->dd_data;
+	ha = to_qla_host(qla_ep->host);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 	iscsi_destroy_endpoint(ep);
 }
 
@@ -1734,8 +1740,11 @@
 {
 	struct qla_endpoint *qla_ep = ep->dd_data;
 	struct sockaddr *dst_addr;
+	struct scsi_qla_host *ha;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+	ha = to_qla_host(qla_ep->host);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
@@ -1766,13 +1775,13 @@
 	int ret;
 	dma_addr_t iscsi_stats_dma;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
-
 	cls_sess = iscsi_conn_to_session(cls_conn);
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
 
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
 	/* Allocate memory */
 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
@@ -2100,7 +2109,8 @@
 				cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
 		else
 			init_fw_cb->ipv6_tcp_opts &=
-				cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE);
+				cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
+					    0xFFFF);
 		break;
 	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
 		if (iface_param->iface_num & 0x1)
@@ -2297,7 +2307,8 @@
 				cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
 		else
 			init_fw_cb->ipv4_tcp_opts &=
-				cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE);
+				cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
+					    0xFFFF);
 		break;
 	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
 		if (iface_param->iface_num & 0x1)
@@ -3045,7 +3056,6 @@
 	struct sockaddr *dst_addr;
 	int ret;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	if (!ep) {
 		printk(KERN_ERR "qla4xxx: missing ep.\n");
 		return NULL;
@@ -3054,6 +3064,8 @@
 	qla_ep = ep->dd_data;
 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
 	ha = to_qla_host(qla_ep->host);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 
 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
 	if (ret == QLA_ERROR)
@@ -3074,6 +3086,7 @@
 	ddb_entry->sess = cls_sess;
 	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
 	ddb_entry->ddb_change = qla4xxx_ddb_change;
+	clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
 	ha->tot_ddbs++;
@@ -3092,10 +3105,11 @@
 	uint32_t ddb_state;
 	int ret;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
 
 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
 					  &fw_ddb_entry_dma, GFP_KERNEL);
@@ -3123,7 +3137,8 @@
 
 destroy_session:
 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
-
+	if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
+		clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qla4xxx_free_ddb(ha, ddb_entry);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3141,17 +3156,23 @@
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_session *sess;
 	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
 				    conn_idx);
-	if (!cls_conn)
+	if (!cls_conn) {
+		pr_info("%s: Cannot create connection for conn_idx = %u\n",
+			__func__, conn_idx);
 		return NULL;
+	}
 
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ddb_entry->conn = cls_conn;
 
+	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
+			  conn_idx));
 	return cls_conn;
 }
 
@@ -3162,8 +3183,16 @@
 	struct iscsi_conn *conn;
 	struct qla_conn *qla_conn;
 	struct iscsi_endpoint *ep;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	struct iscsi_session *sess;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+			  cls_session->sid, cls_conn->cid));
 
 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
 		return -EINVAL;
@@ -3186,10 +3215,11 @@
 	int ret = 0;
 	int status = QLA_SUCCESS;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+			  cls_sess->sid, cls_conn->cid));
 
 	/* Check if we have  matching FW DDB, if yes then do not
 	 * login to this target. This could cause target to logout previous
@@ -3263,10 +3293,11 @@
 	struct ddb_entry *ddb_entry;
 	int options;
 
-	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
 	sess = cls_sess->dd_data;
 	ddb_entry = sess->dd_data;
 	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
+			  cls_conn->cid));
 
 	options = LOGOUT_OPTION_CLOSE_SESSION;
 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
@@ -4372,6 +4403,11 @@
 	uint32_t dev_state;
 	uint32_t idc_ctrl;
 
+	if (is_qla8032(ha) &&
+	    (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
+		WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
+			  __func__, ha->func_num);
+
 	/* don't poll if reset is going on */
 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
@@ -4554,11 +4590,19 @@
 	uint32_t index = 0;
 	unsigned long flags;
 	struct scsi_cmnd *cmd;
+	unsigned long wtime;
+	uint32_t wtmo;
 
-	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
+	if (is_qla40XX(ha))
+		wtmo = WAIT_CMD_TOV;
+	else
+		wtmo = ha->nx_reset_timeout / 2;
 
-	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
-	    "complete\n", WAIT_CMD_TOV));
+	wtime = jiffies + (wtmo * HZ);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Wait up to %u seconds for cmds to complete\n",
+			  wtmo));
 
 	while (!time_after_eq(jiffies, wtime)) {
 		spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -4861,11 +4905,11 @@
 			qla4xxx_cmd_wait(ha);
 
 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
-		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
 		DEBUG2(ql4_printk(KERN_INFO, ha,
 		    "scsi%ld: %s - Performing chip reset..\n",
 		    ha->host_no, __func__));
 		status = ha->isp_ops->reset_chip(ha);
+		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
 	}
 
 	/* Flush any pending ddb changed AENs */
@@ -4881,8 +4925,21 @@
 			ssleep(6);
 
 		/* NOTE: AF_ONLINE flag set upon successful completion of
-		 *       qla4xxx_initialize_adapter */
+		 * qla4xxx_initialize_adapter */
 		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+			status = qla4_8xxx_check_init_adapter_retry(ha);
+			if (status == QLA_ERROR) {
+				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Not retrying adapter recovery\n",
+					   ha->host_no, __func__);
+				qla4xxx_dead_adapter_cleanup(ha);
+				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA_FW_CONTEXT,
+					  &ha->dpc_flags);
+				goto exit_recover;
+			}
+		}
 	}
 
 	/* Retry failed adapter initialization, if necessary
@@ -5228,9 +5285,9 @@
 		container_of(work, struct scsi_qla_host, dpc_work);
 	int status = QLA_ERROR;
 
-	DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
-	    "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
-	    ha->host_no, __func__, ha->flags, ha->dpc_flags))
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
+			  ha->host_no, __func__, ha->flags, ha->dpc_flags));
 
 	/* Initialization not yet finished. Don't do anything yet. */
 	if (!test_bit(AF_INIT_DONE, &ha->flags))
@@ -8681,11 +8738,8 @@
 	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
 
 	/* Dont retry adapter initialization if IRQ allocation failed */
-	if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
-		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
-			   __func__);
+	if (is_qla80XX(ha) && (status == QLA_ERROR))
 		goto skip_retry_init;
-	}
 
 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
 	    init_retry_count++ < MAX_INIT_RETRIES) {
@@ -8709,6 +8763,10 @@
 			continue;
 
 		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+			if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
+				goto skip_retry_init;
+		}
 	}
 
 skip_retry_init:
@@ -8857,10 +8915,56 @@
 	}
 }
 
+static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
+		struct ddb_entry *ddb_entry)
+{
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	unsigned long wtime;
+	uint32_t ddb_state;
+	int options;
+	int status;
+
+	options = LOGOUT_OPTION_CLOSE_SESSION;
+	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+		goto clear_ddb;
+	}
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		goto clear_ddb;
+	}
+
+	wtime = jiffies + (HZ * LOGOUT_TOV);
+	do {
+		status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+						 fw_ddb_entry, fw_ddb_entry_dma,
+						 NULL, NULL, &ddb_state, NULL,
+						 NULL, NULL);
+		if (status == QLA_ERROR)
+			goto free_ddb;
+
+		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+		    (ddb_state == DDB_DS_SESSION_FAILED))
+			goto free_ddb;
+
+		schedule_timeout_uninterruptible(HZ);
+	} while (time_after(wtime, jiffies));
+
+free_ddb:
+	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+			  fw_ddb_entry, fw_ddb_entry_dma);
+clear_ddb:
+	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+}
+
 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
 {
 	struct ddb_entry *ddb_entry;
-	int options;
 	int idx;
 
 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
@@ -8869,13 +8973,7 @@
 		if ((ddb_entry != NULL) &&
 		    (ddb_entry->ddb_type == FLASH_DDB)) {
 
-			options = LOGOUT_OPTION_CLOSE_SESSION;
-			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
-			    == QLA_ERROR)
-				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
-					   __func__);
-
-			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+			qla4xxx_destroy_ddb(ha, ddb_entry);
 			/*
 			 * we have decremented the reference count of the driver
 			 * when we setup the session to have the driver unload
@@ -9136,14 +9234,15 @@
 	int ret = SUCCESS;
 	int wait = 0;
 
-	ql4_printk(KERN_INFO, ha,
-	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
-	    ha->host_no, id, lun, cmd);
+	ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Abort command issued cmd=%p, cdb=0x%x\n",
+		   ha->host_no, id, lun, cmd, cmd->cmnd[0]);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	srb = (struct srb *) CMD_SP(cmd);
 	if (!srb) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Specified command has already completed.\n",
+			   ha->host_no, id, lun);
 		return SUCCESS;
 	}
 	kref_get(&srb->srb_ref);
@@ -9560,28 +9659,36 @@
 	}
 
 	fn = PCI_FUNC(ha->pdev->devfn);
-	while (fn > 0) {
-		fn--;
-		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
-		    "func %x\n", ha->host_no, __func__, fn);
-		/* Get the pci device given the domain, bus,
-		 * slot/function number */
-		other_pdev =
-		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
-		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
-		    fn));
+	if (is_qla8022(ha)) {
+		while (fn > 0) {
+			fn--;
+			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
+				   ha->host_no, __func__, fn);
+			/* Get the pci device given the domain, bus,
+			 * slot/function number */
+			other_pdev = pci_get_domain_bus_and_slot(
+					   pci_domain_nr(ha->pdev->bus),
+					   ha->pdev->bus->number,
+					   PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+					   fn));
 
-		if (!other_pdev)
-			continue;
+			if (!other_pdev)
+				continue;
 
-		if (atomic_read(&other_pdev->enable_cnt)) {
-			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
-			    "func in enabled state%x\n", ha->host_no,
-			    __func__, fn);
+			if (atomic_read(&other_pdev->enable_cnt)) {
+				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func %x in enabled state\n",
+					   ha->host_no, __func__, fn);
+				pci_dev_put(other_pdev);
+				break;
+			}
 			pci_dev_put(other_pdev);
-			break;
 		}
-		pci_dev_put(other_pdev);
+	} else {
+		/* this case is meant for ISP83xx/ISP84xx only */
+		if (qla4_83xx_can_perform_reset(ha)) {
+			/* reset fn as iSCSI is going to perform the reset */
+			fn = 0;
+		}
 	}
 
 	/* The first function on the card, the reset owner will
@@ -9615,6 +9722,7 @@
 		if (rval != QLA_SUCCESS) {
 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
 			    "FAILED\n", ha->host_no, __func__);
+			qla4xxx_free_irqs(ha);
 			ha->isp_ops->idc_lock(ha);
 			qla4_8xxx_clear_drv_active(ha);
 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
@@ -9642,6 +9750,8 @@
 			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
 			if (rval == QLA_SUCCESS)
 				ha->isp_ops->enable_intrs(ha);
+			else
+				qla4xxx_free_irqs(ha);
 
 			ha->isp_ops->idc_lock(ha);
 			qla4_8xxx_set_drv_active(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 9b29466..c6ba0a6 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.04.00-k3"
+#define QLA4XXX_DRIVER_VERSION	"5.04.00-k4"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d8afec8..c4d632c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -161,47 +161,20 @@
 static DEFINE_MUTEX(host_cmd_pool_mutex);
 
 /**
- * scsi_pool_alloc_command - internal function to get a fully allocated command
- * @pool:	slab pool to allocate the command from
- * @gfp_mask:	mask for the allocation
- *
- * Returns a fully allocated command (with the allied sense buffer) or
- * NULL on failure
- */
-static struct scsi_cmnd *
-scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
-{
-	struct scsi_cmnd *cmd;
-
-	cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
-	if (!cmd)
-		return NULL;
-
-	cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
-					     gfp_mask | pool->gfp_mask);
-	if (!cmd->sense_buffer) {
-		kmem_cache_free(pool->cmd_slab, cmd);
-		return NULL;
-	}
-
-	return cmd;
-}
-
-/**
- * scsi_pool_free_command - internal function to release a command
- * @pool:	slab pool to allocate the command from
+ * scsi_host_free_command - internal function to release a command
+ * @shost:	host to free the command for
  * @cmd:	command to release
  *
  * the command must previously have been allocated by
- * scsi_pool_alloc_command.
+ * scsi_host_alloc_command.
  */
 static void
-scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
-			 struct scsi_cmnd *cmd)
+scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 {
+	struct scsi_host_cmd_pool *pool = shost->cmd_pool;
+
 	if (cmd->prot_sdb)
 		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
-
 	kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
 	kmem_cache_free(pool->cmd_slab, cmd);
 }
@@ -217,22 +190,32 @@
 static struct scsi_cmnd *
 scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 {
+	struct scsi_host_cmd_pool *pool = shost->cmd_pool;
 	struct scsi_cmnd *cmd;
 
-	cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
+	cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
 	if (!cmd)
-		return NULL;
+		goto fail;
+
+	cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
+					     gfp_mask | pool->gfp_mask);
+	if (!cmd->sense_buffer)
+		goto fail_free_cmd;
 
 	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
 		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
-
-		if (!cmd->prot_sdb) {
-			scsi_pool_free_command(shost->cmd_pool, cmd);
-			return NULL;
-		}
+		if (!cmd->prot_sdb)
+			goto fail_free_sense;
 	}
 
 	return cmd;
+
+fail_free_sense:
+	kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
+fail_free_cmd:
+	kmem_cache_free(pool->cmd_slab, cmd);
+fail:
+	return NULL;
 }
 
 /**
@@ -284,27 +267,19 @@
  */
 struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 {
-	struct scsi_cmnd *cmd;
+	struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
+	unsigned long flags;
 
-	/* Bail if we can't get a reference to the device */
-	if (!get_device(&dev->sdev_gendev))
+	if (unlikely(cmd == NULL))
 		return NULL;
 
-	cmd = __scsi_get_command(dev->host, gfp_mask);
-
-	if (likely(cmd != NULL)) {
-		unsigned long flags;
-
-		cmd->device = dev;
-		INIT_LIST_HEAD(&cmd->list);
-		INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
-		spin_lock_irqsave(&dev->list_lock, flags);
-		list_add_tail(&cmd->list, &dev->cmd_list);
-		spin_unlock_irqrestore(&dev->list_lock, flags);
-		cmd->jiffies_at_alloc = jiffies;
-	} else
-		put_device(&dev->sdev_gendev);
-
+	cmd->device = dev;
+	INIT_LIST_HEAD(&cmd->list);
+	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+	spin_lock_irqsave(&dev->list_lock, flags);
+	list_add_tail(&cmd->list, &dev->cmd_list);
+	spin_unlock_irqrestore(&dev->list_lock, flags);
+	cmd->jiffies_at_alloc = jiffies;
 	return cmd;
 }
 EXPORT_SYMBOL(scsi_get_command);
@@ -313,25 +288,22 @@
  * __scsi_put_command - Free a struct scsi_cmnd
  * @shost: dev->host
  * @cmd: Command to free
- * @dev: parent scsi device
  */
-void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
-			struct device *dev)
+void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 {
 	unsigned long flags;
 
-	/* changing locks here, don't need to restore the irq state */
-	spin_lock_irqsave(&shost->free_list_lock, flags);
 	if (unlikely(list_empty(&shost->free_list))) {
-		list_add(&cmd->list, &shost->free_list);
-		cmd = NULL;
+		spin_lock_irqsave(&shost->free_list_lock, flags);
+		if (list_empty(&shost->free_list)) {
+			list_add(&cmd->list, &shost->free_list);
+			cmd = NULL;
+		}
+		spin_unlock_irqrestore(&shost->free_list_lock, flags);
 	}
-	spin_unlock_irqrestore(&shost->free_list_lock, flags);
 
 	if (likely(cmd != NULL))
-		scsi_pool_free_command(shost->cmd_pool, cmd);
-
-	put_device(dev);
+		scsi_host_free_command(shost, cmd);
 }
 EXPORT_SYMBOL(__scsi_put_command);
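
__scsi_put_command() now takes free_list_lock only when the reserve list looks empty and rechecks under the lock before donating the command. Reduced to a sketch with hypothetical pool/item types -- two CPUs may both pass the unlocked peek, but the locked recheck ensures at most one spare is queued:

	if (list_empty(&pool->reserve)) {		/* unlocked peek */
		spin_lock_irqsave(&pool->lock, flags);
		if (list_empty(&pool->reserve)) {	/* recheck under lock */
			list_add(&item->list, &pool->reserve);
			item = NULL;			/* donated */
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	if (item)
		free_item(item);			/* hypothetical helper */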
 
@@ -345,7 +317,6 @@
  */
 void scsi_put_command(struct scsi_cmnd *cmd)
 {
-	struct scsi_device *sdev = cmd->device;
 	unsigned long flags;
 
 	/* serious error if the command hasn't come from a device list */
@@ -356,50 +327,107 @@
 
 	cancel_delayed_work(&cmd->abort_work);
 
-	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
+	__scsi_put_command(cmd->device->host, cmd);
 }
 EXPORT_SYMBOL(scsi_put_command);
 
-static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
+static struct scsi_host_cmd_pool *
+scsi_find_host_cmd_pool(struct Scsi_Host *shost)
 {
+	if (shost->hostt->cmd_size)
+		return shost->hostt->cmd_pool;
+	if (shost->unchecked_isa_dma)
+		return &scsi_cmd_dma_pool;
+	return &scsi_cmd_pool;
+}
+
+static void
+scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
+{
+	kfree(pool->sense_name);
+	kfree(pool->cmd_name);
+	kfree(pool);
+}
+
+static struct scsi_host_cmd_pool *
+scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
+{
+	struct scsi_host_template *hostt = shost->hostt;
+	struct scsi_host_cmd_pool *pool;
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return NULL;
+
+	pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
+	pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
+	if (!pool->cmd_name || !pool->sense_name) {
+		scsi_free_host_cmd_pool(pool);
+		return NULL;
+	}
+
+	pool->slab_flags = SLAB_HWCACHE_ALIGN;
+	if (shost->unchecked_isa_dma) {
+		pool->slab_flags |= SLAB_CACHE_DMA;
+		pool->gfp_mask = __GFP_DMA;
+	}
+	return pool;
+}
+
+static struct scsi_host_cmd_pool *
+scsi_get_host_cmd_pool(struct Scsi_Host *shost)
+{
+	struct scsi_host_template *hostt = shost->hostt;
 	struct scsi_host_cmd_pool *retval = NULL, *pool;
+	size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
+
 	/*
 	 * Select a command slab for this host and create it if not
 	 * yet existent.
 	 */
 	mutex_lock(&host_cmd_pool_mutex);
-	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
-		&scsi_cmd_pool;
+	pool = scsi_find_host_cmd_pool(shost);
+	if (!pool) {
+		pool = scsi_alloc_host_cmd_pool(shost);
+		if (!pool)
+			goto out;
+	}
+
 	if (!pool->users) {
-		pool->cmd_slab = kmem_cache_create(pool->cmd_name,
-						   sizeof(struct scsi_cmnd), 0,
+		pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
 						   pool->slab_flags, NULL);
 		if (!pool->cmd_slab)
-			goto fail;
+			goto out_free_pool;
 
 		pool->sense_slab = kmem_cache_create(pool->sense_name,
 						     SCSI_SENSE_BUFFERSIZE, 0,
 						     pool->slab_flags, NULL);
-		if (!pool->sense_slab) {
-			kmem_cache_destroy(pool->cmd_slab);
-			goto fail;
-		}
+		if (!pool->sense_slab)
+			goto out_free_slab;
 	}
 
 	pool->users++;
 	retval = pool;
- fail:
+out:
 	mutex_unlock(&host_cmd_pool_mutex);
 	return retval;
+
+out_free_slab:
+	kmem_cache_destroy(pool->cmd_slab);
+out_free_pool:
+	if (hostt->cmd_size)
+		scsi_free_host_cmd_pool(pool);
+	goto out;
 }
 
-static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
+static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
 {
+	struct scsi_host_template *hostt = shost->hostt;
 	struct scsi_host_cmd_pool *pool;
 
 	mutex_lock(&host_cmd_pool_mutex);
-	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
-		&scsi_cmd_pool;
+	pool = scsi_find_host_cmd_pool(shost);
+
 	/*
 	 * This may happen if a driver has a mismatched get and put
 	 * of the command pool; the driver should be implicated in
@@ -410,67 +438,13 @@
 	if (!--pool->users) {
 		kmem_cache_destroy(pool->cmd_slab);
 		kmem_cache_destroy(pool->sense_slab);
+		if (hostt->cmd_size)
+			scsi_free_host_cmd_pool(pool);
 	}
 	mutex_unlock(&host_cmd_pool_mutex);
 }
 
 /**
- * scsi_allocate_command - get a fully allocated SCSI command
- * @gfp_mask:	allocation mask
- *
- * This function is for use outside of the normal host based pools.
- * It allocates the relevant command and takes an additional reference
- * on the pool it used.  This function *must* be paired with
- * scsi_free_command which also has the identical mask, otherwise the
- * free pool counts will eventually go wrong and you'll trigger a bug.
- *
- * This function should *only* be used by drivers that need a static
- * command allocation at start of day for internal functions.
- */
-struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
-{
-	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
-
-	if (!pool)
-		return NULL;
-
-	return scsi_pool_alloc_command(pool, gfp_mask);
-}
-EXPORT_SYMBOL(scsi_allocate_command);
-
-/**
- * scsi_free_command - free a command allocated by scsi_allocate_command
- * @gfp_mask:	mask used in the original allocation
- * @cmd:	command to free
- *
- * Note: using the original allocation mask is vital because that's
- * what determines which command pool we use to free the command.  Any
- * mismatch will cause the system to BUG eventually.
- */
-void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
-{
-	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
-
-	/*
-	 * this could trigger if the mask to scsi_allocate_command
-	 * doesn't match this mask.  Otherwise we're guaranteed that this
-	 * succeeds because scsi_allocate_command must have taken a reference
-	 * on the pool
-	 */
-	BUG_ON(!pool);
-
-	scsi_pool_free_command(pool, cmd);
-	/*
-	 * scsi_put_host_cmd_pool is called twice; once to release the
-	 * reference we took above, and once to release the reference
-	 * originally taken by scsi_allocate_command
-	 */
-	scsi_put_host_cmd_pool(gfp_mask);
-	scsi_put_host_cmd_pool(gfp_mask);
-}
-EXPORT_SYMBOL(scsi_free_command);
-
-/**
  * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
  * @shost: host to allocate the freelist for.
  *
@@ -482,14 +456,13 @@
  */
 int scsi_setup_command_freelist(struct Scsi_Host *shost)
 {
-	struct scsi_cmnd *cmd;
 	const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
+	struct scsi_cmnd *cmd;
 
 	spin_lock_init(&shost->free_list_lock);
 	INIT_LIST_HEAD(&shost->free_list);
 
-	shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);
-
+	shost->cmd_pool = scsi_get_host_cmd_pool(shost);
 	if (!shost->cmd_pool)
 		return -ENOMEM;
 
@@ -498,7 +471,7 @@
 	 */
 	cmd = scsi_host_alloc_command(shost, gfp_mask);
 	if (!cmd) {
-		scsi_put_host_cmd_pool(gfp_mask);
+		scsi_put_host_cmd_pool(shost);
 		shost->cmd_pool = NULL;
 		return -ENOMEM;
 	}
@@ -524,10 +497,10 @@
 
 		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
 		list_del_init(&cmd->list);
-		scsi_pool_free_command(shost->cmd_pool, cmd);
+		scsi_host_free_command(shost, cmd);
 	}
 	shost->cmd_pool = NULL;
-	scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
+	scsi_put_host_cmd_pool(shost);
 }
 
 #ifdef CONFIG_SCSI_LOGGING
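
With pools now keyed off the host template, a driver that sets hostt->cmd_size gets its per-command private data co-allocated with the scsi_cmnd instead of running a pool of its own. A hedged driver-side sketch -- the struct and accessor are hypothetical; the layout assumption (private data immediately after struct scsi_cmnd) follows from the cmd_size arithmetic in scsi_get_host_cmd_pool() above:

	struct my_cmd_priv {
		u32		tag;
		dma_addr_t	sgl_dma;
	};

	static struct scsi_host_template my_template = {
		.name		= "mydrv",
		.cmd_size	= sizeof(struct my_cmd_priv),
		/* ... */
	};

	static inline struct my_cmd_priv *my_cmd_priv(struct scsi_cmnd *cmd)
	{
		return (void *)cmd + sizeof(struct scsi_cmnd);
	}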
@@ -954,7 +927,7 @@
  * This is an internal helper function.  You probably want to use
  * scsi_get_vpd_page instead.
  *
- * Returns 0 on success or a negative error number.
+ * Returns the size of the VPD page on success or a negative error number.
  */
 static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
 							u8 page, unsigned len)
@@ -962,6 +935,9 @@
 	int result;
 	unsigned char cmd[16];
 
+	if (len < 4)
+		return -EINVAL;
+
 	cmd[0] = INQUIRY;
 	cmd[1] = 1;		/* EVPD */
 	cmd[2] = page;
@@ -976,13 +952,13 @@
 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
 				  len, NULL, 30 * HZ, 3, NULL);
 	if (result)
-		return result;
+		return -EIO;
 
 	/* Sanity check that we got the page back that we asked for */
 	if (buffer[1] != page)
 		return -EIO;
 
-	return 0;
+	return get_unaligned_be16(&buffer[2]) + 4;
 }
 
 /**
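
The new return value is just the SPC VPD page header decoded: bytes 2-3 hold a big-endian page length that excludes the 4-byte header itself, hence:

	/* total bytes the device would transfer for this page */
	int vpd_total_len = get_unaligned_be16(&buffer[2]) + 4;

Callers can compare this against the buffer they supplied and reallocate and retry when the device reports a larger page, which is exactly what scsi_attach_vpd() below does.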
@@ -1009,18 +985,18 @@
 
 	/* Ask for all the pages supported by this device */
 	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
-	if (result)
+	if (result < 4)
 		goto fail;
 
 	/* If the user actually wanted this page, we can skip the rest */
 	if (page == 0)
 		return 0;
 
-	for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
-		if (buf[i + 4] == page)
+	for (i = 4; i < min(result, buf_len); i++)
+		if (buf[i] == page)
 			goto found;
 
-	if (i < buf[3] && i >= buf_len - 4)
+	if (i < result && i >= buf_len)
 		/* ran off the end of the buffer, give us benefit of doubt */
 		goto found;
 	/* The device claims it doesn't support the requested page */
@@ -1028,7 +1004,7 @@
 
  found:
 	result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
-	if (result)
+	if (result < 0)
 		goto fail;
 
 	return 0;
@@ -1039,6 +1015,93 @@
 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
 
 /**
+ * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
+ * @sdev: The device to ask
+ *
+ * Attach the 'Device Identification' VPD page (0x83) and the
+ * 'Unit Serial Number' VPD page (0x80) to a SCSI device
+ * structure. This information can be used to identify the device
+ * uniquely.
+ */
+void scsi_attach_vpd(struct scsi_device *sdev)
+{
+	int result, i;
+	int vpd_len = SCSI_VPD_PG_LEN;
+	int pg80_supported = 0;
+	int pg83_supported = 0;
+	unsigned char *vpd_buf;
+
+	if (sdev->skip_vpd_pages)
+		return;
+retry_pg0:
+	vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+	if (!vpd_buf)
+		return;
+
+	/* Ask for all the pages supported by this device */
+	result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
+	if (result < 0) {
+		kfree(vpd_buf);
+		return;
+	}
+	if (result > vpd_len) {
+		vpd_len = result;
+		kfree(vpd_buf);
+		goto retry_pg0;
+	}
+
+	for (i = 4; i < result; i++) {
+		if (vpd_buf[i] == 0x80)
+			pg80_supported = 1;
+		if (vpd_buf[i] == 0x83)
+			pg83_supported = 1;
+	}
+	kfree(vpd_buf);
+	vpd_len = SCSI_VPD_PG_LEN;
+
+	if (pg80_supported) {
+retry_pg80:
+		vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+		if (!vpd_buf)
+			return;
+
+		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
+		if (result < 0) {
+			kfree(vpd_buf);
+			return;
+		}
+		if (result > vpd_len) {
+			vpd_len = result;
+			kfree(vpd_buf);
+			goto retry_pg80;
+		}
+		sdev->vpd_pg80_len = result;
+		sdev->vpd_pg80 = vpd_buf;
+		vpd_len = SCSI_VPD_PG_LEN;
+	}
+
+	if (pg83_supported) {
+retry_pg83:
+		vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+		if (!vpd_buf)
+			return;
+
+		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
+		if (result < 0) {
+			kfree(vpd_buf);
+			return;
+		}
+		if (result > vpd_len) {
+			vpd_len = result;
+			kfree(vpd_buf);
+			goto retry_pg83;
+		}
+		sdev->vpd_pg83_len = result;
+		sdev->vpd_pg83 = vpd_buf;
+	}
+}
+
+/**
  * scsi_report_opcode - Find out if a given command opcode is supported
  * @sdev:	scsi device to query
  * @buffer:	scratch buffer (must be at least 20 bytes long)
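
With both pages cached on the scsi_device, a consumer can parse identification descriptors without issuing INQUIRYs of its own. A sketch of walking the cached page 0x83 -- the consumer code is hypothetical; the descriptor layout is the standard SPC one (4-byte header per designator, designator length in byte 3):

	unsigned char *d = sdev->vpd_pg83 + 4;	/* skip the page header */

	while (d + 4 <= sdev->vpd_pg83 + sdev->vpd_pg83_len) {
		u8 assoc = (d[1] >> 4) & 0x3;	/* LUN/port/target */
		u8 type  = d[1] & 0xf;		/* e.g. 3 == NAA */
		/* designator payload: d + 4, length d[3] */
		d += d[3] + 4;
	}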
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2decc64..f3e9cc0 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -64,6 +64,7 @@
 /* Additional Sense Code (ASC) */
 #define NO_ADDITIONAL_SENSE 0x0
 #define LOGICAL_UNIT_NOT_READY 0x4
+#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
 #define UNRECOVERED_READ_ERR 0x11
 #define PARAMETER_LIST_LENGTH_ERR 0x1a
 #define INVALID_OPCODE 0x20
@@ -195,6 +196,7 @@
 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
 static bool scsi_debug_removable = DEF_REMOVABLE;
+static bool scsi_debug_clustering;
 
 static int scsi_debug_cmnd_count = 0;
 
@@ -1780,7 +1782,6 @@
 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
 		pr_err("%s: REF check failed on sector %lu\n",
 			__func__, (unsigned long)sector);
-			dif_errors++;
 		return 0x03;
 	}
 	return 0;
@@ -1789,23 +1790,27 @@
 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
 			  unsigned int sectors, bool read)
 {
-	unsigned int i, resid;
-	struct scatterlist *psgl;
+	size_t resid;
 	void *paddr;
 	const void *dif_store_end = dif_storep + sdebug_store_sectors;
+	struct sg_mapping_iter miter;
 
 	/* Bytes of protection data to copy into sgl */
 	resid = sectors * sizeof(*dif_storep);
 
-	scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
-		int len = min(psgl->length, resid);
+	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
+			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
+			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
+
+	while (sg_miter_next(&miter) && resid > 0) {
+		size_t len = min(miter.length, resid);
 		void *start = dif_store(sector);
-		int rest = 0;
+		size_t rest = 0;
 
 		if (dif_store_end < start + len)
 			rest = start + len - dif_store_end;
 
-		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
+		paddr = miter.addr;
 
 		if (read)
 			memcpy(paddr, start, len - rest);
@@ -1821,8 +1826,8 @@
 
 		sector += len / sizeof(*dif_storep);
 		resid -= len;
-		kunmap_atomic(paddr);
 	}
+	sg_miter_stop(&miter);
 }
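
dif_copy_prot() above switches from open-coded kmap_atomic() walks to the scatterlist mapping iterator, which hands back mapped, bounded chunks no matter how the sg entries span pages. The core pattern, reduced to a sketch (process() is a hypothetical callback):

	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents,
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter))
		process(miter.addr, miter.length);	/* mapped chunk */
	sg_miter_stop(&miter);				/* drops the mapping */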
 
 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
@@ -1832,7 +1837,7 @@
 	struct sd_dif_tuple *sdt;
 	sector_t sector;
 
-	for (i = 0; i < sectors; i++) {
+	for (i = 0; i < sectors; i++, ei_lba++) {
 		int ret;
 
 		sector = start_sec + i;
@@ -1846,8 +1851,6 @@
 			dif_errors++;
 			return ret;
 		}
-
-		ei_lba++;
 	}
 
 	dif_copy_prot(SCpnt, start_sec, sectors, true);
@@ -1886,17 +1889,19 @@
 		return check_condition_result;
 	}
 
+	read_lock_irqsave(&atomic_rw, iflags);
+
 	/* DIX + T10 DIF */
 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
 		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
 
 		if (prot_ret) {
+			read_unlock_irqrestore(&atomic_rw, iflags);
 			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
 			return illegal_condition_result;
 		}
 	}
 
-	read_lock_irqsave(&atomic_rw, iflags);
 	ret = do_device_access(SCpnt, devip, lba, num, 0);
 	read_unlock_irqrestore(&atomic_rw, iflags);
 	if (ret == -1)
@@ -1931,55 +1936,62 @@
 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 			     unsigned int sectors, u32 ei_lba)
 {
-	int i, j, ret;
+	int ret;
 	struct sd_dif_tuple *sdt;
-	struct scatterlist *dsgl;
-	struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
-	void *daddr, *paddr;
+	void *daddr;
 	sector_t sector = start_sec;
 	int ppage_offset;
+	int dpage_offset;
+	struct sg_mapping_iter diter;
+	struct sg_mapping_iter piter;
 
 	BUG_ON(scsi_sg_count(SCpnt) == 0);
 	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
 
-	ppage_offset = 0;
+	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
+			scsi_prot_sg_count(SCpnt),
+			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 
-	/* For each data page */
-	scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
-		daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
-		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
+	/* For each protection page */
+	while (sg_miter_next(&piter)) {
+		dpage_offset = 0;
+		if (WARN_ON(!sg_miter_next(&diter))) {
+			ret = 0x01;
+			goto out;
+		}
 
-		/* For each sector-sized chunk in data page */
-		for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) {
-
+		for (ppage_offset = 0; ppage_offset < piter.length;
+		     ppage_offset += sizeof(struct sd_dif_tuple)) {
 			/* If we're at the end of the current
-			 * protection page advance to the next one
+			 * data page advance to the next one
 			 */
-			if (ppage_offset >= psgl->length) {
-				kunmap_atomic(paddr);
-				psgl = sg_next(psgl);
-				BUG_ON(psgl == NULL);
-				paddr = kmap_atomic(sg_page(psgl))
-					+ psgl->offset;
-				ppage_offset = 0;
+			if (dpage_offset >= diter.length) {
+				if (WARN_ON(!sg_miter_next(&diter))) {
+					ret = 0x01;
+					goto out;
+				}
+				dpage_offset = 0;
 			}
 
-			sdt = paddr + ppage_offset;
+			sdt = piter.addr + ppage_offset;
+			daddr = diter.addr + dpage_offset;
 
-			ret = dif_verify(sdt, daddr + j, sector, ei_lba);
+			ret = dif_verify(sdt, daddr, sector, ei_lba);
 			if (ret) {
-				dump_sector(daddr + j, scsi_debug_sector_size);
+				dump_sector(daddr, scsi_debug_sector_size);
 				goto out;
 			}
 
 			sector++;
 			ei_lba++;
-			ppage_offset += sizeof(struct sd_dif_tuple);
+			dpage_offset += scsi_debug_sector_size;
 		}
-
-		kunmap_atomic(paddr);
-		kunmap_atomic(daddr);
+		diter.consumed = dpage_offset;
+		sg_miter_stop(&diter);
 	}
+	sg_miter_stop(&piter);
 
 	dif_copy_prot(SCpnt, start_sec, sectors, false);
 	dix_writes++;
@@ -1988,8 +2000,8 @@
 
 out:
 	dif_errors++;
-	kunmap_atomic(paddr);
-	kunmap_atomic(daddr);
+	sg_miter_stop(&diter);
+	sg_miter_stop(&piter);
 	return ret;
 }
 
@@ -2089,17 +2101,19 @@
 	if (ret)
 		return ret;
 
+	write_lock_irqsave(&atomic_rw, iflags);
+
 	/* DIX + T10 DIF */
 	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
 		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
 
 		if (prot_ret) {
+			write_unlock_irqrestore(&atomic_rw, iflags);
 			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
 			return illegal_condition_result;
 		}
 	}
 
-	write_lock_irqsave(&atomic_rw, iflags);
 	ret = do_device_access(SCpnt, devip, lba, num, 1);
 	if (scsi_debug_lbp())
 		map_region(lba, num);
@@ -2178,6 +2192,7 @@
 	struct unmap_block_desc *desc;
 	unsigned int i, payload_len, descriptors;
 	int ret;
+	unsigned long iflags;
 
 	ret = check_readiness(scmd, 1, devip);
 	if (ret)
@@ -2199,6 +2214,8 @@
 
 	desc = (void *)&buf[8];
 
+	write_lock_irqsave(&atomic_rw, iflags);
+
 	for (i = 0 ; i < descriptors ; i++) {
 		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
 		unsigned int num = get_unaligned_be32(&desc[i].blocks);
@@ -2213,6 +2230,7 @@
 	ret = 0;
 
 out:
+	write_unlock_irqrestore(&atomic_rw, iflags);
 	kfree(buf);
 
 	return ret;
@@ -2313,36 +2331,37 @@
 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
 			    unsigned int num, struct sdebug_dev_info *devip)
 {
-	int i, j, ret = -1;
+	int j;
 	unsigned char *kaddr, *buf;
 	unsigned int offset;
-	struct scatterlist *sg;
 	struct scsi_data_buffer *sdb = scsi_in(scp);
+	struct sg_mapping_iter miter;
 
 	/* better not to use temporary buffer. */
 	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
-	if (!buf)
-		return ret;
+	if (!buf) {
+		mk_sense_buffer(devip, NOT_READY,
+				LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+		return check_condition_result;
+	}
 
 	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
 
 	offset = 0;
-	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
-		kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
-		if (!kaddr)
-			goto out;
+	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
+			SG_MITER_ATOMIC | SG_MITER_TO_SG);
 
-		for (j = 0; j < sg->length; j++)
-			*(kaddr + sg->offset + j) ^= *(buf + offset + j);
+	while (sg_miter_next(&miter)) {
+		kaddr = miter.addr;
+		for (j = 0; j < miter.length; j++)
+			*(kaddr + j) ^= *(buf + offset + j);
 
-		offset += sg->length;
-		kunmap_atomic(kaddr);
+		offset += miter.length;
 	}
-	ret = 0;
-out:
+	sg_miter_stop(&miter);
 	kfree(buf);
 
-	return ret;
+	return 0;
 }
 
 /* When timer goes off this function is called. */
@@ -2744,6 +2763,7 @@
  */
 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
+module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
@@ -2787,6 +2807,7 @@
 
 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
@@ -3248,7 +3269,7 @@
 };
 ATTRIBUTE_GROUPS(sdebug_drv);
 
-struct device *pseudo_primary;
+static struct device *pseudo_primary;
 
 static int __init scsi_debug_init(void)
 {
@@ -3934,6 +3955,8 @@
 	sdbg_host = to_sdebug_host(dev);
 
 	sdebug_driver_template.can_queue = scsi_debug_max_queue;
+	if (scsi_debug_clustering)
+		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
 	if (NULL == hpnt) {
 		printk(KERN_ERR "%s: scsi_register failed\n", __func__);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 78b004d..771c16b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -2288,6 +2288,11 @@
 	if (scsi_autopm_get_host(shost) < 0)
 		return FAILED;
 
+	if (!get_device(&dev->sdev_gendev)) {
+		rtn = FAILED;
+		goto out_put_autopm_host;
+	}
+
 	scmd = scsi_get_command(dev, GFP_KERNEL);
 	blk_rq_init(NULL, &req);
 	scmd->request = &req;
@@ -2345,6 +2350,7 @@
 	scsi_run_host_queues(shost);
 
 	scsi_next_command(scmd);
+out_put_autopm_host:
 	scsi_autopm_put_host(shost);
 	return rtn;
 }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ec84b..5681c05 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -75,28 +75,6 @@
  */
 #define SCSI_QUEUE_DELAY	3
 
-/*
- * Function:	scsi_unprep_request()
- *
- * Purpose:	Remove all preparation done for a request, including its
- *		associated scsi_cmnd, so that it can be requeued.
- *
- * Arguments:	req	- request to unprepare
- *
- * Lock status:	Assumed that no locks are held upon entry.
- *
- * Returns:	Nothing.
- */
-static void scsi_unprep_request(struct request *req)
-{
-	struct scsi_cmnd *cmd = req->special;
-
-	blk_unprep_request(req);
-	req->special = NULL;
-
-	scsi_put_command(cmd);
-}
-
 /**
  * __scsi_queue_insert - private queue insertion
  * @cmd: The SCSI command being requeued
@@ -385,29 +363,12 @@
 	return 0;
 }
 
-/*
- * Function:	scsi_run_queue()
- *
- * Purpose:	Select a proper request queue to serve next
- *
- * Arguments:	q	- last request's queue
- *
- * Returns:     Nothing
- *
- * Notes:	The previous command was completely finished, start
- *		a new one if possible.
- */
-static void scsi_run_queue(struct request_queue *q)
+static void scsi_starved_list_run(struct Scsi_Host *shost)
 {
-	struct scsi_device *sdev = q->queuedata;
-	struct Scsi_Host *shost;
 	LIST_HEAD(starved_list);
+	struct scsi_device *sdev;
 	unsigned long flags;
 
-	shost = sdev->host;
-	if (scsi_target(sdev)->single_lun)
-		scsi_single_lun_run(sdev);
-
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_splice_init(&shost->starved_list, &starved_list);
 
@@ -459,6 +420,28 @@
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);
 	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Function:   scsi_run_queue()
+ *
+ * Purpose:    Select a proper request queue to serve next
+ *
+ * Arguments:  q       - last request's queue
+ *
+ * Returns:     Nothing
+ *
+ * Notes:      The previous command was completely finished, start
+ *             a new one if possible.
+ */
+static void scsi_run_queue(struct request_queue *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+
+	if (scsi_target(sdev)->single_lun)
+		scsi_single_lun_run(sdev);
+	if (!list_empty(&sdev->host->starved_list))
+		scsi_starved_list_run(sdev->host);
 
 	blk_run_queue(q);
 }
@@ -497,16 +480,10 @@
 	struct request *req = cmd->request;
 	unsigned long flags;
 
-	/*
-	 * We need to hold a reference on the device to avoid the queue being
-	 * killed after the unlock and before scsi_run_queue is invoked which
-	 * may happen because scsi_unprep_request() puts the command which
-	 * releases its reference on the device.
-	 */
-	get_device(&sdev->sdev_gendev);
-
 	spin_lock_irqsave(q->queue_lock, flags);
-	scsi_unprep_request(req);
+	blk_unprep_request(req);
+	req->special = NULL;
+	scsi_put_command(cmd);
 	blk_requeue_request(q, req);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -520,13 +497,9 @@
 	struct scsi_device *sdev = cmd->device;
 	struct request_queue *q = sdev->request_queue;
 
-	/* need to hold a reference on the device before we let go of the cmd */
-	get_device(&sdev->sdev_gendev);
-
 	scsi_put_command(cmd);
 	scsi_run_queue(q);
 
-	/* ok to remove device now */
 	put_device(&sdev->sdev_gendev);
 }
 
@@ -788,6 +761,7 @@
 	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
 	      ACTION_DELAYED_RETRY} action;
 	char *description = NULL;
+	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
 
 	if (result) {
 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -989,6 +963,12 @@
 		action = ACTION_FAIL;
 	}
 
+	if (action != ACTION_FAIL &&
+	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+		action = ACTION_FAIL;
+		description = "Command timed out";
+	}
+
 	switch (action) {
 	case ACTION_FAIL:
 		/* Give up and fail the remainder of the request */
@@ -1111,6 +1091,7 @@
 	scsi_release_buffers(cmd);
 	cmd->request->special = NULL;
 	scsi_put_command(cmd);
+	put_device(&cmd->device->sdev_gendev);
 	return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
@@ -1121,9 +1102,15 @@
 	struct scsi_cmnd *cmd;
 
 	if (!req->special) {
-		cmd = scsi_get_command(sdev, GFP_ATOMIC);
-		if (unlikely(!cmd))
+		/* Bail if we can't get a reference to the device */
+		if (!get_device(&sdev->sdev_gendev))
 			return NULL;
+
+		cmd = scsi_get_command(sdev, GFP_ATOMIC);
+		if (unlikely(!cmd)) {
+			put_device(&sdev->sdev_gendev);
+			return NULL;
+		}
 		req->special = cmd;
 	} else {
 		cmd = req->special;
@@ -1286,6 +1273,7 @@
 			struct scsi_cmnd *cmd = req->special;
 			scsi_release_buffers(cmd);
 			scsi_put_command(cmd);
+			put_device(&cmd->device->sdev_gendev);
 			req->special = NULL;
 		}
 		break;
@@ -1543,16 +1531,14 @@
  * Lock status: IO request lock assumed to be held when called.
  */
 static void scsi_request_fn(struct request_queue *q)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
 {
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
 	struct scsi_cmnd *cmd;
 	struct request *req;
 
-	if(!get_device(&sdev->sdev_gendev))
-		/* We must be tearing the block queue down already */
-		return;
-
 	/*
 	 * To start with, we keep looping until the queue is empty, or until
 	 * the host is no longer able to accept any more requests.
@@ -1641,7 +1627,7 @@
 			goto out_delay;
 	}
 
-	goto out;
+	return;
 
  not_ready:
 	spin_unlock_irq(shost->host_lock);
@@ -1660,12 +1646,6 @@
 out_delay:
 	if (sdev->device_busy == 0)
 		blk_delay_queue(q, SCSI_QUEUE_DELAY);
-out:
-	/* must be careful here...if we trigger the ->remove() function
-	 * we cannot be holding the q lock */
-	spin_unlock_irq(q->queue_lock);
-	put_device(&sdev->sdev_gendev);
-	spin_lock_irq(q->queue_lock);
 }
 
 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 307a811..27f96d5 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -320,6 +320,7 @@
 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
 	unsigned long flags;
 
+	starget->state = STARGET_DEL;
 	transport_destroy_device(dev);
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (shost->hostt->target_destroy)
@@ -371,6 +372,37 @@
 }
 
 /**
+ * scsi_target_reap_ref_release - remove target from visibility
+ * @kref: the reap_ref in the target being released
+ *
+ * Called on last put of reap_ref, which is the indication that no device
+ * under this target is visible anymore, so render the target invisible in
+ * sysfs.  Note: we have to be in user context here because the target reaps
+ * should be done in places where the scsi device visibility is being removed.
+ */
+static void scsi_target_reap_ref_release(struct kref *kref)
+{
+	struct scsi_target *starget
+		= container_of(kref, struct scsi_target, reap_ref);
+
+	/*
+	 * if we get here and the target is still in the CREATED state that
+	 * means it was allocated but never made visible (because a scan
+	 * turned up no LUNs), so don't call device_del() on it.
+	 */
+	if (starget->state != STARGET_CREATED) {
+		transport_remove_device(&starget->dev);
+		device_del(&starget->dev);
+	}
+	scsi_target_destroy(starget);
+}
+
+static void scsi_target_reap_ref_put(struct scsi_target *starget)
+{
+	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
+}
+
+/**
  * scsi_alloc_target - allocate a new or find an existing target
  * @parent:	parent of the target (need not be a scsi host)
  * @channel:	target channel number (zero if no channels)
@@ -392,7 +424,7 @@
 		+ shost->transportt->target_size;
 	struct scsi_target *starget;
 	struct scsi_target *found_target;
-	int error;
+	int error, ref_got;
 
 	starget = kzalloc(size, GFP_KERNEL);
 	if (!starget) {
@@ -401,7 +433,7 @@
 	}
 	dev = &starget->dev;
 	device_initialize(dev);
-	starget->reap_ref = 1;
+	kref_init(&starget->reap_ref);
 	dev->parent = get_device(parent);
 	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
 	dev->bus = &scsi_bus_type;
@@ -441,29 +473,36 @@
 	return starget;
 
  found:
-	found_target->reap_ref++;
+	/*
+	 * release routine already fired if kref is zero, so if we can still
+	 * take the reference, the target must be alive.  If we can't, it must
+	 * be dying and we need to wait for a new target
+	 */
+	ref_got = kref_get_unless_zero(&found_target->reap_ref);
+
 	spin_unlock_irqrestore(shost->host_lock, flags);
-	if (found_target->state != STARGET_DEL) {
+	if (ref_got) {
 		put_device(dev);
 		return found_target;
 	}
-	/* Unfortunately, we found a dying target; need to
-	 * wait until it's dead before we can get a new one */
+	/*
+	 * Unfortunately, we found a dying target; need to wait until it's
+	 * dead before we can get a new one.  There is an anomaly here.  We
+	 * *should* call scsi_target_reap() to balance the kref_get() of the
+	 * reap_ref above.  However, since the target is being released, it's
+	 * already invisible and the reap_ref is irrelevant.  If we call
+	 * scsi_target_reap() we might spuriously do another device_del() on
+	 * an already invisible target.
+	 */
 	put_device(&found_target->dev);
-	flush_scheduled_work();
+	/*
+	 * length of time is irrelevant here, we just want to yield the CPU
+	 * for a tick to avoid busy waiting for the target to die.
+	 */
+	msleep(1);
 	goto retry;
 }
 
-static void scsi_target_reap_usercontext(struct work_struct *work)
-{
-	struct scsi_target *starget =
-		container_of(work, struct scsi_target, ew.work);
-
-	transport_remove_device(&starget->dev);
-	device_del(&starget->dev);
-	scsi_target_destroy(starget);
-}
-
 /**
  * scsi_target_reap - check to see if target is in use and destroy if not
  * @starget: target to be checked
@@ -474,28 +513,13 @@
  */
 void scsi_target_reap(struct scsi_target *starget)
 {
-	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	unsigned long flags;
-	enum scsi_target_state state;
-	int empty = 0;
-
-	spin_lock_irqsave(shost->host_lock, flags);
-	state = starget->state;
-	if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
-		empty = 1;
-		starget->state = STARGET_DEL;
-	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
-
-	if (!empty)
-		return;
-
-	BUG_ON(state == STARGET_DEL);
-	if (state == STARGET_CREATED)
-		scsi_target_destroy(starget);
-	else
-		execute_in_process_context(scsi_target_reap_usercontext,
-					   &starget->ew);
+	/*
+	 * serious problem if this triggers: STARGET_DEL is only set if the
+	 * reap_ref drops to zero, so we're trying to do another final put
+	 * on an already released kref
+	 */
+	BUG_ON(starget->state == STARGET_DEL);
+	scsi_target_reap_ref_put(starget);
 }
 
 /**
@@ -946,6 +970,9 @@
 		}
 	}
 
+	if (sdev->scsi_level >= SCSI_3)
+		scsi_attach_vpd(sdev);
+
 	sdev->max_queue_depth = sdev->queue_depth;
 
 	/*
@@ -1532,6 +1559,10 @@
 	}
 	mutex_unlock(&shost->scan_mutex);
 	scsi_autopm_put_target(starget);
+	/*
+	 * paired with scsi_alloc_target().  Target will be destroyed unless
+	 * scsi_probe_and_add_lun made an underlying device visible
+	 */
 	scsi_target_reap(starget);
 	put_device(&starget->dev);
 
@@ -1612,8 +1643,10 @@
 
  out_reap:
 	scsi_autopm_put_target(starget);
-	/* now determine if the target has any children at all
-	 * and if not, nuke it */
+	/*
+	 * paired with scsi_alloc_target(): determine if the target has
+	 * any children at all and if not, nuke it
+	 */
 	scsi_target_reap(starget);
 
 	put_device(&starget->dev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8ead24c..074e8cc 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -300,7 +300,9 @@
 	int ret = -EINVAL;
 	unsigned long deadline, flags;
 
-	if (shost->transportt && shost->transportt->eh_strategy_handler)
+	if (shost->transportt &&
+	    (shost->transportt->eh_strategy_handler ||
+	     !shost->hostt->eh_host_reset_handler))
 		return ret;
 
 	if (!strncmp(buf, "off", strlen("off")))
@@ -383,17 +385,14 @@
 {
 	struct scsi_device *sdev;
 	struct device *parent;
-	struct scsi_target *starget;
 	struct list_head *this, *tmp;
 	unsigned long flags;
 
 	sdev = container_of(work, struct scsi_device, ew.work);
 
 	parent = sdev->sdev_gendev.parent;
-	starget = to_scsi_target(parent);
 
 	spin_lock_irqsave(sdev->host->host_lock, flags);
-	starget->reap_ref++;
 	list_del(&sdev->siblings);
 	list_del(&sdev->same_target_siblings);
 	list_del(&sdev->starved_entry);
@@ -413,8 +412,8 @@
 	/* NULL queue means the device can't be used */
 	sdev->request_queue = NULL;
 
-	scsi_target_reap(scsi_target(sdev));
-
+	kfree(sdev->vpd_pg83);
+	kfree(sdev->vpd_pg80);
 	kfree(sdev->inquiry);
 	kfree(sdev);
 
@@ -579,7 +578,6 @@
  * Create the actual show/store functions and data structures.
  */
 sdev_rd_attr (device_blocked, "%d\n");
-sdev_rd_attr (queue_depth, "%d\n");
 sdev_rd_attr (device_busy, "%d\n");
 sdev_rd_attr (type, "%d\n");
 sdev_rd_attr (scsi_level, "%d\n");
@@ -712,10 +710,64 @@
 	return snprintf(buf, 20, "%s\n", name);
 }
 
-static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL);
+static ssize_t
+store_queue_type_field(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_host_template *sht = sdev->host->hostt;
+	int tag_type = 0, retval;
+	int prev_tag_type = scsi_get_tag_type(sdev);
+
+	if (!sdev->tagged_supported || !sht->change_queue_type)
+		return -EINVAL;
+
+	if (strncmp(buf, "ordered", 7) == 0)
+		tag_type = MSG_ORDERED_TAG;
+	else if (strncmp(buf, "simple", 6) == 0)
+		tag_type = MSG_SIMPLE_TAG;
+	else if (strncmp(buf, "none", 4) != 0)
+		return -EINVAL;
+
+	if (tag_type == prev_tag_type)
+		return count;
+
+	retval = sht->change_queue_type(sdev, tag_type);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
+		   store_queue_type_field);
+
+#define sdev_vpd_pg_attr(_page)						\
+static ssize_t							\
+show_vpd_##_page(struct file *filp, struct kobject *kobj,	\
+		 struct bin_attribute *bin_attr,			\
+		 char *buf, loff_t off, size_t count)			\
+{									\
+	struct device *dev = container_of(kobj, struct device, kobj);	\
+	struct scsi_device *sdev = to_scsi_device(dev);			\
+	if (!sdev->vpd_##_page)						\
+		return -EINVAL;						\
+	return memory_read_from_buffer(buf, count, &off,		\
+				       sdev->vpd_##_page,		\
+				       sdev->vpd_##_page##_len);	\
+}									\
+static struct bin_attribute dev_attr_vpd_##_page = {		\
+	.attr =	{.name = __stringify(vpd_##_page), .mode = S_IRUGO },	\
+	.size = 0,							\
+	.read = show_vpd_##_page,					\
+};
+
+sdev_vpd_pg_attr(pg83);
+sdev_vpd_pg_attr(pg80);
 
 static ssize_t
-show_iostat_counterbits(struct device *dev, struct device_attribute *attr, 				char *buf)
+show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
+			char *buf)
 {
 	return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
 }
@@ -786,46 +838,9 @@
 DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
 DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
 
-/* Default template for device attributes.  May NOT be modified */
-static struct attribute *scsi_sdev_attrs[] = {
-	&dev_attr_device_blocked.attr,
-	&dev_attr_type.attr,
-	&dev_attr_scsi_level.attr,
-	&dev_attr_device_busy.attr,
-	&dev_attr_vendor.attr,
-	&dev_attr_model.attr,
-	&dev_attr_rev.attr,
-	&dev_attr_rescan.attr,
-	&dev_attr_delete.attr,
-	&dev_attr_state.attr,
-	&dev_attr_timeout.attr,
-	&dev_attr_eh_timeout.attr,
-	&dev_attr_iocounterbits.attr,
-	&dev_attr_iorequest_cnt.attr,
-	&dev_attr_iodone_cnt.attr,
-	&dev_attr_ioerr_cnt.attr,
-	&dev_attr_modalias.attr,
-	REF_EVT(media_change),
-	REF_EVT(inquiry_change_reported),
-	REF_EVT(capacity_change_reported),
-	REF_EVT(soft_threshold_reached),
-	REF_EVT(mode_parameter_change_reported),
-	REF_EVT(lun_change_reported),
-	NULL
-};
-
-static struct attribute_group scsi_sdev_attr_group = {
-	.attrs =	scsi_sdev_attrs,
-};
-
-static const struct attribute_group *scsi_sdev_attr_groups[] = {
-	&scsi_sdev_attr_group,
-	NULL
-};
-
 static ssize_t
-sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,
-			  const char *buf, size_t count)
+sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
 {
 	int depth, retval;
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -848,10 +863,10 @@
 
 	return count;
 }
+sdev_show_function(queue_depth, "%d\n");
 
-static struct device_attribute sdev_attr_queue_depth_rw =
-	__ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
-	       sdev_store_queue_depth_rw);
+static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
+		   sdev_store_queue_depth);
 
 static ssize_t
 sdev_show_queue_ramp_up_period(struct device *dev,
@@ -879,40 +894,79 @@
 	return period;
 }
 
-static struct device_attribute sdev_attr_queue_ramp_up_period =
-	__ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
-	       sdev_show_queue_ramp_up_period,
-	       sdev_store_queue_ramp_up_period);
+static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
+		   sdev_show_queue_ramp_up_period,
+		   sdev_store_queue_ramp_up_period);
 
-static ssize_t
-sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
-			 const char *buf, size_t count)
+static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
+					 struct attribute *attr, int i)
 {
+	struct device *dev = container_of(kobj, struct device, kobj);
 	struct scsi_device *sdev = to_scsi_device(dev);
-	struct scsi_host_template *sht = sdev->host->hostt;
-	int tag_type = 0, retval;
-	int prev_tag_type = scsi_get_tag_type(sdev);
 
-	if (!sdev->tagged_supported || !sht->change_queue_type)
-		return -EINVAL;
 
-	if (strncmp(buf, "ordered", 7) == 0)
-		tag_type = MSG_ORDERED_TAG;
-	else if (strncmp(buf, "simple", 6) == 0)
-		tag_type = MSG_SIMPLE_TAG;
-	else if (strncmp(buf, "none", 4) != 0)
-		return -EINVAL;
+	if (attr == &dev_attr_queue_depth.attr &&
+	    !sdev->host->hostt->change_queue_depth)
+		return S_IRUGO;
 
-	if (tag_type == prev_tag_type)
-		return count;
+	if (attr == &dev_attr_queue_ramp_up_period.attr &&
+	    !sdev->host->hostt->change_queue_depth)
+		return 0;
 
-	retval = sht->change_queue_type(sdev, tag_type);
-	if (retval < 0)
-		return retval;
+	if (attr == &dev_attr_queue_type.attr &&
+	    !sdev->host->hostt->change_queue_type)
+		return S_IRUGO;
 
-	return count;
+	return attr->mode;
 }
 
+/* Default template for device attributes.  May NOT be modified */
+static struct attribute *scsi_sdev_attrs[] = {
+	&dev_attr_device_blocked.attr,
+	&dev_attr_type.attr,
+	&dev_attr_scsi_level.attr,
+	&dev_attr_device_busy.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_model.attr,
+	&dev_attr_rev.attr,
+	&dev_attr_rescan.attr,
+	&dev_attr_delete.attr,
+	&dev_attr_state.attr,
+	&dev_attr_timeout.attr,
+	&dev_attr_eh_timeout.attr,
+	&dev_attr_iocounterbits.attr,
+	&dev_attr_iorequest_cnt.attr,
+	&dev_attr_iodone_cnt.attr,
+	&dev_attr_ioerr_cnt.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_queue_depth.attr,
+	&dev_attr_queue_type.attr,
+	&dev_attr_queue_ramp_up_period.attr,
+	REF_EVT(media_change),
+	REF_EVT(inquiry_change_reported),
+	REF_EVT(capacity_change_reported),
+	REF_EVT(soft_threshold_reached),
+	REF_EVT(mode_parameter_change_reported),
+	REF_EVT(lun_change_reported),
+	NULL
+};
+
+static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+	&dev_attr_vpd_pg83,
+	&dev_attr_vpd_pg80,
+	NULL
+};
+static struct attribute_group scsi_sdev_attr_group = {
+	.attrs =	scsi_sdev_attrs,
+	.bin_attrs =	scsi_sdev_bin_attrs,
+	.is_visible =	scsi_sdev_attr_is_visible,
+};
+
+static const struct attribute_group *scsi_sdev_attr_groups[] = {
+	&scsi_sdev_attr_group,
+	NULL
+};
+
 static int scsi_target_add(struct scsi_target *starget)
 {
 	int error;
@@ -935,10 +989,6 @@
 	return 0;
 }
 
-static struct device_attribute sdev_attr_queue_type_rw =
-	__ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
-	       sdev_store_queue_type_rw);
-
 /**
  * scsi_sysfs_add_sdev - add scsi device to sysfs
  * @sdev:	scsi_device to add
@@ -992,25 +1042,6 @@
 	transport_add_device(&sdev->sdev_gendev);
 	sdev->is_visible = 1;
 
-	/* create queue files, which may be writable, depending on the host */
-	if (sdev->host->hostt->change_queue_depth) {
-		error = device_create_file(&sdev->sdev_gendev,
-					   &sdev_attr_queue_depth_rw);
-		error = device_create_file(&sdev->sdev_gendev,
-					   &sdev_attr_queue_ramp_up_period);
-	}
-	else
-		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
-	if (error)
-		return error;
-
-	if (sdev->host->hostt->change_queue_type)
-		error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
-	else
-		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
-	if (error)
-		return error;
-
 	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
 
 	if (error)
@@ -1060,6 +1091,13 @@
 		sdev->host->hostt->slave_destroy(sdev);
 	transport_destroy_device(dev);
 
+	/*
+	 * Paired with the kref_get() in scsi_sysfs_initialize().  We have
+	 * removed sysfs visibility from the device, so make the target
+	 * invisible if this was the last device underneath it.
+	 */
+	scsi_target_reap(scsi_target(sdev));
+
 	put_device(dev);
 }
 
@@ -1122,7 +1160,7 @@
 			continue;
 		if (starget->dev.parent == dev || &starget->dev == dev) {
 			/* assuming new targets arrive at the end */
-			starget->reap_ref++;
+			kref_get(&starget->reap_ref);
 			spin_unlock_irqrestore(shost->host_lock, flags);
 			if (last)
 				scsi_target_reap(last);
@@ -1206,6 +1244,12 @@
 	list_add_tail(&sdev->same_target_siblings, &starget->devices);
 	list_add_tail(&sdev->siblings, &shost->__devices);
 	spin_unlock_irqrestore(shost->host_lock, flags);
+	/*
+	 * device can now only be removed via __scsi_remove_device() so hold
+	 * the target.  Target will be held in CREATED state until something
+	 * beneath it becomes visible (in which case it moves to RUNNING)
+	 */
+	kref_get(&starget->reap_ref);
 }
 
 int scsi_is_sdev_device(const struct device *dev)
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 84a1fdf..e51add0 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -155,7 +155,8 @@
 	__blk_put_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	__scsi_put_command(shost, cmd, &shost->shost_gendev);
+	__scsi_put_command(shost, cmd);
+	put_device(&shost->shost_gendev);
 }
 EXPORT_SYMBOL_GPL(scsi_host_put_command);
 
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4628fd5..f80908f 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -261,6 +261,7 @@
 	{ FC_PORTSPEED_10GBIT,		"10 Gbit" },
 	{ FC_PORTSPEED_8GBIT,		"8 Gbit" },
 	{ FC_PORTSPEED_16GBIT,		"16 Gbit" },
+	{ FC_PORTSPEED_32GBIT,		"32 Gbit" },
 	{ FC_PORTSPEED_NOT_NEGOTIATED,	"Not Negotiated" },
 };
 fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 470954a..89e6c04 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1463,8 +1463,8 @@
 			sd_print_sense_hdr(sdkp, &sshdr);
 		/* we need to evaluate the error return  */
 		if (scsi_sense_valid(&sshdr) &&
-			/* 0x3a is medium not present */
-			sshdr.asc == 0x3a)
+			(sshdr.asc == 0x3a ||	/* medium not present */
+			 sshdr.asc == 0x20))	/* invalid command */
 				/* this is no error here */
 				return 0;
 
@@ -2281,7 +2281,7 @@
 
 	set_disk_ro(sdkp->disk, 0);
 	if (sdp->skip_ms_page_3f) {
-		sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
+		sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
 		return;
 	}
 
@@ -2313,7 +2313,7 @@
 	}
 
 	if (!scsi_status_is_good(res)) {
-		sd_printk(KERN_WARNING, sdkp,
+		sd_first_printk(KERN_WARNING, sdkp,
 			  "Test WP failed, assume Write Enabled\n");
 	} else {
 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
@@ -2381,7 +2381,8 @@
 	if (!data.header_length) {
 		modepage = 6;
 		first_len = 0;
-		sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
+		sd_first_printk(KERN_ERR, sdkp,
+				"Missing header in MODE_SENSE response\n");
 	}
 
 	/* that went OK, now ask for the proper length */
@@ -2394,7 +2395,7 @@
 	if (len < 3)
 		goto bad_sense;
 	else if (len > SD_BUF_SIZE) {
-		sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+		sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
 			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);
 		len = SD_BUF_SIZE;
 	}
@@ -2417,8 +2418,9 @@
 				/* We're interested only in the first 3 bytes.
 				 */
 				if (len - offset <= 2) {
-					sd_printk(KERN_ERR, sdkp, "Incomplete "
-						  "mode parameter data\n");
+					sd_first_printk(KERN_ERR, sdkp,
+						"Incomplete mode parameter "
+							"data\n");
 					goto defaults;
 				} else {
 					modepage = page_code;
@@ -2432,14 +2434,15 @@
 				else if (!spf && len - offset > 1)
 					offset += 2 + buffer[offset+1];
 				else {
-					sd_printk(KERN_ERR, sdkp, "Incomplete "
-						  "mode parameter data\n");
+					sd_first_printk(KERN_ERR, sdkp,
+							"Incomplete mode "
+							"parameter data\n");
 					goto defaults;
 				}
 			}
 		}
 
-		sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
+		sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
 		goto defaults;
 
 	Page_found:
@@ -2453,7 +2456,7 @@
 
 		sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
 		if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
-			sd_printk(KERN_NOTICE, sdkp,
+			sd_first_printk(KERN_NOTICE, sdkp,
 				  "Uses READ/WRITE(6), disabling FUA\n");
 			sdkp->DPOFUA = 0;
 		}
@@ -2475,16 +2478,19 @@
 	    sshdr.sense_key == ILLEGAL_REQUEST &&
 	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
 		/* Invalid field in CDB */
-		sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
+		sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
 	else
-		sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
+		sd_first_printk(KERN_ERR, sdkp,
+				"Asking for cache data failed\n");
 
 defaults:
 	if (sdp->wce_default_on) {
-		sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n");
+		sd_first_printk(KERN_NOTICE, sdkp,
+				"Assuming drive cache: write back\n");
 		sdkp->WCE = 1;
 	} else {
-		sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
+		sd_first_printk(KERN_ERR, sdkp,
+				"Assuming drive cache: write through\n");
 		sdkp->WCE = 0;
 	}
 	sdkp->RCD = 0;
@@ -2513,7 +2519,7 @@
 
 	if (!scsi_status_is_good(res) || !data.header_length ||
 	    data.length < 6) {
-		sd_printk(KERN_WARNING, sdkp,
+		sd_first_printk(KERN_WARNING, sdkp,
 			  "getting Control mode page failed, assume no ATO\n");
 
 		if (scsi_sense_valid(&sshdr))
@@ -2525,7 +2531,7 @@
 	offset = data.header_length + data.block_descriptor_length;
 
 	if ((buffer[offset] & 0x3f) != 0x0a) {
-		sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
+		sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
 		return;
 	}
 
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 26895ff..620871e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -104,6 +104,12 @@
 		    (sdsk)->disk->disk_name, ##a) :			\
 	sdev_printk(prefix, (sdsk)->device, fmt, ##a)
 
+#define sd_first_printk(prefix, sdsk, fmt, a...)			\
+	do {								\
+		if ((sdsk)->first_scan)					\
+			sd_printk(prefix, sdsk, fmt, ##a);		\
+	} while (0)
+
 static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
 {
 	switch (scmd->cmnd[0]) {
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index eba183c..80bfece 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/enclosure.h>
+#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -448,27 +449,18 @@
 static void ses_match_to_enclosure(struct enclosure_device *edev,
 				   struct scsi_device *sdev)
 {
-	unsigned char *buf;
 	unsigned char *desc;
-	unsigned int vpd_len;
 	struct efd efd = {
 		.addr = 0,
 	};
 
-	buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
-	if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
-		goto free;
-
 	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-	vpd_len = ((buf[2] << 8) | buf[3]) + 4;
-	kfree(buf);
-	buf = kmalloc(vpd_len, GFP_KERNEL);
-	if (!buf ||scsi_get_vpd_page(sdev, 0x83, buf, vpd_len))
-		goto free;
+	if (!sdev->vpd_pg83_len)
+		return;
 
-	desc = buf + 4;
-	while (desc < buf + vpd_len) {
+	desc = sdev->vpd_pg83 + 4;
+	while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) {
 		enum scsi_protocol proto = desc[0] >> 4;
 		u8 code_set = desc[0] & 0x0f;
 		u8 piv = desc[1] & 0x80;
@@ -478,25 +470,15 @@
 
 		if (piv && code_set == 1 && assoc == 1
 		    && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
-			efd.addr = (u64)desc[4] << 56 |
-				(u64)desc[5] << 48 |
-				(u64)desc[6] << 40 |
-				(u64)desc[7] << 32 |
-				(u64)desc[8] << 24 |
-				(u64)desc[9] << 16 |
-				(u64)desc[10] << 8 |
-				(u64)desc[11];
+			efd.addr = get_unaligned_be64(&desc[4]);
 
 		desc += len + 4;
 	}
-	if (!efd.addr)
-		goto free;
+	if (efd.addr) {
+		efd.dev = &sdev->sdev_gendev;
 
-	efd.dev = &sdev->sdev_gendev;
-
-	enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
- free:
-	kfree(buf);
+		enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
+	}
 }
 
 static int ses_intf_add(struct device *cdev,
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a1d6986..afc834e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2198,12 +2198,19 @@
 	struct st_modedef *STm;
 	char *name = tape_name(STp);
 	struct cdev *cd0, *cd1;
+	struct device *d0, *d1;
 
 	STm = &(STp->modes[STp->current_mode]);
 	if (!STm->defined) {
-		cd0 = STm->cdevs[0]; cd1 = STm->cdevs[1];
+		cd0 = STm->cdevs[0];
+		cd1 = STm->cdevs[1];
+		d0  = STm->devs[0];
+		d1  = STm->devs[1];
 		memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef));
-		STm->cdevs[0] = cd0; STm->cdevs[1] = cd1;
+		STm->cdevs[0] = cd0;
+		STm->cdevs[1] = cd1;
+		STm->devs[0]  = d0;
+		STm->devs[1]  = d1;
 		modes_defined = 1;
                 DEBC(printk(ST_DEB_MSG
                             "%s: Initialized mode %d definition from mode 0\n",
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index f1e4b41..a4abce9 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -259,7 +259,7 @@
 	    instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
 
 	if (instance->irq != SCSI_IRQ_NONE) 
-	    if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128",
+	    if (request_irq(instance->irq, t128_intr, 0, "t128",
 			    instance)) {
 		printk("scsi%d : IRQ%d not free, interrupts disabled\n", 
 		    instance->host_no, instance->irq);
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 9c216e5..5a03bb3 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -873,7 +873,7 @@
 
    /* Board detected, allocate its IRQ */
    if (request_irq(irq, do_interrupt_handler,
-             IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
+             (subversion == ESA) ? IRQF_SHARED : 0,
              driver_name, (void *) &sha[j])) {
       printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
       goto freelock;
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index b9755ec..c88e146 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1,7 +1,7 @@
 /*
  * Linux driver for VMware's para-virtualized SCSI HBA.
  *
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -32,6 +32,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
 
 #include "vmw_pvscsi.h"
 
@@ -44,7 +45,7 @@
 
 #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING	8
 #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING	1
-#define PVSCSI_DEFAULT_QUEUE_DEPTH		64
+#define PVSCSI_DEFAULT_QUEUE_DEPTH		254
 #define SGL_SIZE				PAGE_SIZE
 
 struct pvscsi_sg_list {
@@ -62,6 +63,7 @@
 	dma_addr_t		dataPA;
 	dma_addr_t		sensePA;
 	dma_addr_t		sglPA;
+	struct completion	*abort_cmp;
 };
 
 struct pvscsi_adapter {
@@ -71,6 +73,7 @@
 	bool				use_msi;
 	bool				use_msix;
 	bool				use_msg;
+	bool				use_req_threshold;
 
 	spinlock_t			hw_lock;
 
@@ -102,18 +105,22 @@
 
 
 /* Command line parameters */
-static int pvscsi_ring_pages     = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+static int pvscsi_ring_pages;
 static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
 static int pvscsi_cmd_per_lun    = PVSCSI_DEFAULT_QUEUE_DEPTH;
 static bool pvscsi_disable_msi;
 static bool pvscsi_disable_msix;
 static bool pvscsi_use_msg       = true;
+static bool pvscsi_use_req_threshold = true;
 
 #define PVSCSI_RW (S_IRUSR | S_IWUSR)
 
 module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
 MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
-		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
+		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING)
+		 "[up to 16 targets],"
+		 __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
+		 "[for 16+ targets])");
 
 module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
 MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
@@ -121,7 +128,7 @@
 
 module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
 MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
-		 __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
+		 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");
 
 module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
 MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
@@ -132,6 +139,10 @@
 module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
 MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
 
+module_param_named(use_req_threshold, pvscsi_use_req_threshold,
+		   bool, PVSCSI_RW);
+MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");
+
 static const struct pci_device_id pvscsi_pci_tbl[] = {
 	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
 	{ 0 }
@@ -177,6 +188,7 @@
 				   struct pvscsi_ctx *ctx)
 {
 	ctx->cmd = NULL;
+	ctx->abort_cmp = NULL;
 	list_add(&ctx->list, &adapter->cmd_pool);
 }
 
@@ -280,10 +292,15 @@
 static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
 			   unsigned char op)
 {
-	if (scsi_is_rw(op))
-		pvscsi_kick_rw_io(adapter);
-	else
+	if (scsi_is_rw(op)) {
+		struct PVSCSIRingsState *s = adapter->rings_state;
+
+		if (!adapter->use_req_threshold ||
+		    s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
+			pvscsi_kick_rw_io(adapter);
+	} else {
 		pvscsi_process_request_ring(adapter);
+	}
 }
 
 static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
@@ -487,6 +504,35 @@
 	}
 }
 
+static int pvscsi_change_queue_depth(struct scsi_device *sdev,
+				     int qdepth,
+				     int reason)
+{
+	int max_depth;
+	struct Scsi_Host *shost = sdev->host;
+
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		/*
+		 * We support only changing default.
+		 */
+		return -EOPNOTSUPP;
+
+	max_depth = shost->can_queue;
+	if (!sdev->tagged_supported)
+		max_depth = 1;
+	if (qdepth > max_depth)
+		qdepth = max_depth;
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+
+	if (sdev->inquiry_len > 7)
+		sdev_printk(KERN_INFO, sdev,
+			    "qdepth(%d), tagged(%d), simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
+			    sdev->queue_depth, sdev->tagged_supported,
+			    sdev->simple_tags, sdev->ordered_tags,
+			    sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1);
+	return sdev->queue_depth;
+}
+
 /*
  * Pull a completion descriptor off and pass the completion back
  * to the SCSI mid layer.
@@ -496,15 +542,27 @@
 {
 	struct pvscsi_ctx *ctx;
 	struct scsi_cmnd *cmd;
+	struct completion *abort_cmp;
 	u32 btstat = e->hostStatus;
 	u32 sdstat = e->scsiStatus;
 
 	ctx = pvscsi_get_context(adapter, e->context);
 	cmd = ctx->cmd;
+	abort_cmp = ctx->abort_cmp;
 	pvscsi_unmap_buffers(adapter, ctx);
 	pvscsi_release_context(adapter, ctx);
-	cmd->result = 0;
+	if (abort_cmp) {
+		/*
+		 * The command was requested to be aborted. Just signal that
+		 * the request completed and swallow the actual cmd completion
+		 * here. The abort handler will post a completion for this
+		 * command indicating that it got successfully aborted.
+		 */
+		complete(abort_cmp);
+		return;
+	}
 
+	cmd->result = 0;
 	if (sdstat != SAM_STAT_GOOD &&
 	    (btstat == BTSTAT_SUCCESS ||
 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
@@ -726,6 +784,8 @@
 	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
 	struct pvscsi_ctx *ctx;
 	unsigned long flags;
+	int result = SUCCESS;
+	DECLARE_COMPLETION_ONSTACK(abort_cmp);
 
 	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
 		    adapter->host->host_no, cmd);
@@ -748,13 +808,40 @@
 		goto out;
 	}
 
-	pvscsi_abort_cmd(adapter, ctx);
+	/*
+	 * Mark that the command has been requested to be aborted and issue
+	 * the abort.
+	 */
+	ctx->abort_cmp = &abort_cmp;
 
-	pvscsi_process_completion_ring(adapter);
+	pvscsi_abort_cmd(adapter, ctx);
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+	/* Wait for 2 secs for the completion. */
+	wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	if (!completion_done(&abort_cmp)) {
+		/*
+		 * Failed to abort the command, unmark the fact that it
+		 * was requested to be aborted.
+		 */
+		ctx->abort_cmp = NULL;
+		result = FAILED;
+		scmd_printk(KERN_DEBUG, cmd,
+			    "Failed to get completion for aborted cmd %p\n",
+			    cmd);
+		goto out;
+	}
+
+	/*
+	 * Successfully aborted the command.
+	 */
+	cmd->result = (DID_ABORT << 16);
+	cmd->scsi_done(cmd);
 
 out:
 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
-	return SUCCESS;
+	return result;
 }
 
 /*
@@ -911,6 +998,7 @@
 	.dma_boundary			= UINT_MAX,
 	.max_sectors			= 0xffff,
 	.use_clustering			= ENABLE_CLUSTERING,
+	.change_queue_depth		= pvscsi_change_queue_depth,
 	.eh_abort_handler		= pvscsi_abort,
 	.eh_device_reset_handler	= pvscsi_device_reset,
 	.eh_bus_reset_handler		= pvscsi_bus_reset,
@@ -1034,6 +1122,34 @@
 	return 1;
 }
 
+static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
+				      bool enable)
+{
+	u32 val;
+
+	if (!pvscsi_use_req_threshold)
+		return false;
+
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+			 PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
+	val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
+	if (val == -1) {
+		printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
+		return false;
+	} else {
+		struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
+		cmd_msg.enable = enable;
+		printk(KERN_INFO
+		       "vmw_pvscsi: %sabling reqCallThreshold\n",
+			enable ? "en" : "dis");
+		pvscsi_write_cmd_desc(adapter,
+				      PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
+				      &cmd_msg, sizeof(cmd_msg));
+		return pvscsi_reg_read(adapter,
+				       PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
+	}
+}
+
 static irqreturn_t pvscsi_isr(int irq, void *devp)
 {
 	struct pvscsi_adapter *adapter = devp;
@@ -1236,11 +1352,12 @@
 static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct pvscsi_adapter *adapter;
-	struct Scsi_Host *host;
-	struct device *dev;
+	struct pvscsi_adapter adapter_temp;
+	struct Scsi_Host *host = NULL;
 	unsigned int i;
 	unsigned long flags = 0;
 	int error;
+	u32 max_id;
 
 	error = -ENODEV;
 
@@ -1258,34 +1375,19 @@
 		goto out_disable_device;
 	}
 
-	pvscsi_template.can_queue =
-		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
-		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
-	pvscsi_template.cmd_per_lun =
-		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
-	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
-	if (!host) {
-		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
-		goto out_disable_device;
-	}
-
-	adapter = shost_priv(host);
+	/*
+	 * Let's use a temp pvscsi_adapter struct until we find the number of
+	 * targets on the adapter; after that we will switch to the real
+	 * allocated struct.
+	 */
+	adapter = &adapter_temp;
 	memset(adapter, 0, sizeof(*adapter));
 	adapter->dev  = pdev;
-	adapter->host = host;
-
-	spin_lock_init(&adapter->hw_lock);
-
-	host->max_channel = 0;
-	host->max_id      = 16;
-	host->max_lun     = 1;
-	host->max_cmd_len = 16;
-
 	adapter->rev = pdev->revision;
 
 	if (pci_request_regions(pdev, "vmw_pvscsi")) {
 		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
-		goto out_free_host;
+		goto out_disable_device;
 	}
 
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1301,7 +1403,7 @@
 	if (i == DEVICE_COUNT_RESOURCE) {
 		printk(KERN_ERR
 		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
-		goto out_release_resources;
+		goto out_release_resources_and_disable;
 	}
 
 	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
@@ -1310,10 +1412,60 @@
 		printk(KERN_ERR
 		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
 		       i, PVSCSI_MEM_SPACE_SIZE);
-		goto out_release_resources;
+		goto out_release_resources_and_disable;
 	}
 
 	pci_set_master(pdev);
+
+	/*
+	 * Ask the device for max number of targets before deciding the
+	 * default pvscsi_ring_pages value.
+	 */
+	max_id = pvscsi_get_max_targets(adapter);
+	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);
+
+	if (pvscsi_ring_pages == 0)
+		/*
+		 * Set the right default value: for up to 16 targets it is 8
+		 * pages, above that it is the max.
+		 */
+		pvscsi_ring_pages = (max_id > 16) ?
+			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
+			PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+	printk(KERN_INFO
+	       "vmw_pvscsi: setting ring_pages to %d\n",
+	       pvscsi_ring_pages);
+
+	pvscsi_template.can_queue =
+		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
+		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+	pvscsi_template.cmd_per_lun =
+		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
+	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
+	if (!host) {
+		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
+		goto out_release_resources_and_disable;
+	}
+
+	/*
+	 * Let's use the real pvscsi_adapter struct here onwards.
+	 */
+	adapter = shost_priv(host);
+	memset(adapter, 0, sizeof(*adapter));
+	adapter->dev  = pdev;
+	adapter->host = host;
+	/*
+	 * Copy back what we already have to the allocated adapter struct.
+	 */
+	adapter->rev = adapter_temp.rev;
+	adapter->mmioBase = adapter_temp.mmioBase;
+
+	spin_lock_init(&adapter->hw_lock);
+	host->max_channel = 0;
+	host->max_lun     = 1;
+	host->max_cmd_len = 16;
+	host->max_id      = max_id;
+
 	pci_set_drvdata(pdev, host);
 
 	ll_adapter_reset(adapter);
@@ -1327,13 +1479,6 @@
 	}
 
 	/*
-	 * Ask the device for max number of targets.
-	 */
-	host->max_id = pvscsi_get_max_targets(adapter);
-	dev = pvscsi_dev(adapter);
-	dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
-
-	/*
 	 * From this point on we should reset the adapter if anything goes
 	 * wrong.
 	 */
@@ -1373,6 +1518,10 @@
 		flags = IRQF_SHARED;
 	}
 
+	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
+	printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
+	       adapter->use_req_threshold ? "en" : "dis");
+
 	error = request_irq(adapter->irq, pvscsi_isr, flags,
 			    "vmw_pvscsi", adapter);
 	if (error) {
@@ -1402,12 +1551,15 @@
 	ll_adapter_reset(adapter);
 out_release_resources:
 	pvscsi_release_resources(adapter);
-out_free_host:
 	scsi_host_put(host);
 out_disable_device:
 	pci_disable_device(pdev);
 
 	return error;
+
+out_release_resources_and_disable:
+	pvscsi_release_resources(adapter);
+	goto out_disable_device;
 }
 
 static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 3546e86..ce45888 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -1,7 +1,7 @@
 /*
  * VMware PVSCSI header file
  *
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -26,7 +26,7 @@
 
 #include <linux/types.h>
 
-#define PVSCSI_DRIVER_VERSION_STRING   "1.0.2.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING   "1.0.5.0-k"
 
 #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
 
@@ -117,8 +117,9 @@
 	PVSCSI_CMD_CONFIG            = 7,
 	PVSCSI_CMD_SETUP_MSG_RING    = 8,
 	PVSCSI_CMD_DEVICE_UNPLUG     = 9,
+	PVSCSI_CMD_SETUP_REQCALLTHRESHOLD     = 10,
 
-	PVSCSI_CMD_LAST              = 10  /* has to be last */
+	PVSCSI_CMD_LAST              = 11  /* has to be last */
 };
 
 /*
@@ -141,6 +142,14 @@
 	u32 _pad;
 } __packed;
 
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_REQCALLTHRESHOLD --
+ */
+
+struct PVSCSICmdDescSetupReqCall {
+	u32 enable;
+} __packed;
+
 enum PVSCSIConfigPageType {
 	PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
 	PVSCSI_CONFIG_PAGE_PHY        = 0x1959,
@@ -261,7 +270,9 @@
 	u32	cmpConsIdx;
 	u32	cmpNumEntriesLog2;
 
-	u8	_pad[104];
+	u32	reqCallThreshold;
+
+	u8	_pad[100];
 
 	u32	msgProdIdx;
 	u32	msgConsIdx;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index f9a6e4b..3267423 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1252,7 +1252,7 @@
 		return 0;
 
 
-	if (request_irq(host->irq, wd7000_intr, IRQF_DISABLED, "wd7000", host)) {
+	if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) {
 		printk("wd7000_init: can't get IRQ %d.\n", host->irq);
 		return (0);
 	}
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 297a802..d4de24b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -728,6 +728,7 @@
 #define PCI_DEVICE_ID_SI_7018		0x7018
 
 #define PCI_VENDOR_ID_HP		0x103c
+#define PCI_VENDOR_ID_HP_3PAR		0x1590
 #define PCI_DEVICE_ID_HP_VISUALIZE_EG	0x1005
 #define PCI_DEVICE_ID_HP_VISUALIZE_FX6	0x1006
 #define PCI_DEVICE_ID_HP_VISUALIZE_FX4	0x1008
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 309f513..7221a24 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -327,12 +327,19 @@
 	struct iscsi_transport	*tt;
 	struct Scsi_Host	*host;
 	struct iscsi_conn	*leadconn;	/* leading connection */
-	spinlock_t		lock;		/* protects session state, *
-						 * sequence numbers,       *
+	/* Between the forward and the backward locks exists a strict locking
+	 * hierarchy. The mutual exclusion zone protected by the forward lock
+	 * can enclose the mutual exclusion zone protected by the backward lock
+	 * but not vice versa.
+	 */
+	spinlock_t		frwd_lock;	/* protects session state, *
+						 * cmdsn, queued_cmdsn     *
 						 * session resources:      *
-						 * - cmdpool,		   *
-						 * - mgmtpool,		   *
-						 * - r2tpool		   */
+						 * - cmdpool kfifo_out,    *
+						 * - mgmtpool,		   */
+	spinlock_t		back_lock;	/* protects cmdsn_exp      *
+						 * cmdsn_max,              *
+						 * cmdpool kfifo_in        */
 	int			state;		/* session state           */
 	int			age;		/* counts session re-opens */
 
diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h
index 215469a..2a7aa75 100644
--- a/include/scsi/libiscsi_tcp.h
+++ b/include/scsi/libiscsi_tcp.h
@@ -83,6 +83,8 @@
 	struct iscsi_pool	r2tpool;
 	struct kfifo		r2tqueue;
 	void			*dd_data;
+	spinlock_t		pool2queue;
+	spinlock_t		queue2pool;
 };
 
 enum {
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 91558a1..dd7c998 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -142,8 +142,7 @@
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
 extern struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *, gfp_t);
 extern void scsi_put_command(struct scsi_cmnd *);
-extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
-			       struct device *);
+extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *);
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
 
 extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
@@ -156,9 +155,6 @@
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
 
-struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask);
-void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd);
-
 static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
 {
 	return cmd->sdb.table.nents;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index d65fbec..4e845b8 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -113,6 +113,12 @@
 	const char * vendor;		/* [back_compat] point into 'inquiry' ... */
 	const char * model;		/* ... after scan; point to static string */
 	const char * rev;		/* ... "nullnullnullnull" before scan */
+
+#define SCSI_VPD_PG_LEN                255
+	int vpd_pg83_len;
+	unsigned char *vpd_pg83;
+	int vpd_pg80_len;
+	unsigned char *vpd_pg80;
 	unsigned char current_tag;	/* current tag */
 	struct scsi_target      *sdev_target;   /* used only for single_lun */
 
@@ -235,12 +241,24 @@
 #define sdev_printk(prefix, sdev, fmt, a...)	\
 	dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)
 
+#define sdev_dbg(sdev, fmt, a...) \
+	dev_dbg(&(sdev)->sdev_gendev, fmt, ##a)
+
 #define scmd_printk(prefix, scmd, fmt, a...)				\
         (scmd)->request->rq_disk ?					\
 	sdev_printk(prefix, (scmd)->device, "[%s] " fmt,		\
 		    (scmd)->request->rq_disk->disk_name, ##a) :		\
 	sdev_printk(prefix, (scmd)->device, fmt, ##a)
 
+#define scmd_dbg(scmd, fmt, a...)					   \
+	do {								   \
+		if ((scmd)->request->rq_disk)				   \
+			sdev_dbg((scmd)->device, "[%s] " fmt,		   \
+				 (scmd)->request->rq_disk->disk_name, ##a);\
+		else							   \
+			sdev_dbg((scmd)->device, fmt, ##a);		   \
+	} while (0)
+
 enum scsi_target_state {
 	STARGET_CREATED = 1,
 	STARGET_RUNNING,
@@ -257,7 +275,7 @@
 	struct list_head	siblings;
 	struct list_head	devices;
 	struct device		dev;
-	unsigned int		reap_ref; /* protected by the host lock */
+	struct kref		reap_ref; /* last put renders target invisible */
 	unsigned int		channel;
 	unsigned int		id; /* target id ... replace
 				     * scsi_device.id eventually */
@@ -284,7 +302,6 @@
 #define SCSI_DEFAULT_TARGET_BLOCKED	3
 
 	char			scsi_level;
-	struct execute_work	ew;
 	enum scsi_target_state	state;
 	void 			*hostdata; /* available to low-level driver */
 	unsigned long		starget_data[0]; /* for the transport */
@@ -309,6 +326,7 @@
 extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh);
 extern void scsi_remove_device(struct scsi_device *);
 extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
+void scsi_attach_vpd(struct scsi_device *sdev);
 
 extern int scsi_device_get(struct scsi_device *);
 extern void scsi_device_put(struct scsi_device *);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 53075e5..94844fc 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -15,6 +15,7 @@
 struct module;
 struct scsi_cmnd;
 struct scsi_device;
+struct scsi_host_cmd_pool;
 struct scsi_target;
 struct Scsi_Host;
 struct scsi_host_cmd_pool;
@@ -524,6 +525,12 @@
 	 *   scsi_netlink.h
 	 */
 	u64 vendor_id;
+
+	/*
+	 * Additional per-command data allocated for the driver.
+	 */
+	unsigned int cmd_size;
+	struct scsi_host_cmd_pool *cmd_pool;
 };
 
 /*
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index b797e8f..8c79980 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -130,6 +130,7 @@
 #define FC_PORTSPEED_4GBIT		8
 #define FC_PORTSPEED_8GBIT		0x10
 #define FC_PORTSPEED_16GBIT		0x20
+#define FC_PORTSPEED_32GBIT		0x40
 #define FC_PORTSPEED_NOT_NEGOTIATED	(1 << 15) /* Speed not established */
 
 /*