Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
index 29c1b8de..2ea0dfe 100644
--- a/drivers/net/bna/bfa_defs.h
+++ b/drivers/net/bna/bfa_defs.h
@@ -112,16 +112,18 @@
  * IOC states
  */
 enum bfa_ioc_state {
-	BFA_IOC_RESET		= 1,	/*!< IOC is in reset state */
-	BFA_IOC_SEMWAIT		= 2,	/*!< Waiting for IOC h/w semaphore */
-	BFA_IOC_HWINIT		= 3,	/*!< IOC h/w is being initialized */
-	BFA_IOC_GETATTR		= 4,	/*!< IOC is being configured */
-	BFA_IOC_OPERATIONAL	= 5,	/*!< IOC is operational */
-	BFA_IOC_INITFAIL	= 6,	/*!< IOC hardware failure */
-	BFA_IOC_HBFAIL		= 7,	/*!< IOC heart-beat failure */
-	BFA_IOC_DISABLING	= 8,	/*!< IOC is being disabled */
-	BFA_IOC_DISABLED	= 9,	/*!< IOC is disabled */
-	BFA_IOC_FWMISMATCH	= 10,	/*!< IOC f/w different from drivers */
+	BFA_IOC_UNINIT		= 1,	/*!< IOC is in uninit state */
+	BFA_IOC_RESET		= 2,	/*!< IOC is in reset state */
+	BFA_IOC_SEMWAIT		= 3,	/*!< Waiting for IOC h/w semaphore */
+	BFA_IOC_HWINIT		= 4,	/*!< IOC h/w is being initialized */
+	BFA_IOC_GETATTR		= 5,	/*!< IOC is being configured */
+	BFA_IOC_OPERATIONAL	= 6,	/*!< IOC is operational */
+	BFA_IOC_INITFAIL	= 7,	/*!< IOC hardware failure */
+	BFA_IOC_FAIL		= 8,	/*!< IOC heart-beat failure */
+	BFA_IOC_DISABLING	= 9,	/*!< IOC is being disabled */
+	BFA_IOC_DISABLED	= 10,	/*!< IOC is disabled */
+	BFA_IOC_FWMISMATCH	= 11,	/*!< IOC f/w different from drivers */
+	BFA_IOC_ENABLING	= 12,	/*!< IOC is being enabled */
 };
 
 /**
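
Note: this hunk renumbers every existing bfa_ioc_state value: BFA_IOC_UNINIT is inserted at 1, BFA_IOC_HBFAIL becomes BFA_IOC_FAIL, and BFA_IOC_ENABLING is appended at 12. Anything that decodes raw state numbers instead of the enum names (debug dumps, management tools) will misreport states after this change. A minimal sketch of a lookup that stays correct across the renumbering; the helper below is hypothetical and not part of the patch:

	/* Hypothetical decoding helper, for illustration only. */
	static const char *bfa_ioc_state_name(enum bfa_ioc_state state)
	{
		switch (state) {
		case BFA_IOC_UNINIT:		return "uninit";
		case BFA_IOC_RESET:		return "reset";
		case BFA_IOC_SEMWAIT:		return "semwait";
		case BFA_IOC_HWINIT:		return "hwinit";
		case BFA_IOC_GETATTR:		return "getattr";
		case BFA_IOC_OPERATIONAL:	return "operational";
		case BFA_IOC_INITFAIL:		return "initfail";
		case BFA_IOC_FAIL:		return "fail";
		case BFA_IOC_DISABLING:		return "disabling";
		case BFA_IOC_DISABLED:		return "disabled";
		case BFA_IOC_FWMISMATCH:	return "fwmismatch";
		case BFA_IOC_ENABLING:		return "enabling";
		}
		return "unknown";
	}
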
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
index 987978f..fdd6776 100644
--- a/drivers/net/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/bna/bfa_defs_mfg_comm.h
@@ -95,28 +95,6 @@
 	(type) == BFA_MFG_TYPE_CNA10P1 || \
 	bfa_mfg_is_mezz(type)))
 
-/**
- * Check if the card having old wwn/mac handling
- */
-#define bfa_mfg_is_old_wwn_mac_model(type) (( \
-	(type) == BFA_MFG_TYPE_FC8P2 || \
-	(type) == BFA_MFG_TYPE_FC8P1 || \
-	(type) == BFA_MFG_TYPE_FC4P2 || \
-	(type) == BFA_MFG_TYPE_FC4P1 || \
-	(type) == BFA_MFG_TYPE_CNA10P2 || \
-	(type) == BFA_MFG_TYPE_CNA10P1 || \
-	(type) == BFA_MFG_TYPE_JAYHAWK || \
-	(type) == BFA_MFG_TYPE_WANCHESE))
-
-#define bfa_mfg_increment_wwn_mac(m, i)				\
-do {								\
-	u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2];	\
-	t += (i);						\
-	(m)[0] = (t >> 16) & 0xFF;				\
-	(m)[1] = (t >> 8) & 0xFF;				\
-	(m)[2] = t & 0xFF;					\
-} while (0)
-
 #define bfa_mfg_adapter_prop_init_flash(card_type, prop)	\
 do {								\
 	switch ((card_type)) {					\
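
For reference, the deleted bfa_mfg_increment_wwn_mac() macro treated the last three octets of a MAC/WWN as one 24-bit big-endian counter and added an offset to it; the removed bfa_ioc_get_mfg_mac() further down used it to derive per-PCI-function MAC addresses. A plain-function sketch of the same arithmetic, kept here only to document what is going away:

	/* Rough C equivalent of the removed macro, for reference only. */
	static void mfg_increment_wwn_mac(u8 *m, u32 i)
	{
		u32 t = (m[0] << 16) | (m[1] << 8) | m[2];	/* last 3 octets */

		t += i;
		m[0] = (t >> 16) & 0xFF;
		m[1] = (t >> 8) & 0xFF;
		m[2] = t & 0xFF;
	}
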
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index e94e5aa9..34933cb 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -26,25 +26,6 @@
  * IOC local definitions
  */
 
-#define bfa_ioc_timer_start(__ioc)					\
-	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
-			msecs_to_jiffies(BFA_IOC_TOV))
-#define bfa_ioc_timer_stop(__ioc)   del_timer(&(__ioc)->ioc_timer)
-
-#define bfa_ioc_recovery_timer_start(__ioc)				\
-	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
-			msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
-
-#define bfa_sem_timer_start(__ioc)					\
-	mod_timer(&(__ioc)->sem_timer, jiffies +	\
-			msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
-#define bfa_sem_timer_stop(__ioc)	del_timer(&(__ioc)->sem_timer)
-
-#define bfa_hb_timer_start(__ioc)					\
-	mod_timer(&(__ioc)->hb_timer, jiffies +		\
-			msecs_to_jiffies(BFA_IOC_HB_TOV))
-#define bfa_hb_timer_stop(__ioc)	del_timer(&(__ioc)->hb_timer)
-
 /**
  * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
  */
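
The timer wrappers removed above are not reintroduced; the rewritten state machines below arm and cancel their timers directly with mod_timer()/del_timer() against the ioc_timer, iocpf_timer and sem_timer fields, and BFA_IOC_TOV_RECOVER disappears because auto-recovery is now sequenced through the IOCPF fail-sync states. A hypothetical helper showing what each open-coded start expands to, included only to make the pattern explicit:

	/* Hypothetical wrapper, equivalent to the removed bfa_ioc_timer_start()
	 * macro and to what the FSM entry functions below now open-code.
	 */
	static inline void ioc_tov_timer_start(struct timer_list *tmr)
	{
		mod_timer(tmr, jiffies + msecs_to_jiffies(BFA_IOC_TOV));
	}
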
@@ -55,11 +36,16 @@
 			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
-#define bfa_ioc_notify_hbfail(__ioc)			\
-			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
-
-#define bfa_ioc_is_optrom(__ioc)	\
-	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
+#define bfa_ioc_notify_fail(__ioc)			\
+			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_join(__ioc)			\
+			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
+#define bfa_ioc_sync_leave(__ioc)			\
+			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
+#define bfa_ioc_sync_ack(__ioc)				\
+			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
+#define bfa_ioc_sync_complete(__ioc)			\
+			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
 
 #define bfa_ioc_mbox_cmd_pending(__ioc)		\
 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -85,6 +71,12 @@
 static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
 			 u32 boot_param);
 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
@@ -101,72 +93,173 @@
 						char *manufacturer);
 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
-static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
 
 /**
- * IOC state machine events
+ * IOC state machine definitions/declarations
  */
 enum ioc_event {
-	IOC_E_ENABLE		= 1,	/*!< IOC enable request		*/
-	IOC_E_DISABLE		= 2,	/*!< IOC disable request	*/
-	IOC_E_TIMEOUT		= 3,	/*!< f/w response timeout	*/
-	IOC_E_FWREADY		= 4,	/*!< f/w initialization done	*/
-	IOC_E_FWRSP_GETATTR	= 5,	/*!< IOC get attribute response	*/
-	IOC_E_FWRSP_ENABLE	= 6,	/*!< enable f/w response	*/
-	IOC_E_FWRSP_DISABLE	= 7,	/*!< disable f/w response	*/
-	IOC_E_HBFAIL		= 8,	/*!< heartbeat failure		*/
-	IOC_E_HWERROR		= 9,	/*!< hardware error interrupt	*/
-	IOC_E_SEMLOCKED		= 10,	/*!< h/w semaphore is locked	*/
-	IOC_E_DETACH		= 11,	/*!< driver detach cleanup	*/
+	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
+	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
+	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
+	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
+	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
+	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
+	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
+	IOC_E_INITFAILED	= 8,	/*!< failure notice by iocpf sm	*/
+	IOC_E_PFAILED		= 9,	/*!< failure notice by iocpf sm	*/
+	IOC_E_HBFAIL		= 10,	/*!< heartbeat failure		*/
+	IOC_E_HWERROR		= 11,	/*!< hardware error interrupt	*/
+	IOC_E_TIMEOUT		= 12,	/*!< timeout			*/
 };
 
+bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
 
 static struct bfa_sm_table ioc_sm_table[] = {
+	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
 	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
-	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
-	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
-	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
-	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
-	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
+	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
 	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
 	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
-	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
-	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
+	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
+	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
 };
 
 /**
+ * IOCPF state machine definitions/declarations
+ */
+
+/*
+ * Forward declarations for iocpf state machine
+ */
+static void bfa_iocpf_enable(struct bfa_ioc *ioc);
+static void bfa_iocpf_disable(struct bfa_ioc *ioc);
+static void bfa_iocpf_fail(struct bfa_ioc *ioc);
+static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
+static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
+static void bfa_iocpf_stop(struct bfa_ioc *ioc);
+
+/**
+ * IOCPF state machine events
+ */
+enum iocpf_event {
+	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
+	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
+	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
+	IOCPF_E_FWREADY	 	= 4,	/*!< f/w initialization done	*/
+	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
+	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
+	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
+	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
+	IOCPF_E_GETATTRFAIL	= 9,	/*!< getattr fail notice by ioc sm */
+	IOCPF_E_SEMLOCKED	= 10,   /*!< h/w semaphore is locked	*/
+	IOCPF_E_TIMEOUT		= 11,   /*!< f/w response timeout	*/
+};
+
+/**
+ * IOCPF states
+ */
+enum bfa_iocpf_state {
+	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
+	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
+	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
+	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
+	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
+	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
+	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
+	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
+	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
+};
+
+bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
+						enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
+						enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
+
+static struct bfa_sm_table iocpf_sm_table[] = {
+	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
+	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
+	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
+	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
+	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
+	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
+	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
+	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
+	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
+	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
+	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
+	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
+	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
+	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
+};
+
+/**
+ * IOC State Machine
+ */
+
+/**
+ * Beginning state. IOC uninit state.
+ */
+static void
+bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
+{
+}
+
+/**
+ * IOC is in uninit state.
+ */
+static void
+bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
+{
+	switch (event) {
+	case IOC_E_RESET:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/**
  * Reset entry actions -- initialize state machine
  */
 static void
 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
 {
-	ioc->retry_count = 0;
-	ioc->auto_recover = bfa_nw_auto_recover;
+	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
 }
 
 /**
- * Beginning state. IOC is in reset state.
+ * IOC is in reset state.
  */
 static void
 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
 {
 	switch (event) {
 	case IOC_E_ENABLE:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
 		break;
 
 	case IOC_E_DISABLE:
@@ -174,6 +267,51 @@
 		break;
 
 	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
+{
+	bfa_iocpf_enable(ioc);
+}
+
+/**
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
+{
+	switch (event) {
+	case IOC_E_ENABLED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+		break;
+
+	case IOC_E_PFAILED:
+		/* !!! fall through !!! */
+	case IOC_E_HWERROR:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		if (event != IOC_E_PFAILED)
+			bfa_iocpf_initfail(ioc);
+		break;
+
+	case IOC_E_DISABLE:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+		break;
+
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_iocpf_stop(ioc);
+		break;
+
+	case IOC_E_ENABLE:
 		break;
 
 	default:
@@ -185,229 +323,14 @@
  * Semaphore should be acquired for version check.
  */
 static void
-bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
-{
-	bfa_ioc_hw_sem_get(ioc);
-}
-
-/**
- * Awaiting h/w semaphore to continue with version check.
- */
-static void
-bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
-{
-	switch (event) {
-	case IOC_E_SEMLOCKED:
-		if (bfa_ioc_firmware_lock(ioc)) {
-			ioc->retry_count = 0;
-			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
-		} else {
-			bfa_nw_ioc_hw_sem_release(ioc);
-			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
-		}
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_ioc_disable_comp(ioc);
-		/* fall through */
-
-	case IOC_E_DETACH:
-		bfa_ioc_hw_sem_get_cancel(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
-		break;
-
-	case IOC_E_FWREADY:
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
-/**
- * Notify enable completion callback and generate mismatch AEN.
- */
-static void
-bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
-{
-	/**
-	 * Provide enable completion callback and AEN notification only once.
-	 */
-	if (ioc->retry_count == 0)
-		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-	ioc->retry_count++;
-	bfa_ioc_timer_start(ioc);
-}
-
-/**
- * Awaiting firmware version match.
- */
-static void
-bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
-{
-	switch (event) {
-	case IOC_E_TIMEOUT:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_ioc_disable_comp(ioc);
-		/* fall through */
-
-	case IOC_E_DETACH:
-		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
-		break;
-
-	case IOC_E_FWREADY:
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
-/**
- * Request for semaphore.
- */
-static void
-bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
-{
-	bfa_ioc_hw_sem_get(ioc);
-}
-
-/**
- * Awaiting semaphore for h/w initialzation.
- */
-static void
-bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
-{
-	switch (event) {
-	case IOC_E_SEMLOCKED:
-		ioc->retry_count = 0;
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_ioc_hw_sem_get_cancel(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
-static void
-bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
-{
-	bfa_ioc_timer_start(ioc);
-	bfa_ioc_reset(ioc, false);
-}
-
-/**
- * @brief
- * Hardware is being initialized. Interrupts are enabled.
- * Holding hardware semaphore lock.
- */
-static void
-bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
-{
-	switch (event) {
-	case IOC_E_FWREADY:
-		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
-		break;
-
-	case IOC_E_HWERROR:
-		bfa_ioc_timer_stop(ioc);
-		/* fall through */
-
-	case IOC_E_TIMEOUT:
-		ioc->retry_count++;
-		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
-			bfa_ioc_timer_start(ioc);
-			bfa_ioc_reset(ioc, true);
-			break;
-		}
-
-		bfa_nw_ioc_hw_sem_release(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_nw_ioc_hw_sem_release(ioc);
-		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
-static void
-bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
-{
-	bfa_ioc_timer_start(ioc);
-	bfa_ioc_send_enable(ioc);
-}
-
-/**
- * Host IOC function is being enabled, awaiting response from firmware.
- * Semaphore is acquired.
- */
-static void
-bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
-{
-	switch (event) {
-	case IOC_E_FWRSP_ENABLE:
-		bfa_ioc_timer_stop(ioc);
-		bfa_nw_ioc_hw_sem_release(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
-		break;
-
-	case IOC_E_HWERROR:
-		bfa_ioc_timer_stop(ioc);
-		/* fall through */
-
-	case IOC_E_TIMEOUT:
-		ioc->retry_count++;
-		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
-			writel(BFI_IOC_UNINIT,
-				      ioc->ioc_regs.ioc_fwstate);
-			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
-			break;
-		}
-
-		bfa_nw_ioc_hw_sem_release(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_ioc_timer_stop(ioc);
-		bfa_nw_ioc_hw_sem_release(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
-		break;
-
-	case IOC_E_FWREADY:
-		bfa_ioc_send_enable(ioc);
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
-static void
 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
 {
-	bfa_ioc_timer_start(ioc);
+	mod_timer(&ioc->ioc_timer, jiffies +
+		msecs_to_jiffies(BFA_IOC_TOV));
 	bfa_ioc_send_getattr(ioc);
 }
 
 /**
- * @brief
  * IOC configuration in progress. Timer is active.
  */
 static void
@@ -415,22 +338,28 @@
 {
 	switch (event) {
 	case IOC_E_FWRSP_GETATTR:
-		bfa_ioc_timer_stop(ioc);
+		del_timer(&ioc->ioc_timer);
 		bfa_ioc_check_attr_wwns(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
 		break;
 
+	case IOC_E_PFAILED:
 	case IOC_E_HWERROR:
-		bfa_ioc_timer_stop(ioc);
+		del_timer(&ioc->ioc_timer);
 		/* fall through */
-
 	case IOC_E_TIMEOUT:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		if (event != IOC_E_PFAILED)
+			bfa_iocpf_getattrfail(ioc);
 		break;
 
 	case IOC_E_DISABLE:
-		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+		del_timer(&ioc->ioc_timer);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+		break;
+
+	case IOC_E_ENABLE:
 		break;
 
 	default:
@@ -457,17 +386,19 @@
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
 
+	case IOC_E_PFAILED:
 	case IOC_E_HWERROR:
-	case IOC_E_FWREADY:
-		/**
-		 * Hard error or IOC recovery by other function.
-		 * Treat it same as heartbeat failure.
-		 */
 		bfa_ioc_hb_stop(ioc);
 		/* !!! fall through !!! */
-
 	case IOC_E_HBFAIL:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
+		bfa_ioc_fail_notify(ioc);
+		if (ioc->iocpf.auto_recover)
+			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		else
+			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+
+		if (event != IOC_E_PFAILED)
+			bfa_iocpf_fail(ioc);
 		break;
 
 	default:
@@ -478,31 +409,27 @@
 static void
 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
 {
-	bfa_ioc_timer_start(ioc);
-	bfa_ioc_send_disable(ioc);
+	bfa_iocpf_disable(ioc);
 }
 
 /**
- * IOC is being disabled
+ * IOC is being disabled
  */
 static void
 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
 {
 	switch (event) {
-	case IOC_E_FWRSP_DISABLE:
-		bfa_ioc_timer_stop(ioc);
+	case IOC_E_DISABLED:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
 		break;
 
 	case IOC_E_HWERROR:
-		bfa_ioc_timer_stop(ioc);
 		/*
-		 * !!! fall through !!!
+		 * No state change.  Will move to disabled state
+		 * after iocpf sm completes failure processing and
+		 * moves to disabled state.
 		 */
-
-	case IOC_E_TIMEOUT:
-		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+		bfa_iocpf_fail(ioc);
 		break;
 
 	default:
@@ -511,7 +438,7 @@
 }
 
 /**
- * IOC disable completion entry.
+ * IOC disable completion entry.
  */
 static void
 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
@@ -524,19 +451,16 @@
 {
 	switch (event) {
 	case IOC_E_ENABLE:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
 		break;
 
 	case IOC_E_DISABLE:
 		ioc->cbfn->disable_cbfn(ioc->bfa);
 		break;
 
-	case IOC_E_FWREADY:
-		break;
-
 	case IOC_E_DETACH:
-		bfa_ioc_firmware_unlock(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_iocpf_stop(ioc);
 		break;
 
 	default:
@@ -545,33 +469,45 @@
 }
 
 static void
-bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
+bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
 {
-	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-	bfa_ioc_timer_start(ioc);
 }
 
 /**
- * @brief
- * Hardware initialization failed.
+ * Hardware initialization retry.
  */
 static void
-bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
 {
 	switch (event) {
+	case IOC_E_ENABLED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+		break;
+
+	case IOC_E_PFAILED:
+	case IOC_E_HWERROR:
+		/**
+		 * Initialization retry failed.
+		 */
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		if (event != IOC_E_PFAILED)
+			bfa_iocpf_initfail(ioc);
+		break;
+
+	case IOC_E_INITFAILED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+		break;
+
+	case IOC_E_ENABLE:
+		break;
+
 	case IOC_E_DISABLE:
-		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
 
 	case IOC_E_DETACH:
-		bfa_ioc_timer_stop(ioc);
-		bfa_ioc_firmware_unlock(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
-		break;
-
-	case IOC_E_TIMEOUT:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_iocpf_stop(ioc);
 		break;
 
 	default:
@@ -580,84 +516,609 @@
 }
 
 static void
-bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
+bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
 {
-	struct list_head			*qe;
-	struct bfa_ioc_hbfail_notify *notify;
-
-	/**
-	 * Mark IOC as failed in hardware and stop firmware.
-	 */
-	bfa_ioc_lpu_stop(ioc);
-	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
-
-	/**
-	 * Notify other functions on HB failure.
-	 */
-	bfa_ioc_notify_hbfail(ioc);
-
-	/**
-	 * Notify driver and common modules registered for notification.
-	 */
-	ioc->cbfn->hbfail_cbfn(ioc->bfa);
-	list_for_each(qe, &ioc->hb_notify_q) {
-		notify = (struct bfa_ioc_hbfail_notify *) qe;
-		notify->cbfn(notify->cbarg);
-	}
-
-	/**
-	 * Flush any queued up mailbox requests.
-	 */
-	bfa_ioc_mbox_hbfail(ioc);
-
-	/**
-	 * Trigger auto-recovery after a delay.
-	 */
-	if (ioc->auto_recover)
-		mod_timer(&ioc->ioc_timer, jiffies +
-			msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
 }
 
 /**
- * @brief
- * IOC heartbeat failure.
+ * IOC failure.
  */
 static void
-bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
 {
 	switch (event) {
-
 	case IOC_E_ENABLE:
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 		break;
 
 	case IOC_E_DISABLE:
-		if (ioc->auto_recover)
-			bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
 
-	case IOC_E_TIMEOUT:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
-		break;
-
-	case IOC_E_FWREADY:
-		/**
-		 * Recovery is already initiated by other function.
-		 */
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_iocpf_stop(ioc);
 		break;
 
 	case IOC_E_HWERROR:
-		/*
-		 * HB failure notification, ignore.
-		 */
+		/* HB failure notification, ignore. */
 		break;
+
 	default:
 		bfa_sm_fault(ioc, event);
 	}
 }
 
 /**
+ * IOCPF State Machine
+ */
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
+{
+	iocpf->retry_count = 0;
+	iocpf->auto_recover = bfa_nw_auto_recover;
+}
+
+/**
+ * Beginning state. IOC is in reset state.
+ */
+static void
+bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	switch (event) {
+	case IOCPF_E_ENABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+		break;
+
+	case IOCPF_E_STOP:
+		break;
+
+	default:
+		bfa_sm_fault(iocpf->ioc, event);
+	}
+}
+
+/**
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		if (bfa_ioc_firmware_lock(ioc)) {
+			if (bfa_ioc_sync_complete(ioc)) {
+				iocpf->retry_count = 0;
+				bfa_ioc_sync_join(ioc);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			} else {
+				bfa_ioc_firmware_unlock(ioc);
+				bfa_nw_ioc_hw_sem_release(ioc);
+				mod_timer(&ioc->sem_timer, jiffies +
+					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+			}
+		} else {
+			bfa_nw_ioc_hw_sem_release(ioc);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
+		}
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		bfa_ioc_pf_disabled(ioc);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/**
+ * Notify enable completion callback
+ */
+static void
+bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
+{
+	/* Call only the first time sm enters fwmismatch state. */
+	if (iocpf->retry_count == 0)
+		bfa_ioc_pf_fwmismatch(iocpf->ioc);
+
+	iocpf->retry_count++;
+	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+		msecs_to_jiffies(BFA_IOC_TOV));
+}
+
+/**
+ * Awaiting firmware version match.
+ */
+static void
+bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_TIMEOUT:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+		break;
+
+	case IOCPF_E_DISABLE:
+		del_timer(&ioc->iocpf_timer);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		bfa_ioc_pf_disabled(ioc);
+		break;
+
+	case IOCPF_E_STOP:
+		del_timer(&ioc->iocpf_timer);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/**
+ * Request for semaphore.
+ */
+static void
+bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		if (bfa_ioc_sync_complete(ioc)) {
+			bfa_ioc_sync_join(ioc);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+		} else {
+			bfa_nw_ioc_hw_sem_release(ioc);
+			mod_timer(&ioc->sem_timer, jiffies +
+				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+		}
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
+{
+	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+		msecs_to_jiffies(BFA_IOC_TOV));
+	bfa_ioc_reset(iocpf->ioc, 0);
+}
+
+/**
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_FWREADY:
+		del_timer(&ioc->iocpf_timer);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
+		break;
+
+	case IOCPF_E_INITFAIL:
+		del_timer(&ioc->iocpf_timer);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case IOCPF_E_TIMEOUT:
+		bfa_nw_ioc_hw_sem_release(ioc);
+		if (event == IOCPF_E_TIMEOUT)
+			bfa_ioc_pf_failed(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+		break;
+
+	case IOCPF_E_DISABLE:
+		del_timer(&ioc->iocpf_timer);
+		bfa_ioc_sync_leave(ioc);
+		bfa_nw_ioc_hw_sem_release(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
+{
+	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+		msecs_to_jiffies(BFA_IOC_TOV));
+	bfa_ioc_send_enable(iocpf->ioc);
+}
+
+/**
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_FWRSP_ENABLE:
+		del_timer(&ioc->iocpf_timer);
+		bfa_nw_ioc_hw_sem_release(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
+		break;
+
+	case IOCPF_E_INITFAIL:
+		del_timer(&ioc->iocpf_timer);
+		/*
+		 * !!! fall through !!!
+		 */
+	case IOCPF_E_TIMEOUT:
+		bfa_nw_ioc_hw_sem_release(ioc);
+		if (event == IOCPF_E_TIMEOUT)
+			bfa_ioc_pf_failed(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+		break;
+
+	case IOCPF_E_DISABLE:
+		del_timer(&ioc->iocpf_timer);
+		bfa_nw_ioc_hw_sem_release(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+		break;
+
+	case IOCPF_E_FWREADY:
+		bfa_ioc_send_enable(ioc);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+static void
+bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
+{
+	bfa_ioc_pf_enabled(iocpf->ioc);
+}
+
+static void
+bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_DISABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+		break;
+
+	case IOCPF_E_GETATTRFAIL:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+		break;
+
+	case IOCPF_E_FAIL:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+		break;
+
+	case IOCPF_E_FWREADY:
+		bfa_ioc_pf_failed(ioc);
+		if (bfa_nw_ioc_is_operational(ioc))
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+		else
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
+{
+	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+		msecs_to_jiffies(BFA_IOC_TOV));
+	bfa_ioc_send_disable(iocpf->ioc);
+}
+
+/**
+ * IOC is being disabled
+ */
+static void
+bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_FWRSP_DISABLE:
+	case IOCPF_E_FWREADY:
+		del_timer(&ioc->iocpf_timer);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_FAIL:
+		del_timer(&ioc->iocpf_timer);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case IOCPF_E_TIMEOUT:
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_FWRSP_ENABLE:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * IOC hb ack request is being removed.
+ */
+static void
+bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		bfa_ioc_sync_leave(ioc);
+		bfa_nw_ioc_hw_sem_release(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
+{
+	bfa_ioc_pf_disabled(iocpf->ioc);
+}
+
+static void
+bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_ENABLE:
+		iocpf->retry_count = 0;
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		bfa_ioc_notify_fail(ioc);
+		bfa_ioc_sync_ack(ioc);
+		iocpf->retry_count++;
+		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
+			bfa_ioc_sync_leave(ioc);
+			bfa_nw_ioc_hw_sem_release(ioc);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+		} else {
+			if (bfa_ioc_sync_complete(ioc))
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			else {
+				bfa_nw_ioc_hw_sem_release(ioc);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+			}
+		}
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
+{
+	bfa_ioc_pf_initfailed(iocpf->ioc);
+}
+
+/**
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_DISABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
+{
+	/**
+	 * Mark IOC as failed in hardware and stop firmware.
+	 */
+	bfa_ioc_lpu_stop(iocpf->ioc);
+
+	/**
+	 * Flush any queued up mailbox requests.
+	 */
+	bfa_ioc_mbox_hbfail(iocpf->ioc);
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * IOC is in failed state.
+ */
+static void
+bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc *ioc = iocpf->ioc;
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		iocpf->retry_count = 0;
+		bfa_ioc_sync_ack(ioc);
+		bfa_ioc_notify_fail(ioc);
+		if (!iocpf->auto_recover) {
+			bfa_ioc_sync_leave(ioc);
+			bfa_nw_ioc_hw_sem_release(ioc);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		} else {
+			if (bfa_ioc_sync_complete(ioc))
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			else {
+				bfa_nw_ioc_hw_sem_release(ioc);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+			}
+		}
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
+{
+}
+
+/**
+ * IOC is in failed state.
+ */
+static void
+bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+	switch (event) {
+	case IOCPF_E_DISABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	default:
+		bfa_sm_fault(iocpf->ioc, event);
+	}
+}
+
+/**
  * BFA IOC private functions
  */
 
@@ -678,14 +1139,6 @@
 	}
 }
 
-void
-bfa_nw_ioc_sem_timeout(void *ioc_arg)
-{
-	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
-
-	bfa_ioc_hw_sem_get(ioc);
-}
-
 bool
 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
 {
@@ -725,7 +1178,7 @@
 	 */
 	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
 	if (r32 == 0) {
-		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
 		return;
 	}
 
@@ -865,12 +1318,6 @@
 {
 	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
 
-	/**
-	 * If bios/efi boot (flash based) -- return true
-	 */
-	if (bfa_ioc_is_optrom(ioc))
-		return true;
-
 	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
 	drv_fwhdr = (struct bfi_ioc_image_hdr *)
 		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
@@ -934,20 +1381,15 @@
 	/**
 	 * If IOC function is disabled and firmware version is same,
 	 * just re-enable IOC.
-	 *
-	 * If option rom, IOC must not be in operational state. With
-	 * convergence, IOC will be in operational state when 2nd driver
-	 * is loaded.
 	 */
-	if (ioc_fwstate == BFI_IOC_DISABLED ||
-	    (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
+	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
 		/**
 		 * When using MSI-X any pending firmware ready event should
 		 * be flushed. Otherwise MSI-X interrupts are not delivered.
 		 */
 		bfa_ioc_msgflush(ioc);
 		ioc->cbfn->reset_cbfn(ioc->bfa);
-		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
 		return;
 	}
 
@@ -1033,7 +1475,6 @@
 
 	hb_count = readl(ioc->ioc_regs.heartbeat);
 	if (ioc->hb_count == hb_count) {
-		pr_crit("Firmware heartbeat failure at %d", hb_count);
 		bfa_ioc_recover(ioc);
 		return;
 	} else {
@@ -1078,11 +1519,6 @@
 	 */
 	bfa_ioc_lmem_init(ioc);
 
-	/**
-	 * Flash based firmware boot
-	 */
-	if (bfa_ioc_is_optrom(ioc))
-		boot_type = BFI_BOOT_TYPE_FLASH;
 	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
@@ -1209,6 +1645,55 @@
 		bfa_q_deq(&mod->cmd_q, &cmd);
 }
 
+static void
+bfa_ioc_fail_notify(struct bfa_ioc *ioc)
+{
+	struct list_head		*qe;
+	struct bfa_ioc_hbfail_notify	*notify;
+
+	/**
+	 * Notify driver and common modules registered for notification.
+	 */
+	ioc->cbfn->hbfail_cbfn(ioc->bfa);
+	list_for_each(qe, &ioc->hb_notify_q) {
+		notify = (struct bfa_ioc_hbfail_notify *) qe;
+		notify->cbfn(notify->cbarg);
+	}
+}
+
+static void
+bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
+}
+
+static void
+bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
+}
+
+static void
+bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
+}
+
+static void
+bfa_ioc_pf_failed(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(ioc, IOC_E_PFAILED);
+}
+
+static void
+bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
+{
+	/**
+	 * Provide enable completion callback and AEN notification.
+	 */
+	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+}
+
 /**
  * IOC public
  */
@@ -1304,6 +1789,7 @@
 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
 {
 	union bfi_ioc_i2h_msg_u	*msg;
+	struct bfa_iocpf *iocpf = &ioc->iocpf;
 
 	msg = (union bfi_ioc_i2h_msg_u *) m;
 
@@ -1314,15 +1800,15 @@
 		break;
 
 	case BFI_IOC_I2H_READY_EVENT:
-		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
 		break;
 
 	case BFI_IOC_I2H_ENABLE_REPLY:
-		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
+		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
 		break;
 
 	case BFI_IOC_I2H_DISABLE_REPLY:
-		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
+		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
 		break;
 
 	case BFI_IOC_I2H_GETATTR_REPLY:
@@ -1348,11 +1834,13 @@
 	ioc->fcmode	= false;
 	ioc->pllinit	= false;
 	ioc->dbg_fwsave_once = true;
+	ioc->iocpf.ioc  = ioc;
 
 	bfa_ioc_mbox_attach(ioc);
 	INIT_LIST_HEAD(&ioc->hb_notify_q);
 
-	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+	bfa_fsm_send_event(ioc, IOC_E_RESET);
 }
 
 /**
@@ -1657,7 +2145,40 @@
 static enum bfa_ioc_state
 bfa_ioc_get_state(struct bfa_ioc *ioc)
 {
-	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+	enum bfa_iocpf_state iocpf_st;
+	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+
+	if (ioc_st == BFA_IOC_ENABLING ||
+		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
+
+		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+
+		switch (iocpf_st) {
+		case BFA_IOCPF_SEMWAIT:
+			ioc_st = BFA_IOC_SEMWAIT;
+			break;
+
+		case BFA_IOCPF_HWINIT:
+			ioc_st = BFA_IOC_HWINIT;
+			break;
+
+		case BFA_IOCPF_FWMISMATCH:
+			ioc_st = BFA_IOC_FWMISMATCH;
+			break;
+
+		case BFA_IOCPF_FAIL:
+			ioc_st = BFA_IOC_FAIL;
+			break;
+
+		case BFA_IOCPF_INITFAIL:
+			ioc_st = BFA_IOC_INITFAIL;
+			break;
+
+		default:
+			break;
+		}
+	}
+	return ioc_st;
 }
 
 void
@@ -1689,28 +2210,7 @@
 mac_t
 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
 {
-	/*
-	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
-	 */
-	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
-		return bfa_ioc_get_mfg_mac(ioc);
-	else
-		return ioc->attr->mac;
-}
-
-static mac_t
-bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
-{
-	mac_t	m;
-
-	m = ioc->attr->mfg_mac;
-	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
-		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
-	else
-		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
-			bfa_ioc_pcifn(ioc));
-
-	return m;
+	return ioc->attr->mac;
 }
 
 /**
@@ -1719,8 +2219,13 @@
 static void
 bfa_ioc_recover(struct bfa_ioc *ioc)
 {
-	bfa_ioc_stats(ioc, ioc_hbfails);
-	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
+	u16 bdf;
+
+	bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
+					ioc->pcidev.device_id);
+
+	pr_crit("Firmware heartbeat failure at %d", bdf);
+	BUG_ON(1);
 }
 
 static void
@@ -1728,5 +2233,61 @@
 {
 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
 		return;
+}
 
+/**
+ * @dg hal_iocpf_pvt BFA IOC PF private functions
+ * @{
+ */
+
+static void
+bfa_iocpf_enable(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
+}
+
+static void
+bfa_iocpf_disable(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
+}
+
+static void
+bfa_iocpf_fail(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
+}
+
+static void
+bfa_iocpf_initfail(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
+}
+
+static void
+bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
+}
+
+static void
+bfa_iocpf_stop(struct bfa_ioc *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+}
+
+void
+bfa_nw_iocpf_timeout(void *ioc_arg)
+{
+	struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
+
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
+}
+
+void
+bfa_nw_iocpf_sem_timeout(void *ioc_arg)
+{
+	struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
+
+	bfa_ioc_hw_sem_get(ioc);
 }
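
Taken together, the bfa_ioc.c changes split the old single state machine in two: the IOC fsm now models only the logical enable/disable/getattr lifecycle, while the new IOCPF fsm owns the hardware semaphore, the firmware version check and download, and the cross-function failure sync. The two interact only through the thin shims added above (bfa_iocpf_*() going down, bfa_ioc_pf_*() coming back up). A condensed summary of the normal enable path as implemented by the code above:

	/*
	 * IOC:   uninit --RESET--> reset --ENABLE--> enabling --ENABLED-->
	 *        getattr --FWRSP_GETATTR--> op
	 * IOCPF: reset --ENABLE--> fwcheck --SEMLOCKED--> hwinit --FWREADY-->
	 *        enabling --FWRSP_ENABLE--> ready
	 *
	 * bfa_ioc_sm_enabling_entry() posts IOCPF_E_ENABLE via bfa_iocpf_enable();
	 * bfa_iocpf_sm_ready_entry() posts IOC_E_ENABLED via bfa_ioc_pf_enabled().
	 */
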
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index a73d84e..e4974bc 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -26,16 +26,7 @@
 #define BFA_IOC_TOV		3000	/* msecs */
 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
 #define BFA_IOC_HB_TOV		500	/* msecs */
-#define BFA_IOC_HWINIT_MAX	2
-#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
-
-/**
- * Generic Scatter Gather Element used by driver
- */
-struct bfa_sge {
-	u32	sg_len;
-	void	*sg_addr;
-};
+#define BFA_IOC_HWINIT_MAX	5
 
 /**
  * PCI device information required by IOC
@@ -65,19 +56,6 @@
 #define BFI_SMEM_CT_SIZE	0x280000U	/* ! 2.5MB for catapult	*/
 
 /**
- * @brief BFA dma address assignment macro
- */
-#define bfa_dma_addr_set(dma_addr, pa)	\
-		__bfa_dma_addr_set(&dma_addr, (u64)pa)
-
-static inline void
-__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
-{
-	dma_addr->a32.addr_lo = (u32) pa;
-	dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
-}
-
-/**
  * @brief BFA dma address assignment macro. (big endian format)
  */
 #define bfa_dma_be_addr_set(dma_addr, pa)	\
@@ -105,8 +83,11 @@
 	void __iomem *host_page_num_fn;
 	void __iomem *heartbeat;
 	void __iomem *ioc_fwstate;
+	void __iomem *alt_ioc_fwstate;
 	void __iomem *ll_halt;
+	void __iomem *alt_ll_halt;
 	void __iomem *err_set;
+	void __iomem *ioc_fail_sync;
 	void __iomem *shirq_isr_next;
 	void __iomem *shirq_msk_next;
 	void __iomem *smem_page_start;
@@ -165,16 +146,22 @@
 	(__notify)->cbarg = (__cbarg);				\
 } while (0)
 
+struct bfa_iocpf {
+	bfa_fsm_t		fsm;
+	struct bfa_ioc		*ioc;
+	u32			retry_count;
+	bool			auto_recover;
+};
+
 struct bfa_ioc {
 	bfa_fsm_t		fsm;
 	struct bfa 		*bfa;
 	struct bfa_pcidev 	pcidev;
-	struct bfa_timer_mod	*timer_mod;
 	struct timer_list 	ioc_timer;
+	struct timer_list 	iocpf_timer;
 	struct timer_list 	sem_timer;
 	struct timer_list	hb_timer;
 	u32			hb_count;
-	u32			retry_count;
 	struct list_head	hb_notify_q;
 	void			*dbg_fwsave;
 	int			dbg_fwsave_len;
@@ -182,7 +169,6 @@
 	enum bfi_mclass		ioc_mc;
 	struct bfa_ioc_regs 	ioc_regs;
 	struct bfa_ioc_drv_stats stats;
-	bool			auto_recover;
 	bool			fcmode;
 	bool			ctdev;
 	bool			cna;
@@ -195,6 +181,7 @@
 	struct bfa_ioc_cbfn	*cbfn;
 	struct bfa_ioc_mbox_mod	mbox_mod;
 	struct bfa_ioc_hwif	*ioc_hwif;
+	struct bfa_iocpf	iocpf;
 };
 
 struct bfa_ioc_hwif {
@@ -205,8 +192,12 @@
 	void		(*ioc_map_port)	(struct bfa_ioc *ioc);
 	void		(*ioc_isr_mode_set)	(struct bfa_ioc *ioc,
 					bool msix);
-	void		(*ioc_notify_hbfail)	(struct bfa_ioc *ioc);
+	void		(*ioc_notify_fail)	(struct bfa_ioc *ioc);
 	void		(*ioc_ownership_reset)	(struct bfa_ioc *ioc);
+	void		(*ioc_sync_join)	(struct bfa_ioc *ioc);
+	void		(*ioc_sync_leave)	(struct bfa_ioc *ioc);
+	void		(*ioc_sync_ack)		(struct bfa_ioc *ioc);
+	bool		(*ioc_sync_complete)	(struct bfa_ioc *ioc);
 };
 
 #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
@@ -271,7 +262,6 @@
 void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
 
 void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
-
 void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
 void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
 	struct bfa_ioc_hbfail_notify *notify);
@@ -289,7 +279,8 @@
  */
 void bfa_nw_ioc_timeout(void *ioc);
 void bfa_nw_ioc_hb_check(void *ioc);
-void bfa_nw_ioc_sem_timeout(void *ioc);
+void bfa_nw_iocpf_timeout(void *ioc);
+void bfa_nw_iocpf_sem_timeout(void *ioc);
 
 /*
  * F/W Image Size & Chunk
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 121cfd6..469997c 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -22,6 +22,15 @@
 #include "bfi_ctreg.h"
 #include "bfa_defs.h"
 
+#define bfa_ioc_ct_sync_pos(__ioc)	\
+		((u32) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH		16
+#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
+		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
+
 /*
  * forward declarations
  */
@@ -30,8 +39,12 @@
 static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
-static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
 
 static struct bfa_ioc_hwif nw_hwif_ct;
@@ -48,8 +61,12 @@
 	nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
 	nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
 	nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
-	nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+	nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
 	nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
+	nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
+	nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
+	nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
 
 	ioc->ioc_hwif = &nw_hwif_ct;
 }
@@ -86,6 +103,7 @@
 	if (usecnt == 0) {
 		writel(1, ioc->ioc_regs.ioc_usage_reg);
 		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
 		return true;
 	}
 
@@ -149,12 +167,14 @@
  * Notify other functions on HB failure.
  */
 static void
-bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
+bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
 {
 	if (ioc->cna) {
 		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
 		/* Wait for halt to take effect */
 		readl(ioc->ioc_regs.ll_halt);
+		readl(ioc->ioc_regs.alt_ll_halt);
 	} else {
 		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
 		readl(ioc->ioc_regs.err_set);
@@ -206,15 +226,19 @@
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
 	} else {
 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
 	}
 
 	/*
@@ -232,6 +256,7 @@
 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
 	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
 
 	/**
 	 * sram memory access
@@ -317,6 +342,77 @@
 	bfa_nw_ioc_hw_sem_release(ioc);
 }
 
+/**
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
+
+	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+					bfa_ioc_ct_sync_pos(ioc);
+
+	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static bool
+bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+	u32 tmp_ackd;
+
+	if (sync_ackd == 0)
+		return true;
+
+	/**
+	 * The check below is to see whether any other PCI fn
+	 * has reinitialized the ASIC (reset sync_ackd bits)
+	 * and failed again while this IOC was waiting for hw
+	 * semaphore (in bfa_iocpf_sm_semwait()).
+	 */
+	tmp_ackd = sync_ackd;
+	if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
+			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+	if (sync_reqd == sync_ackd) {
+		writel(bfa_ioc_ct_clear_sync_ackd(r32),
+				ioc->ioc_regs.ioc_fail_sync);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+		return true;
+	}
+
+	/**
+	 * If another PCI fn reinitialized and failed again while
+	 * this IOC was waiting for hw sem, the sync_ackd bit for
+	 * this IOC needs to be set again to allow reinitialization.
+	 */
+	if (tmp_ackd != sync_ackd)
+		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
+
+	return false;
+}
+
 static enum bfa_status
 bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
 {
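
The new ioc_fail_sync register (HOST_SEM5_INFO_REG, defined as BFA_IOC_FAIL_SYNC in bfi_ctreg.h below) carries the cross-function failure handshake: the upper 16 bits record which PCI functions have joined initialization ("sync required"), the lower 16 bits record which of those have acknowledged a failure ("sync acked"), one bit per function. A worked example for two functions sharing the ASIC, with illustrative register values:

	/*
	 *   fn0 sync_join():  reg = 0x00010000   (bit 16: fn0 required)
	 *   fn2 sync_join():  reg = 0x00050000   (bits 16, 18 required)
	 *   fn0 sync_ack():   reg = 0x00050001   (bit 0: fn0 acked)
	 *   fn2 sync_ack():   reg = 0x00050005   (bits 0, 2 acked)
	 *
	 * bfa_ioc_ct_sync_complete() now sees reqd (0x0005) == ackd (0x0005),
	 * clears the acked half, marks both fwstate registers BFI_IOC_FAIL and
	 * returns true, so the caller may go ahead and reinitialize the ASIC.
	 */
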
diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h
index 404ea351..5130d79 100644
--- a/drivers/net/bna/bfi_ctreg.h
+++ b/drivers/net/bna/bfi_ctreg.h
@@ -535,6 +535,7 @@
 #define BFA_IOC1_HBEAT_REG		HOST_SEM2_INFO_REG
 #define BFA_IOC1_STATE_REG		HOST_SEM3_INFO_REG
 #define BFA_FW_USE_COUNT		 HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		HOST_SEM5_INFO_REG
 
 #define CPE_DEPTH_Q(__n) \
 	(CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
@@ -552,22 +553,30 @@
 	(RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
 #define RME_CI_PTR_Q(__n) \
 	(RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+#define HQM_QSET_RXQ_DRBL_P0(__n) \
+	(HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
+		(HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+#define HQM_QSET_TXQ_DRBL_P0(__n) \
+	(HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
+		(HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+#define HQM_QSET_IB_DRBL_1_P0(__n) \
+	(HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
+		(HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+#define HQM_QSET_IB_DRBL_2_P0(__n) \
+	(HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
+		(HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+#define HQM_QSET_RXQ_DRBL_P1(__n) \
+	(HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
+		(HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+#define HQM_QSET_TXQ_DRBL_P1(__n) \
+	(HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
+		(HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+#define HQM_QSET_IB_DRBL_1_P1(__n) \
+	(HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
+		(HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+#define HQM_QSET_IB_DRBL_2_P1(__n) \
+	(HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
+		(HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
 
 #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
 #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
index df6676b..a287f89 100644
--- a/drivers/net/bna/bna.h
+++ b/drivers/net/bna/bna.h
@@ -32,8 +32,6 @@
 /* Log string size */
 #define BNA_MESSAGE_SIZE		256
 
-#define bna_device_timer(_dev)		bfa_timer_beat(&((_dev)->timer_mod))
-
 /* MBOX API for PORT, TX, RX */
 #define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg)		\
 do {									\
@@ -390,8 +388,8 @@
 
 /* API for RX */
 int bna_port_mtu_get(struct bna_port *port);
-void bna_llport_admin_up(struct bna_llport *llport);
-void bna_llport_admin_down(struct bna_llport *llport);
+void bna_llport_rx_started(struct bna_llport *llport);
+void bna_llport_rx_stopped(struct bna_llport *llport);
 
 /* API for BNAD */
 void bna_port_enable(struct bna_port *port);
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index 07b2659..e152747 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -59,14 +59,70 @@
 	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
 }
 
+static inline int
+llport_can_be_up(struct bna_llport *llport)
+{
+	int ready = 0;
+	if (llport->type == BNA_PORT_T_REGULAR)
+		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
+			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
+			 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
+	else
+		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
+			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
+			 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
+	return ready;
+}
+
+#define llport_is_up llport_can_be_up
+
+enum bna_llport_event {
+	LLPORT_E_START			= 1,
+	LLPORT_E_STOP			= 2,
+	LLPORT_E_FAIL			= 3,
+	LLPORT_E_UP			= 4,
+	LLPORT_E_DOWN			= 5,
+	LLPORT_E_FWRESP_UP_OK		= 6,
+	LLPORT_E_FWRESP_UP_FAIL		= 7,
+	LLPORT_E_FWRESP_DOWN		= 8
+};
+
+static void
+bna_llport_cb_port_enabled(struct bna_llport *llport)
+{
+	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
+
+	if (llport_can_be_up(llport))
+		bfa_fsm_send_event(llport, LLPORT_E_UP);
+}
+
+static void
+bna_llport_cb_port_disabled(struct bna_llport *llport)
+{
+	int llport_up = llport_is_up(llport);
+
+	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
+
+	if (llport_up)
+		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
+}
+
 /**
  * MBOX
  */
 static int
 bna_is_aen(u8 msg_id)
 {
-	return msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
-	       msg_id == BFI_LL_I2H_LINK_UP_AEN;
+	switch (msg_id) {
+	case BFI_LL_I2H_LINK_DOWN_AEN:
+	case BFI_LL_I2H_LINK_UP_AEN:
+	case BFI_LL_I2H_PORT_ENABLE_AEN:
+	case BFI_LL_I2H_PORT_DISABLE_AEN:
+		return 1;
+
+	default:
+		return 0;
+	}
 }
 
 static void
@@ -81,6 +137,12 @@
 	case BFI_LL_I2H_LINK_DOWN_AEN:
 		bna_port_cb_link_down(&bna->port, aen->reason);
 		break;
+	case BFI_LL_I2H_PORT_ENABLE_AEN:
+		bna_llport_cb_port_enabled(&bna->port.llport);
+		break;
+	case BFI_LL_I2H_PORT_DISABLE_AEN:
+		bna_llport_cb_port_disabled(&bna->port.llport);
+		break;
 	default:
 		break;
 	}
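
The readiness rule introduced above combines three llport flags, and the polarity of BNA_LLPORT_F_PORT_ENABLED flips for non-regular (loopback) ports. A hedged sketch of how the check behaves for a regular port (driver types from bna_types.h assumed; the example function is illustrative):

/* Sketch only: a regular port is ready when ADMIN_UP, RX_STARTED and
 * PORT_ENABLED are all set; a loopback port instead needs PORT_ENABLED
 * to be clear.
 */
static int llport_ready_example(struct bna_llport *llport)
{
	llport->type  = BNA_PORT_T_REGULAR;
	llport->flags = BNA_LLPORT_F_ADMIN_UP | BNA_LLPORT_F_RX_STARTED |
			BNA_LLPORT_F_PORT_ENABLED;

	return llport_can_be_up(llport);	/* nonzero; clearing any flag -> 0 */
}
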
@@ -251,16 +313,6 @@
 static void bna_llport_stop(struct bna_llport *llport);
 static void bna_llport_fail(struct bna_llport *llport);
 
-enum bna_llport_event {
-	LLPORT_E_START			= 1,
-	LLPORT_E_STOP			= 2,
-	LLPORT_E_FAIL			= 3,
-	LLPORT_E_UP			= 4,
-	LLPORT_E_DOWN			= 5,
-	LLPORT_E_FWRESP_UP		= 6,
-	LLPORT_E_FWRESP_DOWN		= 7
-};
-
 enum bna_llport_state {
 	BNA_LLPORT_STOPPED		= 1,
 	BNA_LLPORT_DOWN			= 2,
@@ -320,7 +372,7 @@
 		/* No-op */
 		break;
 
-	case LLPORT_E_FWRESP_UP:
+	case LLPORT_E_FWRESP_UP_OK:
 	case LLPORT_E_FWRESP_DOWN:
 		/**
 		 * These events are received due to flushing of mbox when
@@ -366,6 +418,7 @@
 static void
 bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
 {
+	BUG_ON(!llport_can_be_up(llport));
 	/**
 	 * NOTE: Do not call bna_fw_llport_up() here. That will over step
 	 * mbox due to down_resp_wait -> up_resp_wait transition on event
@@ -390,10 +443,14 @@
 		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
 		break;
 
-	case LLPORT_E_FWRESP_UP:
+	case LLPORT_E_FWRESP_UP_OK:
 		bfa_fsm_set_state(llport, bna_llport_sm_up);
 		break;
 
+	case LLPORT_E_FWRESP_UP_FAIL:
+		bfa_fsm_set_state(llport, bna_llport_sm_down);
+		break;
+
 	case LLPORT_E_FWRESP_DOWN:
 		/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
 		bna_fw_llport_up(llport);
@@ -431,11 +488,12 @@
 		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
 		break;
 
-	case LLPORT_E_FWRESP_UP:
+	case LLPORT_E_FWRESP_UP_OK:
 		/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
 		bna_fw_llport_down(llport);
 		break;
 
+	case LLPORT_E_FWRESP_UP_FAIL:
 	case LLPORT_E_FWRESP_DOWN:
 		bfa_fsm_set_state(llport, bna_llport_sm_down);
 		break;
@@ -496,11 +554,12 @@
 		/* No-op */
 		break;
 
-	case LLPORT_E_FWRESP_UP:
+	case LLPORT_E_FWRESP_UP_OK:
 		/* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
 		bna_fw_llport_down(llport);
 		break;
 
+	case LLPORT_E_FWRESP_UP_FAIL:
 	case LLPORT_E_FWRESP_DOWN:
 		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
 		break;
@@ -541,7 +600,14 @@
 	struct bna_llport *llport = (struct bna_llport *)arg;
 
 	bfa_q_qe_init(&llport->mbox_qe.qe);
-	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
+	if (status == BFI_LL_CMD_FAIL) {
+		if (llport->type == BNA_PORT_T_REGULAR)
+			llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
+		else
+			llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
+		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
+	} else
+		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
 }
 
 static void
@@ -588,13 +654,14 @@
 static void
 bna_llport_init(struct bna_llport *llport, struct bna *bna)
 {
-	llport->flags |= BNA_LLPORT_F_ENABLED;
+	llport->flags |= BNA_LLPORT_F_ADMIN_UP;
+	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
 	llport->type = BNA_PORT_T_REGULAR;
 	llport->bna = bna;
 
 	llport->link_status = BNA_LINK_DOWN;
 
-	llport->admin_up_count = 0;
+	llport->rx_started_count = 0;
 
 	llport->stop_cbfn = NULL;
 
@@ -606,7 +673,8 @@
 static void
 bna_llport_uninit(struct bna_llport *llport)
 {
-	llport->flags &= ~BNA_LLPORT_F_ENABLED;
+	llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
+	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
 
 	llport->bna = NULL;
 }
@@ -628,6 +696,8 @@
 static void
 bna_llport_fail(struct bna_llport *llport)
 {
+	/* Reset the physical port status to enabled */
+	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
 	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
 }
 
@@ -638,25 +708,31 @@
 }
 
 void
-bna_llport_admin_up(struct bna_llport *llport)
+bna_llport_rx_started(struct bna_llport *llport)
 {
-	llport->admin_up_count++;
+	llport->rx_started_count++;
 
-	if (llport->admin_up_count == 1) {
-		llport->flags |= BNA_LLPORT_F_RX_ENABLED;
-		if (llport->flags & BNA_LLPORT_F_ENABLED)
+	if (llport->rx_started_count == 1) {
+
+		llport->flags |= BNA_LLPORT_F_RX_STARTED;
+
+		if (llport_can_be_up(llport))
 			bfa_fsm_send_event(llport, LLPORT_E_UP);
 	}
 }
 
 void
-bna_llport_admin_down(struct bna_llport *llport)
+bna_llport_rx_stopped(struct bna_llport *llport)
 {
-	llport->admin_up_count--;
+	int llport_up = llport_is_up(llport);
 
-	if (llport->admin_up_count == 0) {
-		llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
-		if (llport->flags & BNA_LLPORT_F_ENABLED)
+	llport->rx_started_count--;
+
+	if (llport->rx_started_count == 0) {
+
+		llport->flags &= ~BNA_LLPORT_F_RX_STARTED;
+
+		if (llport_up)
 			bfa_fsm_send_event(llport, LLPORT_E_DOWN);
 	}
 }
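
bna_llport_rx_started() and bna_llport_rx_stopped() follow the usual first-start/last-stop counting: only the 0 -> 1 and 1 -> 0 transitions of rx_started_count can raise LLPORT_E_UP or LLPORT_E_DOWN, and both are further gated by the readiness/up checks. A generic sketch of that pattern (names are illustrative, not the driver's):

/* Sketch only: fire callbacks on the first get and the last put. */
struct refgate {
	int count;
	void (*first)(void *arg);	/* 0 -> 1 */
	void (*last)(void *arg);	/* 1 -> 0 */
};

static void refgate_get(struct refgate *g, void *arg)
{
	if (++g->count == 1)
		g->first(arg);
}

static void refgate_put(struct refgate *g, void *arg)
{
	if (--g->count == 0)
		g->last(arg);
}
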
@@ -2056,37 +2132,6 @@
 	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
 }
 
-static void
-__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
-{
-	struct bna_rx_fndb_ram *rx_fndb_ram;
-	u32 ctrl_flags;
-	int i;
-
-	rx_fndb_ram = (struct bna_rx_fndb_ram *)
-			BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
-			RX_FNDB_RAM_BASE_OFFSET);
-
-	for (i = 0; i < BFI_MAX_RXF; i++) {
-		if (status == BNA_STATUS_T_ENABLED) {
-			if (i == rxf->rxf_id)
-				continue;
-
-			ctrl_flags =
-				readl(&rx_fndb_ram[i].control_flags);
-			ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
-			writel(ctrl_flags,
-						&rx_fndb_ram[i].control_flags);
-		} else {
-			ctrl_flags =
-				readl(&rx_fndb_ram[i].control_flags);
-			ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
-			writel(ctrl_flags,
-						&rx_fndb_ram[i].control_flags);
-		}
-	}
-}
-
 int
 rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
 {
@@ -2153,46 +2198,6 @@
 }
 
 int
-rxf_process_packet_filter_default(struct bna_rxf *rxf)
-{
-	struct bna *bna = rxf->rx->bna;
-
-	/* Enable/disable default mode */
-	if (is_default_enable(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask)) {
-		/* move default configuration from pending -> active */
-		default_inactive(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask);
-		rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
-
-		/* Disable VLAN filter to allow all VLANs */
-		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
-		/* Redirect all other RxF vlan filtering to this one */
-		__rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
-		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
-				BNA_STATUS_T_ENABLED);
-		return 1;
-	} else if (is_default_disable(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask)) {
-		/* move default configuration from pending -> active */
-		default_inactive(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask);
-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
-		bna->rxf_default_id = BFI_MAX_RXF;
-
-		/* Revert VLAN filter */
-		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
-		/* Stop RxF vlan filter table redirection */
-		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
-		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
-				BNA_STATUS_T_DISABLED);
-		return 1;
-	}
-
-	return 0;
-}
-
-int
 rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
 {
 	/* Enable/disable allmulti mode */
@@ -2289,48 +2294,6 @@
 }
 
 int
-rxf_clear_packet_filter_default(struct bna_rxf *rxf)
-{
-	struct bna *bna = rxf->rx->bna;
-
-	/* 8. Execute pending default mode disable command */
-	if (is_default_disable(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask)) {
-		/* move default configuration from pending -> active */
-		default_inactive(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask);
-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
-		bna->rxf_default_id = BFI_MAX_RXF;
-
-		/* Revert VLAN filter */
-		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
-		/* Stop RxF vlan filter table redirection */
-		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
-		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
-				BNA_STATUS_T_DISABLED);
-		return 1;
-	}
-
-	/* 9. Clear active default mode; move it to pending enable */
-	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
-		/* move default configuration from active -> pending */
-		default_enable(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask);
-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
-
-		/* Revert VLAN filter */
-		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
-		/* Stop RxF vlan filter table redirection */
-		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
-		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
-				BNA_STATUS_T_DISABLED);
-		return 1;
-	}
-
-	return 0;
-}
-
-int
 rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
 {
 	/* 10. Execute pending allmulti mode disable command */
@@ -2405,28 +2368,6 @@
 }
 
 void
-rxf_reset_packet_filter_default(struct bna_rxf *rxf)
-{
-	struct bna *bna = rxf->rx->bna;
-
-	/* 8. Clear pending default mode disable */
-	if (is_default_disable(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask)) {
-		default_inactive(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask);
-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
-		bna->rxf_default_id = BFI_MAX_RXF;
-	}
-
-	/* 9. Move default mode config from active -> pending */
-	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
-		default_enable(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask);
-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
-	}
-}
-
-void
 rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
 {
 	/* 10. Clear pending allmulti mode disable */
@@ -2523,76 +2464,6 @@
  *	1 = need h/w change
  */
 static int
-rxf_default_enable(struct bna_rxf *rxf)
-{
-	struct bna *bna = rxf->rx->bna;
-	int ret = 0;
-
-	/* There can not be any pending disable command */
-
-	/* Do nothing if pending enable or already enabled */
-	if (is_default_enable(rxf->rxmode_pending,
-		rxf->rxmode_pending_bitmask) ||
-		(rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
-		/* Schedule enable */
-	} else {
-		/* Default mode should not be active in the system */
-		default_enable(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask);
-		bna->rxf_default_id = rxf->rxf_id;
-		ret = 1;
-	}
-
-	return ret;
-}
-
-/**
- * Should only be called by bna_rxf_mode_set.
- * Helps deciding if h/w configuration is needed or not.
- *  Returns:
- *	0 = no h/w change
- *	1 = need h/w change
- */
-static int
-rxf_default_disable(struct bna_rxf *rxf)
-{
-	struct bna *bna = rxf->rx->bna;
-	int ret = 0;
-
-	/* There can not be any pending disable */
-
-	/* Turn off pending enable command , if any */
-	if (is_default_enable(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask)) {
-		/* Promisc mode should not be active */
-		/* system default state should be pending */
-		default_inactive(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask);
-		/* Remove the default state from the system */
-		bna->rxf_default_id = BFI_MAX_RXF;
-
-	/* Schedule disable */
-	} else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
-		/* Default mode should be active in the system */
-		default_disable(rxf->rxmode_pending,
-				rxf->rxmode_pending_bitmask);
-		ret = 1;
-
-	/* Do nothing if already disabled */
-	} else {
-	}
-
-	return ret;
-}
-
-/**
- * Should only be called by bna_rxf_mode_set.
- * Helps deciding if h/w configuration is needed or not.
- *  Returns:
- *	0 = no h/w change
- *	1 = need h/w change
- */
-static int
 rxf_allmulti_enable(struct bna_rxf *rxf)
 {
 	int ret = 0;
@@ -2654,38 +2525,13 @@
 	struct bna_rxf *rxf = &rx->rxf;
 	int need_hw_config = 0;
 
-	/* Error checks */
+	/* Process the commands */
 
 	if (is_promisc_enable(new_mode, bitmask)) {
 		/* If promisc mode is already enabled elsewhere in the system */
 		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
 			(rx->bna->rxf_promisc_id != rxf->rxf_id))
 			goto err_return;
-
-		/* If default mode is already enabled in the system */
-		if (rx->bna->rxf_default_id != BFI_MAX_RXF)
-			goto err_return;
-
-		/* Trying to enable promiscuous and default mode together */
-		if (is_default_enable(new_mode, bitmask))
-			goto err_return;
-	}
-
-	if (is_default_enable(new_mode, bitmask)) {
-		/* If default mode is already enabled elsewhere in the system */
-		if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
-			(rx->bna->rxf_default_id != rxf->rxf_id)) {
-				goto err_return;
-		}
-
-		/* If promiscuous mode is already enabled in the system */
-		if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
-			goto err_return;
-	}
-
-	/* Process the commands */
-
-	if (is_promisc_enable(new_mode, bitmask)) {
 		if (rxf_promisc_enable(rxf))
 			need_hw_config = 1;
 	} else if (is_promisc_disable(new_mode, bitmask)) {
@@ -2693,14 +2539,6 @@
 			need_hw_config = 1;
 	}
 
-	if (is_default_enable(new_mode, bitmask)) {
-		if (rxf_default_enable(rxf))
-			need_hw_config = 1;
-	} else if (is_default_disable(new_mode, bitmask)) {
-		if (rxf_default_disable(rxf))
-			need_hw_config = 1;
-	}
-
 	if (is_allmulti_enable(new_mode, bitmask)) {
 		if (rxf_allmulti_enable(rxf))
 			need_hw_config = 1;
@@ -3126,7 +2964,6 @@
 
 	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
 
-	bna->rxf_default_id = BFI_MAX_RXF;
 	bna->rxf_promisc_id = BFI_MAX_RXF;
 
 	/* Mbox q element for posting stat request to f/w */
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
index ad93fdb..58c7664 100644
--- a/drivers/net/bna/bna_txrx.c
+++ b/drivers/net/bna/bna_txrx.c
@@ -1226,8 +1226,7 @@
 	/* Apply the VLAN filter */
 	if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
 		rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
-		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
-			!(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
+		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
 			__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
 	}
 
@@ -1276,9 +1275,6 @@
 	if (rxf_process_packet_filter_promisc(rxf))
 		return 1;
 
-	if (rxf_process_packet_filter_default(rxf))
-		return 1;
-
 	if (rxf_process_packet_filter_allmulti(rxf))
 		return 1;
 
@@ -1340,9 +1336,6 @@
 	if (rxf_clear_packet_filter_promisc(rxf))
 		return 1;
 
-	if (rxf_clear_packet_filter_default(rxf))
-		return 1;
-
 	if (rxf_clear_packet_filter_allmulti(rxf))
 		return 1;
 
@@ -1389,8 +1382,6 @@
 
 	rxf_reset_packet_filter_promisc(rxf);
 
-	rxf_reset_packet_filter_default(rxf);
-
 	rxf_reset_packet_filter_allmulti(rxf);
 }
 
@@ -1441,12 +1432,16 @@
 	memset(rxf->vlan_filter_table, 0,
 			(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
 
+	/* Set up VLAN 0 for pure priority tagged packets */
+	rxf->vlan_filter_table[0] |= 1;
+
 	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 }
 
 static void
 bna_rxf_uninit(struct bna_rxf *rxf)
 {
+	struct bna *bna = rxf->rx->bna;
 	struct bna_mac *mac;
 
 	bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
@@ -1473,6 +1468,27 @@
 		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
 	}
 
+	/* Turn off pending promisc mode */
+	if (is_promisc_enable(rxf->rxmode_pending,
+				rxf->rxmode_pending_bitmask)) {
+		/* system promisc state should be pending */
+		BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
+		promisc_inactive(rxf->rxmode_pending,
+				rxf->rxmode_pending_bitmask);
+		bna->rxf_promisc_id = BFI_MAX_RXF;
+	}
+	/* Promisc mode should not be active */
+	BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
+
+	/* Turn off pending all-multi mode */
+	if (is_allmulti_enable(rxf->rxmode_pending,
+				rxf->rxmode_pending_bitmask)) {
+		allmulti_inactive(rxf->rxmode_pending,
+				rxf->rxmode_pending_bitmask);
+	}
+	/* Allmulti mode should not be active */
+	BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
+
 	rxf->rx = NULL;
 }
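
bna_rxf_uninit() now backs out any promisc or allmulti request that is still pending and asserts that neither mode is active when the function is torn down. The pending/active split works roughly as sketched below; the driver actually keeps separate enable/disable requests in a pending bitmask, which this simplified sketch collapses into a single pending word (all names illustrative):

/* Sketch only: a mode is staged as pending and becomes active once
 * firmware acks it; teardown simply drops whatever is still pending.
 */
struct mode_state {
	u32 pending;
	u32 active;
};

static void mode_request(struct mode_state *s, u32 mode)
{
	s->pending |= mode;
}

static void mode_commit(struct mode_state *s, u32 mode)
{
	s->pending &= ~mode;
	s->active |= mode;
}

static void mode_teardown(struct mode_state *s, u32 mode)
{
	s->pending &= ~mode;
}
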
 
@@ -1947,7 +1963,7 @@
 		bna_ib_ack(&rxp->cq.ib->door_bell, 0);
 	}
 
-	bna_llport_admin_up(&rx->bna->port.llport);
+	bna_llport_rx_started(&rx->bna->port.llport);
 }
 
 void
@@ -1955,13 +1971,13 @@
 {
 	switch (event) {
 	case RX_E_FAIL:
-		bna_llport_admin_down(&rx->bna->port.llport);
+		bna_llport_rx_stopped(&rx->bna->port.llport);
 		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
 		rx_ib_fail(rx);
 		bna_rxf_fail(&rx->rxf);
 		break;
 	case RX_E_STOP:
-		bna_llport_admin_down(&rx->bna->port.llport);
+		bna_llport_rx_stopped(&rx->bna->port.llport);
 		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
 		break;
 	default:
@@ -3373,7 +3389,7 @@
 
 	txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
 	txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
-			(txq->priority & 0x3));
+			(txq->priority & 0x7));
 	txq_cfg.wvc_n_cquota_n_rquota =
 			((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
 			(BFI_TX_MAX_WRR_QUOTA & 0xfff));
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
index 6877310..b9c134f 100644
--- a/drivers/net/bna/bna_types.h
+++ b/drivers/net/bna/bna_types.h
@@ -165,8 +165,7 @@
 
 enum bna_rxmode {
 	BNA_RXMODE_PROMISC 	= 1,
-	BNA_RXMODE_DEFAULT 	= 2,
-	BNA_RXMODE_ALLMULTI 	= 4
+	BNA_RXMODE_ALLMULTI 	= 2
 };
 
 enum bna_rx_event {
@@ -249,8 +248,9 @@
 };
 
 enum bna_llport_flags {
-	BNA_LLPORT_F_ENABLED 	= 1,
-	BNA_LLPORT_F_RX_ENABLED	= 2
+	BNA_LLPORT_F_ADMIN_UP	 	= 1,
+	BNA_LLPORT_F_PORT_ENABLED	= 2,
+	BNA_LLPORT_F_RX_STARTED		= 4
 };
 
 enum bna_port_flags {
@@ -405,7 +405,7 @@
 
 	enum bna_link_status link_status;
 
-	int			admin_up_count;
+	int			rx_started_count;
 
 	void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
 
@@ -1117,7 +1117,6 @@
 
 	struct bna_rit_mod rit_mod;
 
-	int			rxf_default_id;
 	int			rxf_promisc_id;
 
 	struct bna_mbox_qe mbox_qe;
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 7e839b9..fad9126 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -70,6 +70,8 @@
 	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
 } while (0)
 
+#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
+
 /*
  * Reinitialize completions in CQ, once Rx is taken down
  */
@@ -107,7 +109,7 @@
 bnad_free_all_txbufs(struct bnad *bnad,
 		 struct bna_tcb *tcb)
 {
-	u16 		unmap_cons;
+	u32 		unmap_cons;
 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
 	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff 		*skb = NULL;
@@ -130,7 +132,9 @@
 						PCI_DMA_TODEVICE);
 
 		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
-		unmap_cons++;
+		if (++unmap_cons >= unmap_q->q_depth)
+			break;
+
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			pci_unmap_page(bnad->pcidev,
 				       pci_unmap_addr(&unmap_array[unmap_cons],
@@ -139,7 +143,8 @@
 				       PCI_DMA_TODEVICE);
 			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
 					   0);
-			unmap_cons++;
+			if (++unmap_cons >= unmap_q->q_depth)
+				break;
 		}
 		dev_kfree_skb_any(skb);
 	}
@@ -167,11 +172,11 @@
 	/*
 	 * Just return if TX is stopped. This check is useful
 	 * when bnad_free_txbufs() runs out of a tasklet scheduled
-	 * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit
+	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
 	 * but this routine runs actually after the cleanup has been
 	 * executed.
 	 */
-	if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return 0;
 
 	updated_hw_cons = *(tcb->hw_consumer_index);
@@ -239,7 +244,7 @@
 {
 	struct bnad *bnad = (struct bnad *)bnad_ptr;
 	struct bna_tcb *tcb;
-	u32 		acked;
+	u32 		acked = 0;
 	int			i, j;
 
 	for (i = 0; i < bnad->num_tx; i++) {
@@ -252,10 +257,26 @@
 				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
 						  &tcb->flags))) {
 				acked = bnad_free_txbufs(bnad, tcb);
-				bna_ib_ack(tcb->i_dbell, acked);
+				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
+					&tcb->flags)))
+					bna_ib_ack(tcb->i_dbell, acked);
 				smp_mb__before_clear_bit();
 				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 			}
+			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
+						&tcb->flags)))
+				continue;
+			if (netif_queue_stopped(bnad->netdev)) {
+				if (acked && netif_carrier_ok(bnad->netdev) &&
+					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+						BNAD_NETIF_WAKE_THRESHOLD) {
+					netif_wake_queue(bnad->netdev);
+					/* TODO */
+					/* Counters for individual TxQs? */
+					BNAD_UPDATE_CTR(bnad,
+						netif_queue_wakeup);
+				}
+			}
 		}
 	}
 }
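
The wake-up path above only restarts the queue once the ring has at least BNAD_NETIF_WAKE_THRESHOLD free entries. Assuming BNA_QE_FREE_CNT is the usual power-of-two ring arithmetic with one slot kept empty (an assumption, not the macro's literal definition), the quantity being compared is:

/* Sketch only: free entries in a power-of-two ring that keeps one slot
 * unused to tell a full ring from an empty one.
 */
static inline u32 ring_free_cnt(u32 producer, u32 consumer, u32 depth)
{
	return (consumer - producer - 1) & (depth - 1);
}
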
@@ -264,7 +285,7 @@
 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct net_device *netdev = bnad->netdev;
-	u32 sent;
+	u32 sent = 0;
 
 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
 		return 0;
@@ -275,12 +296,15 @@
 		    netif_carrier_ok(netdev) &&
 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
 				    BNAD_NETIF_WAKE_THRESHOLD) {
-			netif_wake_queue(netdev);
-			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
+				netif_wake_queue(netdev);
+				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+			}
 		}
+	}
+
+	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 		bna_ib_ack(tcb->i_dbell, sent);
-	} else
-		bna_ib_ack(tcb->i_dbell, 0);
 
 	smp_mb__before_clear_bit();
 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
@@ -313,25 +337,24 @@
 }
 
 static void
-bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
 	struct sk_buff *skb;
+	int unmap_cons;
 
 	unmap_q = rcb->unmap_q;
-	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
-		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
-		BUG_ON(!(skb));
-		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
+		skb = unmap_q->unmap_array[unmap_cons].skb;
+		if (!skb)
+			continue;
+		unmap_q->unmap_array[unmap_cons].skb = NULL;
 		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_q->consumer_index],
-					dma_addr), rcb->rxq->buffer_size +
-					NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
+					unmap_array[unmap_cons],
+					dma_addr), rcb->rxq->buffer_size,
+					PCI_DMA_FROMDEVICE);
 		dev_kfree_skb(skb);
-		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
-		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
 	}
-
 	bnad_reset_rcb(bnad, rcb);
 }
 
@@ -385,43 +408,11 @@
 		unmap_q->producer_index = unmap_prod;
 		rcb->producer_index = unmap_prod;
 		smp_mb();
-		bna_rxq_prod_indx_doorbell(rcb);
+		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
+			bna_rxq_prod_indx_doorbell(rcb);
 	}
 }
 
-/*
- * Locking is required in the enable path
- * because it is called from a napi poll
- * context, where the bna_lock is not held
- * unlike the IRQ context.
- */
-static void
-bnad_enable_txrx_irqs(struct bnad *bnad)
-{
-	struct bna_tcb *tcb;
-	struct bna_ccb *ccb;
-	int i, j;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++) {
-			tcb = bnad->tx_info[i].tcb[j];
-			bna_ib_coalescing_timer_set(tcb->i_dbell,
-				tcb->txq->ib->ib_config.coalescing_timeo);
-			bna_ib_ack(tcb->i_dbell, 0);
-		}
-	}
-
-	for (i = 0; i < bnad->num_rx; i++) {
-		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
-			ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
-			bnad_enable_rx_irq_unsafe(ccb);
-		}
-	}
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-}
-
 static inline void
 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
 {
@@ -448,6 +439,9 @@
 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 
+	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
+		return 0;
+
 	prefetch(bnad->netdev);
 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
 			    wi_range);
@@ -544,12 +538,15 @@
 	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
 
 	if (likely(ccb)) {
-		bna_ib_ack(ccb->i_dbell, packets);
+		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+			bna_ib_ack(ccb->i_dbell, packets);
 		bnad_refill_rxq(bnad, ccb->rcb[0]);
 		if (ccb->rcb[1])
 			bnad_refill_rxq(bnad, ccb->rcb[1]);
-	} else
-		bna_ib_ack(ccb->i_dbell, 0);
+	} else {
+		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+			bna_ib_ack(ccb->i_dbell, 0);
+	}
 
 	return packets;
 }
@@ -557,6 +554,9 @@
 static void
 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
 {
+	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+		return;
+
 	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
 	bna_ib_ack(ccb->i_dbell, 0);
 }
@@ -566,7 +566,8 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
+	/* Because of polling context */
+	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bnad_enable_rx_irq_unsafe(ccb);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
@@ -575,9 +576,11 @@
 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
-	if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
+	struct napi_struct *napi = &rx_ctrl->napi;
+
+	if (likely(napi_schedule_prep(napi))) {
 		bnad_disable_rx_irq(bnad, ccb);
-		__napi_schedule((&rx_ctrl->napi));
+		__napi_schedule(napi);
 	}
 	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
 }
@@ -602,12 +605,11 @@
 {
 	u32 intr_status;
 	unsigned long flags;
-	struct net_device *netdev = data;
-	struct bnad *bnad;
+	struct bnad *bnad = (struct bnad *)data;
 
-	bnad = netdev_priv(netdev);
+	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+		return IRQ_HANDLED;
 
-	/* BNA_ISR_GET(bnad); Inc Ref count */
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
 	bna_intr_status_get(&bnad->bna, intr_status);
@@ -617,7 +619,6 @@
 
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-	/* BNAD_ISR_PUT(bnad); Dec Ref count */
 	return IRQ_HANDLED;
 }
 
@@ -627,8 +628,7 @@
 	int i, j;
 	u32 intr_status;
 	unsigned long flags;
-	struct net_device *netdev = data;
-	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad *bnad = (struct bnad *)data;
 	struct bnad_rx_info *rx_info;
 	struct bnad_rx_ctrl *rx_ctrl;
 
@@ -642,16 +642,21 @@
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
-	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
 		bna_mbox_handler(&bnad->bna, intr_status);
-		if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
-			spin_unlock_irqrestore(&bnad->bna_lock, flags);
-			goto done;
-		}
-	}
+
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
+	if (!BNA_IS_INTX_DATA_INTR(intr_status))
+		return IRQ_HANDLED;
+
 	/* Process data interrupts */
+	/* Tx processing */
+	for (i = 0; i < bnad->num_tx; i++) {
+		for (j = 0; j < bnad->num_txq_per_tx; j++)
+			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+	}
+	/* Rx processing */
 	for (i = 0; i < bnad->num_rx; i++) {
 		rx_info = &bnad->rx_info[i];
 		if (!rx_info->rx)
@@ -663,7 +668,6 @@
 							    rx_ctrl->ccb);
 		}
 	}
-done:
 	return IRQ_HANDLED;
 }
 
@@ -674,11 +678,7 @@
 static void
 bnad_enable_mbox_irq(struct bnad *bnad)
 {
-	int irq = BNAD_GET_MBOX_IRQ(bnad);
-
-	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		if (bnad->cfg_flags & BNAD_CF_MSIX)
-			enable_irq(irq);
+	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 }
@@ -690,16 +690,21 @@
 static void
 bnad_disable_mbox_irq(struct bnad *bnad)
 {
-	int irq = BNAD_GET_MBOX_IRQ(bnad);
-
-
-	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		if (bnad->cfg_flags & BNAD_CF_MSIX)
-			disable_irq_nosync(irq);
+	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
 }
 
+static void
+bnad_set_netdev_perm_addr(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
+
+	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+	if (is_zero_ether_addr(netdev->dev_addr))
+		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+}
+
 /* Control Path Handlers */
 
 /* Callbacks */
@@ -755,11 +760,14 @@
 
 	if (link_up) {
 		if (!netif_carrier_ok(bnad->netdev)) {
+			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
+			if (!tcb)
+				return;
 			pr_warn("bna: %s link up\n",
 				bnad->netdev->name);
 			netif_carrier_on(bnad->netdev);
 			BNAD_UPDATE_CTR(bnad, link_toggle);
-			if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
+			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
 				/* Force an immediate Transmit Schedule */
 				pr_info("bna: %s TX_STARTED\n",
 					bnad->netdev->name);
@@ -807,6 +815,18 @@
 {
 	struct bnad_tx_info *tx_info =
 			(struct bnad_tx_info *)tcb->txq->tx->priv;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		cpu_relax();
+
+	bnad_free_all_txbufs(bnad, tcb);
+
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
 	tx_info->tcb[tcb->id] = NULL;
 }
@@ -822,6 +842,12 @@
 }
 
 static void
+bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	bnad_free_all_rxbufs(bnad, rcb);
+}
+
+static void
 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_info *rx_info =
@@ -849,7 +875,7 @@
 	if (tx_info != &bnad->tx_info[0])
 		return;
 
-	clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
+	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
 	netif_stop_queue(bnad->netdev);
 	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
 }
@@ -857,9 +883,36 @@
 static void
 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
 {
-	if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return;
 
+	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
+
+	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		cpu_relax();
+
+	bnad_free_all_txbufs(bnad, tcb);
+
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+	/*
+	 * Workaround: the first device enable can fail and leave us
+	 * with a zero MAC address. Fetch the MAC address from the port
+	 * again here.
+	 */
+	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
+		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
+		bnad_set_netdev_perm_addr(bnad);
+	}
+
+	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+
 	if (netif_carrier_ok(bnad->netdev)) {
 		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
 		netif_wake_queue(bnad->netdev);
@@ -870,40 +923,22 @@
 static void
 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
 {
-	struct bnad_unmap_q *unmap_q;
-
-	if (!tcb || (!tcb->unmap_q))
-		return;
-
-	unmap_q = tcb->unmap_q;
-	if (!unmap_q->unmap_array)
-		return;
-
-	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-		return;
-
-	bnad_free_all_txbufs(bnad, tcb);
-
-	unmap_q->producer_index = 0;
-	unmap_q->consumer_index = 0;
-
-	smp_mb__before_clear_bit();
-	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+	/* Delay only once for the whole Tx Path Shutdown */
+	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
+		mdelay(BNAD_TXRX_SYNC_MDELAY);
 }
 
 static void
 bnad_cb_rx_cleanup(struct bnad *bnad,
 			struct bna_ccb *ccb)
 {
-	bnad_cq_cmpl_init(bnad, ccb);
-
-	bnad_free_rxbufs(bnad, ccb->rcb[0]);
 	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
 
-	if (ccb->rcb[1]) {
-		bnad_free_rxbufs(bnad, ccb->rcb[1]);
+	if (ccb->rcb[1])
 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
-	}
+
+	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
+		mdelay(BNAD_TXRX_SYNC_MDELAY);
 }
 
 static void
@@ -911,6 +946,13 @@
 {
 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
 
+	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
+
+	if (rcb == rcb->cq->ccb->rcb[0])
+		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
+
+	bnad_free_all_rxbufs(bnad, rcb);
+
 	set_bit(BNAD_RXQ_STARTED, &rcb->flags);
 
 	/* Now allocate & post buffers for this RCB */
@@ -1047,7 +1089,7 @@
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	irq = BNAD_GET_MBOX_IRQ(bnad);
-	free_irq(irq, bnad->netdev);
+	free_irq(irq, bnad);
 
 	kfree(intr_info->idl);
 }
@@ -1061,7 +1103,7 @@
 bnad_mbox_irq_alloc(struct bnad *bnad,
 		    struct bna_intr_info *intr_info)
 {
-	int 		err;
+	int 		err = 0;
 	unsigned long 	flags;
 	u32	irq;
 	irq_handler_t 	irq_handler;
@@ -1096,22 +1138,17 @@
 	 */
 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
+	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+
 	err = request_irq(irq, irq_handler, flags,
-			  bnad->mbox_irq_name, bnad->netdev);
+			  bnad->mbox_irq_name, bnad);
 
 	if (err) {
 		kfree(intr_info->idl);
 		intr_info->idl = NULL;
-		return err;
 	}
 
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-
-	if (bnad->cfg_flags & BNAD_CF_MSIX)
-		disable_irq_nosync(irq);
-
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-	return 0;
+	return err;
 }
 
 static void
@@ -1388,13 +1425,24 @@
 }
 
 static void
-bnad_ioc_sem_timeout(unsigned long data)
+bnad_iocpf_timeout(unsigned long data)
 {
 	struct bnad *bnad = (struct bnad *)data;
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
+	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_iocpf_sem_timeout(unsigned long data)
+{
+	struct bnad *bnad = (struct bnad *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1555,62 +1603,19 @@
 	return rcvd;
 }
 
-static int
-bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
-{
-	struct bnad_rx_ctrl *rx_ctrl =
-		container_of(napi, struct bnad_rx_ctrl, napi);
-	struct bna_ccb *ccb;
-	struct bnad *bnad;
-	int 			rcvd = 0;
-	int			i, j;
-
-	ccb = rx_ctrl->ccb;
-
-	bnad = ccb->bnad;
-
-	if (!netif_carrier_ok(bnad->netdev))
-		goto poll_exit;
-
-	/* Handle Tx Completions, if any */
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++)
-			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
-	}
-
-	/* Handle Rx Completions */
-	rcvd = bnad_poll_cq(bnad, ccb, budget);
-	if (rcvd == budget)
-		return rcvd;
-poll_exit:
-	napi_complete((napi));
-
-	BNAD_UPDATE_CTR(bnad, netif_rx_complete);
-
-	bnad_enable_txrx_irqs(bnad);
-	return rcvd;
-}
-
 static void
 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
 {
-	int (*napi_poll) (struct napi_struct *, int);
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-	if (bnad->cfg_flags & BNAD_CF_MSIX)
-		napi_poll = bnad_napi_poll_rx;
-	else
-		napi_poll = bnad_napi_poll_txrx;
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	/* Initialize & enable NAPI */
 	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+
 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
-			       napi_poll, 64);
+			       bnad_napi_poll_rx, 64);
+
 		napi_enable(&rx_ctrl->napi);
 	}
 }
@@ -1825,6 +1830,6 @@
 
 	/* Initialize the Rx event handlers */
 	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
-	rx_cbfn.rcb_destroy_cbfn = NULL;
+	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
 	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
 	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
@@ -1968,6 +1974,27 @@
 	return 0;
 }
 
+/* Called with bnad_conf_lock() held */
+static void
+bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
+{
+	u16 vlan_id;
+	unsigned long flags;
+
+	if (!bnad->vlan_grp)
+		return;
+
+	BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
+
+	for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
+		if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
+			continue;
+		spin_lock_irqsave(&bnad->bna_lock, flags);
+		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
+		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	}
+}
+
 /* Statistics utilities */
 void
 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
@@ -2152,16 +2179,6 @@
 		bnad->num_rxp_per_rx = 1;
 }
 
-static void
-bnad_set_netdev_perm_addr(struct bnad *bnad)
-{
-	struct net_device *netdev = bnad->netdev;
-
-	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
-	if (is_zero_ether_addr(netdev->dev_addr))
-		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
-}
-
 /* Enable / disable device */
 static void
 bnad_device_disable(struct bnad *bnad)
@@ -2353,6 +2370,9 @@
 	/* Enable broadcast */
 	bnad_enable_default_bcast(bnad);
 
+	/* Restore VLANs, if any */
+	bnad_restore_vlans(bnad, 0);
+
 	/* Set the UCAST address */
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
@@ -2433,21 +2453,21 @@
 		return NETDEV_TX_OK;
 	}
 
-	/*
-	 * Takes care of the Tx that is scheduled between clearing the flag
-	 * and the netif_stop_queue() call.
-	 */
-	if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
 	tx_id = 0;
 
 	tx_info = &bnad->tx_info[tx_id];
 	tcb = tx_info->tcb[tx_id];
 	unmap_q = tcb->unmap_q;
 
+	/*
+	 * Takes care of the Tx that is scheduled between clearing the flag
+	 * and the netif_stop_queue() call.
+	 */
+	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
 	vectors = 1 + skb_shinfo(skb)->nr_frags;
 	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
 		dev_kfree_skb(skb);
@@ -2462,7 +2482,8 @@
 		    tcb->consumer_index &&
 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
 			acked = bnad_free_txbufs(bnad, tcb);
-			bna_ib_ack(tcb->i_dbell, acked);
+			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+				bna_ib_ack(tcb->i_dbell, acked);
 			smp_mb__before_clear_bit();
 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 		} else {
@@ -2624,6 +2645,10 @@
 	tcb->producer_index = txq_prod;
 
 	smp_mb();
+
+	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+		return NETDEV_TX_OK;
+
 	bna_txq_prod_indx_doorbell(tcb);
 
 	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
@@ -3032,7 +3057,7 @@
 bnad_pci_probe(struct pci_dev *pdev,
 		const struct pci_device_id *pcidev_id)
 {
-	bool 	using_dac;
+	bool 	using_dac = false;
 	int 	err;
 	struct bnad *bnad;
 	struct bna *bna;
@@ -3066,7 +3091,7 @@
 	/*
 	 * PCI initialization
 	 * 	Output : using_dac = 1 for 64 bit DMA
-	 *		           = 0 for 32 bit DMA
+	 *			   = 0 for 32 bit DMA
 	 */
 	err = bnad_pci_init(bnad, pdev, &using_dac);
 	if (err)
@@ -3084,6 +3109,9 @@
 	/* Initialize netdev structure, set up ethtool ops */
 	bnad_netdev_init(bnad, using_dac);
 
+	/* Set link to down state */
+	netif_carrier_off(netdev);
+
 	bnad_enable_msix(bnad);
 
 	/* Get resource requirement form bna */
@@ -3115,11 +3143,13 @@
 				((unsigned long)bnad));
 	setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
 				((unsigned long)bnad));
-	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
+	setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
+				((unsigned long)bnad));
+	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
 				((unsigned long)bnad));
 
 	/* Now start the timer before calling IOC */
-	mod_timer(&bnad->bna.device.ioc.ioc_timer,
+	mod_timer(&bnad->bna.device.ioc.iocpf_timer,
 		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
 
 	/*
@@ -3137,11 +3167,6 @@
 
 	mutex_unlock(&bnad->conf_mutex);
 
-	/*
-	 * Make sure the link appears down to the stack
-	 */
-	netif_carrier_off(netdev);
-
 	/* Finally, reguister with net_device layer */
 	err = register_netdev(netdev);
 	if (err) {
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index ebc3a907..8b1d515 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -51,6 +51,7 @@
  */
 struct bnad_rx_ctrl {
 	struct bna_ccb *ccb;
+	unsigned long  flags;
 	struct napi_struct	napi;
 };
 
@@ -64,7 +65,7 @@
 #define BNAD_NAME			"bna"
 #define BNAD_NAME_LEN			64
 
-#define BNAD_VERSION			"2.3.2.0"
+#define BNAD_VERSION			"2.3.2.3"
 
 #define BNAD_MAILBOX_MSIX_VECTORS	1
 
@@ -82,6 +83,7 @@
 
 /* Bit positions for tcb->flags */
 #define BNAD_TXQ_FREE_SENT		0
+#define BNAD_TXQ_TX_STARTED		1
 
 /* Bit positions for rcb->flags */
 #define BNAD_RXQ_REFILL			0
@@ -124,6 +126,7 @@
 struct bnad_drv_stats {
 	u64 		netif_queue_stop;
 	u64		netif_queue_wakeup;
+	u64		netif_queue_stopped;
 	u64		tso4;
 	u64		tso6;
 	u64		tso_err;
@@ -199,12 +202,12 @@
 /* Set, tested & cleared using xxx_bit() functions */
 /* Values indicated bit positions */
 #define	BNAD_RF_CEE_RUNNING		1
-#define BNAD_RF_HW_ERROR 		2
-#define BNAD_RF_MBOX_IRQ_DISABLED	3
-#define BNAD_RF_TX_STARTED		4
-#define BNAD_RF_RX_STARTED		5
-#define BNAD_RF_DIM_TIMER_RUNNING	6
-#define BNAD_RF_STATS_TIMER_RUNNING	7
+#define BNAD_RF_MBOX_IRQ_DISABLED	2
+#define BNAD_RF_RX_STARTED		3
+#define BNAD_RF_DIM_TIMER_RUNNING	4
+#define BNAD_RF_STATS_TIMER_RUNNING	5
+#define BNAD_RF_TX_SHUTDOWN_DELAYED	6
+#define BNAD_RF_RX_SHUTDOWN_DELAYED	7
 
 struct bnad {
 	struct net_device 	*netdev;
@@ -306,8 +309,10 @@
 extern void bnad_dim_timer_start(struct bnad *bnad);
 
 /* Statistics */
-extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
-extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
+extern void bnad_netdev_qstats_fill(struct bnad *bnad,
+		struct rtnl_link_stats64 *stats);
+extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
+		struct rtnl_link_stats64 *stats);
 
 /**
  * MACROS
@@ -320,9 +325,11 @@
 
 #define bnad_enable_rx_irq_unsafe(_ccb)			\
 {							\
-	bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
-		(_ccb)->rx_coalescing_timeo);		\
-	bna_ib_ack((_ccb)->i_dbell, 0);			\
+	if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
+		bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
+			(_ccb)->rx_coalescing_timeo);		\
+		bna_ib_ack((_ccb)->i_dbell, 0);			\
+	}							\
 }
 
 #define bnad_dim_timer_running(_bnad)				\
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 11fa2ea..99be5ae 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -68,6 +68,7 @@
 
 	"netif_queue_stop",
 	"netif_queue_wakeup",
+	"netif_queue_stopped",
 	"tso4",
 	"tso6",
 	"tso_err",
@@ -330,10 +331,6 @@
 
 	BNAD_GET_REG(PCIE_MISC_REG);
 
-	BNAD_GET_REG(HOST_SEM0_REG);
-	BNAD_GET_REG(HOST_SEM1_REG);
-	BNAD_GET_REG(HOST_SEM2_REG);
-	BNAD_GET_REG(HOST_SEM3_REG);
 	BNAD_GET_REG(HOST_SEM0_INFO_REG);
 	BNAD_GET_REG(HOST_SEM1_INFO_REG);
 	BNAD_GET_REG(HOST_SEM2_INFO_REG);
@@ -1184,6 +1181,9 @@
 
 	bi = sizeof(*net_stats64) / sizeof(u64);
 
+	/* Get netif_queue_stopped from stack */
+	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
+
 	/* Fill driver stats into ethtool buffers */
 	stats64 = (u64 *)&bnad->stats.drv_stats;
 	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)