diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index 09ee2a1e35bb..69c6ac2e8263 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -169,6 +169,12 @@ static int transport_add_class_device(struct attribute_container *cont,
 			goto err_del;
 	}
 
+	if (tcont->encryption) {
+		error = sysfs_create_group(&classdev->kobj, tcont->encryption);
+		if (error)
+			goto err_del;
+	}
+
 	return 0;
 
 err_del:
@@ -244,6 +250,8 @@ static int transport_remove_classdev(struct attribute_container *cont,
 	if (tclass->remove != anon_transport_dummy_function) {
 		if (tcont->statistics)
 			sysfs_remove_group(&classdev->kobj, tcont->statistics);
+		if (tcont->encryption)
+			sysfs_remove_group(&classdev->kobj, tcont->encryption);
 		attribute_container_class_device_del(classdev);
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 33582d48ec09..4af5c069635a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -6979,6 +6979,42 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	return;
 }
 
+/**
+ * lpfc_get_enc_info - Get session encryption information for a remote port
+ * @rport: ptr to fc_rport from scsi transport fc
+ *
+ * Walk the vport's fc_nodes list (under fc_nodes_list_lock) for the node
+ * whose rport matches @rport and copy the session encryption status from
+ * the node's nlp_enc_info into the rport's fc_encryption_info struct for
+ * reporting to upper layers.  Invoked through the fc transport's
+ * get_fc_rport_enc_info callback.
+ *
+ * Returns:
+ * - Address of rport's fc_encryption_info struct
+ * - NULL when no matching node is found
+ **/
+static struct fc_encryption_info *
+lpfc_get_enc_info(struct fc_rport *rport)
+{
+	struct Scsi_Host *shost = rport_to_shost(rport);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct fc_encryption_info *ef = NULL;
+	struct lpfc_nodelist *ndlp;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (ndlp->rport && ndlp->rport == rport) {
+			ef = &rport->enc_info;
+			ef->status = ndlp->nlp_enc_info.status;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
+	return ef;
+}
+
+
 /*
  * The LPFC driver treats linkdown handling as target loss events so there
  * are no sysfs handlers for link_down_tmo.
@@ -7196,6 +7232,8 @@ struct fc_function_template lpfc_transport_functions = {
 	.get_fc_host_stats = lpfc_get_stats,
 	.reset_fc_host_stats = lpfc_reset_stats,
 
+	.get_fc_rport_enc_info = lpfc_get_enc_info,
+
 	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
 	.show_rport_maxframe_size = 1,
 	.show_rport_supported_classes = 1,
@@ -7265,6 +7303,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
 	.get_fc_host_stats = lpfc_get_stats,
 	.reset_fc_host_stats = lpfc_reset_stats,
 
+	.get_fc_rport_enc_info = lpfc_get_enc_info,
+
 	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
 	.show_rport_maxframe_size = 1,
 	.show_rport_supported_classes = 1,
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 92b5b2dbe847..646f88c776f5 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -872,6 +872,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 				 ndlp->nlp_rpi);
 		len += scnprintf(buf+len, size-len, "flag:x%08lx ",
 				 ndlp->nlp_flag);
+		if (ndlp->nlp_enc_info.status) {
+			/* "%s" keeps the format a literal (-Wformat-security) */
+			len += scnprintf(buf + len, size - len,
+					 "ENCRYPTED%s",
+					 ndlp->nlp_enc_info.level
+					 ? "(CNSA2.0) " : "(CNSA1.0) ");
+		}
 		if (!ndlp->nlp_type)
 			len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE ");
 		if (ndlp->nlp_type & NLP_FC_NODE)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 51cb8571c049..de0adeecf668 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -77,6 +77,11 @@ struct lpfc_node_rrqs {
 	unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
 };
 
+struct lpfc_enc_info {
+	u8 status; /* encryption status for session */
+	u8 level; /* CNSA encryption level */
+};
+
 enum lpfc_fc4_xpt_flags {
 	NLP_XPT_REGD		= 0x1,
 	SCSI_XPT_REGD		= 0x2,
@@ -138,6 +143,8 @@ struct lpfc_nodelist {
 	uint8_t		vmid_support;		/* destination VMID support */
 #define NLP_NVME_NSLER     0x1			/* NVME NSLER device */
 
+	struct lpfc_enc_info nlp_enc_info; /* Encryption information struct */
+
 	struct timer_list   nlp_delayfunc;	/* Used for delayed ELS cmds */
 	struct lpfc_hba *phba;
 	struct fc_rport *rport;		/* scsi_transport_fc port structure */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 02b6d31b9ad9..32da3c23c7f4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2014,6 +2014,58 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	lpfc_nlp_put(ndlp);
 	return;
 }
+
+/**
+ * lpfc_check_encryption - Reports an ndlp's encryption information
+ * @phba: pointer to lpfc hba data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @cmdiocb: pointer to lpfc command iocbq data structure.
+ * @rspiocb: pointer to lpfc response iocbq data structure.
+ *
+ * This routine is called in the completion callback function for issuing
+ * or receiving a Port Login (PLOGI) command. In a PLOGI completion, if FEDIF
+ * is supported, encryption information will be provided in completion status
+ * data. If @phba supports FEDIF, a log message containing encryption
+ * information will be logged. Encryption status is also saved for encryption
+ * reporting with upper layer through the rport encryption attribute.
+ **/
+static void
+lpfc_check_encryption(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		      struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	u32 did = ndlp->nlp_DID;
+	struct lpfc_enc_info *nlp_enc_info = &ndlp->nlp_enc_info;
+	char enc_status[FC_RPORT_ENCRYPTION_STATUS_MAX_LEN] = {0};
+	char enc_level[8] = "N/A";
+	u8 encryption;
+
+	if (phba->sli4_hba.encryption_support &&
+	    ((did & Fabric_DID_MASK) != Fabric_DID_MASK)) {
+		encryption = bf_get(lpfc_wcqe_c_enc,
+				    &rspiocb->wcqe_cmpl);
+		nlp_enc_info->status = encryption;
+
+		strscpy(enc_status, encryption ? "Encrypted" : "Unencrypted",
+			sizeof(enc_status));
+
+		if (encryption) {
+			nlp_enc_info->level = bf_get(lpfc_wcqe_c_enc_lvl,
+						     &rspiocb->wcqe_cmpl);
+			strscpy(enc_level, nlp_enc_info->level ? "CNSA2.0" :
+								 "CNSA1.0",
+				sizeof(enc_level));
+		}
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ENCRYPTION,
+				 "0924 DID:x%06x %s Session "
+				 "Established, Encryption Level:%s "
+				 "rpi:x%x\n",
+				 ndlp->nlp_DID, enc_status, enc_level,
+				 ndlp->nlp_rpi);
+	}
+}
+
 /**
  * lpfc_cmpl_els_plogi - Completion callback function for plogi
  * @phba: pointer to lpfc hba data structure.
@@ -2153,6 +2205,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			goto out;
 		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 
+		lpfc_check_encryption(phba, ndlp, cmdiocb, rspiocb);
+
 		sp = (struct serv_parm *)((u8 *)prsp->virt +
 					  sizeof(u32));
 
@@ -5407,6 +5461,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		goto out;
 	}
 
+	if (!ulp_status && test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))
+		lpfc_check_encryption(phba, ndlp, cmdiocb, rspiocb);
+
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 		"ELS rsp cmpl:    status:x%x/x%x did:x%x",
 		ulp_status, ulp_word4, did);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bb803f32bc1b..1aeebdc08073 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -5340,6 +5340,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
 		if (acc_plogi)
 			clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
+		memset(&ndlp->nlp_enc_info, 0, sizeof(ndlp->nlp_enc_info));
 		return 1;
 	}
 	clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index a7f7ed86d2b0..c000474c3066 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -437,6 +437,12 @@ struct lpfc_wcqe_complete {
 #define lpfc_wcqe_c_cmf_bw_MASK		0x0FFFFFFF
 #define lpfc_wcqe_c_cmf_bw_WORD		total_data_placed
 	uint32_t parameter;
+#define lpfc_wcqe_c_enc_SHIFT		31
+#define lpfc_wcqe_c_enc_MASK		0x00000001
+#define lpfc_wcqe_c_enc_WORD		parameter
+#define lpfc_wcqe_c_enc_lvl_SHIFT	30
+#define lpfc_wcqe_c_enc_lvl_MASK	0x00000001
+#define lpfc_wcqe_c_enc_lvl_WORD	parameter
 #define lpfc_wcqe_c_bg_edir_SHIFT	5
 #define lpfc_wcqe_c_bg_edir_MASK	0x00000001
 #define lpfc_wcqe_c_bg_edir_WORD	parameter
@@ -2942,7 +2948,10 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_topology_SHIFT		24
 #define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
 #define lpfc_mbx_rd_conf_topology_WORD		word2
-	uint32_t rsvd_3;
+	uint32_t word3;
+#define lpfc_mbx_rd_conf_fedif_SHIFT		6
+#define lpfc_mbx_rd_conf_fedif_MASK		0x00000001
+#define lpfc_mbx_rd_conf_fedif_WORD		word3
 	uint32_t word4;
 #define lpfc_mbx_rd_conf_e_d_tov_SHIFT		0
 #define lpfc_mbx_rd_conf_e_d_tov_MASK		0x0000FFFF
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b1460b16dd91..a116a16c4a6f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -9999,6 +9999,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
 		phba->max_vports = phba->max_vpi;
 
+		if (bf_get(lpfc_mbx_rd_conf_fedif, rd_config))
+			phba->sli4_hba.encryption_support = true;
+		else
+			phba->sli4_hba.encryption_support = false;
+
 		/* Next decide on FPIN or Signal E2E CGN support
 		 * For congestion alarms and warnings valid combination are:
 		 * 1. FPIN alarms / FPIN warnings
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 59bd2bafc73f..e00d101d548c 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -47,6 +47,7 @@
 #define LOG_RSVD1	0x01000000	/* Reserved */
 #define LOG_RSVD2	0x02000000	/* Reserved */
 #define LOG_CGN_MGMT    0x04000000	/* Congestion Mgmt events */
+#define LOG_ENCRYPTION  0x40000000      /* EDIF Encryption events. */
 #define LOG_TRACE_EVENT 0x80000000	/* Dmp the DBG log on this err */
 #define LOG_ALL_MSG	0x7fffffff	/* LOG all messages */
 
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 73d77cfab5f8..734af3d039f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -20432,62 +20432,36 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
 uint16_t
 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
 {
-	uint16_t next_fcf_index;
+	uint16_t next;
 
-initial_priority:
-	/* Search start from next bit of currently registered FCF index */
-	next_fcf_index = phba->fcf.current_rec.fcf_indx;
-
-next_priority:
-	/* Determine the next fcf index to check */
-	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
-	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
-				       LPFC_SLI4_FCF_TBL_INDX_MAX,
-				       next_fcf_index);
+	do {
+		for_each_set_bit_wrap(next, phba->fcf.fcf_rr_bmask,
+				LPFC_SLI4_FCF_TBL_INDX_MAX, phba->fcf.current_rec.fcf_indx) {
+			if (next == phba->fcf.current_rec.fcf_indx)
+				continue;
 
-	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
-	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
-		/*
-		 * If we have wrapped then we need to clear the bits that
-		 * have been tested so that we can detect when we should
-		 * change the priority level.
-		 */
-		next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
-					       LPFC_SLI4_FCF_TBL_INDX_MAX);
-	}
+			if (!(phba->fcf.fcf_pri[next].fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2845 Get next roundrobin failover FCF (x%x)\n", next);
+				return next;
+			}
 
+			if (list_is_singular(&phba->fcf.fcf_pri_list))
+				return LPFC_FCOE_FCF_NEXT_NONE;
+		}
 
-	/* Check roundrobin failover list empty condition */
-	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
-		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
 		/*
 		 * If next fcf index is not found check if there are lower
 		 * Priority level fcf's in the fcf_priority list.
 		 * Set up the rr_bmask with all of the avaiable fcf bits
 		 * at that level and continue the selection process.
 		 */
-		if (lpfc_check_next_fcf_pri_level(phba))
-			goto initial_priority;
-		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-				"2844 No roundrobin failover FCF available\n");
-
-		return LPFC_FCOE_FCF_NEXT_NONE;
-	}
-
-	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
-		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
-		LPFC_FCF_FLOGI_FAILED) {
-		if (list_is_singular(&phba->fcf.fcf_pri_list))
-			return LPFC_FCOE_FCF_NEXT_NONE;
+	} while (lpfc_check_next_fcf_pri_level(phba));
 
-		goto next_priority;
-	}
-
-	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-			"2845 Get next roundrobin failover FCF (x%x)\n",
-			next_fcf_index);
+	lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+			"2844 No roundrobin failover FCF available\n");
 
-	return next_fcf_index;
+	return LPFC_FCOE_FCF_NEXT_NONE;
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index fd6dab157887..ee58383492b2 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -888,6 +888,10 @@ struct lpfc_sli4_hba {
 #define LPFC_FP_EQ_MAX_INTR_SEC         10000
 
 	uint32_t intr_enable;
+
+	 /* Indicates whether SLI Port supports FEDIF */
+	bool encryption_support;
+
 	struct lpfc_bmbx bmbx;
 	struct lpfc_max_cfg_param max_cfg_param;
 	uint16_t extents_in_use; /* must allocate resource extents. */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f3dada5bf7c1..c4ca8bf5843a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.4.0.12"
+#define LPFC_DRIVER_VERSION "14.4.0.13"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 0d652db8fe24..e4e22cb0e277 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -843,7 +843,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
 	/* initialize fault polling */
 
 	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
-	snprintf(ioc->fault_reset_work_q_name,
+	scnprintf(ioc->fault_reset_work_q_name,
 	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
 	    ioc->driver_name, ioc->id);
 	ioc->fault_reset_work_q = alloc_ordered_workqueue(
@@ -1564,6 +1564,8 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 	int i;
 	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
 	u8 cb_idx = 0xFF;
+	u16 discovery_smid =
+	    ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_DISCOVERY;
 
 	if (smid < ioc->hi_priority_smid) {
 		struct scsiio_tracker *st;
@@ -1572,8 +1574,10 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 			st = _get_st_from_smid(ioc, smid);
 			if (st)
 				cb_idx = st->cb_idx;
-		} else if (smid == ctl_smid)
+		} else if (smid < discovery_smid)
 			cb_idx = ioc->ctl_cb_idx;
+		else
+			cb_idx = ioc->scsih_cb_idx;
 	} else if (smid < ioc->internal_smid) {
 		i = smid - ioc->hi_priority_smid;
 		cb_idx = ioc->hpr_lookup[i].cb_idx;
@@ -3174,7 +3178,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
 
 	if (index >= ioc->iopoll_q_start_index) {
 		qid = index - ioc->iopoll_q_start_index;
-		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
+		scnprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
 		    ioc->driver_name, ioc->id, qid);
 		reply_q->is_iouring_poll_q = 1;
 		ioc->io_uring_poll_queues[qid].reply_q = reply_q;
@@ -3183,10 +3187,10 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
 
 
 	if (ioc->msix_enable)
-		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+		scnprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
 		    ioc->driver_name, ioc->id, index);
 	else
-		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+		scnprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
 		    ioc->driver_name, ioc->id);
 	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
 			IRQF_SHARED, reply_q->name, reply_q);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index e6a6f21d309b..de37fa5ac073 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -147,6 +147,7 @@
 #define INTERNAL_CMDS_COUNT		10	/* reserved cmds */
 /* reserved for issuing internally framed scsi io cmds */
 #define INTERNAL_SCSIIO_CMDS_COUNT	3
+#define INTERNAL_SCSIIO_FOR_DISCOVERY	2
 
 #define MPI3_HIM_MASK			0xFFFFFFFF /* mask every bit*/
 
@@ -480,6 +481,7 @@ struct MPT3SAS_DEVICE {
 	u32	flags;
 	u8	configured_lun;
 	u8	block;
+	u8	deleted;
 	u8	tlr_snoop_check;
 	u8	ignore_delay_remove;
 	/* Iopriority Command Handling */
@@ -577,7 +579,9 @@ struct _sas_device {
 	u8	chassis_slot;
 	u8	is_chassis_slot_valid;
 	u8	connector_name[5];
+	u8	ssd_device;
 	struct kref refcount;
+
 	u8	port_type;
 	struct hba_port *port;
 	struct sas_rphy *rphy;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 7092d0debef3..ac69a5abe2e2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -61,6 +61,8 @@
 
 #define PCIE_CHANNEL 2
 
+#define MPT3_MAX_LUNS (255)
+
 /* forward proto's */
 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
 	struct _sas_node *sas_expander);
@@ -70,13 +72,24 @@ static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
 	struct _sas_device *sas_device);
 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
 	u8 retry_count, u8 is_pd);
-static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	u8 retry_count);
 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
 	struct _pcie_device *pcie_device);
 static void
 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
+static enum device_responsive_state
+_scsih_wait_for_target_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method);
+static enum device_responsive_state
+_scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 *is_ssd_device,
+	u8 tr_timeout, u8 tr_method);
+static enum device_responsive_state
+_scsih_wait_for_device_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	u8 retry_count, u8 is_pd, int lun, u8 tr_timeout, u8 tr_method);
+static void _firmware_event_work_delayed(struct work_struct *work);
 
 /* global parameters */
 LIST_HEAD(mpt3sas_ioc_list);
@@ -159,6 +172,15 @@ module_param(enable_sdev_max_qd, bool, 0444);
 MODULE_PARM_DESC(enable_sdev_max_qd,
 	"Enable sdev max qd as can_queue, def=disabled(0)");
 
+/*
+ * permit overriding the SCSI command issuing capability of
+ * the driver to bring the drive to READY state
+ */
+static int issue_scsi_cmd_to_bringup_drive = 1;
+module_param(issue_scsi_cmd_to_bringup_drive, int, 0444);
+MODULE_PARM_DESC(issue_scsi_cmd_to_bringup_drive, "allow host driver to\n"
+	"issue SCSI commands to bring the drive to READY state, default=1 ");
+
 static int multipath_on_hba = -1;
 module_param(multipath_on_hba, int, 0);
 MODULE_PARM_DESC(multipath_on_hba,
@@ -173,10 +195,33 @@ module_param(host_tagset_enable, int, 0444);
 MODULE_PARM_DESC(host_tagset_enable,
 	"Shared host tagset enable/disable Default: enable(1)");
 
+static int command_retry_count = 144;
+module_param(command_retry_count, int, 0444);
+MODULE_PARM_DESC(command_retry_count, "Device discovery TUR command retry\n"
+	"count: (default=144)");
+
 /* raid transport support */
 static struct raid_template *mpt3sas_raid_template;
 static struct raid_template *mpt2sas_raid_template;
 
+/**
+ * enum device_responsive_state - responsive state
+ * @DEVICE_READY: device is ready to be added
+ * @DEVICE_RETRY: device can be retried later
+ * @DEVICE_RETRY_UA: retry unit attentions
+ * @DEVICE_START_UNIT: requires start unit
+ * @DEVICE_STOP_UNIT: requires stop unit
+ * @DEVICE_ERROR: device reported some fatal error
+ *
+ */
+enum device_responsive_state {
+	DEVICE_READY,
+	DEVICE_RETRY,
+	DEVICE_RETRY_UA,
+	DEVICE_START_UNIT,
+	DEVICE_STOP_UNIT,
+	DEVICE_ERROR,
+};
 
 /**
  * struct sense_info - common structure for obtaining sense keys
@@ -205,6 +250,9 @@ struct sense_info {
 
 /**
  * struct fw_event_work - firmware event struct
+ * @retries: retry count for processing the event
+ * @delayed_work_active: flag indicating if delayed work is active
+ * @delayed_work: delayed work item for deferred event handling
  * @list: link list framework
  * @work: work object (ioc->fault_reset_work_q)
  * @ioc: per adapter object
@@ -219,6 +267,9 @@ struct sense_info {
  * This object stored on ioc->fw_event_list.
  */
 struct fw_event_work {
+	u8			*retries;
+	u8                      delayed_work_active;
+	struct delayed_work     delayed_work;
 	struct list_head	list;
 	struct work_struct	work;
 
@@ -230,11 +281,16 @@ struct fw_event_work {
 	u16			event;
 	struct kref		refcount;
 	char			event_data[] __aligned(4);
+
 };
 
 static void fw_event_work_free(struct kref *r)
 {
-	kfree(container_of(r, struct fw_event_work, refcount));
+	struct fw_event_work *fw_work;
+
+	fw_work = container_of(r, struct fw_event_work, refcount);
+	kfree(fw_work->retries);
+	kfree(fw_work);
 }
 
 static void fw_event_work_get(struct fw_event_work *fw_work)
@@ -955,6 +1011,7 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
 		sas_device_put(sas_device);
 	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
 }
 
 /**
@@ -2528,6 +2585,8 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
 	char *r_level = "";
 	u16 handle, volume_handle = 0;
 	u64 volume_wwid = 0;
+	enum device_responsive_state retval;
+	u8 count = 0;
 
 	qdepth = 1;
 	sas_device_priv_data = sdev->hostdata;
@@ -2686,6 +2745,7 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
 
 		pcie_device_put(pcie_device);
 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
 		lim->virt_boundary_mask = ioc->page_size - 1;
 		return 0;
@@ -2737,9 +2797,16 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
-	sas_device_put(sas_device);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-	if (!ssp_target)
+	if (!ssp_target) {
 		_scsih_display_sata_capabilities(ioc, handle, sdev);
 
+		/* sas_device ref taken above is still held across the probe */
+		do {
+			retval = _scsih_ata_pass_thru_idd(ioc, handle,
+				    &sas_device->ssd_device, 30, 0);
+		} while ((retval == DEVICE_RETRY || retval == DEVICE_RETRY_UA)
+			&& count++ < 3);
+	}
+	sas_device_put(sas_device);
 
 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
 
@@ -3595,6 +3662,37 @@ _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
 
+/**
+ * _scsih_fw_event_requeue - requeue an event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * @delay: time in milliseconds to wait before retrying the event
+ *
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_requeue(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
+	*fw_event, unsigned long delay)
+{
+	unsigned long flags;
+
+	if (ioc->firmware_event_thread == NULL)
+		return;
+
+	spin_lock_irqsave(&ioc->fw_event_lock, flags);
+	fw_event_work_get(fw_event);
+	list_add_tail(&fw_event->list, &ioc->fw_event_list);
+	if (!fw_event->delayed_work_active) {
+		fw_event->delayed_work_active = 1;
+		INIT_DELAYED_WORK(&fw_event->delayed_work,
+		    _firmware_event_work_delayed);
+	}
+	queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work,
+	    msecs_to_jiffies(delay));
+	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
 
  /**
  * mpt3sas_send_trigger_data_event - send event for processing trigger data
@@ -3825,29 +3923,235 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
 /**
  * _scsih_ublock_io_all_device - unblock every device
  * @ioc: per adapter object
+ * @no_turs: flag to disable TEST UNIT READY checks during device unblocking
  *
  * change the device state from block to running
  */
 static void
-_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc, u8 no_turs)
 {
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	struct scsi_device *sdev;
+	struct MPT3SAS_TARGET *sas_target;
+	enum device_responsive_state rc;
+	struct _sas_device *sas_device = NULL;
+	struct _pcie_device *pcie_device = NULL;
+	int count = 0;
+	u8 tr_method = 0;
+	u8 tr_timeout = 30;
+
 
 	shost_for_each_device(sdev, ioc->shost) {
 		sas_device_priv_data = sdev->hostdata;
 		if (!sas_device_priv_data)
 			continue;
+
+		sas_target = sas_device_priv_data->sas_target;
+		if (!sas_target || sas_target->deleted)
+			continue;
+
 		if (!sas_device_priv_data->block)
 			continue;
 
-		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
-			"device_running, handle(0x%04x)\n",
-		    sas_device_priv_data->sas_target->handle));
+		if ((no_turs) || (!issue_scsi_cmd_to_bringup_drive)) {
+			sdev_printk(KERN_WARNING, sdev, "device_unblocked handle(0x%04x)\n",
+				sas_device_priv_data->sas_target->handle);
+			_scsih_internal_device_unblock(sdev, sas_device_priv_data);
+			continue;
+		}
+
+		do {
+			pcie_device = mpt3sas_get_pdev_by_handle(ioc, sas_target->handle);
+			if (pcie_device && (!ioc->tm_custom_handling) &&
+				(!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
+				tr_timeout = pcie_device->reset_timeout;
+				tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+			}
+			rc = _scsih_wait_for_device_to_become_ready(ioc,
+			    sas_target->handle, 0, (sas_target->flags &
+			    MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun, tr_timeout, tr_method);
+			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+			    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+				ssleep(1);
+			if (pcie_device)
+				pcie_device_put(pcie_device);
+		} while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+		    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+			&& count++ < command_retry_count);
+		sas_device_priv_data->block = 0;
+		if (rc != DEVICE_READY)
+			sas_device_priv_data->deleted = 1;
+
 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
+
+		if (rc != DEVICE_READY) {
+			sdev_printk(KERN_WARNING, sdev, "%s: device_offlined,\n"
+			    "handle(0x%04x)\n",
+			    __func__, sas_device_priv_data->sas_target->handle);
+			scsi_device_set_state(sdev, SDEV_OFFLINE);
+			sas_device = mpt3sas_get_sdev_by_addr(ioc,
+					sas_device_priv_data->sas_target->sas_address,
+					sas_device_priv_data->sas_target->port);
+			if (sas_device) {
+				_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
+				sas_device_put(sas_device);
+			} else {
+				pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
+						    sas_device_priv_data->sas_target->sas_address);
+				if (pcie_device) {
+					if (pcie_device->enclosure_handle != 0)
+						sdev_printk(KERN_INFO, sdev, "enclosure logical id\n"
+						    "(0x%016llx), slot(%d)\n", (unsigned long long)
+							pcie_device->enclosure_logical_id,
+							pcie_device->slot);
+					if (pcie_device->connector_name[0] != '\0')
+						sdev_printk(KERN_INFO, sdev, "enclosure level(0x%04x),\n"
+							" connector name( %s)\n",
+							pcie_device->enclosure_level,
+							pcie_device->connector_name);
+					pcie_device_put(pcie_device);
+				}
+			}
+		} else
+			sdev_printk(KERN_WARNING, sdev, "device_unblocked,\n"
+			    "handle(0x%04x)\n",
+			    sas_device_priv_data->sas_target->handle);
 	}
 }
 
+/**
+ * _scsih_ublock_io_device_wait - unblock IO for target
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @port: hba port entry
+ *
+ * make sure the device is responding before unblocking
+ */
+static void
+_scsih_ublock_io_device_wait(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+			     struct hba_port *port)
+{
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct MPT3SAS_TARGET *sas_target;
+	enum device_responsive_state rc;
+	struct scsi_device *sdev;
+	int host_reset_completion_count;
+	struct _sas_device *sas_device;
+	struct _pcie_device *pcie_device;
+	u8 tr_timeout = 30;
+	u8 tr_method = 0;
+	int count = 0;
+
+	/* moving devices from SDEV_OFFLINE to SDEV_BLOCK */
+	shost_for_each_device(sdev, ioc->shost) {
+		sas_device_priv_data = sdev->hostdata;
+		if (!sas_device_priv_data)
+			continue;
+		sas_target = sas_device_priv_data->sas_target;
+		if (!sas_target)
+			continue;
+		if (sas_target->sas_address != sas_address ||
+		    sas_target->port != port)
+			continue;
+		if (sdev->sdev_state == SDEV_OFFLINE) {
+			sas_device_priv_data->block = 1;
+			sas_device_priv_data->deleted = 0;
+			scsi_device_set_state(sdev, SDEV_RUNNING);
+			scsi_internal_device_block_nowait(sdev);
+		}
+	}
+
+	/* moving devices from SDEV_BLOCK to SDEV_RUNNING state */
+	shost_for_each_device(sdev, ioc->shost) {
+		sas_device_priv_data = sdev->hostdata;
+		if (!sas_device_priv_data)
+			continue;
+		sas_target = sas_device_priv_data->sas_target;
+		if (!sas_target)
+			continue;
+		if (sas_target->sas_address != sas_address ||
+		    sas_target->port != port)
+			continue;
+		if (!sas_device_priv_data->block)
+			continue;
+
+		do {
+			host_reset_completion_count = 0;
+			pcie_device = mpt3sas_get_pdev_by_handle(ioc, sas_target->handle);
+			if (pcie_device && (!ioc->tm_custom_handling) &&
+				(!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
+				tr_timeout = pcie_device->reset_timeout;
+				tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+			}
+			rc = _scsih_wait_for_device_to_become_ready(ioc,
+			      sas_target->handle, 0, (sas_target->flags &
+			      MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun, tr_timeout, tr_method);
+			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+			    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) {
+				do {
+					msleep(500);
+					host_reset_completion_count++;
+				} while (rc == DEVICE_RETRY &&
+							ioc->shost_recovery);
+				if (host_reset_completion_count > 1) {
+					rc = _scsih_wait_for_device_to_become_ready(ioc,
+						sas_target->handle, 0, (sas_target->flags &
+						MPT_TARGET_FLAGS_RAID_COMPONENT), sdev->lun,
+						tr_timeout, tr_method);
+					if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+					    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+						msleep(500);
+				}
+				/* fall through so the pcie_device ref is put */
+			}
+			if (pcie_device)
+				pcie_device_put(pcie_device);
+		} while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+		    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+			&& count++ <= command_retry_count);
+
+		sas_device_priv_data->block = 0;
+		if (rc != DEVICE_READY)
+			sas_device_priv_data->deleted = 1;
+		scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+
+		if (rc != DEVICE_READY) {
+			sdev_printk(KERN_WARNING, sdev,
+			    "%s: device_offlined, handle(0x%04x)\n",
+			    __func__, sas_device_priv_data->sas_target->handle);
+
+			sas_device = mpt3sas_get_sdev_by_handle(ioc,
+				sas_device_priv_data->sas_target->handle);
+			if (sas_device) {
+				_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
+				sas_device_put(sas_device);
+			} else {
+				pcie_device = mpt3sas_get_pdev_by_handle(ioc,
+							sas_device_priv_data->sas_target->handle);
+				if (pcie_device) {
+					if (pcie_device->enclosure_handle != 0)
+						sdev_printk(KERN_INFO, sdev,
+							"device_offlined, enclosure logical id(0x%016llx),\n"
+							" slot(%d)\n", (unsigned long long)
+							pcie_device->enclosure_logical_id,
+							pcie_device->slot);
+					if (pcie_device->connector_name[0] != '\0')
+						sdev_printk(KERN_WARNING, sdev,
+							"device_offlined, enclosure level(0x%04x),\n"
+							"connector name( %s)\n",
+							pcie_device->enclosure_level,
+							pcie_device->connector_name);
+					pcie_device_put(pcie_device);
+				}
+			}
+			scsi_device_set_state(sdev, SDEV_OFFLINE);
+		} else {
+			sdev_printk(KERN_WARNING, sdev,
+				"device_unblocked, handle(0x%04x)\n",
+				sas_device_priv_data->sas_target->handle);
+		}
+	}
+}
 
 /**
  * _scsih_ublock_io_device - prepare device to be deleted
@@ -7108,91 +7412,850 @@ _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	return 1;
 }
 
-
-
-
-#define MPT3_MAX_LUNS (255)
-
-
 /**
- * _scsih_check_access_status - check access flags
+ * _scsi_send_scsi_io - send internal SCSI_IO to target
  * @ioc: per adapter object
- * @sas_address: sas address
- * @handle: sas device handle
- * @access_status: errors returned during discovery of the device
+ * @transfer_packet: packet describing the transfer
+ * @tr_timeout: Target Reset Timeout
+ * @tr_method: Target Reset Method
+ * Context: user
  *
- * Return: 0 for success, else failure
+ * Returns 0 for success, non-zero for failure.
  */
-static u8
-_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
-	u16 handle, u8 access_status)
+static int
+_scsi_send_scsi_io(struct MPT3SAS_ADAPTER *ioc, struct _scsi_io_transfer
+	*transfer_packet, u8 tr_timeout, u8 tr_method)
 {
-	u8 rc = 1;
-	char *desc = NULL;
+	Mpi2SCSIIOReply_t *mpi_reply;
+	Mpi2SCSIIORequest_t *mpi_request;
+	u16 smid;
+	u8 issue_reset = 0;
+	int rc;
+	void *priv_sense;
+	u32 mpi_control;
+	void *psge;
+	dma_addr_t data_out_dma = 0;
+	dma_addr_t data_in_dma = 0;
+	size_t data_in_sz = 0;
+	size_t data_out_sz = 0;
+	u16 handle;
+	u8 retry_count = 0, host_reset_count = 0;
+	int tm_return_code;
 
-	switch (access_status) {
-	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
-	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
-		rc = 0;
-		break;
-	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
-		desc = "sata capability failed";
-		break;
-	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
-		desc = "sata affiliation conflict";
-		break;
-	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
-		desc = "route not addressable";
-		break;
-	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
-		desc = "smp error not addressable";
+	/* Fail fast while the adapter cannot service internal commands. */
+	if (ioc->pci_error_recovery) {
+		pr_info("%s: pci error recovery in progress!\n", __func__);
+		return -EFAULT;
+	}
+
+	if (ioc->shost_recovery) {
+		pr_info("%s: host recovery in progress!\n", __func__);
+		return -EAGAIN;
+	}
+
+	handle = transfer_packet->handle;
+	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
+		pr_info("%s: no device!\n",  __func__);
+		return -EFAULT;
+	}
+
+	mutex_lock(&ioc->scsih_cmds.mutex);
+
+	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+		pr_err("%s: scsih_cmd in use\n", __func__);
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	/* Re-entered after a successful target/host reset to retry this I/O. */
+ retry_loop:
+	if (test_bit(handle, ioc->device_remove_in_progress)) {
+		pr_info("%s: device removal in progress\n", __func__);
+		rc = -EFAULT;
+		goto out;
+	}
+
+	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+	rc = mpt3sas_wait_for_ioc(ioc, 10);
+	if (rc)
+		goto out;
+
+	/* Use second reserved smid for discovery related IOs */
+	smid = ioc->shost->can_queue + INTERNAL_SCSIIO_FOR_DISCOVERY;
+
+	rc = 0;
+	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+	ioc->scsih_cmds.smid = smid;
+	memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+	if (transfer_packet->is_raid)
+		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+	else
+		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+	mpi_request->DevHandle = cpu_to_le16(handle);
+
+	switch (transfer_packet->dir) {
+	case DMA_TO_DEVICE:
+		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
+		data_out_dma = transfer_packet->data_dma;
+		data_out_sz = transfer_packet->data_length;
 		break;
-	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
-		desc = "device blocked";
+	case DMA_FROM_DEVICE:
+		mpi_control = MPI2_SCSIIO_CONTROL_READ;
+		data_in_dma = transfer_packet->data_dma;
+		data_in_sz = transfer_packet->data_length;
 		break;
-	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
-	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
-		desc = "sata initialization failed";
+	case DMA_BIDIRECTIONAL:
+		mpi_control = MPI2_SCSIIO_CONTROL_BIDIRECTIONAL;
+		/* TODO - is BIDI support needed ?? */
+		WARN_ON_ONCE(true);
 		break;
 	default:
-		desc = "unknown";
+	case DMA_NONE:
+		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
 		break;
 	}
 
-	if (!rc)
-		return 0;
+	psge = &mpi_request->SGL;
+	ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+	    data_in_sz);
 
-	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
-		desc, (u64)sas_address, handle);
+	mpi_request->Control = cpu_to_le32(mpi_control |
+	    MPI2_SCSIIO_CONTROL_SIMPLEQ);
+	mpi_request->DataLength = cpu_to_le32(transfer_packet->data_length);
+	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
+	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+	mpi_request->SenseBufferLowAddress =
+	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+	priv_sense = mpt3sas_base_get_sense_buffer(ioc, smid);
+	mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+	mpi_request->IoFlags = cpu_to_le16(transfer_packet->cdb_length);
+	int_to_scsilun(transfer_packet->lun, (struct scsi_lun *)
+	    mpi_request->LUN);
+	memcpy(mpi_request->CDB.CDB32, transfer_packet->cdb,
+	    transfer_packet->cdb_length);
+	init_completion(&ioc->scsih_cmds.done);
+	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
+		ioc->put_smid_scsi_io(ioc, smid, handle);
+	else
+		ioc->put_smid_default(ioc, smid);
+	wait_for_completion_timeout(&ioc->scsih_cmds.done,
+	    transfer_packet->timeout*HZ);
+	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+		/* Timed out: check_cmd_timeout decides whether to escalate. */
+		mpt3sas_check_cmd_timeout(ioc,
+		    ioc->scsih_cmds.status, mpi_request,
+		    sizeof(Mpi2SCSIIORequest_t)/4, issue_reset);
+		goto issue_target_reset;
+	}
+	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+		/* Copy the reply (status, state, sense, counts) back to caller. */
+		transfer_packet->valid_reply = 1;
+		mpi_reply = ioc->scsih_cmds.reply;
+		transfer_packet->sense_length =
+		   le32_to_cpu(mpi_reply->SenseCount);
+		if (transfer_packet->sense_length)
+			memcpy(transfer_packet->sense, priv_sense,
+			    transfer_packet->sense_length);
+		transfer_packet->transfer_length =
+		    le32_to_cpu(mpi_reply->TransferCount);
+		transfer_packet->ioc_status =
+		    le16_to_cpu(mpi_reply->IOCStatus) &
+		    MPI2_IOCSTATUS_MASK;
+		transfer_packet->scsi_state = mpi_reply->SCSIState;
+		transfer_packet->scsi_status = mpi_reply->SCSIStatus;
+		transfer_packet->log_info =
+		    le32_to_cpu(mpi_reply->IOCLogInfo);
+	}
+	goto out;
+
+ issue_target_reset:
+	if (issue_reset) {
+		pr_info("issue target reset: handle (0x%04x)\n", handle);
+		tm_return_code =
+			mpt3sas_scsih_issue_locked_tm(ioc, handle,
+				0xFFFFFFFF, 0xFFFFFFFF, 0,
+				MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, smid, 0,
+				tr_timeout, tr_method);
+
+		if (tm_return_code == SUCCESS) {
+			pr_info("target reset completed: handle (0x%04x)\n", handle);
+			/* If the command is successfully aborted due to
+			 * target reset TM then do up to three retries else
+			 * command will be terminated by the host reset TM and
+			 * hence retry once.
+			 */
+			if (((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) &&
+			    retry_count++ < 3) ||
+			    ((ioc->scsih_cmds.status & MPT3_CMD_RESET) &&
+			    host_reset_count++ == 0)) {
+				pr_info("issue retry: handle (0x%04x)\n", handle);
+				goto retry_loop;
+			}
+		} else
+			pr_info("target reset didn't complete:  handle(0x%04x)\n", handle);
+		rc = -EFAULT;
+	} else
+		rc = -EAGAIN;
+
+ out:
+	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+	mutex_unlock(&ioc->scsih_cmds.mutex);
 	return rc;
 }
 
 /**
- * _scsih_check_device - checking device responsiveness
+ * _scsih_determine_disposition - classify an internal SCSI_IO result
  * @ioc: per adapter object
- * @parent_sas_address: sas address of parent expander or sas host
- * @handle: attached device handle
- * @phy_number: phy number
- * @link_rate: new link rate
+ * @transfer_packet: packet describing the transfer
+ * Context: user
+ *
+ * Determines if an internal generated scsi_io is good data, or
+ * whether it needs to be retried or treated as an error.
+ *
+ * Returns device_responsive_state
  */
-static void
-_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
-	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
+static enum device_responsive_state
+_scsih_determine_disposition(struct MPT3SAS_ADAPTER *ioc,
+	struct _scsi_io_transfer *transfer_packet)
 {
-	Mpi2ConfigReply_t mpi_reply;
-	Mpi2SasDevicePage0_t sas_device_pg0;
-	struct _sas_device *sas_device = NULL;
-	struct _enclosure_node *enclosure_dev = NULL;
+	/*
+	 * Plain automatic variable: declaring this "static" would make
+	 * concurrent callers race on one shared disposition value.
+	 */
+	enum device_responsive_state rc;
+	struct sense_info sense_info = {0, 0, 0};
+	u8 check_sense = 0;
+	char *desc = NULL;
+
+	if (!transfer_packet->valid_reply)
+		return DEVICE_READY;
+
+	switch (transfer_packet->ioc_status) {
+	case MPI2_IOCSTATUS_BUSY:
+	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+		rc = DEVICE_RETRY;
+		break;
+	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+		if (transfer_packet->log_info ==  0x31170000) {
+			rc = DEVICE_RETRY;
+			break;
+		}
+		if (transfer_packet->cdb[0] == REPORT_LUNS)
+			rc = DEVICE_READY;
+		else
+			rc = DEVICE_RETRY;
+		break;
+	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+	case MPI2_IOCSTATUS_SUCCESS:
+		if (!transfer_packet->scsi_state &&
+		    !transfer_packet->scsi_status) {
+			rc = DEVICE_READY;
+			break;
+		}
+		if (transfer_packet->scsi_state &
+		    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+			rc = DEVICE_ERROR;
+			check_sense = 1;
+			break;
+		}
+		if (transfer_packet->scsi_state &
+		    (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+		    MPI2_SCSI_STATE_NO_SCSI_STATUS |
+		    MPI2_SCSI_STATE_TERMINATED)) {
+			rc = DEVICE_RETRY;
+			break;
+		}
+		if (transfer_packet->scsi_status >=
+		    MPI2_SCSI_STATUS_BUSY) {
+			rc = DEVICE_RETRY;
+			break;
+		}
+		rc = DEVICE_READY;
+		break;
+	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+		if (transfer_packet->scsi_state &
+		    MPI2_SCSI_STATE_TERMINATED)
+			rc = DEVICE_RETRY;
+		else
+			rc = DEVICE_ERROR;
+		break;
+	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
+	default:
+		rc = DEVICE_ERROR;
+		break;
+	}
+
+	/* Refine the verdict using autosense data when the device supplied it. */
+	if (check_sense) {
+		_scsih_normalize_sense(transfer_packet->sense, &sense_info);
+		if (sense_info.skey == UNIT_ATTENTION)
+			rc = DEVICE_RETRY_UA;
+		else if (sense_info.skey == NOT_READY) {
+			/* medium isn't present */
+			if (sense_info.asc == 0x3a)
+				rc = DEVICE_READY;
+			/* LOGICAL UNIT NOT READY */
+			else if (sense_info.asc == 0x04) {
+				if (sense_info.ascq == 0x03 ||
+				   sense_info.ascq == 0x0b ||
+				   sense_info.ascq == 0x0c) {
+					rc = DEVICE_ERROR;
+				} else
+					rc = DEVICE_START_UNIT;
+			}
+			/* LOGICAL UNIT HAS NOT SELF-CONFIGURED YET */
+			else if (sense_info.asc == 0x3e && !sense_info.ascq)
+				rc = DEVICE_START_UNIT;
+		} else if (sense_info.skey == ILLEGAL_REQUEST &&
+		    transfer_packet->cdb[0] == REPORT_LUNS) {
+			rc = DEVICE_READY;
+		} else if (sense_info.skey == MEDIUM_ERROR) {
+
+			/* medium is corrupt, lets add the device so
+			 * users can collect some info as needed
+			 */
+
+			if (sense_info.asc == 0x31)
+				rc = DEVICE_READY;
+		} else if (sense_info.skey == HARDWARE_ERROR) {
+			/* Defect List Error, still add the device */
+			if (sense_info.asc == 0x19)
+				rc = DEVICE_READY;
+		}
+	}
+
+	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
+		switch (rc) {
+		case DEVICE_READY:
+			desc = "ready";
+			break;
+		case DEVICE_RETRY:
+			desc = "retry";
+			break;
+		case DEVICE_RETRY_UA:
+			desc = "retry_ua";
+			break;
+		case DEVICE_START_UNIT:
+			desc = "start_unit";
+			break;
+		case DEVICE_STOP_UNIT:
+			desc = "stop_unit";
+			break;
+		case DEVICE_ERROR:
+			desc = "error";
+			break;
+		}
+
+		pr_info("ioc_status(0x%04x),\n"
+		    "loginfo(0x%08x), scsi_status(0x%02x),\n"
+		    "scsi_state(0x%02x), rc(%s)\n",
+			transfer_packet->ioc_status,
+			transfer_packet->log_info, transfer_packet->scsi_status,
+			transfer_packet->scsi_state, desc);
+
+		if (check_sense)
+			pr_info("\t[sense_key,asc,ascq]:\n"
+			    "[0x%02x,0x%02x,0x%02x]\n",
+			    sense_info.skey, sense_info.asc, sense_info.ascq);
+	}
+	return rc;
+}
+
+/**
+ * _scsih_report_luns - send REPORT_LUNS to target
+ * @ioc: per adapter object
+ * @handle: expander handle
+ * @data: report luns data payload
+ * @data_length: length of data in bytes
+ * @retry_count: Requeue count
+ * @is_pd: is this hidden raid component
+ * @tr_timeout: Target Reset Timeout
+ * @tr_method: Target Reset Method
+ * Context: user
+ *
+ * Returns device_responsive_state
+ */
+static enum device_responsive_state
+_scsih_report_luns(struct MPT3SAS_ADAPTER *ioc, u16 handle, void *data,
+	u32 data_length, u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method)
+{
+	struct _scsi_io_transfer *transfer_packet;
+	enum device_responsive_state rc;
+	void *lun_data;
+	int return_code;
+	int retries;
+
+	lun_data = NULL;
+	transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL);
+	if (!transfer_packet) {
+
+		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		rc = DEVICE_RETRY;
+		goto out;
+	}
+
+	/* User context (kzalloc above is GFP_KERNEL), so sleeping is fine here. */
+	lun_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+		&transfer_packet->data_dma, GFP_KERNEL);
+	if (!lun_data) {
+
+		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		rc = DEVICE_RETRY;
+		goto out;
+	}
+
+	for (retries = 0; retries < 4; retries++) {
+		rc = DEVICE_ERROR;
+		ioc_info(ioc, "REPORT_LUNS: handle(0x%04x),\n"
+		    "retries(%d)\n", handle, retries);
+		memset(lun_data, 0, data_length);
+		transfer_packet->handle = handle;
+		transfer_packet->dir = DMA_FROM_DEVICE;
+		transfer_packet->data_length = data_length;
+		transfer_packet->cdb_length = 12;
+		transfer_packet->cdb[0] = REPORT_LUNS;
+		/* REPORT LUNS (12): allocation length is big-endian in bytes 6-9. */
+		transfer_packet->cdb[6] = (data_length >> 24) & 0xFF;
+		transfer_packet->cdb[7] = (data_length >> 16) & 0xFF;
+		transfer_packet->cdb[8] = (data_length >>  8) & 0xFF;
+		transfer_packet->cdb[9] = data_length & 0xFF;
+		transfer_packet->timeout = 30;
+		transfer_packet->is_raid = is_pd;
+
+		return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method);
+		switch (return_code) {
+		case 0:
+			rc = _scsih_determine_disposition(ioc, transfer_packet);
+			if (rc == DEVICE_READY) {
+				memcpy(data, lun_data, data_length);
+				goto out;
+			} else if (rc == DEVICE_ERROR)
+				goto out;
+			break;
+		case -EAGAIN:
+			rc = DEVICE_RETRY;
+			break;
+		case -EFAULT:
+		default:
+			ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+			goto out;
+		}
+	}
+ out:
+
+	if (lun_data)
+		dma_free_coherent(&ioc->pdev->dev, data_length, lun_data,
+		    transfer_packet->data_dma);
+	kfree(transfer_packet);
+
+	/* Retry budget exhausted: report a hard error instead of requeueing. */
+	if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+	    rc == DEVICE_RETRY_UA) && retry_count >= command_retry_count)
+		rc = DEVICE_ERROR;
+
+	return rc;
+}
+
+/**
+ * _scsih_start_unit - send START_UNIT to target
+ * @ioc: per adapter object
+ * @handle: expander handle
+ * @lun: lun number
+ * @is_pd: is this hidden raid component
+ * @tr_timeout: Target Reset Timeout
+ * @tr_method: Target Reset Method
+ * Context: user
+ *
+ * Returns device_responsive_state
+ */
+static enum device_responsive_state
+_scsih_start_unit(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun, u8 is_pd,
+	u8 tr_timeout, u8 tr_method)
+{
+	struct _scsi_io_transfer *transfer_packet;
+	enum device_responsive_state rc;
+	int return_code;
+
+	transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL);
+	if (!transfer_packet) {
+
+		pr_info("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		rc = DEVICE_RETRY;
+		goto out;
+	}
+
+	rc = DEVICE_READY;
+	transfer_packet->handle = handle;
+	transfer_packet->dir = DMA_NONE;
+	transfer_packet->lun = lun;
+	transfer_packet->cdb_length = 6;
+	/* START STOP UNIT (6): byte 1 bit 0 = IMMED, byte 4 bit 0 = START. */
+	transfer_packet->cdb[0] = START_STOP;
+	transfer_packet->cdb[1] = 1;
+	transfer_packet->cdb[4] = 1;
+	transfer_packet->timeout = 30;
+	transfer_packet->is_raid = is_pd;
+
+	pr_info("START_UNIT: handle(0x%04x), lun(%d)\n", handle, lun);
+
+	return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method);
+	switch (return_code) {
+	case 0:
+		rc = _scsih_determine_disposition(ioc, transfer_packet);
+		break;
+	case -EAGAIN:
+		rc = DEVICE_RETRY;
+		break;
+	case -EFAULT:
+	default:
+		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		rc = DEVICE_ERROR;
+		break;
+	}
+ out:
+	kfree(transfer_packet);
+	return rc;
+}
+
+/**
+ * _scsih_test_unit_ready - send TUR to target
+ * @ioc: per adapter object
+ * @handle: expander handle
+ * @lun: lun number
+ * @is_pd: is this hidden raid component
+ * @tr_timeout: Target Reset timeout value for Pcie devie
+ * @tr_method: pcie device Target reset method
+ * Context: user
+ *
+ * Returns device_responsive_state
+ */
+static enum device_responsive_state
+_scsih_test_unit_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle, u32 lun,
+	u8 is_pd, u8 tr_timeout, u8 tr_method)
+{
+	struct _scsi_io_transfer *transfer_packet;
+	enum device_responsive_state rc;
+	int return_code;
+	int sata_init_failure = 0;
+
+	transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL);
+	if (!transfer_packet) {
+
+		pr_info("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		rc = DEVICE_RETRY;
+		goto out;
+	}
+
+	rc = DEVICE_READY;
+	transfer_packet->handle = handle;
+	transfer_packet->dir = DMA_NONE;
+	transfer_packet->lun = lun;
+	transfer_packet->cdb_length = 6;
+	transfer_packet->cdb[0] = TEST_UNIT_READY;
+	transfer_packet->timeout = 30;
+	transfer_packet->is_raid = is_pd;
+
+	/* Loginfo 0x31111000 (SATA initialization timeout) earns one extra retry. */
+ sata_init_retry:
+	pr_info("TEST_UNIT_READY: handle(0x%04x) lun(%d)\n", handle, lun);
+
+	return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method);
+	switch (return_code) {
+	case 0:
+		rc = _scsih_determine_disposition(ioc, transfer_packet);
+		if (rc == DEVICE_RETRY &&
+		    transfer_packet->log_info == 0x31111000) {
+			if (!sata_init_failure++) {
+				pr_info("SATA Initialization Timeout sending a retry\n");
+				rc = DEVICE_READY;
+				goto sata_init_retry;
+			} else {
+				pr_err("SATA Initialization Failed\n");
+				rc = DEVICE_ERROR;
+			}
+		}
+		break;
+	case -EAGAIN:
+		rc = DEVICE_RETRY;
+		break;
+	case -EFAULT:
+	default:
+		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		rc = DEVICE_ERROR;
+		break;
+	}
+ out:
+	kfree(transfer_packet);
+	return rc;
+}
+
+/**
+ * _scsih_ata_pass_thru_idd - obtain SATA device Identify Device Data
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @is_ssd_device : is this SATA SSD device
+ * @tr_timeout: Target Reset Timeout
+ * @tr_method: Target Reset Method
+ * Context: user
+ *
+ * Returns device_responsive_state
+ */
+static enum device_responsive_state
+_scsih_ata_pass_thru_idd(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	u8 *is_ssd_device, u8 tr_timeout, u8 tr_method)
+{
+	struct _scsi_io_transfer *transfer_packet;
+	enum device_responsive_state rc;
+	u16 *idd_data;
+	int return_code;
+	u32 data_length;
+
+	idd_data = NULL;
+	transfer_packet = kzalloc(sizeof(struct _scsi_io_transfer), GFP_KERNEL);
+	if (!transfer_packet) {
+
+		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		rc = DEVICE_RETRY;
+		goto out;
+	}
+	data_length = 512;
+	/* User context; GFP_KERNEL matches the kzalloc above. */
+	idd_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+		&transfer_packet->data_dma, GFP_KERNEL);
+	if (!idd_data) {
+
+		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		rc = DEVICE_RETRY;
+		goto out;
+	}
+	rc = DEVICE_READY;
+	memset(idd_data, 0, data_length);
+	transfer_packet->handle = handle;
+	transfer_packet->dir = DMA_FROM_DEVICE;
+	transfer_packet->data_length = data_length;
+	transfer_packet->cdb_length = 12;
+	/* ATA PASS-THROUGH (12) wrapping an IDENTIFY DEVICE (0xec) command. */
+	transfer_packet->cdb[0] = ATA_12;
+	transfer_packet->cdb[1] = 0x8;
+	transfer_packet->cdb[2] = 0xd;
+	transfer_packet->cdb[3] = 0x1;
+	transfer_packet->cdb[9] = 0xec;
+	transfer_packet->timeout = 30;
+
+	/*
+	 * Honor the caller-supplied reset parameters; previously these were
+	 * hardcoded to 30/0, silently ignoring @tr_timeout and @tr_method.
+	 */
+	return_code = _scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, tr_method);
+	switch (return_code) {
+	case 0:
+		rc = _scsih_determine_disposition(ioc, transfer_packet);
+		if (rc == DEVICE_READY) {
+			// Check if nominal media rotation rate is set to 1 i.e. SSD device
+			if (idd_data[217] == 1)
+				*is_ssd_device = 1;
+		}
+		break;
+	case -EAGAIN:
+		rc = DEVICE_RETRY;
+		break;
+	case -EFAULT:
+	default:
+
+		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		rc = DEVICE_ERROR;
+		break;
+	}
+
+ out:
+	if (idd_data) {
+		dma_free_coherent(&ioc->pdev->dev, data_length, idd_data,
+		    transfer_packet->data_dma);
+	}
+	kfree(transfer_packet);
+	return rc;
+}
+
+/**
+ * _scsih_wait_for_device_to_become_ready - handle busy devices
+ * @ioc: per adapter object
+ * @handle: expander handle
+ * @retry_count: number of times this event has been retried
+ * @is_pd: is this hidden raid component
+ * @lun: lun number
+ * @tr_timeout: Target Reset Timeout
+ * @tr_method: Target Reset Method
+ *
+ * Some devices spend too much time in busy state, queue event later
+ *
+ * Return the device_responsive_state.
+ */
+
+static enum device_responsive_state
+_scsih_wait_for_device_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	u8 retry_count, u8 is_pd, int lun, u8 tr_timeout, u8 tr_method)
+{
+	enum device_responsive_state rc;
+
+	if (ioc->pci_error_recovery)
+		return DEVICE_ERROR;
+
+	if (ioc->shost_recovery)
+		return DEVICE_RETRY;
+
+	/* Probe with TEST UNIT READY; spin the LU up via START UNIT if asked. */
+	rc = _scsih_test_unit_ready(ioc, handle, lun, is_pd, tr_timeout, tr_method);
+	if (rc == DEVICE_READY || rc == DEVICE_ERROR)
+		return rc;
+	else if (rc == DEVICE_START_UNIT) {
+		rc = _scsih_start_unit(ioc, handle, lun, is_pd, tr_timeout, tr_method);
+		if (rc == DEVICE_ERROR)
+			return rc;
+		rc = _scsih_test_unit_ready(ioc, handle, lun, is_pd, tr_timeout, tr_method);
+	}
+
+	/* Retry budget exhausted: report a hard error instead of requeueing. */
+	if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+	    rc == DEVICE_RETRY_UA) && retry_count >= command_retry_count)
+		rc = DEVICE_ERROR;
+	return rc;
+}
+
+/* Thin pass-through to scsilun_to_int(); gives callers an mpt-local name. */
+static inline int mpt_scsilun_to_int(struct scsi_lun *scsilun)
+{
+	return scsilun_to_int(scsilun);
+}
+
+/**
+ * _scsih_wait_for_target_to_become_ready - handle busy devices
+ * @ioc: per adapter object
+ * @handle: expander handle
+ * @retry_count: number of times this event has been retried
+ * @is_pd: is this hidden raid component
+ * @tr_timeout: Target Reset timeout value
+ * @tr_method: Target Reset method Hot/Protocol level.
+ *
+ * Some devices spend too much time in busy state, queue event later
+ *
+ * Return the device_responsive_state.
+ */
+static enum device_responsive_state
+_scsih_wait_for_target_to_become_ready(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	u8 retry_count, u8 is_pd, u8 tr_timeout, u8 tr_method)
+{
+	enum device_responsive_state rc;
+	struct scsi_lun *lun_data;
+	u32 length, num_luns;
+	u8 *data;
+	int lun;
+	struct scsi_lun *lunp;
+
+	lun_data = kcalloc(MPT3_MAX_LUNS, sizeof(struct scsi_lun), GFP_KERNEL);
+	if (!lun_data) {
+
+		ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__);
+		return DEVICE_RETRY;
+	}
+
+	rc = _scsih_report_luns(ioc, handle, lun_data,
+	    MPT3_MAX_LUNS * sizeof(struct scsi_lun), retry_count, is_pd,
+	    tr_timeout, tr_method);
+
+	if (rc != DEVICE_READY)
+		goto out;
+
+	/* LUN list length: big-endian 32-bit value in the response header. */
+	data = (u8 *)lun_data;
+	length = ((data[0] << 24) | (data[1] << 16) |
+		(data[2] << 8) | (data[3] << 0));
+
+	num_luns = (length / sizeof(struct scsi_lun));
+	/*
+	 * Clamp to what the buffer holds: entry 0 is consumed by the 8-byte
+	 * REPORT LUNS header, leaving room for MPT3_MAX_LUNS - 1 LUN entries.
+	 * Without this, a device reporting more LUNs than we allocated for
+	 * would make the loop below read past the end of lun_data.
+	 */
+	if (num_luns > MPT3_MAX_LUNS - 1)
+		num_luns = MPT3_MAX_LUNS - 1;
+
+	lunp = &lun_data[1];
+	lun = (num_luns) ? mpt_scsilun_to_int(&lun_data[1]) : 0;
+	rc = _scsih_wait_for_device_to_become_ready(ioc, handle, retry_count,
+	    is_pd, lun, tr_timeout, tr_method);
+
+	if (rc == DEVICE_ERROR) {
+		struct scsi_lun *lunq;
+
+		/* First LUN failed; scan the reported LUNs for one that responds. */
+		for (lunq = lunp; lunq <= &lun_data[num_luns]; lunq++) {
+
+			rc = _scsih_wait_for_device_to_become_ready(ioc, handle,
+					retry_count, is_pd, mpt_scsilun_to_int(lunq),
+					tr_timeout, tr_method);
+			if (rc != DEVICE_ERROR)
+				goto out;
+		}
+	}
+out:
+	kfree(lun_data);
+	return rc;
+}
+
+
+/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return: 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+	u16 handle, u8 access_status)
+{
+	u8 rc = 1;
+	char *desc = NULL;
+
+	/* Only the first two statuses are benign; anything else fails discovery. */
+	switch (access_status) {
+	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+		rc = 0;
+		break;
+	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+		desc = "sata capability failed";
+		break;
+	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+		desc = "sata affiliation conflict";
+		break;
+	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+		desc = "route not addressable";
+		break;
+	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+		desc = "smp error not addressable";
+		break;
+	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+		desc = "device blocked";
+		break;
+	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+		desc = "sata initialization failed";
+		break;
+	default:
+		desc = "unknown";
+		break;
+	}
+
+	if (!rc)
+		return 0;
+
+	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+		desc, (u64)sas_address, handle);
+	return rc;
+}
+
+/**
+ * _scsih_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @parent_sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ */
+static void
+_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
+	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+	Mpi2ConfigReply_t mpi_reply;
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	struct _sas_device *sas_device = NULL;
+	struct _enclosure_node *enclosure_dev = NULL;
 	u32 ioc_status;
 	unsigned long flags;
 	u64 sas_address;
@@ -7239,8 +8302,8 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
 			sas_device->handle, handle);
 		sas_target_priv_data->handle = handle;
 		sas_device->handle = handle;
-		if (le16_to_cpu(sas_device_pg0.Flags) &
-		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+		if ((le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID)
+		    && (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
 			sas_device->enclosure_level =
 				sas_device_pg0.EnclosureLevel;
 			memcpy(sas_device->connector_name,
@@ -7282,7 +8345,11 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
 		goto out_unlock;
 
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-	_scsih_ublock_io_device(ioc, sas_address, port);
+
+	if (issue_scsi_cmd_to_bringup_drive)
+		_scsih_ublock_io_device_wait(ioc, sas_address, port);
+	else
+		_scsih_ublock_io_device(ioc, sas_address, port);
 
 	if (sas_device)
 		sas_device_put(sas_device);
@@ -7298,7 +8365,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
  * _scsih_add_device -  creating sas device object
  * @ioc: per adapter object
  * @handle: sas device handle
- * @phy_num: phy number end device attached to
+ * @retry_count: number of times this event has been retried
  * @is_pd: is this hidden raid component
  *
  * Creating end device object, stored in ioc->sas_device_list.
@@ -7306,16 +8373,18 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
  * Return: 0 for success, non-zero for failure.
  */
 static int
-_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
+_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count,
 	u8 is_pd)
 {
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasDevicePage0_t sas_device_pg0;
 	struct _sas_device *sas_device;
 	struct _enclosure_node *enclosure_dev = NULL;
+	enum device_responsive_state rc;
 	u32 ioc_status;
 	u64 sas_address;
 	u32 device_info;
+	u8 connector_name[5];
 	u8 port_id;
 
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
@@ -7371,6 +8440,48 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 				 sas_device_pg0.EnclosureHandle);
 	}
 
+	/*
+	 * Wait for device that is becoming ready
+	 * queue request later if device is busy.
+	 */
+	if ((!ioc->wait_for_discovery_to_complete) &&
+		(issue_scsi_cmd_to_bringup_drive)) {
+		ioc_info(ioc, "detecting: handle(0x%04x),\n"
+				"sas_address(0x%016llx), phy(%d)\n", handle,
+				(unsigned long long)sas_address, sas_device_pg0.PhyNum);
+		rc = _scsih_wait_for_target_to_become_ready(ioc, handle,
+		    retry_count, is_pd, 30, 0);
+		if (rc != DEVICE_READY) {
+			if (le16_to_cpu(sas_device_pg0.EnclosureHandle) != 0)
+				dewtprintk(ioc, ioc_info(ioc, "%s:\n"
+				    "device not ready: slot(%d)\n", __func__,
+				    le16_to_cpu(sas_device_pg0.Slot)));
+			if ((le16_to_cpu(sas_device_pg0.Flags) &
+			    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) &&
+			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
+				memcpy(connector_name,
+					sas_device_pg0.ConnectorName, 4);
+				connector_name[4] = '\0';
+				dewtprintk(ioc, ioc_info(ioc, "%s:\n"
+				    "device not ready:\n"
+				    "enclosure level(0x%04x),\n"
+				    "connector name( %s)\n",  __func__,
+				    sas_device_pg0.EnclosureLevel, connector_name));
+			}
+
+			if ((enclosure_dev) && (le16_to_cpu(enclosure_dev->pg0.Flags) &
+			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID))
+				ioc_info(ioc, "chassis slot(0x%04x)\n",
+						enclosure_dev->pg0.ChassisSlot);
+
+			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+			    rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+				return 1;
+			else if (rc == DEVICE_ERROR)
+				return 0;
+		}
+	}
+
 	sas_device = kzalloc(sizeof(struct _sas_device),
 	    GFP_KERNEL);
 	if (!sas_device) {
@@ -7586,10 +8697,13 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 	struct fw_event_work *fw_event)
 {
 	int i;
+	int rc;
+	int requeue_event;
 	u16 parent_handle, handle;
 	u16 reason_code;
 	u8 phy_number, max_phys;
 	struct _sas_node *sas_expander;
+	struct _sas_device *sas_device;
 	u64 sas_address;
 	unsigned long flags;
 	u8 link_rate, prev_link_rate;
@@ -7639,7 +8753,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 
 	/* handle siblings events */
-	for (i = 0; i < event_data->NumEntries; i++) {
+	for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) {
 		if (fw_event->ignore) {
 			dewtprintk(ioc,
 				   ioc_info(ioc, "ignoring expander event\n"));
@@ -7656,6 +8770,20 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
 				continue;
+		/*
+		 * When re-processing a re-queued (delayed) event, the "target
+		 * not responding" entries were already handled on the first
+		 * pass, so skip them here.  (This check was accidentally
+		 * duplicated; keep a single copy.)
+		 */
+		if (fw_event->delayed_work_active && (reason_code ==
+		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
+			dewtprintk(ioc, ioc_info(ioc, "ignoring Target not responding\n"
+						"event phy in re-queued event processing\n"));
+			continue;
+		}
+
+
 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
 		if (!handle)
 			continue;
@@ -7679,9 +8807,32 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 			_scsih_check_device(ioc, sas_address, handle,
 			    phy_number, link_rate);
 
+			/* This code after this point handles the test case
+			 * where a device has been added, however its returning
+			 * BUSY for sometime.  Then before the Device Missing
+			 * Delay expires and the device becomes READY, the
+			 * device is removed and added back.
+			 */
+			spin_lock_irqsave(&ioc->sas_device_lock, flags);
+			sas_device = __mpt3sas_get_sdev_by_handle(ioc,
+			    handle);
+			spin_unlock_irqrestore(&ioc->sas_device_lock,
+			    flags);
+
+			if (sas_device) {
+				sas_device_put(sas_device);
+				break;
+			}
+
 			if (!test_bit(handle, ioc->pend_os_device_add))
 				break;
 
+			dewtprintk(ioc, ioc_info(ioc, "handle(0x%04x) device not found: convert\n"
+			    "event to a device add\n", handle));
+			event_data->PHY[i].PhyStatus &= 0xF0;
+			event_data->PHY[i].PhyStatus |=
+						MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED;
+
 			fallthrough;
 
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -7692,7 +8843,18 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 			mpt3sas_transport_update_links(ioc, sas_address,
 			    handle, phy_number, link_rate, port);
 
-			_scsih_add_device(ioc, handle, phy_number, 0);
+			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+				break;
+
+			rc = _scsih_add_device(ioc, handle,
+			    fw_event->retries[i], 0);
+			if (rc) {/* retry due to busy device */
+				fw_event->retries[i]++;
+				requeue_event = 1;
+			} else {/* mark entry vacant */
+				event_data->PHY[i].PhyStatus |=
+			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT;
+			}
 
 			break;
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
@@ -7707,7 +8869,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 	    sas_expander)
 		mpt3sas_expander_remove(ioc, sas_address, port);
 
-	return 0;
+	return requeue_event;
 }
 
 /**
@@ -8078,7 +9240,10 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 	pcie_device_put(pcie_device);
 
-	_scsih_ublock_io_device(ioc, wwid, NULL);
+	if (issue_scsi_cmd_to_bringup_drive)
+		_scsih_ublock_io_device_wait(ioc, wwid, NULL);
+	else
+		_scsih_ublock_io_device(ioc, wwid, NULL);
 
 	return;
 }
@@ -8087,19 +9252,24 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  * _scsih_pcie_add_device -  creating pcie device object
  * @ioc: per adapter object
  * @handle: pcie device handle
+ * @retry_count: number of times this event has been retried
  *
  * Creating end device object, stored in ioc->pcie_device_list.
  *
  * Return: 1 means queue the event later, 0 means complete the event
  */
 static int
-_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count)
 {
 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
 	Mpi2ConfigReply_t mpi_reply;
 	struct _pcie_device *pcie_device;
 	struct _enclosure_node *enclosure_dev;
+	enum device_responsive_state rc;
+	u8 connector_name[5];
+	u8 tr_timeout = 30;
+	u8 tr_method = 0;
 	u32 ioc_status;
 	u64 wwid;
 
@@ -8167,6 +9337,53 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 			    __LINE__, __func__);
 			return 0;
 		}
+
+		if (!ioc->tm_custom_handling) {
+			tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+			if (pcie_device_pg2.ControllerResetTO)
+				tr_timeout = pcie_device_pg2.ControllerResetTO;
+
+		}
+	}
+
+	/*
+	 * Wait for device that is becoming ready
+	 * queue request later if device is busy.
+	 */
+	if ((!ioc->wait_for_discovery_to_complete) &&
+		(issue_scsi_cmd_to_bringup_drive) &&
+		(pcie_device_pg0.AccessStatus !=
+			MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) {
+		ioc_info(ioc, "detecting: handle(0x%04x),\n"
+		    "wwid(0x%016llx), port(%d)\n", handle,
+		    (unsigned long long)wwid, pcie_device_pg0.PortNum);
+
+		rc = _scsih_wait_for_target_to_become_ready(ioc, handle,
+		    retry_count, 0, tr_timeout, tr_method);
+		if (rc != DEVICE_READY) {
+			if (le16_to_cpu(pcie_device_pg0.EnclosureHandle) != 0)
+				dewtprintk(ioc, ioc_info(ioc, "%s:\n"
+				    "device not ready: slot(%d)\n",
+				    __func__,
+				    le16_to_cpu(pcie_device_pg0.Slot)));
+
+			if (le32_to_cpu(pcie_device_pg0.Flags) &
+			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+				memcpy(connector_name,
+				    pcie_device_pg0.ConnectorName, 4);
+				connector_name[4] = '\0';
+				dewtprintk(ioc, ioc_info(ioc, "%s: device not ready: enclosure\n"
+				    "level(0x%04x), connector name( %s)\n", __func__,
+				    pcie_device_pg0.EnclosureLevel,
+				    connector_name));
+			}
+
+			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
+				rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
+				return 1;
+			else if (rc == DEVICE_ERROR)
+				return 0;
+		}
 	}
 
 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
@@ -8330,7 +9547,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  * Context: user.
  *
  */
-static void
+static int
 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 	struct fw_event_work *fw_event)
 {
@@ -8340,6 +9557,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 	u8 link_rate, prev_link_rate;
 	unsigned long flags;
 	int rc;
+	int requeue_event;
 	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
 		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
 	struct _pcie_device *pcie_device;
@@ -8349,22 +9567,22 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 
 	if (ioc->shost_recovery || ioc->remove_host ||
 		ioc->pci_error_recovery)
-		return;
+		return 0;
 
 	if (fw_event->ignore) {
 		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
-		return;
+		return 0;
 	}
 
 	/* handle siblings events */
-	for (i = 0; i < event_data->NumEntries; i++) {
+	for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) {
 		if (fw_event->ignore) {
 			dewtprintk(ioc,
 				   ioc_info(ioc, "ignoring switch event\n"));
-			return;
+			return 0;
 		}
 		if (ioc->remove_host || ioc->pci_error_recovery)
-			return;
+			return 0;
 		reason_code = event_data->PortEntry[i].PortStatus;
 		handle =
 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
@@ -8418,8 +9636,11 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
 				break;
 
-			rc = _scsih_pcie_add_device(ioc, handle);
-			if (!rc) {
+			rc = _scsih_pcie_add_device(ioc, handle, fw_event->retries[i]);
+			if (rc) {/* retry due to busy device */
+				fw_event->retries[i]++;
+				requeue_event = 1;
+			} else {
 				/* mark entry vacant */
 				/* TODO This needs to be reviewed and fixed,
 				 * we dont have an entry
@@ -8434,11 +9655,12 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 			break;
 		}
 	}
+	return requeue_event;
 }
 
 /**
  * _scsih_pcie_device_status_change_event_debug - debug for device event
- * @ioc: ?
+ * @ioc: per adapter object
  * @event_data: event data payload
  * Context: user.
  */
@@ -8810,7 +10032,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 
 	ioc->broadcast_aen_busy = 0;
 	if (!ioc->shost_recovery)
-		_scsih_ublock_io_all_device(ioc);
+		_scsih_ublock_io_all_device(ioc, 1);
 	mutex_unlock(&ioc->tm_cmds.mutex);
 }
 
@@ -10344,7 +11566,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
 	ioc_info(ioc, "removing unresponding devices: complete\n");
 
 	/* unblock devices */
-	_scsih_ublock_io_all_device(ioc);
+	_scsih_ublock_io_all_device(ioc, 0);
 }
 
 static void
@@ -10624,7 +11846,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
 		}
 		retry_count = 0;
 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
-		_scsih_pcie_add_device(ioc, handle);
+		while (_scsih_pcie_add_device(ioc, handle, retry_count++))
+			ssleep(1);
 
 		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
 			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
@@ -10768,7 +11991,11 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
 		break;
 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
-		_scsih_sas_topology_change_event(ioc, fw_event);
+		if (_scsih_sas_topology_change_event(ioc, fw_event)) {
+			_scsih_fw_event_requeue(ioc, fw_event, 1000);
+			ioc->current_event = NULL;
+			return;
+		}
 		break;
 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
 		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
@@ -10808,7 +12035,11 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
 		_scsih_pcie_enumeration_event(ioc, fw_event);
 		break;
 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
-		_scsih_pcie_topology_change_event(ioc, fw_event);
+		if (_scsih_pcie_topology_change_event(ioc, fw_event)) {
+			_scsih_fw_event_requeue(ioc, fw_event, 1000);
+			ioc->current_event = NULL;
+			return;
+		}
 		break;
 	}
 out:
@@ -10833,6 +12064,15 @@ _firmware_event_work(struct work_struct *work)
 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
 }
 
+/**
+ * _firmware_event_work_delayed - delayed_work callback for re-queued events
+ * @work: embedded in &struct fw_event_work via its delayed_work member
+ *
+ * Counterpart of _firmware_event_work() for firmware events that were
+ * re-queued with a delay (e.g. while a device keeps reporting BUSY);
+ * both paths funnel into _mpt3sas_fw_work().
+ */
+static void
+_firmware_event_work_delayed(struct work_struct *work)
+{
+	struct fw_event_work *fw_event = container_of(work,
+	    struct fw_event_work, delayed_work.work);
+
+	_mpt3sas_fw_work(fw_event->ioc, fw_event);
+}
+
 /**
  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
  * @ioc: per adapter object
@@ -11013,6 +12253,34 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
 		return 1;
 	}
 
+	if (event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST) {
+		Mpi2EventDataSasTopologyChangeList_t *topo_event_data =
+		    (Mpi2EventDataSasTopologyChangeList_t *)
+		    mpi_reply->EventData;
+		fw_event->retries = kzalloc(topo_event_data->NumEntries,
+		    GFP_ATOMIC);
+		if (!fw_event->retries) {
+
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",  __FILE__, __LINE__, __func__);
+			kfree(fw_event->event_data);
+			fw_event_work_put(fw_event);
+			return 1;
+		}
+	}
+
+	if (event == MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST) {
+		Mpi26EventDataPCIeTopologyChangeList_t *topo_event_data =
+			(Mpi26EventDataPCIeTopologyChangeList_t *) mpi_reply->EventData;
+
+		/* One retry counter per topology entry, bumped on requeue. */
+		fw_event->retries = kzalloc(topo_event_data->NumEntries,
+			GFP_ATOMIC);
+		if (!fw_event->retries) {
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			    __FILE__, __LINE__, __func__);
+			/* Also free event_data, matching the SAS topology path. */
+			kfree(fw_event->event_data);
+			fw_event_work_put(fw_event);
+			return 1;
+		}
+	}
+
 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
 	fw_event->ioc = ioc;
 	fw_event->VF_ID = mpi_reply->VF_ID;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index ccfc2d26dd37..2c44a379cb23 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -11,6 +11,8 @@
 #include <linux/delay.h>
 #include <linux/bsg-lib.h>
 
+static int qla28xx_validate_flash_image(struct bsg_job *bsg_job);
+
 static void qla2xxx_free_fcport_work(struct work_struct *work)
 {
 	struct fc_port *fcport = container_of(work, typeof(*fcport),
@@ -1546,8 +1548,9 @@ qla2x00_update_optrom(struct bsg_job *bsg_job)
 	ha->optrom_buffer = NULL;
 	ha->optrom_state = QLA_SWAITING;
 	mutex_unlock(&ha->optrom_mutex);
-	bsg_job_done(bsg_job, bsg_reply->result,
-		       bsg_reply->reply_payload_rcv_len);
+	if (!rval)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 	return rval;
 }
 
@@ -2549,6 +2552,30 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
 	return 0;
 }
 
+/**
+ * qla2x00_get_drv_attr() - BSG handler for QL_VND_GET_DRV_ATTR
+ * @bsg_job: fc_bsg_job to handle
+ *
+ * Report driver capability bits to the application via a
+ * &struct qla_drv_attr reply payload; currently advertises
+ * QLA_IMG_SET_VALID_SUPPORT only.
+ *
+ * Return: 0 always; the BSG reply carries EXT_STATUS_OK.
+ */
+static int
+qla2x00_get_drv_attr(struct bsg_job *bsg_job)
+{
+	struct qla_drv_attr drv_attr;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+	memset(&drv_attr, 0, sizeof(struct qla_drv_attr));
+	drv_attr.ext_attributes |= QLA_IMG_SET_VALID_SUPPORT;
+
+	/* NOTE(review): assumes the application supplied a reply payload of
+	 * at least sizeof(struct qla_drv_attr) bytes -- confirm against the
+	 * BSG contract for this vendor command.
+	 */
+	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+			bsg_job->reply_payload.sg_cnt, &drv_attr,
+			sizeof(struct qla_drv_attr));
+
+	bsg_reply->reply_payload_rcv_len = sizeof(struct qla_drv_attr);
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+	return 0;
+}
+
 static int
 qla2x00_manage_host_stats(struct bsg_job *bsg_job)
 {
@@ -2612,8 +2639,9 @@ qla2x00_manage_host_stats(struct bsg_job *bsg_job)
 				    sizeof(struct ql_vnd_mng_host_stats_resp));
 
 	bsg_reply->result = DID_OK;
-	bsg_job_done(bsg_job, bsg_reply->result,
-		     bsg_reply->reply_payload_rcv_len);
+	if (!ret)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
 	return ret;
 }
@@ -2702,8 +2730,9 @@ qla2x00_get_host_stats(struct bsg_job *bsg_job)
 							       bsg_job->reply_payload.sg_cnt,
 							       data, response_len);
 	bsg_reply->result = DID_OK;
-	bsg_job_done(bsg_job, bsg_reply->result,
-		     bsg_reply->reply_payload_rcv_len);
+	if (!ret)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
 	kfree(data);
 host_stat_out:
@@ -2802,8 +2831,9 @@ qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
 				    bsg_job->reply_payload.sg_cnt, data,
 				    response_len);
 	bsg_reply->result = DID_OK;
-	bsg_job_done(bsg_job, bsg_reply->result,
-		     bsg_reply->reply_payload_rcv_len);
+	if (!ret)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
 tgt_stat_out:
 	kfree(data);
@@ -2864,8 +2894,9 @@ qla2x00_manage_host_port(struct bsg_job *bsg_job)
 				    bsg_job->reply_payload.sg_cnt, &rsp_data,
 				    sizeof(struct ql_vnd_mng_host_port_resp));
 	bsg_reply->result = DID_OK;
-	bsg_job_done(bsg_job, bsg_reply->result,
-		     bsg_reply->reply_payload_rcv_len);
+	if (!ret)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
 	return ret;
 }
@@ -2933,6 +2964,12 @@ qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_j
 	case QL_VND_GET_FLASH_UPDATE_CAPS:
 		return qla27xx_get_flash_upd_cap(bsg_job);
 
+	case QL_VND_GET_DRV_ATTR:
+		return qla2x00_get_drv_attr(bsg_job);
+
+	case QL_VND_IMG_SET_VALID:
+		return qla28xx_validate_flash_image(bsg_job);
+
 	case QL_VND_SET_FLASH_UPDATE_CAPS:
 		return qla27xx_set_flash_upd_cap(bsg_job);
 
@@ -3240,9 +3277,97 @@ int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
 
 	bsg_job->reply_len = sizeof(*bsg_job->reply);
 	bsg_reply->result = DID_OK << 16;
-	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+	if (!ret)
+		bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
 
 	kfree(req_data);
 
 	return ret;
 }
+
+/**
+ * qla28xx_do_validate_flash_image() - have MPI firmware validate an image
+ * @bsg_job: fc_bsg_job; the image index arrives in vendor_cmd[1]
+ * @state: output; MPI state reported by qla_mpipt_validate_fw()
+ *
+ * Called with ha->optrom_mutex held (see qla28xx_validate_flash_image()).
+ * Checks that the MPI firmware is in a usable state, takes the flash
+ * access (FAC) semaphore, and issues the validate-firmware passthrough.
+ *
+ * Return: QLA_SUCCESS, a QLA_* mailbox error, or -EINVAL.
+ */
+static int
+qla28xx_do_validate_flash_image(struct bsg_job *bsg_job, uint16_t *state)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+	uint16_t mstate[16];
+	uint16_t mpi_state = 0;
+	uint16_t img_idx;
+	int rval = QLA_SUCCESS;
+
+	memset(mstate, 0, sizeof(mstate));
+
+	rval = qla2x00_get_firmware_state(vha, mstate);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xffff,
+				"MBC to get MPI state failed (%d)\n", rval);
+		rval = -EINVAL;
+		goto exit_flash_img;
+	}
+
+	/* mbx word 11 carries the MPI firmware state. */
+	mpi_state = mstate[11];
+
+	/*
+	 * NOTE(review): BIT_8/BIT_9/BIT_15 are assumed to mean "MPI ready
+	 * for flash operations" -- confirm bit semantics against the
+	 * firmware interface spec.
+	 */
+	if (!(mpi_state & BIT_9 && mpi_state & BIT_8 && mpi_state & BIT_15)) {
+		ql_log(ql_log_warn, vha, 0xffff,
+				"MPI firmware state failed (0x%02x)\n", mpi_state);
+		rval = -EINVAL;
+		goto exit_flash_img;
+	}
+
+	/* Hold the flash semaphore across the validate command. */
+	rval = qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_LOCK);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xffff,
+				"Unable to lock flash semaphore.");
+		goto exit_flash_img;
+	}
+
+	img_idx = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+
+	rval = qla_mpipt_validate_fw(vha, img_idx, state);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xffff,
+				"Failed to validate Firmware image index [0x%x].\n",
+				img_idx);
+	}
+
+	qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
+
+exit_flash_img:
+	return rval;
+}
+
+/**
+ * qla28xx_validate_flash_image() - BSG handler for QL_VND_IMG_SET_VALID
+ * @bsg_job: fc_bsg_job to handle
+ *
+ * Validate the flash firmware image selected by the application.
+ * ISP28xx, primary physical port (vp_idx 0) only.
+ *
+ * Return: QLA_SUCCESS once the reply has been populated (bsg_job_done()
+ * is called only on success), or -EPERM when unsupported.
+ */
+static int qla28xx_validate_flash_image(struct bsg_job *bsg_job)
+{
+	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t state = 0;
+	int rval = 0;
+
+	if (!IS_QLA28XX(ha) || vha->vp_idx != 0)
+		return -EPERM;
+
+	/* Serialize against other flash/option-ROM accesses. */
+	mutex_lock(&ha->optrom_mutex);
+	rval = qla28xx_do_validate_flash_image(bsg_job, &state);
+	if (rval)
+		rval = -EINVAL;
+	mutex_unlock(&ha->optrom_mutex);
+
+	/*
+	 * NOTE(review): 39 appears to be the MPI "image validation failed"
+	 * state from qla_mpipt_validate_fw() -- confirm and replace with a
+	 * named constant.
+	 */
+	if (rval)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+			(state == 39) ? EXT_STATUS_IMG_SET_VALID_ERR :
+			EXT_STATUS_IMG_SET_CONFIG_ERR;
+	else
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+	bsg_reply->result = DID_OK << 16;
+	bsg_reply->reply_payload_rcv_len = 0;
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	if (!rval)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+
+	return QLA_SUCCESS;
+}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index d38dab0a07e8..a920c8e482bc 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -32,12 +32,14 @@
 #define QL_VND_GET_PRIV_STATS_EX	0x1A
 #define QL_VND_SS_GET_FLASH_IMAGE_STATUS	0x1E
 #define QL_VND_EDIF_MGMT                0X1F
+#define QL_VND_GET_DRV_ATTR		0x22
 #define QL_VND_MANAGE_HOST_STATS	0x23
 #define QL_VND_GET_HOST_STATS		0x24
 #define QL_VND_GET_TGT_STATS		0x25
 #define QL_VND_MANAGE_HOST_PORT		0x26
 #define QL_VND_MBX_PASSTHRU		0x2B
 #define QL_VND_DPORT_DIAGNOSTICS_V2	0x2C
+#define QL_VND_IMG_SET_VALID	0x30
 
 /* BSG Vendor specific subcode returns */
 #define EXT_STATUS_OK			0
@@ -50,6 +52,8 @@
 #define EXT_STATUS_BUFFER_TOO_SMALL	16
 #define EXT_STATUS_NO_MEMORY		17
 #define EXT_STATUS_DEVICE_OFFLINE	22
+#define EXT_STATUS_IMG_SET_VALID_ERR	47
+#define EXT_STATUS_IMG_SET_CONFIG_ERR	48
 
 /*
  * To support bidirectional iocb
@@ -318,6 +322,14 @@ struct qla_active_regions {
 	uint8_t reserved[31];
 } __packed;
 
+/* Reply payload for QL_VND_GET_DRV_ATTR: driver capability bits. */
+struct qla_drv_attr {
+	uint32_t	attributes;
+	uint32_t	ext_attributes;
+#define QLA_IMG_SET_VALID_SUPPORT	BIT_4
+	uint32_t	status_flags;
+	uint8_t		reserved[20];
+} __packed;
+
 #include "qla_edif_bsg.h"
 
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b3265952c4be..5593ad7fad27 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1270,6 +1270,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
  */
 #define MBC_LOAD_RAM			1	/* Load RAM. */
 #define MBC_EXECUTE_FIRMWARE		2	/* Execute firmware. */
+#define MBC_LOAD_FLASH_FIRMWARE		3	/* Load flash firmware. */
 #define MBC_READ_RAM_WORD		5	/* Read RAM word. */
 #define MBC_MAILBOX_REGISTER_TEST	6	/* Wrap incoming mailboxes */
 #define MBC_VERIFY_CHECKSUM		7	/* Verify checksum. */
@@ -1385,6 +1386,26 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
 #define HCS_WRITE_SERDES		0x3
 #define HCS_READ_SERDES			0x4
 
+/*
+ * ISP2[7|8]xx mailbox commands.
+ */
+#define MBC_MPI_PASSTHROUGH            0x200
+
+/* MBC_MPI_PASSTHROUGH */
+#define MPIPT_REQ_V1 1
+enum {
+       MPIPT_SUBCMD_GET_SUP_CMD = 0x10,
+       MPIPT_SUBCMD_GET_SUP_FEATURE,
+       MPIPT_SUBCMD_GET_STATUS,
+       MPIPT_SUBCMD_VALIDATE_FW,
+};
+
+enum {
+       MPIPT_MPI_STATUS = 1,
+       MPIPT_FCORE_STATUS,
+       MPIPT_LOCKDOWN_STATUS,
+};
+
 /* Firmware return data sizes */
 #define FCAL_MAP_SIZE	128
 
@@ -4149,6 +4170,7 @@ struct qla_hw_data {
 		uint32_t	eeh_flush:2;
 #define EEH_FLUSH_RDY  1
 #define EEH_FLUSH_DONE 2
+		uint32_t	secure_mcu:1;
 	} flags;
 
 	uint16_t max_exchg;
@@ -4414,6 +4436,8 @@ struct qla_hw_data {
 	((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&\
 	 (ha->zio_mode == QLA_ZIO_MODE_6))
 
+#define IS_QLA28XX_SECURED(ha)	(IS_QLA28XX(ha) && ha->flags.secure_mcu)
+
 	/* HBA serial number */
 	uint8_t		serial0;
 	uint8_t		serial1;
@@ -5368,7 +5392,7 @@ struct edif_sa_index_entry {
 	struct list_head next;
 };
 
-/* Refer to SNIA SFF 8247 */
+/* Refer to SNIA SFF 8472 */
 struct sff_8247_a0 {
 	u8 txid;	/* transceiver id */
 	u8 ext_txid;
@@ -5412,6 +5436,7 @@ struct sff_8247_a0 {
 #define FC_SP_32 BIT_3
 #define FC_SP_2  BIT_2
 #define FC_SP_1  BIT_0
+#define FC_SPEED_2	BIT_1
 	u8 fc_sp_cc10;
 	u8 encode;
 	u8 bitrate;
@@ -5430,7 +5455,8 @@ struct sff_8247_a0 {
 	u8 vendor_pn[SFF_PART_NAME_LEN];	/* part number */
 	u8 vendor_rev[4];
 	u8 wavelength[2];
-	u8 resv;
+#define FC_SP_64	BIT_0
+	u8 fiber_channel_speed2;
 	u8 cc_base;
 	u8 options[2];	/* offset 64 */
 	u8 br_max;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 55d531c19e6b..9e328c235e39 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -344,6 +344,9 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
 extern int
 qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
 
+extern int
+qla28xx_load_flash_firmware(scsi_qla_host_t *vha);
+
 extern int
 qla2x00_get_fw_version(scsi_qla_host_t *);
 
@@ -839,6 +842,8 @@ extern int qla82xx_write_optrom_data(struct scsi_qla_host *, void *,
 extern int qla82xx_abort_isp(scsi_qla_host_t *);
 extern int qla82xx_restart_isp(scsi_qla_host_t *);
 
+extern int qla_mpipt_validate_fw(scsi_qla_host_t *vha, u16 img_idx, u16 *state);
+
 /* IOCB related functions */
 extern int qla82xx_start_scsi(srb_t *);
 extern void qla2x00_sp_free(srb_t *sp);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 51c7cea71f90..880cd73feaca 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3266,9 +3266,6 @@ void qla_fab_scan_finish(scsi_qla_host_t *vha, srb_t *sp)
 			    atomic_read(&fcport->state) == FCS_ONLINE) ||
 				do_delete) {
 				if (fcport->loop_id != FC_NO_LOOP_ID) {
-					if (fcport->flags & FCF_FCP2_DEVICE)
-						continue;
-
 					ql_log(ql_log_warn, vha, 0x20f0,
 					       "%s %d %8phC post del sess\n",
 					       __func__, __LINE__,
@@ -3535,8 +3532,8 @@ int qla_fab_async_scan(scsi_qla_host_t *vha, srb_t *sp)
 	if (vha->scan.scan_flags & SF_SCANNING) {
 		spin_unlock_irqrestore(&vha->work_lock, flags);
 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x2012,
-		    "%s: scan active\n", __func__);
-		return rval;
+		    "%s: scan active for sp:%p\n", __func__, sp);
+		goto done_free_sp;
 	}
 	vha->scan.scan_flags |= SF_SCANNING;
 	if (!sp)
@@ -3701,23 +3698,25 @@ int qla_fab_async_scan(scsi_qla_host_t *vha, srb_t *sp)
 	return rval;
 
 done_free_sp:
-	if (sp->u.iocb_cmd.u.ctarg.req) {
-		dma_free_coherent(&vha->hw->pdev->dev,
-		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-		    sp->u.iocb_cmd.u.ctarg.req,
-		    sp->u.iocb_cmd.u.ctarg.req_dma);
-		sp->u.iocb_cmd.u.ctarg.req = NULL;
-	}
-	if (sp->u.iocb_cmd.u.ctarg.rsp) {
-		dma_free_coherent(&vha->hw->pdev->dev,
-		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-		    sp->u.iocb_cmd.u.ctarg.rsp,
-		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
-		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-	}
+	if (sp) {
+		if (sp->u.iocb_cmd.u.ctarg.req) {
+			dma_free_coherent(&vha->hw->pdev->dev,
+			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+			    sp->u.iocb_cmd.u.ctarg.req,
+			    sp->u.iocb_cmd.u.ctarg.req_dma);
+			sp->u.iocb_cmd.u.ctarg.req = NULL;
+		}
+		if (sp->u.iocb_cmd.u.ctarg.rsp) {
+			dma_free_coherent(&vha->hw->pdev->dev,
+			    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+			    sp->u.iocb_cmd.u.ctarg.rsp,
+			    sp->u.iocb_cmd.u.ctarg.rsp_dma);
+			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+		}
 
-	/* ref: INIT */
-	kref_put(&sp->cmd_kref, qla2x00_sp_release);
+		/* ref: INIT */
+		kref_put(&sp->cmd_kref, qla2x00_sp_release);
+	}
 
 	spin_lock_irqsave(&vha->work_lock, flags);
 	vha->scan.scan_flags &= ~SF_SCANNING;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d395cbfe6802..689f909943b4 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1859,15 +1859,6 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
 	case RSCN_PORT_ADDR:
 		fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
 		if (fcport) {
-			if (ql2xfc2target &&
-			    fcport->flags & FCF_FCP2_DEVICE &&
-			    atomic_read(&fcport->state) == FCS_ONLINE) {
-				ql_dbg(ql_dbg_disc, vha, 0x2115,
-				       "Delaying session delete for FCP2 portid=%06x %8phC ",
-					fcport->d_id.b24, fcport->port_name);
-				return;
-			}
-
 			if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
 				/*
 				 * On ipsec start by remote port, Target port
@@ -2471,8 +2462,23 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 	    ea->sp->gen1, fcport->rscn_gen,
 	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
 
-	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
+	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND) {
+		ql_dbg(ql_dbg_disc, vha, 0x20ea,
+		    "%s %d %8phC Remote is trying to login\n",
+		    __func__, __LINE__, fcport->port_name);
+		/*
+		 * If we get here, there is port thats already logged in,
+		 * but it's state has not moved ahead. Recheck with FW on
+		 * what state it is in and proceed ahead
+		 */
+		if (!N2N_TOPO(vha->hw)) {
+			fcport->fw_login_state = DSC_LS_PRLI_COMP;
+			qla24xx_post_gpdb_work(vha, fcport, 0);
+		}
+		return;
+	}
+
+	if (fcport->fw_login_state == DSC_LS_PRLI_PEND) {
 		ql_dbg(ql_dbg_disc, vha, 0x20ea,
 		    "%s %d %8phC Remote is trying to login\n",
 		    __func__, __LINE__, fcport->port_name);
@@ -4074,6 +4080,22 @@ static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
 	u8 str[STR_LEN], *ptr, p;
 	int leftover, len;
 
+	ql_dbg(ql_dbg_init, vha, 0x015a,
+	    "SFP: %.*s -> %.*s ->%s%s%s%s%s%s%s\n",
+	    (int)sizeof(a0->vendor_name), a0->vendor_name,
+	    (int)sizeof(a0->vendor_pn), a0->vendor_pn,
+	    a0->fc_sp_cc10 & FC_SP_2 ? a0->fiber_channel_speed2  &  FC_SP_64 ?
+					" 64G" : "" : "",
+	    a0->fc_sp_cc10 & FC_SP_32 ? " 32G" : "",
+	    a0->fc_sp_cc10 & FC_SP_16 ? " 16G" : "",
+	    a0->fc_sp_cc10 & FC_SP_8  ?  " 8G" : "",
+	    a0->fc_sp_cc10 & FC_SP_4  ?  " 4G" : "",
+	    a0->fc_sp_cc10 & FC_SP_2  ?  " 2G" : "",
+	    a0->fc_sp_cc10 & FC_SP_1  ?  " 1G" : "");
+
+	if (!(ql2xextended_error_logging & ql_dbg_verbose))
+		return;
+
 	memset(str, 0, STR_LEN);
 	snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
 	ql_dbg(ql_dbg_init, vha, 0x015a,
@@ -8442,6 +8464,148 @@ bool qla24xx_risc_firmware_invalid(uint32_t *dword)
 	    !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
 }
 
+/**
+ * qla28xx_get_srisc_addr() - read the RISC start address from flash
+ * @vha: host adapter
+ * @srisc_addr: output; RISC code load/start address (0 on failure)
+ * @faddr: flash offset of the firmware image header
+ *
+ * Reads the first 10 dwords of the image header into the request-queue
+ * ring used as a scratch buffer; dword 2 holds the big-endian address.
+ *
+ * Return: QLA_SUCCESS or QLA_FUNCTION_FAILED.
+ */
+static int
+qla28xx_get_srisc_addr(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+		       uint32_t faddr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	uint32_t *dcode;
+	int rval;
+
+	*srisc_addr = 0;
+	dcode = (uint32_t *)req->ring;
+
+	rval = qla24xx_read_flash_data(vha, dcode, faddr, 10);
+	if (rval) {
+		ql_log(ql_log_fatal, vha, 0x01aa,
+		    "-> Failed to read flash addr + size .\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	*srisc_addr = be32_to_cpu((__force __be32)dcode[2]);
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla28xx_load_fw_template() - load fwdt dump templates from flash
+ * @vha: host adapter
+ * @faddr: flash offset of the firmware image (code segments first,
+ *         fwdt templates after them)
+ * Return: QLA_SUCCESS (template load failure is reported but the
+ *         function still returns QLA_SUCCESS -- see NOTE below).
+ */
+static int
+qla28xx_load_fw_template(scsi_qla_host_t *vha, uint32_t faddr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct fwdt *fwdt = ha->fwdt;
+	struct req_que *req = ha->req_q_map[0];
+	uint32_t risc_size, risc_attr = 0;
+	uint templates, segments, fragment;
+	uint32_t *dcode;
+	ulong dlen;
+	int rval;
+	uint j;
+
+	dcode = (uint32_t *)req->ring;
+	segments = FA_RISC_CODE_SEGMENTS;
+
+	/*
+	 * Walk the firmware code segments WITHOUT loading them (the code
+	 * itself is presumably loaded by the ROM path -- see
+	 * qla28xx_load_flash_firmware()); this pass only advances faddr to
+	 * where the templates start and captures the attribute dword of
+	 * the first segment header.
+	 */
+	for (j = 0; j < segments; j++) {
+		rval = qla24xx_read_flash_data(vha, dcode, faddr, 10);
+		if (rval) {
+			ql_log(ql_log_fatal, vha, 0x01a1,
+			       "-> Failed to read flash addr + size .\n");
+			return QLA_FUNCTION_FAILED;
+		}
+
+		/* Header dword 3: segment size (dwords, big-endian). */
+		risc_size = be32_to_cpu((__force __be32)dcode[3]);
+
+		/* Header dword 9: segment attributes (first segment only). */
+		if (risc_attr == 0)
+			risc_attr = be32_to_cpu((__force __be32)dcode[9]);
+
+		/* Step past this segment's data in transfer-size chunks. */
+		dlen = ha->fw_transfer_size >> 2;
+		for (fragment = 0; fragment < risc_size; fragment++) {
+			if (dlen > risc_size)
+				dlen = risc_size;
+
+			faddr += dlen;
+			risc_size -= dlen;
+		}
+	}
+
+	/* BIT_9 in the segment attributes indicates a second template. */
+	templates = (risc_attr & BIT_9) ? 2 : 1;
+
+	ql_dbg(ql_dbg_init, vha, 0x01a1, "-> templates = %u\n", templates);
+
+	for (j = 0; j < templates; j++, fwdt++) {
+		/* Drop any previously-loaded template for this slot. */
+		vfree(fwdt->template);
+		fwdt->template = NULL;
+		fwdt->length = 0;
+
+		dcode = (uint32_t *)req->ring;
+
+		rval = qla24xx_read_flash_data(vha, dcode, faddr, 7);
+		if (rval) {
+			ql_log(ql_log_fatal, vha, 0x01a2,
+			    "-> Unable to read template size.\n");
+			goto failed;
+		}
+
+		/* Template header dword 2: array size in dwords. */
+		risc_size = be32_to_cpu((__force __be32)dcode[2]);
+		ql_dbg(ql_dbg_init, vha, 0x01a3,
+		    "-> fwdt%u template array at %#x (%#x dwords)\n",
+		    j, faddr, risc_size);
+		if (!risc_size || !~risc_size) {
+			ql_dbg(ql_dbg_init, vha, 0x01a4,
+			    "-> fwdt%u failed to read array\n", j);
+			goto failed;
+		}
+
+		/* skip header and ignore checksum */
+		faddr += 7;
+		risc_size -= 8;
+
+		ql_dbg(ql_dbg_init, vha, 0x01a5,
+		    "-> fwdt%u template allocate template %#x words...\n",
+		    j, risc_size);
+		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+		if (!fwdt->template) {
+			ql_log(ql_log_warn, vha, 0x01a6,
+			    "-> fwdt%u failed allocate template.\n", j);
+			goto failed;
+		}
+
+		dcode = fwdt->template;
+		rval = qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+
+		if (rval || !qla27xx_fwdt_template_valid(dcode)) {
+			ql_log(ql_log_warn, vha, 0x01a7,
+			    "-> fwdt%u failed template validate (rval %x)\n",
+			    j, rval);
+			goto failed;
+		}
+
+		dlen = qla27xx_fwdt_template_size(dcode);
+		ql_dbg(ql_dbg_init, vha, 0x01a7,
+		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
+		    j, dlen, dlen / sizeof(*dcode));
+		if (dlen > risc_size * sizeof(*dcode)) {
+			ql_log(ql_log_warn, vha, 0x01a8,
+			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
+			    j, dlen - risc_size * sizeof(*dcode));
+			goto failed;
+		}
+
+		fwdt->length = dlen;
+		ql_dbg(ql_dbg_init, vha, 0x01a9,
+		    "-> fwdt%u loaded template ok\n", j);
+
+		/* +1: skip the trailing checksum dword (assumed). */
+		faddr += risc_size + 1;
+	}
+
+	return QLA_SUCCESS;
+
+failed:
+	vfree(fwdt->template);
+	fwdt->template = NULL;
+	fwdt->length = 0;
+
+	/*
+	 * NOTE(review): template load failure is reported as QLA_SUCCESS,
+	 * i.e. treated as non-fatal (dump templates are diagnostics only)
+	 * -- confirm this is intentional.
+	 */
+	return QLA_SUCCESS;
+}
+
 static int
 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
     uint32_t faddr)
@@ -8881,16 +9045,18 @@ int
 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
 	int rval;
+	uint32_t f_region = 0;
 	struct qla_hw_data *ha = vha->hw;
 	struct active_regions active_regions = { };
 
-	if (ql2xfwloadbin == 2)
+	if (ql2xfwloadbin == 2 && !IS_QLA28XX(ha))
 		goto try_blob_fw;
 
 	/* FW Load priority:
-	 * 1) Firmware residing in flash.
-	 * 2) Firmware via request-firmware interface (.bin file).
-	 * 3) Golden-Firmware residing in flash -- (limited operation).
+	 * 1) If 28xxx, ROM cmd to load flash firmware.
+	 * 2) Firmware residing in flash.
+	 * 3) Firmware via request-firmware interface (.bin file).
+	 * 4) Golden-Firmware residing in flash -- (limited operation).
 	 */
 
 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
@@ -8898,6 +9064,40 @@ qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 
 	qla27xx_get_active_image(vha, &active_regions);
 
+	/* For 28XXX, always load the flash firmware using rom mbx */
+	if (IS_QLA28XX_SECURED(ha)) {
+		rval = qla28xx_load_flash_firmware(vha);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_fatal, vha, 0x019e,
+			       "Failed to load flash firmware.\n");
+			goto exit_load_risc;
+		}
+
+		f_region =
+		(active_regions.global != QLA27XX_SECONDARY_IMAGE) ?
+		 ha->flt_region_fw : ha->flt_region_fw_sec;
+
+		ql_log(ql_log_info, vha, 0x019f,
+		       "Load flash firmware successful (%s).\n",
+		       ((active_regions.global != QLA27XX_SECONDARY_IMAGE) ?
+		       "Primary" : "Secondary"));
+
+		rval = qla28xx_get_srisc_addr(vha, srisc_addr, f_region);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x019f,
+			       "failed to read srisc address\n");
+			goto exit_load_risc;
+		}
+
+		rval = qla28xx_load_fw_template(vha, f_region);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x01a0,
+			       "failed to read firmware template\n");
+		}
+
+		goto exit_load_risc;
+	}
+
 	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
 		goto try_primary_fw;
 
@@ -8927,6 +9127,8 @@ qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 
 	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
 	ha->flags.running_gold_fw = 1;
+
+exit_load_risc:
 	return rval;
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a3971afc2dd1..a4dda4fcb52c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1669,13 +1669,28 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 
 			/* Port logout */
 			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
-			if (!fcport)
+			if (!fcport) {
+				ql_dbg(ql_dbg_async, vha, 0x5011,
+					"Could not find fcport:%04x %04x %04x\n",
+					mb[1], mb[2], mb[3]);
 				break;
-			if (atomic_read(&fcport->state) != FCS_ONLINE)
+			}
+
+			if (atomic_read(&fcport->state) != FCS_ONLINE) {
+				ql_dbg(ql_dbg_async, vha, 0x5012,
+					"Port state is not online State:0x%x\n",
+					atomic_read(&fcport->state));
+				ql_dbg(ql_dbg_async, vha, 0x5012,
+					"Scheduling session for deletion\n");
+				fcport->logout_on_delete = 0;
+				qlt_schedule_sess_for_deletion(fcport);
 				break;
+			}
+
 			ql_dbg(ql_dbg_async, vha, 0x508a,
 			    "Marking port lost loopid=%04x portid=%06x.\n",
 			    fcport->loop_id, fcport->d_id.b24);
+
 			if (qla_ini_mode_enabled(vha)) {
 				fcport->logout_on_delete = 0;
 				qlt_schedule_sess_for_deletion(fcport);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 1f01576f044b..0d598be6f3ea 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -43,6 +43,7 @@ static struct rom_cmd {
 } rom_cmds[] = {
 	{ MBC_LOAD_RAM },
 	{ MBC_EXECUTE_FIRMWARE },
+	{ MBC_LOAD_FLASH_FIRMWARE },
 	{ MBC_READ_RAM_WORD },
 	{ MBC_MAILBOX_REGISTER_TEST },
 	{ MBC_VERIFY_CHECKSUM },
@@ -824,6 +825,53 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 	return rval;
 }
 
+/*
+ * qla28xx_load_flash_firmware
+ *	Load firmware from flash.
+ *
+ * Input:
+ *	vha = adapter block pointer.
+ *
+ * Returns:
+ *	qla28xx local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla28xx_load_flash_firmware(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_COMMAND_ERROR;
+	mbx_cmd_t mc = {};	/* zero mb[] so unset out registers are not stack garbage */
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA28XX(ha))
+		return rval;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a6,
+	       "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_LOAD_FLASH_FIRMWARE;
+	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
+	mcp->in_mb = MBX_2 | MBX_1 | MBX_0;	/* mb1/mb2 carry error codes on failure */
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_info, vha, 0x11a7,
+		       "Failed=%x cmd error=%x img error=%x.\n",
+		       rval, mcp->mb[1], mcp->mb[2]);
+	} else {
+		ql_log(ql_log_info, vha, 0x11a8,
+		       "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+
 /*
  * qla_get_exlogin_status
  *	Get extended login status
@@ -7157,3 +7205,43 @@ int qla_mailbox_passthru(scsi_qla_host_t *vha,
 
 	return rval;
 }
+
+int qla_mpipt_validate_fw(scsi_qla_host_t *vha, u16 img_idx, uint16_t *state)
+{
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int rval;
+
+	if (!IS_QLA28XX(ha)) {
+		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s %d\n", __func__, __LINE__);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	if (img_idx > 1) {
+		ql_log(ql_log_info, vha, 0xffff,
+				"%s %d Invalid flash image index [%d]\n",
+				__func__, __LINE__, img_idx);
+		return QLA_INVALID_COMMAND;
+	}
+
+	memset(&mc, 0, sizeof(mc));
+	mcp->mb[0] = MBC_MPI_PASSTHROUGH;
+	mcp->mb[1] = MPIPT_SUBCMD_VALIDATE_FW;
+	mcp->mb[2] = img_idx;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+
+	/* send mb via iocb */
+	rval = qla24xx_send_mb_cmd(vha, &mc);
+	if (rval) {
+		ql_log(ql_log_info, vha, 0xffff, "%s:Failed %x (mb=%x,%x)\n",
+				__func__, rval, mcp->mb[0], mcp->mb[1]);
+		*state = mcp->mb[1];
+	} else {
+		ql_log(ql_log_info, vha, 0xffff, "%s: mb=%x,%x,%x\n", __func__,
+				mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+	}
+
+	return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 5d1bdc15b75c..8e7a7f5f0adb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -892,6 +892,7 @@ struct ct6_dsd {
 #define	FA_VPD_SIZE_82XX	0x400
 
 #define FA_FLASH_LAYOUT_ADDR_82	0xFC400
+#define FA_FLASH_MCU_OFF	0x13000
 
 /******************************************************************************
 *
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 16a44c0917e1..c9aff70e7357 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1183,7 +1183,8 @@ qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
 	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
 		ha->flags.mbox_busy) ||
 	       test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
-	       test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
+	       test_bit(FX00_TARGET_SCAN, &vha->dpc_flags) ||
+	       (vha->scan.scan_flags & SF_SCANNING)) {
 		if (test_bit(UNLOADING, &base_vha->dpc_flags))
 			break;
 		msleep(1000);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 9e7a407ba1b9..b6c36a8a2d60 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1084,6 +1084,32 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
 	return;
 }
 
+static int qla28xx_validate_mcu_signature(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	uint32_t *dcode = (uint32_t *)req->ring;
+	uint32_t signature[2] = {0x000c0000, 0x00050000};
+	int ret = QLA_SUCCESS;
+
+	ret = qla24xx_read_flash_data(vha, dcode, FA_FLASH_MCU_OFF >> 2, 2);
+	if (ret) {
+		ql_log(ql_log_fatal, vha, 0x01ab,
+		       "-> Failed to read flash mcu signature.\n");
+		ret = QLA_FUNCTION_FAILED;
+		goto done;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x01ac,
+		"Flash data 0x%08x 0x%08x.\n", dcode[0], dcode[1]);
+
+	if (!(dcode[0] == signature[0] && dcode[1] == signature[1]))
+		ret = QLA_FUNCTION_FAILED;
+
+done:
+	return ret;
+}
+
 int
 qla2xxx_get_flash_info(scsi_qla_host_t *vha)
 {
@@ -1096,6 +1122,9 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		return QLA_SUCCESS;
 
+	if (IS_QLA28XX(ha) && !qla28xx_validate_mcu_signature(vha))
+		ha->flags.secure_mcu = 1;
+
 	ret = qla2xxx_find_flt_start(vha, &flt_addr);
 	if (ret != QLA_SUCCESS)
 		return ret;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index a491d6ee5c94..9564beafdab7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.02.09.400-k"
+#define QLA2XXX_VERSION      "10.02.10.100-k"
 
 #define QLA_DRIVER_MAJOR_VER	10
-#define QLA_DRIVER_MINOR_VER	2
-#define QLA_DRIVER_PATCH_VER	9
-#define QLA_DRIVER_BETA_VER	400
+#define QLA_DRIVER_MINOR_VER	2
+#define QLA_DRIVER_PATCH_VER	10
+#define QLA_DRIVER_BETA_VER	100
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1f2a53ba5dd9..9f17e9c49cb5 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1371,8 +1371,7 @@ static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
 
 	sbuff = scp->sense_buffer;
 	if (!sbuff) {
-		sdev_printk(KERN_ERR, scp->device,
-			    "%s: sense_buffer is NULL\n", __func__);
+		sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n");
 		return;
 	}
 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
@@ -1404,8 +1403,7 @@ static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
 {
 	if (!scp->sense_buffer) {
-		sdev_printk(KERN_ERR, scp->device,
-			    "%s: sense_buffer is NULL\n", __func__);
+		sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n");
 		return;
 	}
 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
@@ -1423,8 +1421,7 @@ static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
 			unsigned int information, unsigned char tape_flags)
 {
 	if (!scp->sense_buffer) {
-		sdev_printk(KERN_ERR, scp->device,
-			    "%s: sense_buffer is NULL\n", __func__);
+		sdev_printk(KERN_ERR, scp->device, "sense_buffer is NULL\n");
 		return;
 	}
 	memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
@@ -1452,15 +1449,12 @@ static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
 {
 	if (sdebug_verbose) {
 		if (0x1261 == cmd)
-			sdev_printk(KERN_INFO, dev,
-				    "%s: BLKFLSBUF [0x1261]\n", __func__);
+			sdev_printk(KERN_INFO, dev, "BLKFLSBUF [0x1261]\n");
 		else if (0x5331 == cmd)
 			sdev_printk(KERN_INFO, dev,
-				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
-				    __func__);
+				    "CDROM_GET_CAPABILITY [0x5331]\n");
 		else
-			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
-				    __func__, cmd);
+			sdev_printk(KERN_INFO, dev, "cmd=0x%x\n", cmd);
 	}
 	return -EINVAL;
 	/* return -ENOTTY; // correct return but upsets fdisk */
@@ -1664,8 +1658,8 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
 
 	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
 				       arr, arr_len, skip);
-	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
-		 __func__, off_dst, scsi_bufflen(scp), act_len,
+	pr_debug("off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
+		 off_dst, scsi_bufflen(scp), act_len,
 		 scsi_get_resid(scp));
 	n = scsi_bufflen(scp) - (off_dst + act_len);
 	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
@@ -3188,8 +3182,8 @@ static int resp_mode_select(struct scsi_cmnd *scp,
 		return DID_ERROR << 16;
 	else if (sdebug_verbose && (res < param_len))
 		sdev_printk(KERN_INFO, scp->device,
-			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
-			    __func__, param_len, res);
+			    "cdb indicated=%d, IO sent=%d bytes\n",
+			    param_len, res);
 	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
 	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
 	off = (mselect6 ? 4 : 8);
@@ -5133,8 +5127,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
 	if (lbdof == 0) {
 		if (sdebug_verbose)
 			sdev_printk(KERN_INFO, scp->device,
-				"%s: %s: LB Data Offset field bad\n",
-				my_name, __func__);
+				"%s: LB Data Offset field bad\n", my_name);
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
 		return illegal_condition_result;
 	}
@@ -5142,8 +5135,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
 	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
 		if (sdebug_verbose)
 			sdev_printk(KERN_INFO, scp->device,
-				"%s: %s: LBA range descriptors don't fit\n",
-				my_name, __func__);
+				"%s: LBA range descriptors don't fit\n", my_name);
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
 		return illegal_condition_result;
 	}
@@ -5152,8 +5144,8 @@ static int resp_write_scat(struct scsi_cmnd *scp,
 		return SCSI_MLQUEUE_HOST_BUSY;
 	if (sdebug_verbose)
 		sdev_printk(KERN_INFO, scp->device,
-			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
-			my_name, __func__, lbdof_blen);
+			"%s: Fetch header+scatter_list, lbdof_blen=%u\n",
+			my_name, lbdof_blen);
 	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
 	if (res == -1) {
 		ret = DID_ERROR << 16;
@@ -5170,8 +5162,8 @@ static int resp_write_scat(struct scsi_cmnd *scp,
 		num = get_unaligned_be32(up + 8);
 		if (sdebug_verbose)
 			sdev_printk(KERN_INFO, scp->device,
-				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
-				my_name, __func__, k, lba, num, sg_off);
+				"%s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
+				my_name, k, lba, num, sg_off);
 		if (num == 0)
 			continue;
 		ret = check_device_access_params(scp, lba, num, true);
@@ -5183,8 +5175,8 @@ static int resp_write_scat(struct scsi_cmnd *scp,
 		if ((cum_lb + num) > bt_len) {
 			if (sdebug_verbose)
 				sdev_printk(KERN_INFO, scp->device,
-				    "%s: %s: sum of blocks > data provided\n",
-				    my_name, __func__);
+				    "%s: sum of blocks > data provided\n",
+				    my_name);
 			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
 					0);
 			ret = illegal_condition_result;
@@ -5876,8 +5868,8 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		goto cleanup;
 	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
 		sdev_printk(KERN_INFO, scp->device,
-			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
-			    my_name, __func__, a_num * lb_size, ret);
+			    "%s: cdb indicated=%u, IO sent=%d bytes\n",
+			    my_name, a_num * lb_size, ret);
 	}
 	if (is_bytchk3) {
 		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
@@ -6404,11 +6396,6 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
 			atomic_inc(&sdebug_miss_cpus);
 	}
 
-	if (!scp) {
-		pr_err("scmd=NULL\n");
-		return;
-	}
-
 	spin_lock_irqsave(&sdsc->lock, flags);
 	aborted = sd_dp->aborted;
 	if (unlikely(aborted))
@@ -6685,14 +6672,14 @@ static int scsi_debug_sdev_configure(struct scsi_device *sdp,
 	devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
 				sdebug_debugfs_root);
 	if (IS_ERR_OR_NULL(devip->debugfs_entry))
-		pr_info("%s: failed to create debugfs directory for device %s\n",
-			__func__, dev_name(&sdp->sdev_gendev));
+		pr_info("failed to create debugfs directory for device %s\n",
+			dev_name(&sdp->sdev_gendev));
 
 	dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
 				&sdebug_error_fops);
 	if (IS_ERR_OR_NULL(dentry))
-		pr_info("%s: failed to create error file for device %s\n",
-			__func__, dev_name(&sdp->sdev_gendev));
+		pr_info("failed to create error file for device %s\n",
+			dev_name(&sdp->sdev_gendev));
 
 	return 0;
 }
@@ -6734,7 +6721,7 @@ static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
 {
 	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
 	struct sdebug_defer *sd_dp = &sdsc->sd_dp;
-	enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
+	enum sdeb_defer_type defer_t = sd_dp->defer_t;
 
 	lockdep_assert_held(&sdsc->lock);
 
@@ -6880,7 +6867,7 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
 
 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, SCpnt->device,
-			    "%s: command%s found\n", __func__,
+			    "command%s found\n",
 			    aborted ? "" : " not");
 
 
@@ -6968,7 +6955,7 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
 	++num_dev_resets;
 
 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
-		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+		sdev_printk(KERN_INFO, sdp, "doing device reset\n");
 
 	scsi_debug_stop_all_queued(sdp);
 	if (devip) {
@@ -7008,7 +6995,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
 
 	++num_target_resets;
 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
-		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+		sdev_printk(KERN_INFO, sdp, "doing target reset\n");
 
 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
 		if (devip->target == sdp->id) {
@@ -7021,7 +7008,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
 
 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, sdp,
-			    "%s: %d device(s) found in target\n", __func__, k);
+			    "%d device(s) found in target\n", k);
 
 	if (sdebug_fail_target_reset(SCpnt)) {
 		scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
@@ -7042,7 +7029,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
 	++num_bus_resets;
 
 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
-		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+		sdev_printk(KERN_INFO, sdp, "doing bus reset\n");
 
 	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
 		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
@@ -7053,7 +7040,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
 
 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, sdp,
-			    "%s: %d device(s) found in host\n", __func__, k);
+			    "%d device(s) found in host\n", k);
 	return SUCCESS;
 }
 
@@ -7065,7 +7052,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
 
 	++num_host_resets;
 	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
-		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
+		sdev_printk(KERN_INFO, SCpnt->device, "doing host reset\n");
 	mutex_lock(&sdebug_host_list_mutex);
 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 		list_for_each_entry(devip, &sdbg_host->dev_info_list,
@@ -7080,7 +7067,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
 	stop_all_queued();
 	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, SCpnt->device,
-			    "%s: %d device(s) found\n", __func__, k);
+			"%d device(s) found\n", k);
 	return SUCCESS;
 }
 
@@ -7231,8 +7218,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 			scsi_result = device_qfull_result;
 
 			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
-				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
-					    __func__, num_in_q);
+				sdev_printk(KERN_INFO, sdp, "num_in_q=%d +1, <inject> status: TASK SET FULL\n",
+					    num_in_q);
 		}
 	}
 
@@ -7258,8 +7245,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 	}
 
 	if (unlikely(sdebug_verbose && cmnd->result))
-		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
-			    __func__, cmnd->result);
+		sdev_printk(KERN_INFO, sdp, "non-zero result=0x%x\n",
+			    cmnd->result);
 
 	if (delta_jiff > 0 || ndelay > 0) {
 		ktime_t kt;
@@ -7296,12 +7283,12 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 		if (polled) {
 			spin_lock_irqsave(&sdsc->lock, flags);
 			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
-			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
+			sd_dp->defer_t = SDEB_DEFER_POLL;
 			spin_unlock_irqrestore(&sdsc->lock, flags);
 		} else {
 			/* schedule the invocation of scsi_done() for a later time */
 			spin_lock_irqsave(&sdsc->lock, flags);
-			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
+			sd_dp->defer_t = SDEB_DEFER_HRT;
 			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
 			/*
 			 * The completion handler will try to grab sqcp->lock,
@@ -7325,11 +7312,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 		if (polled) {
 			spin_lock_irqsave(&sdsc->lock, flags);
 			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
-			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
+			sd_dp->defer_t = SDEB_DEFER_POLL;
 			spin_unlock_irqrestore(&sdsc->lock, flags);
 		} else {
 			spin_lock_irqsave(&sdsc->lock, flags);
-			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
+			sd_dp->defer_t = SDEB_DEFER_WQ;
 			schedule_work(&sd_dp->ew.work);
 			spin_unlock_irqrestore(&sdsc->lock, flags);
 		}
@@ -8697,7 +8684,7 @@ static int __init scsi_debug_init(void)
 
 	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
 	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
-		pr_info("%s: failed to create initial debugfs directory\n", __func__);
+		pr_info("failed to create initial debugfs directory\n");
 
 	for (k = 0; k < hosts_to_add; k++) {
 		if (want_store && k == 0) {
@@ -8813,7 +8800,7 @@ static int sdebug_add_store(void)
 	if (unlikely(res < 0)) {
 		xa_unlock_irqrestore(per_store_ap, iflags);
 		kfree(sip);
-		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
+		pr_warn("xa_alloc() errno=%d\n", -res);
 		return res;
 	}
 	sdeb_most_recent_idx = n_idx;
@@ -8870,7 +8857,7 @@ static int sdebug_add_store(void)
 	return (int)n_idx;
 err:
 	sdebug_erase_store((int)n_idx, sip);
-	pr_warn("%s: failed, errno=%d\n", __func__, -res);
+	pr_warn("failed, errno=%d\n", -res);
 	return res;
 }
 
@@ -8929,7 +8916,7 @@ static int sdebug_add_host_helper(int per_host_idx)
 		put_device(&sdbg_host->dev);
 	else
 		kfree(sdbg_host);
-	pr_warn("%s: failed, errno=%d\n", __func__, -error);
+	pr_warn("failed, errno=%d\n", -error);
 	return error;
 }
 
@@ -8997,7 +8984,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
 
 	if (qdepth > SDEBUG_CANQUEUE) {
 		qdepth = SDEBUG_CANQUEUE;
-		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
+		pr_warn("requested qdepth [%d] exceeds canqueue [%d], trim\n",
 			qdepth, SDEBUG_CANQUEUE);
 	}
 	if (qdepth < 1)
@@ -9009,7 +8996,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
 	mutex_unlock(&sdebug_host_list_mutex);
 
 	if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
-		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
+		sdev_printk(KERN_INFO, sdev, "qdepth=%d\n", qdepth);
 
 	return sdev->queue_depth;
 }
@@ -9133,7 +9120,7 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
 
 	spin_lock_irqsave(&sdsc->lock, flags);
 	sd_dp = &sdsc->sd_dp;
-	if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
+	if (sd_dp->defer_t != SDEB_DEFER_POLL) {
 		spin_unlock_irqrestore(&sdsc->lock, flags);
 		return true;
 	}
@@ -9282,8 +9269,7 @@ static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
 	bool res = false;
 
 	if (!to_be_aborted_scmd) {
-		pr_err("%s: command with tag %#x not found\n", __func__,
-		       unique_tag);
+		pr_err("command with tag %#x not found\n", unique_tag);
 		return;
 	}
 
@@ -9291,11 +9277,9 @@ static void scsi_debug_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *scp)
 		res = scsi_debug_stop_cmnd(to_be_aborted_scmd);
 
 	if (res)
-		pr_info("%s: aborted command with tag %#x\n",
-			__func__, unique_tag);
+		pr_info("aborted command with tag %#x\n", unique_tag);
 	else
-		pr_err("%s: failed to abort command with tag %#x\n",
-		       __func__, unique_tag);
+		pr_err("failed to abort command with tag %#x\n", unique_tag);
 
 	set_host_byte(scp, res ? DID_OK : DID_ERROR);
 }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 93031326ac3e..3b860b369692 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -76,7 +76,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
 }
 
 static void
-scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
+scsi_set_blocked(struct scsi_cmnd *cmd, enum scsi_qc_status reason)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
@@ -139,7 +139,8 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
  * for a requeue after completion, which should only occur in this
  * file.
  */
-static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd,
+				enum scsi_qc_status reason, bool unbusy)
 {
 	struct scsi_device *device = cmd->device;
 
@@ -179,7 +180,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
  * Context: This could be called either from an interrupt context or a normal
  * process context.
  */
-void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+void scsi_queue_insert(struct scsi_cmnd *cmd, enum scsi_qc_status reason)
 {
 	__scsi_queue_insert(cmd, reason, true);
 }
@@ -1577,7 +1578,7 @@ static void scsi_complete(struct request *rq)
  * Return: nonzero return request was rejected and device's queue needs to be
  * plugged.
  */
-static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+static enum scsi_qc_status scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	int rtn = 0;
@@ -1826,7 +1827,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct Scsi_Host *shost = sdev->host;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 	blk_status_t ret;
-	int reason;
+	enum scsi_qc_status reason;
 
 	WARN_ON_ONCE(cmd->budget_token < 0);
 
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index d07ec15d6c00..7a193cc04e5b 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -102,7 +102,8 @@ void scsi_eh_done(struct scsi_cmnd *scmd);
 
 /* scsi_lib.c */
 extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
-extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
+extern void scsi_queue_insert(struct scsi_cmnd *cmd,
+			      enum scsi_qc_status reason);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern void scsi_requeue_run_queue(struct work_struct *work);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 987befb02408..b95c46a346fb 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1328,6 +1328,46 @@ store_fc_rport_fast_io_fail_tmo(struct device *dev,
 static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
 	show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
 
+#define fc_rport_encryption(name)							\
+static ssize_t fc_rport_encinfo_##name(struct device *cd,				\
+				       struct device_attribute *attr,			\
+				       char *buf)					\
+{											\
+	struct fc_rport *rport = transport_class_to_rport(cd);				\
+	struct Scsi_Host *shost = rport_to_shost(rport);				\
+	struct fc_internal *i = to_fc_internal(shost->transportt);			\
+	struct fc_encryption_info *info;						\
+	ssize_t ret = -ENOENT;								\
+	u32 data;									\
+											\
+	if (i->f->get_fc_rport_enc_info) {						\
+		info = (i->f->get_fc_rport_enc_info)(rport);				\
+		if (info) {								\
+			data = info->name;						\
+			if (!strcmp(#name, "status")) {					\
+				ret = scnprintf(buf,					\
+						FC_RPORT_ENCRYPTION_STATUS_MAX_LEN,	\
+						"%s\n",					\
+						data ? "Encrypted" : "Unencrypted");	\
+			}								\
+		}									\
+	}										\
+	return ret;									\
+}											\
+static FC_DEVICE_ATTR(rport, encryption_##name, 0444, fc_rport_encinfo_##name, NULL)	\
+
+fc_rport_encryption(status);
+
+static struct attribute *fc_rport_encryption_attrs[] = {
+	&device_attr_rport_encryption_status.attr,
+	NULL
+};
+
+static struct attribute_group fc_rport_encryption_group = {
+	.name = "encryption",
+	.attrs = fc_rport_encryption_attrs,
+};
+
 #define fc_rport_fpin_statistic(name)					\
 static ssize_t fc_rport_fpinstat_##name(struct device *cd,		\
 				  struct device_attribute *attr,	\
@@ -2633,6 +2673,8 @@ fc_attach_transport(struct fc_function_template *ft)
 	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
 	i->rport_attr_cont.ac.class = &fc_rport_class.class;
 	i->rport_attr_cont.ac.match = fc_rport_match;
+	if (ft->get_fc_rport_enc_info)
+		i->rport_attr_cont.encryption = &fc_rport_encryption_group;
 	i->rport_attr_cont.statistics = &fc_rport_statistics_group;
 	transport_container_register(&i->rport_attr_cont);
 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f7868b41c5e6..eb5bc3f1673b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1741,6 +1741,54 @@ static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
 	return len;
 }
 
+static ssize_t target_wwn_pd_text_id_info_show(struct config_item *item,
+		char *page)
+{
+	return sysfs_emit(page, "%s\n", &to_t10_wwn(item)->pd_text_id_info[0]);
+}
+
+static ssize_t target_wwn_pd_text_id_info_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct t10_wwn *t10_wwn = to_t10_wwn(item);
+	struct se_device *dev = t10_wwn->t10_dev;
+
+	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+	unsigned char buf[PD_TEXT_ID_INFO_LEN + 2];
+	char *stripped;
+
+	/*
+	 * Check to see if any active exports exist.  If they do exist, fail
+	 * here as changing this information on the fly (underneath the
+	 * initiator side OS dependent multipath code) could cause negative
+	 * effects.
+	 */
+	if (dev->export_count) {
+		pr_err("Unable to set the peripheral device text id info while active %d exports exist\n",
+			dev->export_count);
+		return -EINVAL;
+	}
+
+	if (strscpy(buf, page, sizeof(buf)) < 0)
+		return -EOVERFLOW;
+
+	/* Strip any newline added from userspace. */
+	stripped = strstrip(buf);
+	if (strlen(stripped) >= PD_TEXT_ID_INFO_LEN) {
+		pr_err("Emulated peripheral device text id info exceeds PD_TEXT_ID_INFO_LEN: " __stringify(PD_TEXT_ID_INFO_LEN) "\n");
+		return -EOVERFLOW;
+	}
+
+	BUILD_BUG_ON(sizeof(dev->t10_wwn.pd_text_id_info) != PD_TEXT_ID_INFO_LEN);
+	strscpy(dev->t10_wwn.pd_text_id_info, stripped,
+	       sizeof(dev->t10_wwn.pd_text_id_info));
+
+	pr_debug("Target_Core_ConfigFS: Set emulated peripheral dev text id info:"
+		  " %s\n", dev->t10_wwn.pd_text_id_info);
+
+	return count;
+}
+
 /*
  * Generic wrapper for dumping VPD identifiers by association.
  */
@@ -1797,6 +1845,7 @@ CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
+CONFIGFS_ATTR(target_wwn_, pd_text_id_info);
 
 static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
 	&target_wwn_attr_vendor_id,
@@ -1808,6 +1857,7 @@ static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
 	&target_wwn_attr_vpd_assoc_logical_unit,
 	&target_wwn_attr_vpd_assoc_target_port,
 	&target_wwn_attr_vpd_assoc_scsi_target_device,
+	&target_wwn_attr_pd_text_id_info,
 	NULL,
 };
 
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index fe2b888bcb43..f20bc6ea019b 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -25,6 +25,8 @@
 #include "target_core_ua.h"
 #include "target_core_xcopy.h"
 
+#define PD_TEXT_ID_INFO_HDR_LEN	4
+
 static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
@@ -1999,6 +2001,18 @@ static const struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
 	.enabled = spc_rsoc_enabled,
 };
 
+static struct target_opcode_descriptor tcm_opcode_report_identifying_information = {
+	.support = SCSI_SUPPORT_FULL,
+	.serv_action_valid = 1,
+	.opcode = MAINTENANCE_IN,
+	.service_action = MI_REPORT_IDENTIFYING_INFORMATION,
+	.cdb_size = 12,
+	.usage_bits = {MAINTENANCE_IN, MI_REPORT_IDENTIFYING_INFORMATION,
+		       0x00, 0x00,
+		       0x00, 0x00, 0xff, 0xff,
+		       0xff, 0xff, 0xff, SCSI_CONTROL_MASK},
+};
+
 static bool tcm_is_set_tpg_enabled(const struct target_opcode_descriptor *descr,
 				   struct se_cmd *cmd)
 {
@@ -2086,6 +2100,7 @@ static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
 	&tcm_opcode_report_target_pgs,
 	&tcm_opcode_report_supp_opcodes,
 	&tcm_opcode_set_tpg,
+	&tcm_opcode_report_identifying_information,
 };
 
 static int
@@ -2303,6 +2318,72 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
 	return ret;
 }
 
+static sense_reason_t
+spc_fill_pd_text_id_info(struct se_cmd *cmd, u8 *cdb)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf;	/* kernel staging copy of the parameter data */
+	unsigned char *rbuf;	/* kmapped view of the command data buffer */
+	u32 buf_len;		/* ALLOCATION LENGTH, later clamped to data_len */
+	u16 data_len;		/* header + string (incl. NUL when non-empty) */
+
+	buf_len = get_unaligned_be32(&cdb[6]);	/* CDB bytes 6..9 */
+	if (buf_len < PD_TEXT_ID_INFO_HDR_LEN)	/* no room for 4-byte header */
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	data_len = strlen(dev->t10_wwn.pd_text_id_info);
+	if (data_len > 0)
+		/* trailing null */
+		data_len += 1;
+
+	data_len = data_len + PD_TEXT_ID_INFO_HDR_LEN;
+
+	if (data_len < buf_len)	/* never return more than is available */
+		buf_len = data_len;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate response buffer for IDENTITY INFO\n");
+		return TCM_OUT_OF_RESOURCES;
+	}
+
+	scnprintf(&buf[PD_TEXT_ID_INFO_HDR_LEN], buf_len - PD_TEXT_ID_INFO_HDR_LEN, "%s",
+		 dev->t10_wwn.pd_text_id_info);
+
+	put_unaligned_be16(data_len, &buf[2]);	/* full length, even if truncated */
+
+	rbuf = transport_kmap_data_sg(cmd);
+	if (!rbuf) {
+		pr_err("transport_kmap_data_sg() failed in %s\n", __func__);
+		kfree(buf);
+		return TCM_OUT_OF_RESOURCES;
+	}
+
+	memcpy(rbuf, buf, buf_len);
+	transport_kunmap_data_sg(cmd);
+	kfree(buf);
+
+	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, buf_len);
+	return TCM_NO_SENSE;
+}
+
+static sense_reason_t
+spc_emulate_report_id_info(struct se_cmd *cmd)
+{
+	u8 *cdb = cmd->t_task_cdb;
+	sense_reason_t rc;
+
+	switch ((cdb[10] >> 1)) {	/* INFORMATION TYPE field: bits 7:1 of CDB byte 10 (SPC) */
+	case 2:		/* text identification information -> pd_text_id_info */
+		rc = spc_fill_pd_text_id_info(cmd, cdb);
+		break;
+	default:	/* other information types are not implemented here */
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	return rc;
+}
+
 sense_reason_t
 spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 {
@@ -2442,6 +2523,11 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 			    MI_REPORT_SUPPORTED_OPERATION_CODES)
 				cmd->execute_cmd =
 					spc_emulate_report_supp_op_codes;
+			if ((cdb[1] & 0x1f) ==
+			    MI_REPORT_IDENTIFYING_INFORMATION) {
+				cmd->execute_cmd =
+					spc_emulate_report_id_info;
+			}
 			*size = get_unaligned_be32(&cdb[6]);
 		} else {
 			/*
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 9ab91b4c05b0..64c234096e23 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -431,8 +431,7 @@ void ufshcd_mcq_disable(struct ufs_hba *hba)
 
 void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
 {
-	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
-		      REG_UFS_MEM_CFG);
+	ufshcd_rmwl(hba, ESI_ENABLE, ESI_ENABLE, REG_UFS_MEM_CFG);	/* ESI_ENABLE is BIT(1) */
 }
 EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
 
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
index 2efc271a96fa..a009d66db15a 100644
--- a/include/linux/transport_class.h
+++ b/include/linux/transport_class.h
@@ -56,6 +56,7 @@ struct anon_transport_class cls = {				\
 struct transport_container {
 	struct attribute_container ac;
 	const struct attribute_group *statistics;
+	const struct attribute_group *encryption;
 };
 
 #define attribute_container_to_transport_container(x) \
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 96b350366670..08ac3200b4a4 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -106,12 +106,15 @@ enum scsi_disposition {
 };
 
 /*
- * Midlevel queue return values.
+ * Status values returned by the .queuecommand() callback if a command has not
+ * been queued.
  */
-#define SCSI_MLQUEUE_HOST_BUSY   0x1055
-#define SCSI_MLQUEUE_DEVICE_BUSY 0x1056
-#define SCSI_MLQUEUE_EH_RETRY    0x1057
-#define SCSI_MLQUEUE_TARGET_BUSY 0x1058
+enum scsi_qc_status {
+	SCSI_MLQUEUE_HOST_BUSY   = 0x1055,
+	SCSI_MLQUEUE_DEVICE_BUSY = 0x1056,
+	SCSI_MLQUEUE_EH_RETRY    = 0x1057,
+	SCSI_MLQUEUE_TARGET_BUSY = 0x1058,
+};
 
 /*
  *  Use these to separate status msg and our bytes
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index b908aacfef48..9f30625aa0d3 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -317,6 +317,15 @@ struct fc_fpin_stats {
 	u64 cn_device_specific;
 };
 
+#define FC_RPORT_ENCRYPTION_STATUS_MAX_LEN      14
+/*
+ * Encryption Information
+ */
+struct fc_encryption_info {
+	/* Encryption Status */
+	u8 status;
+};
+
 /* Macro for use in defining Remote Port attributes */
 #define FC_RPORT_ATTR(_name,_mode,_show,_store)				\
 struct device_attribute dev_attr_rport_##_name = 	\
@@ -364,6 +373,7 @@ struct fc_rport {	/* aka fc_starget_attrs */
 	u64 port_name;
 	u32 port_id;
 	u32 roles;
+	struct fc_encryption_info enc_info;
 	enum fc_port_state port_state;	/* Will only be ONLINE or UNKNOWN */
 	u32 scsi_target_id;
 	u32 fast_io_fail_tmo;
@@ -691,6 +701,8 @@ struct fc_function_template {
 	struct fc_host_statistics * (*get_fc_host_stats)(struct Scsi_Host *);
 	void	(*reset_fc_host_stats)(struct Scsi_Host *);
 
+	struct fc_encryption_info * (*get_fc_rport_enc_info)(struct fc_rport *);
+
 	int	(*issue_fc_host_lip)(struct Scsi_Host *);
 
 	void    (*dev_loss_tmo_callbk)(struct fc_rport *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7016d93fa383..b62d5fcce950 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -108,6 +108,9 @@
 #define SE_MODE_PAGE_BUF			512
 #define SE_SENSE_BUF				96
 
+/* Peripheral Device Text Identification Information */
+#define PD_TEXT_ID_INFO_LEN			256
+
 enum target_submit_type {
 	/* Use the fabric driver's default submission type */
 	TARGET_FABRIC_DEFAULT_SUBMIT,
@@ -348,6 +351,7 @@ struct t10_wwn {
 	struct se_device *t10_dev;
 	struct config_group t10_wwn_group;
 	struct list_head t10_vpd_list;
+	char pd_text_id_info[PD_TEXT_ID_INFO_LEN];
 };
 
 struct t10_pr_registration {
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index d36df24242a3..806fdaf52bd9 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -288,6 +288,7 @@ enum {
 
 /* REG_UFS_MEM_CFG - Global Config Registers 300h */
 #define MCQ_MODE_SELECT	BIT(0)
+#define ESI_ENABLE	BIT(1)
 
 /* CQISy - CQ y Interrupt Status Register  */
 #define UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS	0x1
