From: Himanshu Madhani <himanshu.madhani@cavium.com>
Subject: [PATCH v3 3/5] qla2xxx_nvmet: Add FC-NVMe Target handling
Date: Fri, 28 Sep 2018 15:46:24 -0700
Message-ID: <20180928224626.19777-4-himanshu.madhani@cavium.com>
In-Reply-To: <20180928224626.19777-1-himanshu.madhani@cavium.com>

From: Anil Gurumurthy <anil.gurumurthy@cavium.com>

This patch adds the following code to the driver to
support FC-NVMe Target:

- Updated ql2xnvmeenable to allow FC-NVMe Target operation
- Added Link Service Request handling for the NVMe Target
- Added pass-through IOCB for LS4 requests
- Added CTIO for sending responses to the firmware
- Added FC4 registration for FC-NVMe Target
- Added PUREX IOCB support for login processing in FC-NVMe Target mode
- Added continuation IOCB for PUREX
- Added session creation with PUREX IOCB in FC-NVMe Target mode
- To enable FC-NVMe Target mode, set ql2xnvmeenable=2 when loading the driver
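
For example, when the driver is built as a module:

    modprobe qla2xxx ql2xnvmeenable=2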

Signed-off-by: Anil Gurumurthy <anil.gurumurthy@cavium.com>
Signed-off-by: Giridhar Malavali <giridhar.malavali@cavium.com>
Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
---
 drivers/scsi/qla2xxx/Makefile     |   3 +-
 drivers/scsi/qla2xxx/qla_def.h    |  32 +-
 drivers/scsi/qla2xxx/qla_fw.h     | 263 ++++++++++
 drivers/scsi/qla2xxx/qla_gbl.h    |  21 +-
 drivers/scsi/qla2xxx/qla_gs.c     |  14 +-
 drivers/scsi/qla2xxx/qla_init.c   |  46 +-
 drivers/scsi/qla2xxx/qla_isr.c    | 112 ++++-
 drivers/scsi/qla2xxx/qla_mbx.c    | 101 +++-
 drivers/scsi/qla2xxx/qla_nvme.h   |  33 --
 drivers/scsi/qla2xxx/qla_nvmet.c  |  36 ++
 drivers/scsi/qla2xxx/qla_os.c     |  75 ++-
 drivers/scsi/qla2xxx/qla_target.c | 988 ++++++++++++++++++++++++++++++++++++--
 drivers/scsi/qla2xxx/qla_target.h |  90 ++++
 13 files changed, 1731 insertions(+), 83 deletions(-)

diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 17d5bc1cc56b..ec924733c10e 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
 		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
+		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o \
+		qla_nvmet.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a37a4d2261e2..9e2e2d9ddb30 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -480,6 +480,10 @@ struct srb_iocb {
 			uint32_t dl;
 			uint32_t timeout_sec;
 			struct	list_head   entry;
+			uint32_t exchange_address;
+			uint16_t nport_handle;
+			uint8_t vp_index;
+			void *cmd;
 		} nvme;
 		struct {
 			u16 cmd;
@@ -490,7 +494,11 @@ struct srb_iocb {
 	struct timer_list timer;
 	void (*timeout)(void *);
 };
-
+struct srb_nvme_els_rsp {
+	dma_addr_t dma_addr;
+	void *dma_ptr;
+	void *ptr;
+};
+
 /* Values for srb_ctx type */
 #define SRB_LOGIN_CMD	1
 #define SRB_LOGOUT_CMD	2
@@ -518,6 +526,8 @@ struct srb_iocb {
 #define SRB_NVME_ELS_RSP 24
 #define SRB_NVMET_LS   25
 #define SRB_NVMET_FCP  26
+#define SRB_NVMET_ABTS	27
+#define SRB_NVMET_SEND_ABTS	28
 
 enum {
 	TYPE_SRB,
@@ -548,10 +558,13 @@ typedef struct srb {
 	int rc;
 	int retry_count;
 	struct completion comp;
+	struct work_struct nvmet_comp_work;
+	uint16_t comp_status;
 	union {
 		struct srb_iocb iocb_cmd;
 		struct bsg_job *bsg_job;
 		struct srb_cmd scmd;
+		struct srb_nvme_els_rsp snvme_els;
 	} u;
 	void (*done)(void *, int);
 	void (*free)(void *);
@@ -2276,6 +2289,15 @@ struct qlt_plogi_ack_t {
 	void		*fcport;
 };
 
+/* NVMET */
+struct qlt_purex_plogi_ack_t {
+	struct list_head	list;
+	struct __fc_plogi rcvd_plogi;
+	port_id_t	id;
+	int		ref_count;
+	void		*fcport;
+};
+
 struct ct_sns_desc {
 	struct ct_sns_pkt	*ct_sns;
 	dma_addr_t		ct_sns_dma;
@@ -3238,6 +3260,7 @@ enum qla_work_type {
 	QLA_EVT_SP_RETRY,
 	QLA_EVT_IIDMA,
 	QLA_EVT_ELS_PLOGI,
+	QLA_EVT_NEW_NVMET_SESS,
 };
 
 
@@ -4232,6 +4255,7 @@ typedef struct scsi_qla_host {
 		uint32_t	qpairs_req_created:1;
 		uint32_t	qpairs_rsp_created:1;
 		uint32_t	nvme_enabled:1;
+		uint32_t	nvmet_enabled:1;
 	} flags;
 
 	atomic_t	loop_state;
@@ -4277,6 +4301,7 @@ typedef struct scsi_qla_host {
 #define N2N_LOGIN_NEEDED	30
 #define IOCB_WORK_ACTIVE	31
 #define SET_ZIO_THRESHOLD_NEEDED 32
+#define NVMET_PUREX		33
 
 	unsigned long	pci_flags;
 #define PFLG_DISCONNECTED	0	/* PCI device removed */
@@ -4317,6 +4342,7 @@ typedef struct scsi_qla_host {
 	uint8_t		fabric_node_name[WWN_SIZE];
 
 	struct		nvme_fc_local_port *nvme_local_port;
+	struct		nvmet_fc_target_port *targetport;
 	struct completion nvme_del_done;
 	struct list_head nvme_rport_list;
 
@@ -4397,6 +4423,9 @@ typedef struct scsi_qla_host {
 	uint16_t	n2n_id;
 	struct list_head gpnid_list;
 	struct fab_scan scan;
+	/*NVMET*/
+	struct list_head	purex_atio_list;
+	struct completion	purex_plogi_sess;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
@@ -4667,6 +4696,7 @@ struct sff_8247_a0 {
 	 !ha->current_topology)
 
 #include "qla_target.h"
+#include "qla_nvmet.h"
 #include "qla_gbl.h"
 #include "qla_dbg.h"
 #include "qla_inline.h"
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 50c1e6c62e31..67a42d153f64 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -723,6 +723,269 @@ struct ct_entry_24xx {
 	uint32_t dseg_1_len;		/* Data segment 1 length. */
 };
 
+/* NVME-T changes */
+/*
+ * Fibre Channel Header
+ * Little Endian format.  As received in PUREX and PURLS
+ */
+struct __fc_hdr {
+	uint16_t	did_lo;
+	uint8_t		did_hi;
+	uint8_t		r_ctl;
+	uint16_t	sid_lo;
+	uint8_t		sid_hi;
+	uint8_t		cs_ctl;
+	uint16_t	f_ctl_lo;
+	uint8_t		f_ctl_hi;
+	uint8_t		type;
+	uint16_t	seq_cnt;
+	uint8_t		df_ctl;
+	uint8_t		seq_id;
+	uint16_t	rx_id;
+	uint16_t	ox_id;
+	uint32_t	param;
+};
+
+/*
+ * Fibre Channel LOGO acc
+ * In big endian format
+ */
+struct __fc_logo_acc {
+	uint8_t	op_code;
+	uint8_t	reserved[3];
+};
+
+struct __fc_lsrjt {
+	uint8_t	op_code;
+	uint8_t	reserved[3];
+	uint8_t	reserved2;
+	uint8_t	reason;
+	uint8_t	exp;
+	uint8_t	vendor;
+};
+
+/*
+ * Fibre Channel LOGO Frame
+ * Little Endian format. As received in PUREX
+ */
+struct __fc_logo {
+	struct __fc_hdr	hdr;
+	uint16_t	reserved;
+	uint8_t		reserved1;
+	uint8_t		op_code;
+	uint16_t	sid_lo;
+	uint8_t		sid_hi;
+	uint8_t		reserved2;
+	uint8_t		pname[8];
+};
+
+/*
+ * Fibre Channel PRLI Frame
+ * Little Endian format. As received in PUREX
+ */
+struct __fc_prli {
+	struct  __fc_hdr	hdr;
+	uint16_t		pyld_length;  /* word 0 of prli */
+	uint8_t		page_length;
+	uint8_t		op_code;
+	uint16_t	common;/* word 1.  1st word of SP page */
+	uint8_t		type_ext;
+	uint8_t		prli_type;
+#define PRLI_TYPE_FCP  0x8
+#define PRLI_TYPE_NVME 0x28
+	union {
+		struct {
+			uint32_t	reserved[2];
+			uint32_t	sp_info;
+		} fcp;
+		struct {
+			uint32_t	reserved[2];
+			uint32_t	sp_info;
+#define NVME_PRLI_DISC BIT_3
+#define NVME_PRLI_TRGT BIT_4
+#define NVME_PRLI_INIT BIT_5
+#define NVME_PRLI_CONFIRMATION BIT_7
+			uint32_t	reserved1;
+		} nvme;
+	};
+};
+
+/*
+ * Fibre Channel PLOGI Frame
+ * Little Endian format.  As received in PUREX
+ */
+struct __fc_plogi {
+	uint16_t        did_lo;
+	uint8_t         did_hi;
+	uint8_t         r_ctl;
+	uint16_t        sid_lo;
+	uint8_t         sid_hi;
+	uint8_t         cs_ctl;
+	uint16_t        f_ctl_lo;
+	uint8_t         f_ctl_hi;
+	uint8_t         type;
+	uint16_t        seq_cnt;
+	uint8_t         df_ctl;
+	uint8_t         seq_id;
+	uint16_t        rx_id;
+	uint16_t        ox_id;
+	uint32_t        param;
+	uint8_t         rsvd[3];
+	uint8_t         op_code;
+	uint32_t        cs_params[4]; /* common service params */
+	uint8_t         pname[8];     /* port name */
+	uint8_t         nname[8];     /* node name */
+	uint32_t        class1[4];    /* class 1 service params */
+	uint32_t        class2[4];    /* class 2 service params */
+	uint32_t        class3[4];    /* class 3 service params */
+	uint32_t        class4[4];
+	uint32_t        vndr_vers[4];
+};
+
+#define IOCB_TYPE_ELS_PASSTHRU 0x53
+
+/* ELS Pass-Through IOCB (IOCB_TYPE_ELS_PASSTHRU = 0x53)
+ */
+struct __els_pt {
+	uint8_t	entry_type;		/* Entry type. */
+	uint8_t	entry_count;		/* Entry count. */
+	uint8_t	sys_define;		/* System defined. */
+	uint8_t	entry_status;		/* Entry Status. */
+	uint32_t	handle;
+	uint16_t	status;       /* when returned from fw */
+	uint16_t	nphdl;
+	uint16_t	tx_dsd_cnt;
+	uint8_t	vp_index;
+	uint8_t	sof;          /* bits 7:4 */
+	uint32_t	rcv_exchg_id;
+	uint16_t	rx_dsd_cnt;
+	uint8_t	op_code;
+	uint8_t	rsvd1;
+	uint16_t	did_lo;
+	uint8_t	did_hi;
+	uint8_t	sid_hi;
+	uint16_t	sid_lo;
+	uint16_t	cntl_flags;
+#define ELS_PT_RESPONDER_ACC   (1 << 13)
+	uint32_t	rx_bc;
+	uint32_t	tx_bc;
+	uint32_t	tx_dsd[2];	/* Data segment 0 address. */
+	uint32_t	tx_dsd_len;		/* Data segment 0 length. */
+	uint32_t	rx_dsd[2];	/* Data segment 1 address. */
+	uint32_t	rx_dsd_len;		/* Data segment 1 length. */
+};
+
+/*
+ * Reject a FCP PRLI
+ *
+ */
+struct __fc_prli_rjt {
+	uint8_t op_code;	/* word 0 of prli rjt */
+	uint8_t rsvd1[3];
+	uint8_t rsvd2;		/* word 1 of prli rjt */
+	uint8_t	reason;
+#define PRLI_RJT_REASON 0x3	/* logical error */
+	uint8_t	expl;
+	uint8_t vendor;
+#define PRLI_RJT_FCP_RESP_LEN 8
+};
+
+/*
+ * Fibre Channel PRLI ACC
+ * Payload only
+ */
+struct __fc_prli_acc {
+/* payload only.  In big-endian format */
+	uint8_t         op_code;      /* word 0 of prli acc */
+	uint8_t         page_length;
+#define PRLI_FCP_PAGE_LENGTH  16
+#define PRLI_NVME_PAGE_LENGTH 20
+	uint16_t        pyld_length;
+	uint8_t         type;         /* word 1 of prli acc */
+	uint8_t         type_ext;
+	uint16_t        common;
+#define PRLI_EST_FCP_PAIR 0x2000
+#define PRLI_REQ_EXEC     0x0100
+#define PRLI_REQ_DOES_NOT_EXIST 0x0400
+	union {
+		struct {
+			uint32_t	reserved[2];
+			uint32_t	sp_info;
+			/* hard coding resp.  target, rdxfr disabled.*/
+#define FCP_PRLI_SP 0x12
+		} fcp;
+		struct {
+			uint32_t	reserved[2];
+			uint32_t	sp_info;
+			uint16_t	reserved2;
+			uint16_t	first_burst;
+		} nvme;
+	};
+#define PRLI_ACC_FCP_RESP_LEN  20
+#define PRLI_ACC_NVME_RESP_LEN 24
+
+};
+
+/*
+ * ISP queue - PUREX IOCB entry structure definition
+ */
+#define PUREX_IOCB_TYPE		0x51 /* CT Pass Through IOCB entry */
+struct purex_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint16_t reserved1;
+	uint8_t vp_idx;
+	uint8_t reserved2;
+
+	uint16_t status_flags;
+	uint16_t nport_handle;
+
+	uint16_t frame_size;
+	uint16_t trunc_frame_size;
+
+	uint32_t rx_xchg_addr;
+
+	uint8_t d_id[3];
+	uint8_t r_ctl;
+
+	uint8_t s_id[3];
+	uint8_t cs_ctl;
+
+	uint8_t f_ctl[3];
+	uint8_t type;
+
+	uint16_t seq_cnt;
+	uint8_t df_ctl;
+	uint8_t seq_id;
+
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint32_t param;
+
+	uint8_t pyld[20];
+#define PUREX_PYLD_SIZE 44 /* Number of bytes (hdr+pyld) in this IOCB */
+};
+
+#define PUREX_ENTRY_SIZE	(sizeof(struct purex_entry_24xx))
+
+#define CONT_SENSE_DATA 60
+/*
+ * Continuation Status Type 0 (IOCB_TYPE_STATUS_CONT = 0x10)
+ * Section 5.6 FW Interface Spec
+ */
+struct __status_cont {
+	uint8_t entry_type;		/* Entry type. - 0x10 */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint8_t reserved;
+
+	uint8_t data[CONT_SENSE_DATA];
+} __packed;
+
+
 /*
  * ISP queue - ELS Pass-Through entry structure definition.
  */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2946c65812cd..7b574c0d1423 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -123,8 +123,19 @@ void qla_rscn_replay(fc_port_t *fcport);
 /*
  * Used by FC-NVMe Target
  */
-int qla_nvmet_ls(srb_t *sp, void *rsp_pkt);
-int qlt_send_els_resp(srb_t *sp, void *pkt);
+extern int qla_nvmet_ls(srb_t *sp, void *rsp_pkt);
+extern int qla2x00_get_plogi_template(scsi_qla_host_t *vha, dma_addr_t buf,
+	uint16_t length);
+extern void qlt_dequeue_purex(struct scsi_qla_host *vha);
+int qla24xx_post_nvmet_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
+	u8 *port_name, void *pla);
+int qlt_send_els_resp(srb_t *sp, struct __els_pt *pkt);
+extern void nvmet_release_sessions(struct scsi_qla_host *vha);
+struct fc_port *qla_nvmet_find_sess_by_s_id(scsi_qla_host_t *vha,
+	const uint32_t s_id);
+void qla_nvme_cmpl_io(struct srb_iocb *);
+void qla24xx_nvmet_abts_resp_iocb(struct scsi_qla_host *vha,
+	struct abts_resp_to_24xx *pkt, struct req_que *req);
 
 /*
  * Global Data in qla_os.c source file.
@@ -320,7 +331,10 @@ extern int
 qla2x00_set_fw_options(scsi_qla_host_t *, uint16_t *);
 
 extern int
-qla2x00_mbx_reg_test(scsi_qla_host_t *);
+qla2x00_set_purex_mode(scsi_qla_host_t *vha);
+
+extern int
+qla2x00_mbx_reg_test(scsi_qla_host_t *vha);
 
 extern int
 qla2x00_verify_checksum(scsi_qla_host_t *, uint32_t);
@@ -905,5 +919,4 @@ void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
 void qlt_remove_target_resources(struct qla_hw_data *);
 void qlt_clr_qp_table(struct scsi_qla_host *vha);
 void qlt_set_mode(struct scsi_qla_host *);
-
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 90cfa394f942..ea55ed972eed 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -646,9 +646,11 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
 	ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
 	ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
 	ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
-	ct_req->req.rft_id.fc4_types[2] = 0x01;		/* FCP-3 */
 
-	if (vha->flags.nvme_enabled)
+	if (!vha->flags.nvmet_enabled)
+		ct_req->req.rft_id.fc4_types[2] = 0x01;		/* FCP-3 */
+
+	if (vha->flags.nvme_enabled || vha->flags.nvmet_enabled)
 		ct_req->req.rft_id.fc4_types[6] = 1;    /* NVMe type 28h */
 
 	sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
@@ -691,6 +693,10 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
 		return (QLA_SUCCESS);
 	}
 
+	/* only single mode for now */
+	if ((vha->flags.nvmet_enabled) && (type == FC4_TYPE_FCP_SCSI))
+		return (QLA_SUCCESS);
+
 	return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
 	    FC4_TYPE_FCP_SCSI);
 }
@@ -2355,7 +2361,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
 	    eiter->a.fc4_types[2],
 	    eiter->a.fc4_types[1]);
 
-	if (vha->flags.nvme_enabled) {
+	if (vha->flags.nvme_enabled || vha->flags.nvmet_enabled) {
 		eiter->a.fc4_types[6] = 1;	/* NVMe type 28h */
 		ql_dbg(ql_dbg_disc, vha, 0x211f,
 		    "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
@@ -2559,7 +2565,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
 	    "Port Active FC4 Type = %02x %02x.\n",
 	    eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
 
-	if (vha->flags.nvme_enabled) {
+	if (vha->flags.nvme_enabled || vha->flags.nvmet_enabled) {
 		eiter->a.port_fc4_type[4] = 0;
 		eiter->a.port_fc4_type[5] = 0;
 		eiter->a.port_fc4_type[6] = 1;	/* NVMe type 28h */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 28e6c215c695..cad8e0185a3b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -19,6 +19,7 @@
 
 #include <target/target_core_base.h>
 #include "qla_target.h"
+#include "qla_nvmet.h"
 
 /*
 *  QLogic ISP2x00 Hardware Support Function Prototypes.
@@ -1109,6 +1110,23 @@ int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 	return qla2x00_post_work(vha, e);
 }
 
+/* NVMET */
+int qla24xx_post_nvmet_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
+	u8 *port_name, void *pla)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_NVMET_SESS);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.new_sess.id = *id;
+	e->u.new_sess.pla = pla;
+	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+
+	return qla2x00_post_work(vha, e);
+}
+
 int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 {
 	srb_t *sp;
@@ -3594,6 +3612,13 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 					rval = qla2x00_get_fw_version(vha);
 				if (rval != QLA_SUCCESS)
 					goto failed;
+
+				if (vha->flags.nvmet_enabled) {
+					ql_log(ql_log_info, vha, 0xffff,
+					    "Enabling PUREX mode\n");
+					qla2x00_set_purex_mode(vha);
+				}
+
 				ha->flags.npiv_supported = 0;
 				if (IS_QLA2XXX_MIDTYPE(ha) &&
 					 (ha->fw_attributes & BIT_2)) {
@@ -3814,11 +3839,14 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
 	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
 	if (ql2xmvasynctoatio &&
 	    (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
-		if (qla_tgt_mode_enabled(vha) ||
-		    qla_dual_mode_enabled(vha))
+		if ((qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) &&
+		    qlt_op_target_mode) {
+			ql_log(ql_log_info, vha, 0xffff,
+			    "Moving Purex to ATIO Q\n");
 			ha->fw_options[2] |= BIT_11;
-		else
+		} else {
 			ha->fw_options[2] &= ~BIT_11;
+		}
 	}
 
 	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
@@ -5466,7 +5494,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 				    &vha->dpc_flags))
 					break;
 			}
-			if (vha->flags.nvme_enabled) {
+			if (vha->flags.nvme_enabled ||
+			    vha->flags.nvmet_enabled) {
 				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
 					ql_dbg(ql_dbg_disc, vha, 0x2049,
 					    "Register NVME FC Type Features failed.\n");
@@ -5634,7 +5663,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
 
 				new_fcport->nvme_flag = 0;
 				new_fcport->fc4f_nvme = 0;
-				if (vha->flags.nvme_enabled &&
+				if ((vha->flags.nvme_enabled ||
+				    vha->flags.nvmet_enabled) &&
 				    swl[swl_idx].fc4f_nvme) {
 					new_fcport->fc4f_nvme =
 					    swl[swl_idx].fc4f_nvme;
@@ -8460,6 +8490,12 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
 			ha->fw_options[2] |= BIT_11;
 		else
 			ha->fw_options[2] &= ~BIT_11;
+
+		if (ql2xnvmeenable == 2 && qlt_op_target_mode) {
+			/* Enabled PUREX node */
+			ha->fw_options[1] |= FO1_ENABLE_PUREX;
+			ha->fw_options[2] |= BIT_11;
+		}
 	}
 
 	if (qla_tgt_mode_enabled(vha) ||
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d73b04e40590..5c1833f030a4 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -23,6 +23,8 @@ static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
 	sts_entry_t *);
 
+extern struct workqueue_struct *qla_nvmet_comp_wq;
+
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
  * @irq:
@@ -1583,6 +1585,12 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 			sp->name);
 		sp->done(sp, res);
 		return;
+	case SRB_NVME_ELS_RSP:
+		type = "nvme els";
+		ql_log(ql_log_info, vha, 0xffff,
+			"Completing %s: (%p) type=%d.\n", type, sp, sp->type);
+		sp->done(sp, 0);
+		return;
 	default:
 		ql_dbg(ql_dbg_user, vha, 0x503e,
 		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
@@ -2456,6 +2464,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		return;
 	}
 
+	if (sp->type == SRB_NVMET_LS) {
+		ql_log(ql_log_info, vha, 0xffff,
+			"Dump NVME-LS response pkt\n");
+		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+			(uint8_t *)pkt, 64);
+	}
+
 	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
 		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
 		return;
@@ -2825,6 +2840,12 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
 	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
 
+	ql_log(ql_log_info, vha, 0xffff,
+		"(%s-%d)Dumping the NVMET-ERROR pkt IOCB\n",
+		__func__, __LINE__);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+		(uint8_t *)pkt, 64);
+
 	if (que >= ha->max_req_queues || !ha->req_q_map[que])
 		goto fatal;
 
@@ -2918,6 +2939,23 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	sp->done(sp, 0);
 }
 
+/*
+ * Post a completion to the NVMET layer via a workqueue so that the
+ * SRB done() callback runs in process context rather than in the
+ * interrupt path.
+ */
+
+static void qla_nvmet_comp_work(struct work_struct *work)
+{
+	srb_t *sp = container_of(work, srb_t, nvmet_comp_work);
+
+	sp->done(sp, sp->comp_status);
+}
+
+/**
+ * qla24xx_nvme_ls4_iocb() - Process LS4 completions
+ * @vha: SCSI driver HA context
+ * @pkt: LS4 req packet
+ * @req: Request Queue
+ */
 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
     struct pt_ls4_request *pkt, struct req_que *req)
 {
@@ -2929,11 +2967,78 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
 	if (!sp)
 		return;
 
+	ql_log(ql_log_info, vha, 0xc01f,
+		"Dumping response pkt for SRB type: %#x\n", sp->type);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+		(uint8_t *)pkt, 16);
+
 	comp_status = le16_to_cpu(pkt->status);
-	sp->done(sp, comp_status);
+	sp->comp_status = comp_status;
+	/* Queue the work item */
+	INIT_WORK(&sp->nvmet_comp_work, qla_nvmet_comp_work);
+	queue_work(qla_nvmet_comp_wq, &sp->nvmet_comp_work);
 }
 
 /**
+ * qla24xx_nvmet_fcp_iocb() - Process FCP completions
+ * @vha: SCSI driver HA context
+ * @pkt: FCP completion from firmware
+ * @req: Request Queue
+ */
+static void qla24xx_nvmet_fcp_iocb(struct scsi_qla_host *vha,
+	struct ctio_nvme_from_27xx *pkt, struct req_que *req)
+{
+	srb_t *sp;
+	const char func[] = "NVMET_FCP_IOCB";
+	uint16_t comp_status;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	if ((pkt->entry_status) || (pkt->status != 1)) {
+		ql_log(ql_log_info, vha, 0xc01f,
+			"Dumping response pkt for SRB type: %#x\n", sp->type);
+		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+			(uint8_t *)pkt, 16);
+	}
+
+	comp_status = le16_to_cpu(pkt->status);
+	sp->comp_status = comp_status;
+	/* Queue the work item */
+	INIT_WORK(&sp->nvmet_comp_work, qla_nvmet_comp_work);
+	queue_work(qla_nvmet_comp_wq, &sp->nvmet_comp_work);
+}
+
+/**
+ * qla24xx_nvmet_abts_resp_iocb() - Process ABTS completions
+ * @vha: SCSI driver HA context
+ * @pkt: ABTS completion from firmware
+ * @req: Request Queue
+ */
+void qla24xx_nvmet_abts_resp_iocb(struct scsi_qla_host *vha,
+	struct abts_resp_to_24xx *pkt, struct req_que *req)
+{
+	srb_t *sp;
+	const char func[] = "NVMET_ABTS_RESP_IOCB";
+	uint16_t comp_status;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	ql_log(ql_log_info, vha, 0xc01f,
+		"Dumping response pkt for SRB type: %#x\n", sp->type);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+		(uint8_t *)pkt, 16);
+
+	comp_status = le16_to_cpu(pkt->entry_status);
+	sp->comp_status = comp_status;
+	/* Queue the work item */
+	INIT_WORK(&sp->nvmet_comp_work, qla_nvmet_comp_work);
+	queue_work(qla_nvmet_comp_wq, &sp->nvmet_comp_work);
+}
+
+/**
  * qla24xx_process_response_queue() - Process response queue entries.
  * @vha: SCSI driver HA context
  * @rsp: response queue
@@ -3011,6 +3116,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
 			    rsp->req);
 			break;
+		case CTIO_NVME:
+			qla24xx_nvmet_fcp_iocb(vha,
+			    (struct ctio_nvme_from_27xx *)pkt,
+			    rsp->req);
+			break;
 		case NOTIFY_ACK_TYPE:
 			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
 				qlt_response_pkt_all_vps(vha, rsp,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index bd8c86aeccc2..cdb413486d2b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -61,6 +61,7 @@ static struct rom_cmd {
 	{ MBC_READ_SFP },
 	{ MBC_GET_RNID_PARAMS },
 	{ MBC_GET_SET_ZIO_THRESHOLD },
+	{ MBC_SET_RNID_PARAMS },
 };
 
 static int is_rom_cmd(uint16_t cmd)
@@ -1109,12 +1110,15 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
 		 * FW supports nvme and driver load parameter requested nvme.
 		 * BIT 26 of fw_attributes indicates NVMe support.
 		 */
-		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
+		if ((ha->fw_attributes_h & 0x400) && (ql2xnvmeenable == 1)) {
 			vha->flags.nvme_enabled = 1;
 			ql_log(ql_log_info, vha, 0xd302,
 			    "%s: FC-NVMe is Enabled (0x%x)\n",
 			     __func__, ha->fw_attributes_h);
 		}
+
+		if ((ha->fw_attributes_h & 0x400) && (ql2xnvmeenable == 2))
+			vha->flags.nvmet_enabled = 1;
 	}
 
 	if (IS_QLA27XX(ha)) {
@@ -1189,6 +1193,101 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
 	return rval;
 }
 
+#define OPCODE_PLOGI_TMPLT 7
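+/*
+ * Fetch the firmware's PLOGI payload template into the supplied
+ * DMA buffer via Get RNID Parameters, subcode OPCODE_PLOGI_TMPLT.
+ */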
+int
+qla2x00_get_plogi_template(scsi_qla_host_t *vha, dma_addr_t buf,
+	uint16_t length)
+{
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int rval;
+
+	mcp->mb[0] = MBC_GET_RNID_PARAMS;
+	mcp->mb[1] = OPCODE_PLOGI_TMPLT << 8;
+	mcp->mb[2] = MSW(LSD(buf));
+	mcp->mb[3] = LSW(LSD(buf));
+	mcp->mb[6] = MSW(MSD(buf));
+	mcp->mb[7] = LSW(MSD(buf));
+	mcp->mb[8] = length;
+	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->buf_size = length;
+	mcp->flags = MBX_DMA_IN;
+	mcp->tov = MBX_TOV_SECONDS;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	ql_dbg(ql_dbg_mbx, vha, 0x118f,
+	    "%s: %s rval=%x mb[0]=%x,%x.\n", __func__,
+	    (rval == QLA_SUCCESS) ? "Success" : "Failed",
+	    rval, mcp->mb[0], mcp->mb[1]);
+
+	return rval;
+}
+
+#define OPCODE_LIST_LENGTH	32	/* ELS opcode list */
+#define OPCODE_ELS_CMD		5	/* MBx1 cmd param */
+/*
+ * qla2x00_set_purex_mode
+ *	Enable purex mode for ELS commands
+ *
+ * Input:
+ *	vha = adapter block pointer.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_set_purex_mode(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	uint8_t *els_cmd_map;
+	dma_addr_t els_cmd_map_dma;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
+	    "Entered %s.\n", __func__);
+
+	els_cmd_map = dma_zalloc_coherent(&ha->pdev->dev, OPCODE_LIST_LENGTH,
+	    &els_cmd_map_dma, GFP_KERNEL);
+	if (!els_cmd_map) {
+		ql_log(ql_log_warn, vha, 0x7101,
+		    "Failed to allocate RDP els command param.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+
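+	/*
+	 * Each bit of the 32-byte map appears to enable PUREX for the
+	 * ELS opcode (byte * 8 + bit): 0x28 in byte 0 covers PLOGI
+	 * (0x03) and LOGO (0x05); 0x13 in byte 4 covers PRLI (0x20),
+	 * PRLO (0x21) and opcode 0x24; 0x05 in byte 10 covers PDISC
+	 * (0x50) and ADISC (0x52).
+	 */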
+	els_cmd_map[0] = 0x28; /* enable PLOGI and LOGO ELS */
+	els_cmd_map[4] = 0x13; /* enable PRLI ELS */
+	els_cmd_map[10] = 0x5;
+
+	mcp->mb[0] = MBC_SET_RNID_PARAMS;
+	mcp->mb[1] = OPCODE_ELS_CMD << 8;
+	mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
+	mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
+	mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
+	mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
+	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->buf_size = OPCODE_LIST_LENGTH;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	ql_dbg(ql_dbg_mbx, vha, 0x118d,
+	    "%s: %s rval=%x mb[0]=%x,%x.\n", __func__,
+	    (rval == QLA_SUCCESS) ? "Success" : "Failed",
+	    rval, mcp->mb[0], mcp->mb[1]);
+
+	dma_free_coherent(&ha->pdev->dev, OPCODE_LIST_LENGTH,
+	   els_cmd_map, els_cmd_map_dma);
+
+	return rval;
+}
+
 
 /*
  * qla2x00_set_fw_options
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index 4941d107fb1c..0902c3a27adc 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -106,39 +106,6 @@ struct pt_ls4_request {
 	uint32_t dseg1_address[2];
 	uint32_t dseg1_len;
 };
-
-#define PT_LS4_UNSOL 0x56	/* pass-up unsolicited rec FC-NVMe request */
-struct pt_ls4_rx_unsol {
-	uint8_t entry_type;
-	uint8_t entry_count;
-	uint16_t rsvd0;
-	uint16_t rsvd1;
-	uint8_t vp_index;
-	uint8_t rsvd2;
-	uint16_t rsvd3;
-	uint16_t nport_handle;
-	uint16_t frame_size;
-	uint16_t rsvd4;
-	uint32_t exchange_address;
-	uint8_t d_id[3];
-	uint8_t r_ctl;
-	uint8_t s_id[3];
-	uint8_t cs_ctl;
-	uint8_t f_ctl[3];
-	uint8_t type;
-	uint16_t seq_cnt;
-	uint8_t df_ctl;
-	uint8_t seq_id;
-	uint16_t rx_id;
-	uint16_t ox_id;
-	uint32_t param;
-	uint32_t desc0;
-#define PT_LS4_PAYLOAD_OFFSET 0x2c
-#define PT_LS4_FIRST_PACKET_LEN 20
-	uint32_t desc_len;
-	uint32_t payload[3];
-};
-
 /*
  * Global functions prototype in qla_nvme.c source file.
  */
diff --git a/drivers/scsi/qla2xxx/qla_nvmet.c b/drivers/scsi/qla2xxx/qla_nvmet.c
index 9d9a0ed68501..e879508c5433 100644
--- a/drivers/scsi/qla2xxx/qla_nvmet.c
+++ b/drivers/scsi/qla2xxx/qla_nvmet.c
@@ -35,6 +35,42 @@ qla_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
 }
 
 /*
+ * Build NVMET LS response
+ */
+int
+qla_nvmet_ls(srb_t *sp, void *pkt)
+{
+	struct srb_iocb *nvme;
+	struct pt_ls4_request *rsp_pkt = (struct pt_ls4_request *)pkt;
+	int     rval = QLA_SUCCESS;
+
+	nvme = &sp->u.iocb_cmd;
+
+	rsp_pkt->entry_type = PT_LS4_REQUEST;
+	rsp_pkt->entry_count = 1;
+	rsp_pkt->control_flags = cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT);
+	rsp_pkt->handle = sp->handle;
+
+	rsp_pkt->nport_handle = sp->fcport->loop_id;
+	rsp_pkt->vp_index = nvme->u.nvme.vp_index;
+	rsp_pkt->exchange_address = cpu_to_le32(nvme->u.nvme.exchange_address);
+
+	rsp_pkt->tx_dseg_count = 1;
+	rsp_pkt->tx_byte_count = cpu_to_le16(nvme->u.nvme.rsp_len);
+	rsp_pkt->dseg0_len = cpu_to_le16(nvme->u.nvme.rsp_len);
+	rsp_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
+	rsp_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
+
+	ql_log(ql_log_info, sp->vha, 0xffff,
+	    "Dumping the NVME-LS response IOCB\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, sp->vha, 0x2075,
+	    (uint8_t *)rsp_pkt, sizeof(*rsp_pkt));
+
+	return rval;
+}
+
+/*
  * qlt_nvmet_ls_done -
  * Invoked by the firmware interface to indicate the completion
  * of an LS cmd
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e97f57a2d883..106f8c2bfce1 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -142,8 +142,10 @@ int ql2xnvmeenable;
 #endif
 module_param(ql2xnvmeenable, int, 0644);
 MODULE_PARM_DESC(ql2xnvmeenable,
-    "Enables NVME support. "
-    "0 - no NVMe.  Default is Y");
+		"Enables NVME support.\n"
+		"0 - no NVMe.\n"
+		"1 - initiator,\n"
+		"2 - target. Default is 1\n");
 
 int ql2xenablehba_err_chk = 2;
 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
@@ -3421,6 +3423,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	qlt_add_target(ha, base_vha);
 
+	if (ql2xnvmeenable == 2)
+		qla_nvmet_create_targetport(base_vha);
+
 	clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
 
 	if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -3701,6 +3706,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
 	qla_nvme_delete(base_vha);
 
+	qla_nvmet_delete(base_vha);
+
 	dma_free_coherent(&ha->pdev->dev,
 		base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
 
@@ -5024,6 +5031,53 @@ static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
 		qla24xx_sp_unmap(vha, sp);
 	}
 }
+/* NVMET */
+static
+void qla24xx_create_new_nvmet_sess(struct scsi_qla_host *vha,
+	struct qla_work_evt *e)
+{
+	unsigned long flags;
+	fc_port_t *fcport = NULL;
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
+	if (fcport) {
+		ql_log(ql_log_info, vha, 0x11020,
+		    "Found fcport: %p for WWN: %8phC\n", fcport,
+		    e->u.new_sess.port_name);
+		fcport->d_id = e->u.new_sess.id;
+
+		/* Session existing with No loop_ID assigned */
+		if (fcport->loop_id == FC_NO_LOOP_ID) {
+			fcport->loop_id = qla2x00_find_new_loop_id(vha, fcport);
+			ql_log(ql_log_info, vha, 0x11021,
+			    "Allocated new loop_id: %#x for fcport: %p\n",
+			    fcport->loop_id, fcport);
+			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+		}
+	} else {
+		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+		if (fcport) {
+			fcport->d_id = e->u.new_sess.id;
+			fcport->loop_id = qla2x00_find_new_loop_id(vha, fcport);
+			ql_log(ql_log_info, vha, 0x11022,
+			    "Allocated new loop_id: %#x for fcport: %p\n",
+			    fcport->loop_id, fcport);
+
+			fcport->scan_state = QLA_FCPORT_FOUND;
+			fcport->flags |= FCF_FABRIC_DEVICE;
+			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+
+			memcpy(fcport->port_name, e->u.new_sess.port_name,
+			    WWN_SIZE);
+
+			list_add_tail(&fcport->list, &vha->vp_fcports);
+		}
+	}
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	complete(&vha->purex_plogi_sess);
+}
 
 void
 qla2x00_do_work(struct scsi_qla_host *vha)
@@ -5129,6 +5183,10 @@ qla2x00_do_work(struct scsi_qla_host *vha)
 			qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
 			    e->u.fcport.fcport, false);
 			break;
+		/* FC-NVMe Target */
+		case QLA_EVT_NEW_NVMET_SESS:
+			qla24xx_create_new_nvmet_sess(vha, e);
+			break;
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
@@ -6100,6 +6158,12 @@ qla2x00_do_dpc(void *data)
 				set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 		}
 
+		if (test_and_clear_bit(NVMET_PUREX, &base_vha->dpc_flags)) {
+			ql_log(ql_log_info, base_vha, 0x11022,
+			    "qla2xxx-nvmet: Received a frame on the wire\n");
+			qlt_dequeue_purex(base_vha);
+		}
+
 		if (test_and_clear_bit
 		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
 		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
@@ -6273,6 +6337,13 @@ qla2x00_do_dpc(void *data)
 				    ha->nvme_last_rptd_aen);
 			}
 		}
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		if (test_and_clear_bit(NVMET_PUREX, &base_vha->dpc_flags)) {
+			ql_log(ql_log_info, base_vha, 0x11025,
+				"nvmet: Received a frame on the wire\n");
+			qlt_dequeue_purex(base_vha);
+		}
+#endif
 
 		if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
 		    &base_vha->dpc_flags)) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index dc680ffe2f49..770d795b2f34 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -40,6 +40,7 @@
 #include <target/target_core_fabric.h>
 
 #include "qla_def.h"
+#include "qla_nvmet.h"
 #include "qla_target.h"
 
 static int ql2xtgt_tape_enable;
@@ -78,6 +79,8 @@ int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
 static int qla_sam_status = SAM_STAT_BUSY;
 static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
 
+int qlt_op_target_mode;
+
 /*
  * From scsi/fc/fc_fcp.h
  */
@@ -149,11 +152,16 @@ static inline uint32_t qlt_make_handle(struct qla_qpair *);
  */
 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
 struct kmem_cache *qla_tgt_plogi_cachep;
+static struct kmem_cache *qla_tgt_purex_plogi_cachep;
 static mempool_t *qla_tgt_mgmt_cmd_mempool;
 static struct workqueue_struct *qla_tgt_wq;
+static struct workqueue_struct *qla_nvmet_wq;
 static DEFINE_MUTEX(qla_tgt_mutex);
 static LIST_HEAD(qla_tgt_glist);
 
+/* WQ for nvmet completions */
+struct workqueue_struct *qla_nvmet_comp_wq;
+
 static const char *prot_op_str(u32 prot_op)
 {
 	switch (prot_op) {
@@ -348,13 +356,653 @@ void qlt_unknown_atio_work_fn(struct work_struct *work)
 	qlt_try_to_dequeue_unknown_atios(vha, 0);
 }
 
+#define ELS_RJT 0x01
+#define ELS_ACC 0x02
+
+struct fc_port *qla_nvmet_find_sess_by_s_id(
+	scsi_qla_host_t *vha,
+	const uint32_t s_id)
+{
+	struct fc_port *sess = NULL, *other_sess;
+	uint32_t other_sid;
+
+	list_for_each_entry(other_sess, &vha->vp_fcports, list) {
+		other_sid = other_sess->d_id.b.domain << 16 |
+				other_sess->d_id.b.area << 8 |
+				other_sess->d_id.b.al_pa;
+
+		if (other_sid == s_id) {
+			sess = other_sess;
+			break;
+		}
+	}
+	return sess;
+}
+
+/* Send an ELS response */
+int qlt_send_els_resp(srb_t *sp, struct __els_pt *els_pkt)
+{
+	struct purex_entry_24xx *purex = (struct purex_entry_24xx *)
+					sp->u.snvme_els.ptr;
+	dma_addr_t udma = sp->u.snvme_els.dma_addr;
+	struct fc_port *fcport;
+	port_id_t port_id;
+	uint16_t loop_id;
+
+	port_id.b.domain = purex->s_id[2];
+	port_id.b.area   = purex->s_id[1];
+	port_id.b.al_pa  = purex->s_id[0];
+	port_id.b.rsvd_1 = 0;
+
+	fcport = qla2x00_find_fcport_by_nportid(sp->vha, &port_id, 1);
+	if (fcport)
+		/* Use the loop_id from the existing session */
+		loop_id = fcport->loop_id;
+	else
+		loop_id = 0xFFFF;
+
+	ql_log(ql_log_info, sp->vha, 0xfff9,
+	    "sp: %p, purex: %p, udma: %pad, loop_id: 0x%x\n",
+	    sp, purex, &udma, loop_id);
+
+	els_pkt->entry_type = ELS_IOCB_TYPE;
+	els_pkt->entry_count = 1;
+
+	els_pkt->handle = sp->handle;
+	els_pkt->nphdl = cpu_to_le16(loop_id);
+	els_pkt->tx_dsd_cnt = cpu_to_le16(1);
+	els_pkt->vp_index = purex->vp_idx;
+	els_pkt->sof = EST_SOFI3;
+	els_pkt->rcv_exchg_id = cpu_to_le32(purex->rx_xchg_addr);
+	els_pkt->op_code = sp->cmd_type;
+	els_pkt->did_lo = cpu_to_le16(purex->s_id[0] | (purex->s_id[1] << 8));
+	els_pkt->did_hi = purex->s_id[2];
+	els_pkt->sid_hi = purex->d_id[2];
+	els_pkt->sid_lo = cpu_to_le16(purex->d_id[0] | (purex->d_id[1] << 8));
+
+	if (sp->gen2 == ELS_ACC)
+		els_pkt->cntl_flags = cpu_to_le16(EPD_ELS_ACC);
+	else
+		els_pkt->cntl_flags = cpu_to_le16(EPD_ELS_RJT);
+
+	els_pkt->tx_bc = cpu_to_le32(sp->gen1);
+	els_pkt->tx_dsd[0] = cpu_to_le32(LSD(udma));
+	els_pkt->tx_dsd[1] = cpu_to_le32(MSD(udma));
+	els_pkt->tx_dsd_len = cpu_to_le32(sp->gen1);
+	/* Memory Barrier */
+	wmb();
+
+	ql_log(ql_log_info, sp->vha, 0x11030, "Dumping PLOGI ELS\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, sp->vha, 0xffff,
+		(uint8_t *)els_pkt, sizeof(*els_pkt));
+
+	return 0;
+}
+
+static void qlt_nvme_els_done(void *s, int res)
+{
+	struct srb *sp = s;
+
+	ql_log(ql_log_info, sp->vha, 0x11031,
+	    "Done with NVME els command\n");
+
+	ql_log(ql_log_info, sp->vha, 0x11032,
+	    "sp: %p vha: %p, dma_ptr: %p, dma_addr: %pad, len: %#x\n",
+	    sp, sp->vha, sp->u.snvme_els.dma_ptr, &sp->u.snvme_els.dma_addr,
+	    sp->gen1);
+
+	qla2x00_rel_sp(sp);
+}
+
+static int qlt_send_plogi_resp(struct scsi_qla_host *vha, uint8_t op_code,
+	struct purex_entry_24xx *purex, struct fc_port *fcport)
+{
+	int ret, rval, i;
+	dma_addr_t plogi_ack_udma = vha->vha_tgt.qla_tgt->nvme_els_rsp;
+	void *plogi_ack_buf = vha->vha_tgt.qla_tgt->nvme_els_ptr;
+	uint8_t *tmp;
+	uint32_t *opcode;
+	srb_t *sp;
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		ql_log(ql_log_info, vha, 0x11033,
+		    "Failed to allocate SRB\n");
+		return -ENOMEM;
+	}
+
+	sp->type = SRB_NVME_ELS_RSP;
+	sp->done = qlt_nvme_els_done;
+	sp->vha = vha;
+
+	ql_log(ql_log_info, vha, 0x11034,
+	    "sp: %p, vha: %p, plogi_ack_buf: %p\n",
+	    sp, vha, plogi_ack_buf);
+
+	sp->u.snvme_els.dma_addr = plogi_ack_udma;
+	sp->u.snvme_els.dma_ptr = plogi_ack_buf;
+	sp->gen1 = 116;
+	sp->gen2 = ELS_ACC;
+	sp->u.snvme_els.ptr = (struct purex_entry_24xx *)purex;
+	sp->cmd_type = ELS_PLOGI;
+
+	tmp = (uint8_t *)plogi_ack_udma;
+
+	tmp += 4;	/* fw doesn't return 1st 4 bytes where opcode goes */
+
+	ret = qla2x00_get_plogi_template(vha, (dma_addr_t)tmp, (116/4 - 1));
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0x11035,
+		    "Failed to get plogi template\n");
+		return -ENOMEM;
+	}
+
+	opcode = (uint32_t *) plogi_ack_buf;
+	*opcode = cpu_to_be32(ELS_ACC << 24);
+
+	for (i = 0; i < 0x1c; i++) {
+		++opcode;
+		*opcode = cpu_to_be32(*opcode);
+	}
+
+	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xfff3,
+	    "Dumping the PLOGI from fw\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_verbose, vha, 0x70cf,
+		(uint8_t *)plogi_ack_buf, 116);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		qla2x00_rel_sp(sp);
+
+	return 0;
+}
+
+static struct qlt_purex_plogi_ack_t *
+qlt_plogi_find_add(struct scsi_qla_host *vha, port_id_t *id,
+		struct __fc_plogi *rcvd_plogi)
+{
+	struct qlt_purex_plogi_ack_t *pla;
+
+	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
+		if (pla->id.b24 == id->b24)
+			return pla;
+	}
+
+	pla = kmem_cache_zalloc(qla_tgt_purex_plogi_cachep, GFP_ATOMIC);
+	if (!pla) {
+		ql_dbg(ql_dbg_async, vha, 0x5088,
+		       "qla_target(%d): Allocation of plogi_ack failed\n",
+		       vha->vp_idx);
+		return NULL;
+	}
+
+	pla->id = *id;
+	memcpy(&pla->rcvd_plogi, rcvd_plogi, sizeof(struct __fc_plogi));
+	ql_log(ql_log_info, vha, 0xf101,
+	    "New session(%p) created for port: %#x\n",
+	    pla, pla->id.b24);
+
+	list_add_tail(&pla->list, &vha->plogi_ack_list);
+
+	return pla;
+}
+
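+/*
+ * Byte-swap each 32-bit word of a WWN in place: the PUREX payload
+ * arrives in little-endian word order, while wwn_to_u64() expects
+ * big-endian bytes.
+ */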
+static void __swap_wwn(uint8_t *ptr, uint32_t size)
+{
+	uint32_t *iptr = (uint32_t *)ptr;
+	uint32_t *optr = (uint32_t *)ptr;
+	uint32_t i = size >> 2;
+
+	for (; i ; i--)
+		*optr++ = be32_to_cpu(*iptr++);
+}
+
+static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id);
+/*
+ * Parse the PLOGI from the peer port
+ * Retrieve WWPN, WWNN from the payload
+ * Create and fc port if it is a new WWN
+ * else clean up the prev exchange
+ * Return a response
+ */
+static void qlt_process_plogi(struct scsi_qla_host *vha,
+		struct purex_entry_24xx *purex, void *buf)
+{
+	uint64_t pname, nname;
+	struct __fc_plogi *rcvd_plogi = (struct __fc_plogi *)buf;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	uint16_t loop_id;
+	unsigned long flags;
+	struct fc_port *sess = NULL, *conflict_sess = NULL;
+	struct qlt_purex_plogi_ack_t *pla;
+	port_id_t port_id;
+	int sess_handling = 0;
+
+	port_id.b.domain = purex->s_id[2];
+	port_id.b.area   = purex->s_id[1];
+	port_id.b.al_pa  = purex->s_id[0];
+	port_id.b.rsvd_1 = 0;
+
+	if (IS_SW_RESV_ADDR(port_id)) {
+		ql_log(ql_log_info, vha, 0x11036,
+		    "Received plogi from switch, just send an ACC\n");
+		goto send_plogi_resp;
+	}
+
+	loop_id = le16_to_cpu(purex->nport_handle);
+
+	/* Clean up prev commands if any */
+	if (sess_handling) {
+		ql_log(ql_log_info, vha, 0x11037,
+		   "%s %d Cleaning up prev commands\n",
+		   __func__, __LINE__);
+		abort_cmds_for_s_id(vha, &port_id);
+	}
+
+	__swap_wwn(rcvd_plogi->pname, 4);
+	__swap_wwn(&rcvd_plogi->pname[4], 4);
+	pname = wwn_to_u64(rcvd_plogi->pname);
+
+	__swap_wwn(rcvd_plogi->nname, 4);
+	__swap_wwn(&rcvd_plogi->nname[4], 4);
+	nname = wwn_to_u64(rcvd_plogi->nname);
+
+	ql_log(ql_log_info, vha, 0x11038,
+	    "%s %d, pname:%llx, nname:%llx port_id: %#x\n",
+	    __func__, __LINE__, pname, nname, loop_id);
+
+	/* Invalidate other sessions if any */
+	spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
+	sess = qlt_find_sess_invalidate_other(vha, pname,
+	    port_id, loop_id, &conflict_sess);
+	spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
+
+	/* Add the inbound plogi(if from a new device) to the list */
+	pla = qlt_plogi_find_add(vha, &port_id, rcvd_plogi);
+
+	/* If there is no existing session, create one */
+	if (unlikely(!sess)) {
+		ql_log(ql_log_info, vha, 0xf102,
+		    "Creating a new session\n");
+		init_completion(&vha->purex_plogi_sess);
+		qla24xx_post_nvmet_newsess_work(vha, &port_id,
+			rcvd_plogi->pname, pla);
+		wait_for_completion_timeout(&vha->purex_plogi_sess, 500);
+		/* Send a PLOGI response */
+		goto send_plogi_resp;
+	} else {
+		/* Session existing with No loop_ID assigned */
+		if (sess->loop_id == FC_NO_LOOP_ID) {
+			sess->loop_id = qla2x00_find_new_loop_id(vha, sess);
+			ql_log(ql_log_info, vha, 0x11039,
+			    "Allocated new loop_id: %#x for fcport: %p\n",
+			    sess->loop_id, sess);
+		}
+		sess->d_id = port_id;
+
+		sess->fw_login_state = DSC_LS_PLOGI_PEND;
+	}
+send_plogi_resp:
+	/* Send a PLOGI response */
+	qlt_send_plogi_resp(vha, ELS_PLOGI, purex, sess);
+}
+
+static int qlt_process_logo(struct scsi_qla_host *vha,
+		struct purex_entry_24xx *purex, void *buf)
+{
+	struct __fc_logo_acc *logo_acc;
+	dma_addr_t logo_ack_udma = vha->vha_tgt.qla_tgt->nvme_els_rsp;
+	void *logo_ack_buf = vha->vha_tgt.qla_tgt->nvme_els_ptr;
+	srb_t *sp;
+	int rval;
+	uint32_t look_up_sid;
+	fc_port_t *sess = NULL;
+	port_id_t port_id;
+
+	port_id.b.domain = purex->s_id[2];
+	port_id.b.area   = purex->s_id[1];
+	port_id.b.al_pa  = purex->s_id[0];
+	port_id.b.rsvd_1 = 0;
+
+	if (!IS_SW_RESV_ADDR(port_id)) {
+		look_up_sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 |
+				purex->s_id[0];
+		ql_log(ql_log_info, vha, 0x11040,
+			"%s - Look UP sid: %#x\n", __func__, look_up_sid);
+
+		sess = qla_nvmet_find_sess_by_s_id(vha, look_up_sid);
+		if (unlikely(!sess))
+			WARN_ON(1);
+	}
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+	if (!sp) {
+		ql_log(ql_log_info, vha, 0x11041,
+		    "Failed to allocate SRB\n");
+		return -ENOMEM;
+	}
+
+	sp->type = SRB_NVME_ELS_RSP;
+	sp->done = qlt_nvme_els_done;
+	sp->vha = vha;
+	sp->fcport = sess;
+
+	ql_log(ql_log_info, vha, 0x11042,
+	    "sp: %p, vha: %p, logo_ack_buf: %p\n",
+	    sp, vha, logo_ack_buf);
+
+	logo_acc = (struct __fc_logo_acc *)logo_ack_buf;
+	memset(logo_acc, 0, sizeof(*logo_acc));
+	logo_acc->op_code = ELS_ACC;
+
+	/* Send response */
+	sp->u.snvme_els.dma_addr = logo_ack_udma;
+	sp->u.snvme_els.dma_ptr = logo_ack_buf;
+	sp->gen1 = sizeof(struct __fc_logo_acc);
+	sp->gen2 = ELS_ACC;
+	sp->u.snvme_els.ptr = (struct purex_entry_24xx *)purex;
+	sp->cmd_type = ELS_LOGO;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		qla2x00_rel_sp(sp);
+
+	return 0;
+}
+
+static int qlt_process_prli(struct scsi_qla_host *vha,
+		struct purex_entry_24xx *purex, void *buf)
+{
+	struct __fc_prli *prli = (struct __fc_prli *)buf;
+	struct __fc_prli_acc *prli_acc;
+	struct __fc_prli_rjt *prli_rej;
+	dma_addr_t prli_ack_udma = vha->vha_tgt.qla_tgt->nvme_els_rsp;
+	void *prli_ack_buf = vha->vha_tgt.qla_tgt->nvme_els_ptr;
+	srb_t *sp;
+	struct fc_port *sess = NULL;
+	int rval;
+	uint32_t look_up_sid;
+	port_id_t port_id;
+
+	port_id.b.domain = purex->s_id[2];
+	port_id.b.area   = purex->s_id[1];
+	port_id.b.al_pa  = purex->s_id[0];
+	port_id.b.rsvd_1 = 0;
+
+	if (!IS_SW_RESV_ADDR(port_id)) {
+		look_up_sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 |
+				purex->s_id[0];
+		ql_log(ql_log_info, vha, 0x11043,
+		    "%s - Look UP sid: %#x\n", __func__, look_up_sid);
+
+		sess = qla_nvmet_find_sess_by_s_id(vha, look_up_sid);
+		if (unlikely(!sess))
+			WARN_ON(1);
+	}
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+	if (!sp) {
+		ql_log(ql_log_info, vha, 0x11044,
+		    "Failed to allocate SRB\n");
+		return -ENOMEM;
+	}
+
+	sp->type = SRB_NVME_ELS_RSP;
+	sp->done = qlt_nvme_els_done;
+	sp->vha = vha;
+	sp->fcport = sess;
+
+	ql_log(ql_log_info, vha, 0x11045,
+	    "sp: %p, vha: %p, prli_ack_buf: %p, prli_ack_udma: %pad\n",
+	    sp, vha, prli_ack_buf, &prli_ack_udma);
+
+	memset(prli_ack_buf, 0, sizeof(struct __fc_prli_acc));
+
+	/* Parse PRLI */
+	if (prli->prli_type == PRLI_TYPE_FCP) {
+		/* Send a RJT for FCP */
+		prli_rej = (struct __fc_prli_rjt *)prli_ack_buf;
+		prli_rej->op_code = ELS_RJT;
+		prli_rej->reason = PRLI_RJT_REASON;
+	} else if (prli->prli_type == PRLI_TYPE_NVME) {
+		uint32_t spinfo;
+
+		prli_acc = (struct __fc_prli_acc *)prli_ack_buf;
+		prli_acc->op_code = ELS_ACC;
+		prli_acc->type = PRLI_TYPE_NVME;
+		prli_acc->page_length = PRLI_NVME_PAGE_LENGTH;
+		prli_acc->common = cpu_to_be16(PRLI_REQ_EXEC);
+		prli_acc->pyld_length = cpu_to_be16(PRLI_ACC_NVME_RESP_LEN);
+		spinfo = NVME_PRLI_DISC | NVME_PRLI_TRGT;
+		prli_acc->nvme.sp_info = cpu_to_be32(spinfo);
+	}
+
+	/* Send response */
+	sp->u.snvme_els.dma_addr = prli_ack_udma;
+	sp->u.snvme_els.dma_ptr = prli_ack_buf;
+
+	if (prli->prli_type == PRLI_TYPE_FCP) {
+		sp->gen1 = sizeof(struct __fc_prli_rjt);
+		sp->gen2 = ELS_RJT;
+	} else if (prli->prli_type == PRLI_TYPE_NVME) {
+		sp->gen1 = sizeof(struct __fc_prli_acc);
+		sp->gen2 = ELS_ACC;
+	}
+
+	sp->u.snvme_els.ptr = (struct purex_entry_24xx *)purex;
+	sp->cmd_type = ELS_PRLI;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		qla2x00_rel_sp(sp);
+
+	return 0;
+}
+
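+/*
+ * Advance the ATIO ring one entry, wrapping at the end of the ring,
+ * and return a pointer to the new entry; used to consume the
+ * continuation IOCBs that follow a PUREX or unsolicited LS4 entry.
+ */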
+static void *qlt_get_next_atio_pkt(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	void *pkt;
+
+	ha->tgt.atio_ring_index++;
+	if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+		ha->tgt.atio_ring_index = 0;
+		ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+	} else {
+		ha->tgt.atio_ring_ptr++;
+	}
+	pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+
+	return pkt;
+}
+
+static void qlt_process_purex(struct scsi_qla_host *vha,
+		struct qla_tgt_purex_op *p)
+{
+	struct atio_from_isp *atio = &p->atio;
+	struct purex_entry_24xx *purex =
+		(struct purex_entry_24xx *)&atio->u.raw;
+	uint16_t len = purex->frame_size;
+
+	ql_log(ql_log_info, vha, 0xf100,
+	    "Purex IOCB: EC:%#x, Len:%#x ELS_OP:%#x oxid:%#x  rxid:%#x\n",
+	    purex->entry_count, len, purex->pyld[3],
+	    purex->ox_id, purex->rx_id);
+
+	switch (purex->pyld[3]) {
+	case ELS_PLOGI:
+		qlt_process_plogi(vha, purex, p->purex_pyld);
+		break;
+	case ELS_PRLI:
+		qlt_process_prli(vha, purex, p->purex_pyld);
+		break;
+	case ELS_LOGO:
+		qlt_process_logo(vha, purex, p->purex_pyld);
+		break;
+	default:
+		ql_log(ql_log_warn, vha, 0x11046,
+		    "Unexpected ELS 0x%x\n", purex->pyld[3]);
+		break;
+	}
+}
+
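+/* Called from the DPC thread to process the queued PUREX frames. */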
+void qlt_dequeue_purex(struct scsi_qla_host *vha)
+{
+	struct qla_tgt_purex_op *p, *t;
+	unsigned long flags;
+
+	list_for_each_entry_safe(p, t, &vha->purex_atio_list, cmd_list) {
+		ql_log(ql_log_info, vha, 0xff1e,
+		    "Processing ATIO %p\n", &p->atio);
+
+		qlt_process_purex(vha, p);
+		spin_lock_irqsave(&vha->cmd_list_lock, flags);
+		list_del(&p->cmd_list);
+		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+		kfree(p->purex_pyld);
+		kfree(p);
+	}
+}
+
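+/*
+ * Copy a received PUREX IOCB, along with any continuation entries,
+ * off the ATIO ring and queue it for the DPC thread to process.
+ */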
+static void qlt_queue_purex(scsi_qla_host_t *vha,
+	struct atio_from_isp *atio)
+{
+	struct qla_tgt_purex_op *p;
+	unsigned long flags;
+	struct purex_entry_24xx *purex =
+		(struct purex_entry_24xx *)&atio->u.raw;
+	uint16_t len = purex->frame_size;
+	uint8_t *purex_pyld_tmp;
+
+	p = kzalloc(sizeof(*p), GFP_ATOMIC);
+	if (p == NULL)
+		goto out;
+
+	p->vha = vha;
+	memcpy(&p->atio, atio, sizeof(*atio));
+
+	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0xff11,
+	    "Dumping the Purex IOCB received\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0xe012,
+		(uint8_t *)purex, 64);
+
+	p->purex_pyld = kzalloc(purex->entry_count * 64, GFP_ATOMIC);
+	if (p->purex_pyld == NULL) {
+		kfree(p);
+		goto out;
+	}
+	purex_pyld_tmp = (uint8_t *)p->purex_pyld;
+	p->purex_pyld_len = len;
+
+	if (len < PUREX_PYLD_SIZE)
+		len = PUREX_PYLD_SIZE;
+
+	memcpy(p->purex_pyld, &purex->d_id, PUREX_PYLD_SIZE);
+	purex_pyld_tmp += PUREX_PYLD_SIZE;
+	len -= PUREX_PYLD_SIZE;
+
+	while (len > 0) {
+		int cpylen;
+		struct __status_cont *cont_atio;
+
+		cont_atio = (struct __status_cont *)qlt_get_next_atio_pkt(vha);
+		cpylen = len > CONT_SENSE_DATA ? CONT_SENSE_DATA : len;
+		ql_log(ql_log_info, vha, 0xff12,
+		    "cont_atio: %p, cpylen: %#x\n", cont_atio, cpylen);
+
+		memcpy(purex_pyld_tmp, &cont_atio->data[0], cpylen);
+
+		purex_pyld_tmp += cpylen;
+		len -= cpylen;
+	}
+
+	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0xff11,
+	    "Dumping the Purex IOCB(%p) received\n", p->purex_pyld);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0xe011,
+		(uint8_t *)p->purex_pyld, p->purex_pyld_len);
+
+	INIT_LIST_HEAD(&p->cmd_list);
+
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_add_tail(&p->cmd_list, &vha->purex_atio_list);
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+out:
+	return;
+}
+
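+/*
+ * Copy 'len' 32-bit words from src to dest, byte-swapping each word
+ * to big-endian.
+ */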
+static void sys_to_be32_cpy(uint8_t *dest, uint8_t *src, uint16_t len)
+{
+	uint32_t *d, *s, i;
+
+	d = (uint32_t *) dest;
+	s = (uint32_t *) src;
+	for (i = 0; i < len; i++)
+		d[i] = cpu_to_be32(s[i]);
+}
+
+/* Prepare an LS req received from the wire to be sent to the nvmet */
+static void *qlt_nvmet_prepare_ls(struct scsi_qla_host *vha,
+	struct pt_ls4_rx_unsol *ls4)
+{
+	int desc_len = le32_to_cpu(ls4->desc_len) + 8;
+	int copy_len, bc;
+	void *buf;
+	uint8_t *cpy_buf;
+	int i;
+	struct __status_cont *cont_atio;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe072,
+	    "%s: desc_len:%d\n", __func__, desc_len);
+
+	buf = kzalloc(desc_len, GFP_ATOMIC);
+	if (!buf)
+		return NULL;
+
+	cpy_buf = buf;
+	bc = desc_len;
+
+	if (bc < PT_LS4_FIRST_PACKET_LEN)
+		copy_len = bc;
+	else
+		copy_len = PT_LS4_FIRST_PACKET_LEN;
+
+	sys_to_be32_cpy(cpy_buf, &((uint8_t *)ls4)[PT_LS4_PAYLOAD_OFFSET],
+				copy_len/4);
+
+	bc -= copy_len;
+	cpy_buf += copy_len;
+
+	cont_atio = (struct __status_cont *)ls4;
+
+	for (i = 1; i < ls4->entry_count && bc > 0; i++) {
+		if (bc < CONT_SENSE_DATA)
+			copy_len = bc;
+		else
+			copy_len = CONT_SENSE_DATA;
+
+		cont_atio = (struct __status_cont *)qlt_get_next_atio_pkt(vha);
+
+		sys_to_be32_cpy(cpy_buf, (uint8_t *)&cont_atio->data,
+			copy_len/4);
+		cpy_buf += copy_len;
+		bc -= copy_len;
+	}
+
+	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0xc0f1,
+	    "Dump the first 128 bytes of LS request\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+		(uint8_t *)buf, 128);
+
+	return buf;
+}
+
 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio, uint8_t ha_locked)
 {
-	ql_dbg(ql_dbg_tgt, vha, 0xe072,
-		"%s: qla_target(%d): type %x ox_id %04x\n",
-		__func__, vha->vp_idx, atio->u.raw.entry_type,
-		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
+	void *buf;
 
 	switch (atio->u.raw.entry_type) {
 	case ATIO_TYPE7:
@@ -414,48 +1062,80 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
 	{
 		struct abts_recv_from_24xx *entry =
 			(struct abts_recv_from_24xx *)atio;
-		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
-			entry->vp_index);
-		unsigned long flags;
 
-		if (unlikely(!host)) {
-			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
-			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
-			    "received, with unknown vp_index %d\n",
-			    vha->vp_idx, entry->vp_index);
+		if (unlikely(atio->u.nvme_isp27.fcnvme_hdr.scsi_fc_id ==
+			NVMEFC_CMD_IU_SCSI_FC_ID)) {
+			qla_nvmet_handle_abts(vha, entry);
+			break;
+		}
+
+		{
+			struct scsi_qla_host *host = qlt_find_host_by_vp_idx
+				(vha, entry->vp_index);
+			unsigned long flags;
+
+			if (unlikely(!host)) {
+				ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+				    "qla_target(%d): Response pkt (ABTS_RECV_24XX) received, with unknown vp_index %d\n",
+				    vha->vp_idx, entry->vp_index);
+				break;
+			}
+			if (!ha_locked)
+				spin_lock_irqsave(&host->hw->hardware_lock,
+				    flags);
+			qlt_24xx_handle_abts(host,
+			    (struct abts_recv_from_24xx *)atio);
+			if (!ha_locked)
+				spin_unlock_irqrestore(
+				    &host->hw->hardware_lock, flags);
 			break;
 		}
-		if (!ha_locked)
-			spin_lock_irqsave(&host->hw->hardware_lock, flags);
-		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
-		if (!ha_locked)
-			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
-		break;
 	}
 
-	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
+	/* NVME */
+	case ATIO_PURLS:
+	{
+		struct scsi_qla_host *host = vha;
+		unsigned long flags;
+
+		/* Received an LS4 from the init, pass it to the NVMEt */
+		ql_log(ql_log_info, vha, 0x11047,
+		    "%s %d Received an LS4 from the initiator on ATIO\n",
+		    __func__, __LINE__);
+		spin_lock_irqsave(&host->hw->hardware_lock, flags);
+		buf = qlt_nvmet_prepare_ls(host,
+		    (struct pt_ls4_rx_unsol *)atio);
+		if (buf)
+			qla_nvmet_handle_ls(host,
+			    (struct pt_ls4_rx_unsol *)atio, buf);
+		spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
+	}
+	break;
+
+	case PUREX_IOCB_TYPE: /* NVMET */
+	{
+		/* Received a PUREX IOCB */
+		/* Queue the iocb and wake up dpc */
+		qlt_queue_purex(vha, atio);
+		set_bit(NVMET_PUREX, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+		break;
+	}
 
 	default:
 		ql_dbg(ql_dbg_tgt, vha, 0xe040,
 		    "qla_target(%d): Received unknown ATIO atio "
 		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0xe011,
+		    (uint8_t *)atio, sizeof(*atio));
 		break;
 	}
 
 	return false;
 }
 
-int qlt_send_els_resp(srb_t *sp, void *pkt)
-{
-	return 0;
-}
-
-int
-qla_nvmet_ls(srb_t *sp, void *rsp_pkt)
-{
-	return 0;
-}
-
 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
 	struct rsp_que *rsp, response_t *pkt)
 {
@@ -552,6 +1232,10 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
 			break;
 		}
 		qlt_response_pkt(host, rsp, pkt);
+		if (unlikely(qlt_op_target_mode))
+			qla24xx_nvmet_abts_resp_iocb(vha,
+				(struct abts_resp_to_24xx *)pkt,
+				rsp->req);
 		break;
 	}
 	default:
@@ -1635,6 +2319,11 @@ static void qlt_release(struct qla_tgt *tgt)
 		    vha->vha_tgt.target_lport_ptr)
 			ha->tgt.tgt_ops->remove_target(vha);
 
+	if (tgt->nvme_els_ptr) {
+		dma_free_coherent(&vha->hw->pdev->dev, 256,
+			tgt->nvme_els_ptr, tgt->nvme_els_rsp);
+	}
+
 	vha->vha_tgt.qla_tgt = NULL;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
@@ -5660,6 +6349,101 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
 	return 1;
 }
 
+/*
+ * Worker that dequeues an NVMe cmd off the list and calls the
+ * NVMe target layer to process it.
+ */
+static void qla_nvmet_work(struct work_struct *work)
+{
+	struct qla_nvmet_cmd *cmd =
+		container_of(work, struct qla_nvmet_cmd, work);
+	scsi_qla_host_t *vha = cmd->vha;
+
+	qla_nvmet_process_cmd(vha, cmd);
+}
+
+/*
+ * Handle the NVME cmd IU
+ */
+static void qla_nvmet_handle_cmd(struct scsi_qla_host *vha,
+		struct atio_from_isp *atio)
+{
+	struct qla_nvmet_cmd *tgt_cmd;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *fcport;
+	struct fcp_hdr *fcp_hdr;
+	uint32_t s_id = 0;
+	void *next_pkt;
+	uint8_t *nvmet_cmd_ptr;
+	uint32_t nvmet_cmd_iulen = 0;
+	uint32_t nvmet_cmd_iulen_min = 64;	/* max IU bytes per ATIO entry */
+
+	/* Create an NVME cmd and queue it up to the work queue */
+	tgt_cmd = kzalloc(sizeof(struct qla_nvmet_cmd), GFP_ATOMIC);
+	if (tgt_cmd == NULL)
+		return;
+
+	tgt_cmd->vha = vha;
+
+	fcp_hdr = &atio->u.nvme_isp27.fcp_hdr;
+
+	/* Get the session for this command */
+	s_id = fcp_hdr->s_id[0] << 16 | fcp_hdr->s_id[1] << 8
+		| fcp_hdr->s_id[2];
+	tgt_cmd->ox_id = fcp_hdr->ox_id;
+
+	fcport = qla_nvmet_find_sess_by_s_id(vha, s_id);
+	if (unlikely(!fcport)) {
+		ql_log(ql_log_warn, vha, 0x11049,
+			"Cant' find the session for port_id: %#x\n", s_id);
+		kfree(tgt_cmd);
+		return;
+	}
+
+	tgt_cmd->fcport = fcport;
+
+	memcpy(&tgt_cmd->atio, atio, sizeof(*atio));
+
+	/* The FC-NVMe cmd can span two ATIO IOCBs */
+
+	nvmet_cmd_ptr = (uint8_t *)&tgt_cmd->nvme_cmd_iu;
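+	/*
+	 * The CMD IU length field counts 4-byte words (FC-NVMe CMD IU
+	 * format), so convert it to bytes here.
+	 */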
+	nvmet_cmd_iulen = be16_to_cpu(atio->u.nvme_isp27.fcnvme_hdr.iu_len) * 4;
+	tgt_cmd->cmd_len = nvmet_cmd_iulen;
+
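+	/*
+	 * If the continuation entries for this IU wrap past the end of
+	 * the ATIO ring, pull the IU out fragment by fragment from each
+	 * successive ring entry; otherwise a single copy suffices.
+	 */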
+	if (unlikely(ha->tgt.atio_ring_index + atio->u.raw.entry_count >
+			ha->tgt.atio_q_length)) {
+		uint8_t i;
+
+		memcpy(nvmet_cmd_ptr, &((uint8_t *)atio)[NVME_ATIO_CMD_OFF],
+			ATIO_NVME_FIRST_PACKET_CMDLEN);
+		nvmet_cmd_ptr += ATIO_NVME_FIRST_PACKET_CMDLEN;
+		nvmet_cmd_iulen -= ATIO_NVME_FIRST_PACKET_CMDLEN;
+
+		for (i = 1; i < atio->u.raw.entry_count; i++) {
+			uint8_t cplen = min(nvmet_cmd_iulen_min,
+				nvmet_cmd_iulen);
+
+			next_pkt = qlt_get_next_atio_pkt(vha);
+			memcpy(nvmet_cmd_ptr, (uint8_t *)next_pkt, cplen);
+			nvmet_cmd_ptr += cplen;
+			nvmet_cmd_iulen -= cplen;
+		}
+	} else {
+		memcpy(nvmet_cmd_ptr, &((uint8_t *)atio)[NVME_ATIO_CMD_OFF],
+			nvmet_cmd_iulen);
+		next_pkt = qlt_get_next_atio_pkt(vha);
+	}
+
+	/* Add cmd to the list */
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_add_tail(&tgt_cmd->cmd_list, &vha->qla_cmd_list);
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+	/*
+	 * Queue the work item: hand the cmd off to process context,
+	 * since NVMe target processing may sleep and must not run
+	 * under the hardware lock.
+	 */
+	INIT_WORK(&tgt_cmd->work, qla_nvmet_work);
+	queue_work(qla_nvmet_wq, &tgt_cmd->work);
+}
+
 /* ha->hardware_lock supposed to be held on entry */
 /* called via callback from qla2xxx */
 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
@@ -5699,6 +6483,13 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 			break;
 		}
 
+		/* NVME Target */
+		if (unlikely(atio->u.nvme_isp27.fcnvme_hdr.scsi_fc_id
+				== NVMEFC_CMD_IU_SCSI_FC_ID)) {
+			qla_nvmet_handle_cmd(vha, atio);
+			break;
+		}
+
 		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
 			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
 			    atio, ha_locked);
@@ -6549,6 +7340,14 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
 	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
 		ha->tgt.tgt_ops->add_target(base_vha);
 
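+	/*
+	 * Pre-allocate a DMA buffer for NVMe ELS responses; 256 bytes is
+	 * assumed large enough for a single LS response payload.
+	 */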
+	tgt->nvme_els_ptr = dma_alloc_coherent(&base_vha->hw->pdev->dev, 256,
+		&tgt->nvme_els_rsp, GFP_KERNEL);
+	if (!tgt->nvme_els_ptr) {
+		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
+		    "Unable to allocate DMA buffer for NVME ELS request\n");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -6843,6 +7642,7 @@ qlt_rff_id(struct scsi_qla_host *vha)
 	u8 fc4_feature = 0;
 	/*
 	 * FC-4 Feature bit 0 indicates target functionality to the name server.
+	 * NVME FC-4 Feature bit 2 indicates discovery controller
 	 */
 	if (qla_tgt_mode_enabled(vha)) {
 		fc4_feature = BIT_0;
@@ -6880,6 +7680,76 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
 
 }
 
+static void
+qlt_27xx_process_nvme_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct atio_from_isp *pkt;
+	int cnt;
+	uint32_t atio_q_in;
+	uint16_t num_atios = 0;
+	uint8_t nvme_pkts = 0;
+
+	if (!ha->flags.fw_started)
+		return;
+
+	pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
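+	/*
+	 * Poll the ATIO in-pointer until all entries of this (possibly
+	 * multi-entry) command have been posted, accounting for ring
+	 * wrap-around; bail out if the queue is empty.
+	 */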
+	while (num_atios < pkt->u.raw.entry_count) {
+		atio_q_in = RD_REG_DWORD(ISP_ATIO_Q_IN(vha));
+		if (atio_q_in < ha->tgt.atio_ring_index)
+			num_atios = ha->tgt.atio_q_length -
+				(ha->tgt.atio_ring_index - atio_q_in);
+		else
+			num_atios = atio_q_in - ha->tgt.atio_ring_index;
+		if (num_atios == 0)
+			return;
+	}
+
+	while (num_atios || fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
+		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+		cnt = pkt->u.raw.entry_count;
+
+		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+			/*
+			 * This packet is corrupted. The header + payload
+			 * can not be trusted. There is no point in passing
+			 * it further up.
+			 */
+			ql_log(ql_log_warn, vha, 0xd03c,
+			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+			    pkt->u.isp24.fcp_hdr.s_id,
+			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+
+			adjust_corrupted_atio(pkt);
+			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
+			    ha_locked, 0);
+		} else {
+			qlt_24xx_atio_pkt_all_vps(vha,
+			    (struct atio_from_isp *)pkt, ha_locked);
+			nvme_pkts++;
+		}
+
+		/*
+		 * Advance a single index here; any additional entries
+		 * were already accounted for while processing the
+		 * individual ATIOs above.
+		 */
+		ha->tgt.atio_ring_index++;
+		if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+			ha->tgt.atio_ring_index = 0;
+			ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+		} else {
+			ha->tgt.atio_ring_ptr++;
+		}
+
+		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+		num_atios -= cnt;
+		/* memory barrier */
+		wmb();
+	}
+
+	/* Adjust ring index */
+	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+}
+
 /*
  * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
  * @ha: SCSI driver HA context
@@ -6891,9 +7761,15 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
 	struct atio_from_isp *pkt;
 	int cnt, i;
 
+	if (unlikely(qlt_op_target_mode)) {
+		qlt_27xx_process_nvme_atio_queue(vha, ha_locked);
+		return;
+	}
+
 	if (!ha->flags.fw_started)
 		return;
 
+	pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
 	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
 	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
 		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
@@ -6919,6 +7795,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
 			    (struct atio_from_isp *)pkt, ha_locked);
 		}
 
+		cnt = 1;
 		for (i = 0; i < cnt; i++) {
 			ha->tgt.atio_ring_index++;
 			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
@@ -6930,11 +7807,13 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
 			pkt->u.raw.signature = ATIO_PROCESSED;
 			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
 		}
+		/* memory barrier */
 		wmb();
 	}
 
 	/* Adjust ring index */
 	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
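+	/* Read back to flush the posted register write (assumed intent) */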
+	RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
 }
 
 void
@@ -7231,6 +8110,9 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
 	    qlt_unknown_atio_work_fn);
 
+	/* NVMET */
+	INIT_LIST_HEAD(&base_vha->purex_atio_list);
+
 	qlt_clear_mode(base_vha);
 
 	rc = btree_init32(&ha->tgt.host_map);
@@ -7457,13 +8339,25 @@ int __init qlt_init(void)
 		goto out_mgmt_cmd_cachep;
 	}
 
+	qla_tgt_purex_plogi_cachep =
+		kmem_cache_create("qla_tgt_purex_plogi_cachep",
+			sizeof(struct qlt_purex_plogi_ack_t),
+			__alignof__(struct qlt_purex_plogi_ack_t), 0, NULL);
+
+	if (!qla_tgt_purex_plogi_cachep) {
+		ql_log(ql_log_fatal, NULL, 0xe06d,
+		    "kmem_cache_create for qla_tgt_purex_plogi_cachep failed\n");
+		ret = -ENOMEM;
+		goto out_plogi_cachep;
+	}
+
 	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
 	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
 	if (!qla_tgt_mgmt_cmd_mempool) {
 		ql_log(ql_log_fatal, NULL, 0xe06e,
 		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
 		ret = -ENOMEM;
-		goto out_plogi_cachep;
+		goto out_purex_plogi_cachep;
 	}
 
 	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
@@ -7473,6 +8367,25 @@ int __init qlt_init(void)
 		ret = -ENOMEM;
 		goto out_cmd_mempool;
 	}
+
+	qla_nvmet_wq = alloc_workqueue("qla_nvmet_wq", 0, 0);
+	if (!qla_nvmet_wq) {
+		ql_log(ql_log_fatal, NULL, 0xe070,
+		    "alloc_workqueue for qla_nvmet_wq failed\n");
+		ret = -ENOMEM;
+		destroy_workqueue(qla_tgt_wq);
+		goto out_cmd_mempool;
+	}
+
+	qla_nvmet_comp_wq = alloc_workqueue("qla_nvmet_comp_wq", 0, 0);
+	if (!qla_nvmet_comp_wq) {
+		ql_log(ql_log_fatal, NULL, 0xe071,
+		    "alloc_workqueue for qla_nvmet_wq failed\n");
+		ret = -ENOMEM;
+		destroy_workqueue(qla_nvmet_wq);
+		destroy_workqueue(qla_tgt_wq);
+		goto out_cmd_mempool;
+	}
+
 	/*
 	 * Return 1 to signal that initiator-mode is being disabled
 	 */
@@ -7480,6 +8393,8 @@ int __init qlt_init(void)
 
 out_cmd_mempool:
 	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_purex_plogi_cachep:
+	kmem_cache_destroy(qla_tgt_purex_plogi_cachep);
 out_plogi_cachep:
 	kmem_cache_destroy(qla_tgt_plogi_cachep);
 out_mgmt_cmd_cachep:
@@ -7492,8 +8407,19 @@ void qlt_exit(void)
 	if (!QLA_TGT_MODE_ENABLED())
 		return;
 
+	destroy_workqueue(qla_nvmet_comp_wq);
+	destroy_workqueue(qla_nvmet_wq);
 	destroy_workqueue(qla_tgt_wq);
 	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
 	kmem_cache_destroy(qla_tgt_plogi_cachep);
+	kmem_cache_destroy(qla_tgt_purex_plogi_cachep);
 	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
 }
+
+void nvmet_release_sessions(struct scsi_qla_host *vha)
+{
+	struct qlt_purex_plogi_ack_t *pla, *tpla;
+
+	list_for_each_entry_safe(pla, tpla, &vha->plogi_ack_list, list) {
+		list_del(&pla->list);
+		/* free back to the cache (assumes it was allocated there) */
+		kmem_cache_free(qla_tgt_purex_plogi_cachep, pla);
+	}
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 721da593b1bc..fcb4c9bb4fc1 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -322,6 +322,67 @@ struct atio7_fcp_cmnd {
 	/* uint32_t data_length; */
 } __packed;
 
+struct fc_nvme_hdr {
+	union {
+		struct {
+			uint8_t scsi_id;
+#define NVMEFC_CMD_IU_SCSI_ID   0xfd
+			uint8_t fc_id;
+#define NVMEFC_CMD_IU_FC_ID     0x28
+		};
+		struct {
+			uint16_t scsi_fc_id;
+#define NVMEFC_CMD_IU_SCSI_FC_ID  0x28fd
+		};
+	};
+	uint16_t iu_len;
+	uint8_t rsv1[3];
+	uint8_t flags;
+#define NVMEFC_CMD_WRITE        0x1
+#define NVMEFC_CMD_READ         0x2
+	uint64_t conn_id;
+	uint32_t        csn;
+	uint32_t        dl;
+} __packed;
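+/*
+ * Note: scsi_fc_id aliases scsi_id/fc_id through the union above, so on
+ * a little-endian host 0x28fd corresponds to scsi_id 0xfd, fc_id 0x28.
+ */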
+
+struct atio7_nvme_cmnd {
+	struct fc_nvme_hdr fcnvme_hdr;
+
+	struct nvme_command nvme_cmd;
+	uint32_t        rsv2[2];
+} __packed;
+
+#define ATIO_PURLS	0x56
+struct pt_ls4_rx_unsol {
+	uint8_t entry_type; /* 0x56 */
+	uint8_t entry_count;
+	uint16_t rsvd0;
+	uint16_t rsvd1;
+	uint8_t vp_index;
+	uint8_t rsvd2;
+	uint16_t rsvd3;
+	uint16_t nport_handle;
+	uint16_t frame_size;
+	uint16_t rsvd4;
+	uint32_t exchange_address;
+	uint8_t d_id[3];
+	uint8_t r_ctl;
+	uint8_t s_id[3];
+	uint8_t cs_ctl;
+	uint8_t f_ctl[3];
+	uint8_t type;
+	uint16_t seq_cnt;
+	uint8_t df_ctl;
+	uint8_t seq_id;
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint32_t param;
+	uint32_t desc0;
+#define PT_LS4_PAYLOAD_OFFSET 0x2c
+#define PT_LS4_FIRST_PACKET_LEN 20
+	uint32_t desc_len;
+	uint32_t payload[3];
+};
+
 /*
  * ISP queue -	Accept Target I/O (ATIO) type entry IOCB structure.
  *		This is sent from the ISP to the target driver.
@@ -368,6 +429,21 @@ struct atio_from_isp {
 			uint32_t signature;
 #define ATIO_PROCESSED 0xDEADDEAD		/* Signature */
 		} raw;
+		/* FC-NVME */
+		struct {
+			uint8_t  entry_type;	/* Entry type. */
+			uint8_t  entry_count;	/* Entry count. */
+			uint8_t  fcp_cmnd_len_low;
+			uint8_t  fcp_cmnd_len_high:4;
+			uint8_t  attr:4;
+			uint32_t exchange_addr;
+#define ATIO_NVME_ATIO_CMD_OFF 32
+#define ATIO_NVME_FIRST_PACKET_CMDLEN (64 - ATIO_NVME_ATIO_CMD_OFF)
+			struct fcp_hdr fcp_hdr;
+			struct fc_nvme_hdr fcnvme_hdr;
+			uint8_t	nvmd_cmd[8];
+		} nvme_isp27;
+		struct pt_ls4_rx_unsol pt_ls4;
 	} u;
 } __packed;
 
@@ -836,6 +912,8 @@ struct qla_tgt {
 	int modify_lun_expected;
 	atomic_t tgt_global_resets_count;
 	struct list_head tgt_list_entry;
+	dma_addr_t nvme_els_rsp;
+	void *nvme_els_ptr;
 };
 
 struct qla_tgt_sess_op {
@@ -848,6 +926,16 @@ struct qla_tgt_sess_op {
 	struct rsp_que *rsp;
 };
 
+/* NVMET */
+struct qla_tgt_purex_op {
+	struct scsi_qla_host *vha;
+	struct atio_from_isp atio;
+	uint8_t *purex_pyld;
+	uint16_t purex_pyld_len;
+	struct work_struct work;
+	struct list_head cmd_list;
+};
+
 enum trace_flags {
 	TRC_NEW_CMD = BIT_0,
 	TRC_DO_WORK = BIT_1,
@@ -1112,4 +1200,6 @@ void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t,
 extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *,
     struct qla_tgt_cmd *);
 
+/* 0 for FCP and 1 for NVMET */
+extern int qlt_op_target_mode;
 #endif /* __QLA_TARGET_H */
-- 
2.12.0
