* [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

These functions don't get a LUN, they look up an existing LUN (from the
fabric) and ensure it is valid.

Also, the iSCSI code was checking that the LUN was less than
MAX_LUNS_PER_TPG, which is already checked in the transport code.
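
A minimal sketch of the call-site pattern after this rename, modelled on
the tcm_loop and tcm_fc hunks below (unpacked_lun and the error return
here are placeholders, not a canonical fabric error path):

	/*
	 * The fabric passes the unpacked LUN decoded from the wire; the
	 * core validates it against the session's LUN table and attaches
	 * the matching struct se_lun to the se_cmd on success.
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		/* LUN does not exist or is not accessible from this nexus */
		return -ENODEV;
	}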

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/loopback/tcm_loop.c  |    4 ++--
 drivers/target/target_core_device.c |   12 ++++--------
 drivers/target/tcm_fc/tfc_cmd.c     |    4 ++--
 include/target/target_core_device.h |    4 ++--
 4 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index e1d17b9..5d7a40e 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -122,7 +122,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
-	if (transport_get_lun_for_cmd(se_cmd, tl_cmd->sc->device->lun) < 0) {
+	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 		set_host_byte(sc, DID_NO_CONNECT);
 		return NULL;
@@ -391,7 +391,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	/*
 	 * Locate the underlying TCM struct se_lun from sc->device->lun
 	 */
-	if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
 		goto release;
 	/*
 	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index fd92385..8b07ad2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -59,9 +59,7 @@ static struct se_subsystem_dev *lun0_su_dev;
 /* not static, needed by tpg.c */
 struct se_device *g_lun0_dev;
 
-int transport_get_lun_for_cmd(
-	struct se_cmd *se_cmd,
-	u32 unpacked_lun)
+int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 {
 	struct se_dev_entry *deve;
 	struct se_lun *se_lun = NULL;
@@ -185,11 +183,9 @@ out:
 
 	return 0;
 }
-EXPORT_SYMBOL(transport_get_lun_for_cmd);
+EXPORT_SYMBOL(transport_lookup_cmd_lun);
 
-int transport_get_lun_for_tmr(
-	struct se_cmd *se_cmd,
-	u32 unpacked_lun)
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 {
 	struct se_device *dev = NULL;
 	struct se_dev_entry *deve;
@@ -240,7 +236,7 @@ int transport_get_lun_for_tmr(
 
 	return 0;
 }
-EXPORT_SYMBOL(transport_get_lun_for_tmr);
+EXPORT_SYMBOL(transport_lookup_tmr_lun);
 
 /*
  * This function is called from core_scsi3_emulate_pro_register_and_move()
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 19b2b99..a913e84 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -438,7 +438,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
 	switch (fcp->fc_tm_flags) {
 	case FCP_TMF_LUN_RESET:
 		cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
-		if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
+		if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
 			/*
 			 * Make sure to clean up newly allocated TMR request
 			 * since "unable to  handle TMR request because failed
@@ -637,7 +637,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)
 	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
 
 	cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
-	ret = transport_get_lun_for_cmd(&cmd->se_cmd, cmd->lun);
+	ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
 	if (ret < 0) {
 		ft_dump_cmd(cmd, __func__);
 		transport_send_check_condition_and_sense(&cmd->se_cmd,
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
index d9745bf..96586cc 100644
--- a/include/target/target_core_device.h
+++ b/include/target/target_core_device.h
@@ -1,8 +1,8 @@
 #ifndef TARGET_CORE_DEVICE_H
 #define TARGET_CORE_DEVICE_H
 
-extern int transport_get_lun_for_cmd(struct se_cmd *, u32);
-extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
+extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
+extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
 extern struct se_dev_entry *core_get_se_deve_from_rtpi(
 					struct se_node_acl *, u16);
 extern int core_free_device_list_for_node(struct se_node_acl *,
-- 
1.7.6


* [PATCH 032/103] target: Make t_task a member of se_cmd, not a pointer
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

t_task_backstore is a member of se_cmd, and t_task is initialized to
point to it in init_se_cmd. Thus, t_task will never be NULL. Convert
t_task into the member itself, and eliminate t_task_backstore.

This also lets us remove all checks for whether se_cmd->t_task is NULL.
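
In struct se_cmd terms the change looks roughly like this (a sketch only;
the member type name is assumed from the target_core_base.h of this
series, and unrelated members are omitted). Call sites change from
cmd->t_task->field to cmd->t_task.field, as in the hunks below:

	/* before: pointer plus backing storage, pointer set in init_se_cmd */
	struct se_cmd {
		/* ... other members omitted ... */
		struct se_transport_task *t_task;
		struct se_transport_task t_task_backstore;
	};

	/* after: t_task is embedded directly and can never be NULL */
	struct se_cmd {
		/* ... other members omitted ... */
		struct se_transport_task t_task;
	};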

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/loopback/tcm_loop.c     |    4 +-
 drivers/target/target_core_alua.c      |    4 +-
 drivers/target/target_core_cdb.c       |   34 +-
 drivers/target/target_core_device.c    |    6 +-
 drivers/target/target_core_file.c      |    8 +-
 drivers/target/target_core_iblock.c    |    4 +-
 drivers/target/target_core_pr.c        |   22 +-
 drivers/target/target_core_pscsi.c     |   10 +-
 drivers/target/target_core_rd.c        |    8 +-
 drivers/target/target_core_tmr.c       |   62 ++--
 drivers/target/target_core_transport.c |  762 +++++++++++++++----------------
 drivers/target/target_core_ua.c        |    2 +-
 drivers/target/tcm_fc/tfc_cmd.c        |    6 +-
 drivers/target/tcm_fc/tfc_io.c         |    6 +-
 include/target/target_core_base.h      |    4 +-
 15 files changed, 454 insertions(+), 488 deletions(-)

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 5d7a40e..ee344c4 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -118,7 +118,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		se_cmd->t_task->t_tasks_bidi = 1;
+		se_cmd->t_task.t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
@@ -176,7 +176,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 		 * For BIDI commands, pass in the extra READ buffer
 		 * to transport_generic_map_mem_to_cmd() below..
 		 */
-		if (se_cmd->t_task->t_tasks_bidi) {
+		if (se_cmd->t_task.t_tasks_bidi) {
 			struct scsi_data_buffer *sdb = scsi_in(sc);
 
 			mem_bidi_ptr = sdb->table.sgl;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index bfc42ad..6dbceb4 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -65,7 +65,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
 				    Target port group descriptor */
 
@@ -157,7 +157,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
 	u32 len = 4; /* Skip over RESERVED area in header */
 	int alua_access_state, primary = 0, rc;
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 7d9ccf3..6908f8a 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -66,7 +66,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned char *buf = cmd->t_task.t_task_buf;
 
 	/*
 	 * Make sure we at least have 6 bytes of INQUIRY response
@@ -621,8 +621,8 @@ static int
 target_emulate_inquiry(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf;
-	unsigned char *cdb = cmd->t_task->t_task_cdb;
+	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *cdb = cmd->t_task.t_task_cdb;
 
 	if (!(cdb[1] & 0x1))
 		return target_emulate_inquiry_std(cmd);
@@ -666,7 +666,7 @@ static int
 target_emulate_readcapacity(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned char *buf = cmd->t_task.t_task_buf;
 	unsigned long long blocks_long = dev->transport->get_blocks(dev);
 	u32 blocks;
 
@@ -696,7 +696,7 @@ static int
 target_emulate_readcapacity_16(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned char *buf = cmd->t_task.t_task_buf;
 	unsigned long long blocks = dev->transport->get_blocks(dev);
 
 	buf[0] = (blocks >> 56) & 0xff;
@@ -831,8 +831,8 @@ static int
 target_emulate_modesense(struct se_cmd *cmd, int ten)
 {
 	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	char *cdb = cmd->t_task->t_task_cdb;
-	unsigned char *rbuf = cmd->t_task->t_task_buf;
+	char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *rbuf = cmd->t_task.t_task_buf;
 	int type = dev->transport->get_device_type(dev);
 	int offset = (ten) ? 8 : 4;
 	int length = 0;
@@ -903,8 +903,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
 static int
 target_emulate_request_sense(struct se_cmd *cmd)
 {
-	unsigned char *cdb = cmd->t_task->t_task_cdb;
-	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *buf = cmd->t_task.t_task_buf;
 	u8 ua_asc = 0, ua_ascq = 0;
 
 	if (cdb[1] & 0x01) {
@@ -965,8 +965,8 @@ target_emulate_unmap(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
-	unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+	unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL;
+	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
 	sector_t lba;
 	unsigned int size = cmd->data_length, range;
 	int ret, offset;
@@ -1012,7 +1012,7 @@ target_emulate_write_same(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_lun->lun_se_dev;
-	sector_t lba = cmd->t_task->t_task_lba;
+	sector_t lba = cmd->t_task.t_task_lba;
 	unsigned int range;
 	int ret;
 
@@ -1040,7 +1040,7 @@ transport_emulate_control_cdb(struct se_task *task)
 	unsigned short service_action;
 	int ret = 0;
 
-	switch (cmd->t_task->t_task_cdb[0]) {
+	switch (cmd->t_task.t_task_cdb[0]) {
 	case INQUIRY:
 		ret = target_emulate_inquiry(cmd);
 		break;
@@ -1054,13 +1054,13 @@ transport_emulate_control_cdb(struct se_task *task)
 		ret = target_emulate_modesense(cmd, 1);
 		break;
 	case SERVICE_ACTION_IN:
-		switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+		switch (cmd->t_task.t_task_cdb[1] & 0x1f) {
 		case SAI_READ_CAPACITY_16:
 			ret = target_emulate_readcapacity_16(cmd);
 			break;
 		default:
 			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
-				cmd->t_task->t_task_cdb[1] & 0x1f);
+				cmd->t_task.t_task_cdb[1] & 0x1f);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
 		break;
@@ -1085,7 +1085,7 @@ transport_emulate_control_cdb(struct se_task *task)
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action =
-			get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+			get_unaligned_be16(&cmd->t_task.t_task_cdb[8]);
 		switch (service_action) {
 		case WRITE_SAME_32:
 			if (!dev->transport->do_discard) {
@@ -1124,7 +1124,7 @@ transport_emulate_control_cdb(struct se_task *task)
 		break;
 	default:
 		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
-			cmd->t_task->t_task_cdb[0], dev->transport->name);
+			cmd->t_task.t_task_cdb[0], dev->transport->name);
 		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 	}
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8b07ad2..8e8d625 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -174,7 +174,7 @@ out:
 	 */
 	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
 	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
-	atomic_set(&se_cmd->t_task->transport_lun_active, 1);
+	atomic_set(&se_cmd->t_task.transport_lun_active, 1);
 #if 0
 	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
 		se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun);
@@ -663,10 +663,10 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
 	struct se_lun *se_lun;
 	struct se_session *se_sess = se_cmd->se_sess;
 	struct se_task *se_task;
-	unsigned char *buf = se_cmd->t_task->t_task_buf;
+	unsigned char *buf = se_cmd->t_task.t_task_buf;
 	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 
-	list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list)
+	list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list)
 		break;
 
 	if (!(se_task)) {
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 46825ce..bd094ba 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -377,7 +377,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
 	loff_t start, end;
 	int ret;
 
@@ -391,11 +391,11 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	/*
 	 * Determine if we will be flushing the entire device.
 	 */
-	if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+	if (cmd->t_task.t_task_lba == 0 && cmd->data_length == 0) {
 		start = 0;
 		end = LLONG_MAX;
 	} else {
-		start = cmd->t_task->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+		start = cmd->t_task.t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
 		if (cmd->data_length)
 			end = start + cmd->data_length;
 		else
@@ -475,7 +475,7 @@ static int fd_do_task(struct se_task *task)
 		if (ret > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-		    cmd->t_task->t_tasks_fua) {
+		    cmd->t_task.t_tasks_fua) {
 			/*
 			 * We might need to be a bit smarter here
 			 * and return some sense data to let the initiator
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b0bafe4..f2c8c73 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -331,7 +331,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
 	sector_t error_sector;
 	int ret;
 
@@ -400,7 +400,7 @@ static int iblock_do_task(struct se_task *task)
 		 */
 		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
 		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-		     task->task_se_cmd->t_task->t_tasks_fua))
+		     task->task_se_cmd->t_task.t_tasks_fua))
 			rw = WRITE_FUA;
 		else
 			rw = WRITE;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 27a7525..06f9afb 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
 	struct se_session *sess = cmd->se_sess;
 	struct se_portal_group *tpg = sess->se_tpg;
 
-	if ((cmd->t_task->t_task_cdb[1] & 0x01) &&
-	    (cmd->t_task->t_task_cdb[1] & 0x02)) {
+	if ((cmd->t_task.t_task_cdb[1] & 0x01) &&
+	    (cmd->t_task.t_task_cdb[1] & 0x02)) {
 		printk(KERN_ERR "LongIO and Obselete Bits set, returning"
 				" ILLEGAL_REQUEST\n");
 		return PYX_TRANSPORT_ILLEGAL_REQUEST;
@@ -216,7 +216,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
 	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
 	struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
-	unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
 	int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
 	int conflict = 0;
 
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port(
 	struct list_head tid_dest_list;
 	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
 	struct target_core_fabric_ops *tmp_tf_ops;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
 	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
 	u32 tpdl, tid_len = 0;
@@ -3307,7 +3307,7 @@ static int core_scsi3_emulate_pro_register_and_move(
 	struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
 	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	unsigned char *initiator_str;
 	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
 	u32 tid_len, tmp_tid_len;
@@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
  */
 static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
 {
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	u64 res_key, sa_res_key;
 	int sa, scope, type, aptpl;
 	int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
@@ -3830,7 +3830,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
 	struct se_device *se_dev = cmd->se_lun->lun_se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	u32 add_len = 0, off = 8;
 
 	if (cmd->data_length < 8) {
@@ -3885,7 +3885,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
 	struct se_device *se_dev = cmd->se_lun->lun_se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	u64 pr_res_key;
 	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
 
@@ -3965,7 +3965,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_lun->lun_se_dev;
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	u16 add_len = 8; /* Hardcoded to 8. */
 
 	if (cmd->data_length < 6) {
@@ -4020,7 +4020,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 	struct se_portal_group *se_tpg;
 	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
 	struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
 	u32 off = 8; /* off into first Full Status descriptor */
 	int format_code = 0;
@@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
 
 int core_scsi3_emulate_pr(struct se_cmd *cmd)
 {
-	unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
 	struct se_device *dev = cmd->se_dev;
 	/*
 	 * Following spc2r20 5.5.1 Reservations overview:
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 823da67..ecfe889 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -697,7 +697,7 @@ static int pscsi_transport_complete(struct se_task *task)
 
 		if (task->task_se_cmd->se_deve->lun_flags &
 				TRANSPORT_LUNFLAGS_READ_ONLY) {
-			unsigned char *buf = task->task_se_cmd->t_task->t_task_buf;
+			unsigned char *buf = task->task_se_cmd->t_task.t_task_buf;
 
 			if (cdb[0] == MODE_SENSE_10) {
 				if (!(buf[3] & 0x80))
@@ -763,7 +763,7 @@ static struct se_task *
 pscsi_alloc_task(struct se_cmd *cmd)
 {
 	struct pscsi_plugin_task *pt;
-	unsigned char *cdb = cmd->t_task->t_task_cdb;
+	unsigned char *cdb = cmd->t_task.t_task_cdb;
 
 	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
 	if (!pt) {
@@ -776,7 +776,7 @@ pscsi_alloc_task(struct se_cmd *cmd)
 	 * allocate the extended CDB buffer for per struct se_task context
 	 * pt->pscsi_cdb now.
 	 */
-	if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) {
+	if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) {
 
 		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
 		if (!(pt->pscsi_cdb)) {
@@ -889,7 +889,7 @@ static void pscsi_free_task(struct se_task *task)
 	 * Release the extended CDB allocation from pscsi_alloc_task()
 	 * if one exists.
 	 */
-	if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb)
+	if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb)
 		kfree(pt->pscsi_cdb);
 	/*
 	 * We do not release the bio(s) here associated with this task, as
@@ -1266,7 +1266,7 @@ static int pscsi_map_task_non_SG(struct se_task *task)
 		return 0;
 
 	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
-			pt->pscsi_req, cmd->t_task->t_task_buf,
+			pt->pscsi_req, cmd->t_task.t_task_buf,
 			task->task_size, GFP_KERNEL);
 	if (ret < 0) {
 		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 63cd5a7..4db4ac3 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -737,7 +737,7 @@ check_eot:
 	}
 
 out:
-	task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
 #ifdef DEBUG_RAMDISK_DR
 	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
 			*se_mem_cnt);
@@ -819,7 +819,7 @@ static int rd_DIRECT_without_offset(
 	}
 
 out:
-	task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
 #ifdef DEBUG_RAMDISK_DR
 	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
 			*se_mem_cnt);
@@ -880,14 +880,14 @@ static int rd_DIRECT_do_se_mem_map(
 	 * across multiple struct se_task->task_sg[].
 	 */
 	ret = transport_init_task_sg(task,
-			list_entry(cmd->t_task->t_mem_list->next,
+			list_entry(cmd->t_task.t_mem_list->next,
 				   struct se_mem, se_list),
 			task_offset);
 	if (ret <= 0)
 		return ret;
 
 	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
-			list_entry(cmd->t_task->t_mem_list->next,
+			list_entry(cmd->t_task.t_mem_list->next,
 				   struct se_mem, se_list),
 			out_se_mem, se_mem_cnt, task_offset_in);
 }
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 2f73749..50f49f2 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -179,14 +179,14 @@ int core_tmr_lun_reset(
 			continue;
 		spin_unlock(&dev->se_tmr_lock);
 
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-		if (!(atomic_read(&cmd->t_task->t_transport_active))) {
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+		if (!(atomic_read(&cmd->t_task.t_transport_active))) {
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 			spin_lock(&dev->se_tmr_lock);
 			continue;
 		}
 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 			spin_lock(&dev->se_tmr_lock);
 			continue;
 		}
@@ -194,7 +194,7 @@ int core_tmr_lun_reset(
 			" Response: 0x%02x, t_state: %d\n",
 			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
 			tmr_p->function, tmr_p->response, cmd->t_state);
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 		transport_cmd_finish_abort_tmr(cmd);
 		spin_lock(&dev->se_tmr_lock);
@@ -230,12 +230,6 @@ int core_tmr_lun_reset(
 		}
 		cmd = task->task_se_cmd;
 
-		if (!cmd->t_task) {
-			printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:"
-				" %p ITT: 0x%08x\n", task, cmd,
-				cmd->se_tfo->get_task_tag(cmd));
-			continue;
-		}
 		/*
 		 * For PREEMPT_AND_ABORT usage, only process commands
 		 * with a matching reservation key.
@@ -254,38 +248,38 @@ int core_tmr_lun_reset(
 		atomic_set(&task->task_state_active, 0);
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
 			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
 			"def_t_state: %d/%d cdb: 0x%02x\n",
 			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
 			cmd->se_tfo->get_task_tag(cmd), 0,
 			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
-			cmd->deferred_t_state, cmd->t_task->t_task_cdb[0]);
+			cmd->deferred_t_state, cmd->t_task.t_task_cdb[0]);
 		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
 			" t_task_cdbs: %d t_task_cdbs_left: %d"
 			" t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
-			cmd->t_task->t_task_cdbs,
-			atomic_read(&cmd->t_task->t_task_cdbs_left),
-			atomic_read(&cmd->t_task->t_task_cdbs_sent),
-			atomic_read(&cmd->t_task->t_transport_active),
-			atomic_read(&cmd->t_task->t_transport_stop),
-			atomic_read(&cmd->t_task->t_transport_sent));
+			cmd->t_task.t_task_cdbs,
+			atomic_read(&cmd->t_task.t_task_cdbs_left),
+			atomic_read(&cmd->t_task.t_task_cdbs_sent),
+			atomic_read(&cmd->t_task.t_transport_active),
+			atomic_read(&cmd->t_task.t_transport_stop),
+			atomic_read(&cmd->t_task.t_transport_sent));
 
 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
 			spin_unlock_irqrestore(
-				&cmd->t_task->t_state_lock, flags);
+				&cmd->t_task.t_state_lock, flags);
 
 			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
 				" for dev: %p\n", task, dev);
 			wait_for_completion(&task->task_stop_comp);
 			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
 				" dev: %p\n", task, dev);
-			spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-			atomic_dec(&cmd->t_task->t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+			atomic_dec(&cmd->t_task.t_task_cdbs_left);
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
@@ -295,24 +289,24 @@ int core_tmr_lun_reset(
 		}
 		__transport_stop_task_timer(task, &flags);
 
-		if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) {
+		if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) {
 			spin_unlock_irqrestore(
-					&cmd->t_task->t_state_lock, flags);
+					&cmd->t_task.t_state_lock, flags);
 			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
-				atomic_read(&cmd->t_task->t_task_cdbs_ex_left));
+				atomic_read(&cmd->t_task.t_task_cdbs_ex_left));
 
 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
-		fe_count = atomic_read(&cmd->t_task->t_fe_count);
+		fe_count = atomic_read(&cmd->t_task.t_fe_count);
 
-		if (atomic_read(&cmd->t_task->t_transport_active)) {
+		if (atomic_read(&cmd->t_task.t_transport_active)) {
 			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
 				" task: %p, t_fe_count: %d dev: %p\n", task,
 				fe_count, dev);
-			atomic_set(&cmd->t_task->t_transport_aborted, 1);
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
+			atomic_set(&cmd->t_task.t_transport_aborted, 1);
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
 						flags);
 			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
@@ -321,8 +315,8 @@ int core_tmr_lun_reset(
 		}
 		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
 			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
-		atomic_set(&cmd->t_task->t_transport_aborted, 1);
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		atomic_set(&cmd->t_task.t_transport_aborted, 1);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
 		spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -365,7 +359,7 @@ int core_tmr_lun_reset(
 		if (prout_cmd == cmd)
 			continue;
 
-		atomic_dec(&cmd->t_task->t_transport_queue_active);
+		atomic_dec(&cmd->t_task.t_transport_queue_active);
 		atomic_dec(&qobj->queue_cnt);
 		list_del(&qr->qr_list);
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -376,7 +370,7 @@ int core_tmr_lun_reset(
 		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
 			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
 			"Preempt" : "", cmd, state,
-			atomic_read(&cmd->t_task->t_fe_count));
+			atomic_read(&cmd->t_task.t_fe_count));
 		/*
 		 * Signal that the command has failed via cmd->se_cmd_flags,
 		 * and call TFO->new_cmd_failure() to wakeup any fabric
@@ -388,7 +382,7 @@ int core_tmr_lun_reset(
 		transport_new_cmd_failure(cmd);
 
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
-				atomic_read(&cmd->t_task->t_fe_count));
+				atomic_read(&cmd->t_task.t_fe_count));
 		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	}
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6198a7e..d7ee5603 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -574,7 +574,7 @@ void transport_deregister_session(struct se_session *se_sess)
 EXPORT_SYMBOL(transport_deregister_session);
 
 /*
- * Called with cmd->t_task->t_state_lock held.
+ * Called with cmd->t_task.t_state_lock held.
  */
 static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 {
@@ -582,10 +582,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 	struct se_task *task;
 	unsigned long flags;
 
-	if (!cmd->t_task)
-		return;
-
-	list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
+	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
 		dev = task->se_dev;
 		if (!(dev))
 			continue;
@@ -603,7 +600,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
 		atomic_set(&task->task_state_active, 0);
-		atomic_dec(&cmd->t_task->t_task_cdbs_ex_left);
+		atomic_dec(&cmd->t_task.t_task_cdbs_ex_left);
 	}
 }
 
@@ -622,32 +619,32 @@ static int transport_cmd_check_stop(
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	/*
 	 * Determine if IOCTL context caller in requesting the stopping of this
 	 * command for LUN shutdown purposes.
 	 */
-	if (atomic_read(&cmd->t_task->transport_lun_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->t_task->transport_lun_stop)"
+	if (atomic_read(&cmd->t_task.transport_lun_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&cmd->t_task.transport_lun_stop)"
 			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));
 
 		cmd->deferred_t_state = cmd->t_state;
 		cmd->t_state = TRANSPORT_DEFERRED_CMD;
-		atomic_set(&cmd->t_task->t_transport_active, 0);
+		atomic_set(&cmd->t_task.t_transport_active, 0);
 		if (transport_off == 2)
 			transport_all_task_dev_remove_state(cmd);
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
-		complete(&cmd->t_task->transport_lun_stop_comp);
+		complete(&cmd->t_task.transport_lun_stop_comp);
 		return 1;
 	}
 	/*
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
 	 */
-	if (atomic_read(&cmd->t_task->t_transport_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->t_task->t_transport_stop) =="
+	if (atomic_read(&cmd->t_task.t_transport_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&cmd->t_task.t_transport_stop) =="
 			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));
 
@@ -662,13 +659,13 @@ static int transport_cmd_check_stop(
 		 */
 		if (transport_off == 2)
 			cmd->se_lun = NULL;
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
-		complete(&cmd->t_task->t_transport_stop_comp);
+		complete(&cmd->t_task.t_transport_stop_comp);
 		return 1;
 	}
 	if (transport_off) {
-		atomic_set(&cmd->t_task->t_transport_active, 0);
+		atomic_set(&cmd->t_task.t_transport_active, 0);
 		if (transport_off == 2) {
 			transport_all_task_dev_remove_state(cmd);
 			/*
@@ -683,18 +680,18 @@ static int transport_cmd_check_stop(
 			 */
 			if (cmd->se_tfo->check_stop_free != NULL) {
 				spin_unlock_irqrestore(
-					&cmd->t_task->t_state_lock, flags);
+					&cmd->t_task.t_state_lock, flags);
 
 				cmd->se_tfo->check_stop_free(cmd);
 				return 1;
 			}
 		}
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 		return 0;
 	} else if (t_state)
 		cmd->t_state = t_state;
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	return 0;
 }
@@ -712,21 +709,21 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 	if (!lun)
 		return;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_task->transport_dev_active))) {
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		goto check_lun;
 	}
-	atomic_set(&cmd->t_task->transport_dev_active, 0);
+	atomic_set(&cmd->t_task.transport_dev_active, 0);
 	transport_all_task_dev_remove_state(cmd);
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 
 check_lun:
 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
-	if (atomic_read(&cmd->t_task->transport_lun_active)) {
+	if (atomic_read(&cmd->t_task.transport_lun_active)) {
 		list_del(&cmd->se_lun_list);
-		atomic_set(&cmd->t_task->transport_lun_active, 0);
+		atomic_set(&cmd->t_task.transport_lun_active, 0);
 #if 0
 		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
 			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
@@ -777,15 +774,15 @@ static int transport_add_cmd_to_queue(
 	qr->state = t_state;
 
 	if (t_state) {
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 		cmd->t_state = t_state;
-		atomic_set(&cmd->t_task->t_transport_active, 1);
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		atomic_set(&cmd->t_task.t_transport_active, 1);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 	}
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	list_add_tail(&qr->qr_list, &qobj->qobj_list);
-	atomic_inc(&cmd->t_task->t_transport_queue_active);
+	atomic_inc(&cmd->t_task.t_transport_queue_active);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 	atomic_inc(&qobj->queue_cnt);
@@ -812,7 +809,7 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj)
 		break;
 
 	if (qr->cmd)
-		atomic_dec(&qr->cmd->t_task->t_transport_queue_active);
+		atomic_dec(&qr->cmd->t_task.t_transport_queue_active);
 
 	list_del(&qr->qr_list);
 	atomic_dec(&qobj->queue_cnt);
@@ -828,7 +825,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 	unsigned long flags;
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	if (!(atomic_read(&cmd->t_task->t_transport_queue_active))) {
+	if (!(atomic_read(&cmd->t_task.t_transport_queue_active))) {
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 		return;
 	}
@@ -837,17 +834,17 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 		if (qr->cmd != cmd)
 			continue;
 
-		atomic_dec(&qr->cmd->t_task->t_transport_queue_active);
+		atomic_dec(&qr->cmd->t_task.t_transport_queue_active);
 		atomic_dec(&qobj->queue_cnt);
 		list_del(&qr->qr_list);
 		kfree(qr);
 	}
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
-	if (atomic_read(&cmd->t_task->t_transport_queue_active)) {
+	if (atomic_read(&cmd->t_task.t_transport_queue_active)) {
 		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
-			atomic_read(&cmd->t_task->t_transport_queue_active));
+			atomic_read(&cmd->t_task.t_transport_queue_active));
 	}
 }
 
@@ -857,7 +854,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
  */
 void transport_complete_sync_cache(struct se_cmd *cmd, int good)
 {
-	struct se_task *task = list_entry(cmd->t_task->t_task_list.next,
+	struct se_task *task = list_entry(cmd->t_task.t_task_list.next,
 				struct se_task, t_list);
 
 	if (good) {
@@ -887,12 +884,12 @@ void transport_complete_task(struct se_task *task, int success)
 	unsigned long flags;
 #if 0
 	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-			cmd->t_task->t_task_cdb[0], dev);
+			cmd->t_task.t_task_cdb[0], dev);
 #endif
 	if (dev)
 		atomic_inc(&dev->depth_left);
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	atomic_set(&task->task_active, 0);
 
 	/*
@@ -914,14 +911,14 @@ void transport_complete_task(struct se_task *task, int success)
 	 */
 	if (atomic_read(&task->task_stop)) {
 		/*
-		 * Decrement cmd->t_task->t_se_count if this task had
+		 * Decrement cmd->t_task.t_se_count if this task had
 		 * previously thrown its timeout exception handler.
 		 */
 		if (atomic_read(&task->task_timeout)) {
-			atomic_dec(&cmd->t_task->t_se_count);
+			atomic_dec(&cmd->t_task.t_se_count);
 			atomic_set(&task->task_timeout, 0);
 		}
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 		complete(&task->task_stop_comp);
 		return;
@@ -933,33 +930,33 @@ void transport_complete_task(struct se_task *task, int success)
 	 */
 	if (atomic_read(&task->task_timeout)) {
 		if (!(atomic_dec_and_test(
-				&cmd->t_task->t_task_cdbs_timeout_left))) {
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
+				&cmd->t_task.t_task_cdbs_timeout_left))) {
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
 				flags);
 			return;
 		}
 		t_state = TRANSPORT_COMPLETE_TIMEOUT;
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 		transport_add_cmd_to_queue(cmd, t_state);
 		return;
 	}
-	atomic_dec(&cmd->t_task->t_task_cdbs_timeout_left);
+	atomic_dec(&cmd->t_task.t_task_cdbs_timeout_left);
 
 	/*
 	 * Decrement the outstanding t_task_cdbs_left count.  The last
 	 * struct se_task from struct se_cmd will complete itself into the
 	 * device queue depending upon int success.
 	 */
-	if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) {
+	if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) {
 		if (!success)
-			cmd->t_task->t_tasks_failed = 1;
+			cmd->t_task.t_tasks_failed = 1;
 
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		return;
 	}
 
-	if (!success || cmd->t_task->t_tasks_failed) {
+	if (!success || cmd->t_task.t_tasks_failed) {
 		t_state = TRANSPORT_COMPLETE_FAILURE;
 		if (!task->task_error_status) {
 			task->task_error_status =
@@ -968,10 +965,10 @@ void transport_complete_task(struct se_task *task, int success)
 				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
 	} else {
-		atomic_set(&cmd->t_task->t_transport_complete, 1);
+		atomic_set(&cmd->t_task.t_transport_complete, 1);
 		t_state = TRANSPORT_COMPLETE_OK;
 	}
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	transport_add_cmd_to_queue(cmd, t_state);
 }
@@ -1064,8 +1061,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 	struct se_task *task;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
 		dev = task->se_dev;
 
 		if (atomic_read(&task->task_state_active))
@@ -1081,7 +1078,7 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 
 		spin_unlock(&dev->execute_task_lock);
 	}
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 }
 
 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
@@ -1091,7 +1088,7 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
+	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
 		if (atomic_read(&task->task_execute_queue))
 			continue;
 		/*
@@ -1697,14 +1694,14 @@ transport_generic_get_task(struct se_cmd *cmd,
 	INIT_LIST_HEAD(&task->t_execute_list);
 	INIT_LIST_HEAD(&task->t_state_list);
 	init_completion(&task->task_stop_comp);
-	task->task_no = cmd->t_task->t_tasks_no++;
+	task->task_no = cmd->t_task.t_tasks_no++;
 	task->task_se_cmd = cmd;
 	task->se_dev = dev;
 	task->task_data_direction = data_direction;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	list_add_tail(&task->t_list, &cmd->t_task->t_task_list);
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	list_add_tail(&task->t_list, &cmd->t_task.t_task_list);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	return task;
 }
@@ -1733,17 +1730,13 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->se_lun_list);
 	INIT_LIST_HEAD(&cmd->se_delayed_list);
 	INIT_LIST_HEAD(&cmd->se_ordered_list);
-	/*
-	 * Setup t_task pointer to t_task_backstore
-	 */
-	cmd->t_task = &cmd->t_task_backstore;
 
-	INIT_LIST_HEAD(&cmd->t_task->t_task_list);
-	init_completion(&cmd->t_task->transport_lun_fe_stop_comp);
-	init_completion(&cmd->t_task->transport_lun_stop_comp);
-	init_completion(&cmd->t_task->t_transport_stop_comp);
-	spin_lock_init(&cmd->t_task->t_state_lock);
-	atomic_set(&cmd->t_task->transport_dev_active, 1);
+	INIT_LIST_HEAD(&cmd->t_task.t_task_list);
+	init_completion(&cmd->t_task.transport_lun_fe_stop_comp);
+	init_completion(&cmd->t_task.transport_lun_stop_comp);
+	init_completion(&cmd->t_task.t_transport_stop_comp);
+	spin_lock_init(&cmd->t_task.t_state_lock);
+	atomic_set(&cmd->t_task.transport_dev_active, 1);
 
 	cmd->se_tfo = tfo;
 	cmd->se_sess = se_sess;
@@ -1788,8 +1781,8 @@ void transport_free_se_cmd(
 	/*
 	 * Check and free any extended CDB buffer that was allocated
 	 */
-	if (se_cmd->t_task->t_task_cdb != se_cmd->t_task->__t_task_cdb)
-		kfree(se_cmd->t_task->t_task_cdb);
+	if (se_cmd->t_task.t_task_cdb != se_cmd->t_task.__t_task_cdb)
+		kfree(se_cmd->t_task.t_task_cdb);
 }
 EXPORT_SYMBOL(transport_free_se_cmd);
 
@@ -1828,26 +1821,26 @@ int transport_generic_allocate_tasks(
 	 * allocate the additional extended CDB buffer now..  Otherwise
 	 * setup the pointer from __t_task_cdb to t_task_cdb.
 	 */
-	if (scsi_command_size(cdb) > sizeof(cmd->t_task->__t_task_cdb)) {
-		cmd->t_task->t_task_cdb = kzalloc(scsi_command_size(cdb),
+	if (scsi_command_size(cdb) > sizeof(cmd->t_task.__t_task_cdb)) {
+		cmd->t_task.t_task_cdb = kzalloc(scsi_command_size(cdb),
 						GFP_KERNEL);
-		if (!(cmd->t_task->t_task_cdb)) {
-			printk(KERN_ERR "Unable to allocate cmd->t_task->t_task_cdb"
-				" %u > sizeof(cmd->t_task->__t_task_cdb): %lu ops\n",
+		if (!(cmd->t_task.t_task_cdb)) {
+			printk(KERN_ERR "Unable to allocate cmd->t_task.t_task_cdb"
+				" %u > sizeof(cmd->t_task.__t_task_cdb): %lu ops\n",
 				scsi_command_size(cdb),
-				(unsigned long)sizeof(cmd->t_task->__t_task_cdb));
+				(unsigned long)sizeof(cmd->t_task.__t_task_cdb));
 			return -ENOMEM;
 		}
 	} else
-		cmd->t_task->t_task_cdb = &cmd->t_task->__t_task_cdb[0];
+		cmd->t_task.t_task_cdb = &cmd->t_task.__t_task_cdb[0];
 	/*
 	 * Copy the original CDB into cmd->t_task.
 	 */
-	memcpy(cmd->t_task->t_task_cdb, cdb, scsi_command_size(cdb));
+	memcpy(cmd->t_task.t_task_cdb, cdb, scsi_command_size(cdb));
 	/*
 	 * Setup the received CDB based on SCSI defined opcodes and
 	 * perform unit attention, persistent reservations and ALUA
-	 * checks for virtual device backends.  The cmd->t_task->t_task_cdb
+	 * checks for virtual device backends.  The cmd->t_task.t_task_cdb
 	 * pointer is expected to be setup before we reach this point.
 	 */
 	ret = transport_generic_cmd_sequencer(cmd, cdb);
@@ -1973,9 +1966,9 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 	/*
 	 * No tasks remain in the execution queue
 	 */
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task->t_task_list, t_list) {
+				&cmd->t_task.t_task_list, t_list) {
 		DEBUG_TS("task_no[%d] - Processing task %p\n",
 				task->task_no, task);
 		/*
@@ -1984,14 +1977,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 		 */
 		if (!atomic_read(&task->task_sent) &&
 		    !atomic_read(&task->task_active)) {
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
 					flags);
 			transport_remove_task_from_execute_queue(task,
 					task->se_dev);
 
 			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
 				task->task_no);
-			spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 			continue;
 		}
 
@@ -2001,7 +1994,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 		 */
 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
 					flags);
 
 			DEBUG_TS("task_no[%d] - Waiting to complete\n",
@@ -2010,8 +2003,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 			DEBUG_TS("task_no[%d] - Stopped successfully\n",
 				task->task_no);
 
-			spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-			atomic_dec(&cmd->t_task->t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+			atomic_dec(&cmd->t_task.t_task_cdbs_left);
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
@@ -2022,7 +2015,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 
 		__transport_stop_task_timer(task, &flags);
 	}
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	return ret;
 }
@@ -2038,7 +2031,7 @@ static void transport_generic_request_failure(
 {
 	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
-		cmd->t_task->t_task_cdb[0]);
+		cmd->t_task.t_task_cdb[0]);
 	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
 		" %d/%d transport_error_status: %d\n",
 		cmd->se_tfo->get_cmd_state(cmd),
@@ -2047,13 +2040,13 @@ static void transport_generic_request_failure(
 	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
 		" t_transport_active: %d t_transport_stop: %d"
-		" t_transport_sent: %d\n", cmd->t_task->t_task_cdbs,
-		atomic_read(&cmd->t_task->t_task_cdbs_left),
-		atomic_read(&cmd->t_task->t_task_cdbs_sent),
-		atomic_read(&cmd->t_task->t_task_cdbs_ex_left),
-		atomic_read(&cmd->t_task->t_transport_active),
-		atomic_read(&cmd->t_task->t_transport_stop),
-		atomic_read(&cmd->t_task->t_transport_sent));
+		" t_transport_sent: %d\n", cmd->t_task.t_task_cdbs,
+		atomic_read(&cmd->t_task.t_task_cdbs_left),
+		atomic_read(&cmd->t_task.t_task_cdbs_sent),
+		atomic_read(&cmd->t_task.t_task_cdbs_ex_left),
+		atomic_read(&cmd->t_task.t_transport_active),
+		atomic_read(&cmd->t_task.t_transport_stop),
+		atomic_read(&cmd->t_task.t_transport_sent));
 
 	transport_stop_all_task_timers(cmd);
 
@@ -2135,7 +2128,7 @@ static void transport_generic_request_failure(
 		break;
 	default:
 		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
-			cmd->t_task->t_task_cdb[0],
+			cmd->t_task.t_task_cdb[0],
 			cmd->transport_error_status);
 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 		break;
@@ -2156,19 +2149,19 @@ static void transport_direct_request_timeout(struct se_cmd *cmd)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_task->t_transport_timeout))) {
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	if (!(atomic_read(&cmd->t_task.t_transport_timeout))) {
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		return;
 	}
-	if (atomic_read(&cmd->t_task->t_task_cdbs_timeout_left)) {
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	if (atomic_read(&cmd->t_task.t_task_cdbs_timeout_left)) {
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		return;
 	}
 
-	atomic_sub(atomic_read(&cmd->t_task->t_transport_timeout),
-		   &cmd->t_task->t_se_count);
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	atomic_sub(atomic_read(&cmd->t_task.t_transport_timeout),
+		   &cmd->t_task.t_se_count);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 }
 
 static void transport_generic_request_timeout(struct se_cmd *cmd)
@@ -2176,16 +2169,16 @@ static void transport_generic_request_timeout(struct se_cmd *cmd)
 	unsigned long flags;
 
 	/*
-	 * Reset cmd->t_task->t_se_count to allow transport_generic_remove()
+	 * Reset cmd->t_task.t_se_count to allow transport_generic_remove()
 	 * to allow last call to free memory resources.
 	 */
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	if (atomic_read(&cmd->t_task->t_transport_timeout) > 1) {
-		int tmp = (atomic_read(&cmd->t_task->t_transport_timeout) - 1);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	if (atomic_read(&cmd->t_task.t_transport_timeout) > 1) {
+		int tmp = (atomic_read(&cmd->t_task.t_transport_timeout) - 1);
 
-		atomic_sub(tmp, &cmd->t_task->t_se_count);
+		atomic_sub(tmp, &cmd->t_task.t_se_count);
 	}
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	transport_generic_remove(cmd, 0, 0);
 }
@@ -2201,8 +2194,8 @@ transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
 		return -ENOMEM;
 	}
 
-	cmd->t_task->t_tasks_se_num = 0;
-	cmd->t_task->t_task_buf = buf;
+	cmd->t_task.t_tasks_se_num = 0;
+	cmd->t_task.t_task_buf = buf;
 
 	return 0;
 }
@@ -2244,9 +2237,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags);
 	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
-	spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags);
 }
 
 /*
@@ -2260,9 +2253,9 @@ static void transport_task_timeout_handler(unsigned long data)
 
 	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	if (task->task_flags & TF_STOP) {
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		return;
 	}
 	task->task_flags &= ~TF_RUNNING;
@@ -2273,13 +2266,13 @@ static void transport_task_timeout_handler(unsigned long data)
 	if (!(atomic_read(&task->task_active))) {
 		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
 				" == 0\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		return;
 	}
 
-	atomic_inc(&cmd->t_task->t_se_count);
-	atomic_inc(&cmd->t_task->t_transport_timeout);
-	cmd->t_task->t_tasks_failed = 1;
+	atomic_inc(&cmd->t_task.t_se_count);
+	atomic_inc(&cmd->t_task.t_transport_timeout);
+	cmd->t_task.t_tasks_failed = 1;
 
 	atomic_set(&task->task_timeout, 1);
 	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
@@ -2288,28 +2281,28 @@ static void transport_task_timeout_handler(unsigned long data)
 	if (atomic_read(&task->task_stop)) {
 		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
 				" == 1\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		complete(&task->task_stop_comp);
 		return;
 	}
 
-	if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) {
+	if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) {
 		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
 				" t_task_cdbs_left\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		return;
 	}
 	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
 			task, cmd);
 
 	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
 }
 
 /*
- * Called with cmd->t_task->t_state_lock held.
+ * Called with cmd->t_task.t_state_lock held.
  */
 static void transport_start_task_timer(struct se_task *task)
 {
@@ -2339,7 +2332,7 @@ static void transport_start_task_timer(struct se_task *task)
 }
 
 /*
- * Called with spin_lock_irq(&cmd->t_task->t_state_lock) held.
+ * Called with spin_lock_irq(&cmd->t_task.t_state_lock) held.
  */
 void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
 {
@@ -2349,11 +2342,11 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
 		return;
 
 	task->task_flags |= TF_STOP;
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, *flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, *flags);
 
 	del_timer_sync(&task->task_timer);
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, *flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, *flags);
 	task->task_flags &= ~TF_RUNNING;
 	task->task_flags &= ~TF_STOP;
 }
@@ -2363,11 +2356,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd)
 	struct se_task *task = NULL, *task_tmp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task->t_task_list, t_list)
+				&cmd->t_task.t_task_list, t_list)
 		__transport_stop_task_timer(task, &flags);
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 }
 
 static inline int transport_tcq_window_closed(struct se_device *dev)
@@ -2416,7 +2409,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 
 		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
 				" list, se_ordered_id: %u\n",
-				cmd->t_task->t_task_cdb[0],
+				cmd->t_task.t_task_cdb[0],
 				cmd->se_ordered_id);
 		/*
 		 * Add ORDERED command to tail of execution queue if
@@ -2450,7 +2443,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 
 		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
 			" delayed CMD list, se_ordered_id: %u\n",
-			cmd->t_task->t_task_cdb[0], cmd->sam_task_attr,
+			cmd->t_task.t_task_cdb[0], cmd->sam_task_attr,
 			cmd->se_ordered_id);
 		/*
 		 * Return zero to let transport_execute_tasks() know
@@ -2548,17 +2541,17 @@ check_depth:
 
 	cmd = task->task_se_cmd;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	atomic_set(&task->task_active, 1);
 	atomic_set(&task->task_sent, 1);
-	atomic_inc(&cmd->t_task->t_task_cdbs_sent);
+	atomic_inc(&cmd->t_task.t_task_cdbs_sent);
 
-	if (atomic_read(&cmd->t_task->t_task_cdbs_sent) ==
-	    cmd->t_task->t_task_cdbs)
+	if (atomic_read(&cmd->t_task.t_task_cdbs_sent) ==
+	    cmd->t_task.t_task_cdbs)
 		atomic_set(&cmd->transport_sent, 1);
 
 	transport_start_task_timer(task);
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 	/*
 	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
 	 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
@@ -2623,10 +2616,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
 	 * Any unsolicited data will get dumped for failed command inside of
 	 * the fabric plugin
 	 */
-	spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags);
 	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
 	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-	spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags);
 
 	se_cmd->se_tfo->new_cmd_failure(se_cmd);
 }
@@ -2836,17 +2829,17 @@ static void transport_xor_callback(struct se_cmd *cmd)
 		return;
 	}
 	/*
-	 * Copy the scatterlist WRITE buffer located at cmd->t_task->t_mem_list
+	 * Copy the scatterlist WRITE buffer located at cmd->t_task.t_mem_list
 	 * into the locally allocated *buf
 	 */
-	transport_memcpy_se_mem_read_contig(cmd, buf, cmd->t_task->t_mem_list);
+	transport_memcpy_se_mem_read_contig(cmd, buf, cmd->t_task.t_mem_list);
 	/*
 	 * Now perform the XOR against the BIDI read memory located at
-	 * cmd->t_task->t_mem_bidi_list
+	 * cmd->t_task.t_mem_bidi_list
 	 */
 
 	offset = 0;
-	list_for_each_entry(se_mem, cmd->t_task->t_mem_bidi_list, se_list) {
+	list_for_each_entry(se_mem, cmd->t_task.t_mem_bidi_list, se_list) {
 		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
 		if (!(addr))
 			goto out;
@@ -2874,14 +2867,14 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 
 	WARN_ON(!cmd->se_lun);
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		return 0;
 	}
 
 	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task->t_task_list, t_list) {
+				&cmd->t_task.t_task_list, t_list) {
 
 		if (!task->task_sense)
 			continue;
@@ -2903,7 +2896,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 				cmd->se_tfo->get_task_tag(cmd), task->task_no);
 			continue;
 		}
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
 				TRANSPORT_SENSE_BUFFER);
@@ -2921,7 +2914,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 				cmd->scsi_status);
 		return 0;
 	}
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	return -1;
 }
@@ -3036,7 +3029,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_6;
-		cmd->t_task->t_task_lba = transport_lba_21(cdb);
+		cmd->t_task.t_task_lba = transport_lba_21(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case READ_10:
@@ -3045,7 +3038,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_10;
-		cmd->t_task->t_task_lba = transport_lba_32(cdb);
+		cmd->t_task.t_task_lba = transport_lba_32(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case READ_12:
@@ -3054,7 +3047,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_12;
-		cmd->t_task->t_task_lba = transport_lba_32(cdb);
+		cmd->t_task.t_task_lba = transport_lba_32(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case READ_16:
@@ -3063,7 +3056,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_16;
-		cmd->t_task->t_task_lba = transport_lba_64(cdb);
+		cmd->t_task.t_task_lba = transport_lba_64(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_6:
@@ -3072,7 +3065,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_6;
-		cmd->t_task->t_task_lba = transport_lba_21(cdb);
+		cmd->t_task.t_task_lba = transport_lba_21(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_10:
@@ -3081,8 +3074,8 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_10;
-		cmd->t_task->t_task_lba = transport_lba_32(cdb);
-		cmd->t_task->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task.t_task_lba = transport_lba_32(cdb);
+		cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_12:
@@ -3091,8 +3084,8 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_12;
-		cmd->t_task->t_task_lba = transport_lba_32(cdb);
-		cmd->t_task->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task.t_task_lba = transport_lba_32(cdb);
+		cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_16:
@@ -3101,20 +3094,20 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_16;
-		cmd->t_task->t_task_lba = transport_lba_64(cdb);
-		cmd->t_task->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task.t_task_lba = transport_lba_64(cdb);
+		cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case XDWRITEREAD_10:
 		if ((cmd->data_direction != DMA_TO_DEVICE) ||
-		    !(cmd->t_task->t_tasks_bidi))
+		    !(cmd->t_task.t_tasks_bidi))
 			goto out_invalid_cdb_field;
 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
 		if (sector_ret)
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_10;
-		cmd->t_task->t_task_lba = transport_lba_32(cdb);
+		cmd->t_task.t_task_lba = transport_lba_32(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		passthrough = (dev->transport->transport_type ==
 				TRANSPORT_PLUGIN_PHBA_PDEV);
@@ -3127,7 +3120,7 @@ static int transport_generic_cmd_sequencer(
 		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
 		 */
 		cmd->transport_complete_callback = &transport_xor_callback;
-		cmd->t_task->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action = get_unaligned_be16(&cdb[8]);
@@ -3149,7 +3142,7 @@ static int transport_generic_cmd_sequencer(
 			 * XDWRITE_READ_32 logic.
 			 */
 			cmd->transport_split_cdb = &split_cdb_XX_32;
-			cmd->t_task->t_task_lba = transport_lba_64_ext(cdb);
+			cmd->t_task.t_task_lba = transport_lba_64_ext(cdb);
 			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 
 			/*
@@ -3163,14 +3156,14 @@ static int transport_generic_cmd_sequencer(
 			 * transport_generic_complete_ok()
 			 */
 			cmd->transport_complete_callback = &transport_xor_callback;
-			cmd->t_task->t_tasks_fua = (cdb[10] & 0x8);
+			cmd->t_task.t_tasks_fua = (cdb[10] & 0x8);
 			break;
 		case WRITE_SAME_32:
 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
 			if (sector_ret)
 				goto out_unsupported_cdb;
 			size = transport_get_size(sectors, cdb, cmd);
-			cmd->t_task->t_task_lba = get_unaligned_be64(&cdb[12]);
+			cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[12]);
 			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 
 			/*
@@ -3405,10 +3398,10 @@ static int transport_generic_cmd_sequencer(
 		 */
 		if (cdb[0] == SYNCHRONIZE_CACHE) {
 			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
-			cmd->t_task->t_task_lba = transport_lba_32(cdb);
+			cmd->t_task.t_task_lba = transport_lba_32(cdb);
 		} else {
 			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
-			cmd->t_task->t_task_lba = transport_lba_64(cdb);
+			cmd->t_task.t_task_lba = transport_lba_64(cdb);
 		}
 		if (sector_ret)
 			goto out_unsupported_cdb;
@@ -3454,7 +3447,7 @@ static int transport_generic_cmd_sequencer(
 		if (sector_ret)
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
-		cmd->t_task->t_task_lba = get_unaligned_be16(&cdb[2]);
+		cmd->t_task.t_task_lba = get_unaligned_be16(&cdb[2]);
 		passthrough = (dev->transport->transport_type ==
 				TRANSPORT_PLUGIN_PHBA_PDEV);
 		/*
@@ -3777,8 +3770,8 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
 		 */
 		if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
 			transport_memcpy_write_contig(cmd,
-				 cmd->t_task->t_task_pt_sgl,
-				 cmd->t_task->t_task_buf);
+				 cmd->t_task.t_task_pt_sgl,
+				 cmd->t_task.t_task_buf);
 
 		cmd->se_tfo->queue_data_in(cmd);
 		break;
@@ -3792,7 +3785,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
 		/*
 		 * Check if we need to send READ payload for BIDI-COMMAND
 		 */
-		if (cmd->t_task->t_mem_bidi_list != NULL) {
+		if (cmd->t_task.t_mem_bidi_list != NULL) {
 			spin_lock(&cmd->se_lun->lun_sep_lock);
 			if (cmd->se_lun->lun_sep) {
 				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
@@ -3819,9 +3812,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
 	struct se_task *task, *task_tmp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task->t_task_list, t_list) {
+				&cmd->t_task.t_task_list, t_list) {
 		if (atomic_read(&task->task_active))
 			continue;
 
@@ -3830,15 +3823,15 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
 
 		list_del(&task->t_list);
 
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		if (task->se_dev)
 			task->se_dev->transport->free_task(task);
 		else
 			printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
 				task->task_no);
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	}
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 }
 
 static inline void transport_free_pages(struct se_cmd *cmd)
@@ -3851,9 +3844,9 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	if (cmd->se_dev->transport->do_se_mem_map)
 		free_page = 0;
 
-	if (cmd->t_task->t_task_buf) {
-		kfree(cmd->t_task->t_task_buf);
-		cmd->t_task->t_task_buf = NULL;
+	if (cmd->t_task.t_task_buf) {
+		kfree(cmd->t_task.t_task_buf);
+		cmd->t_task.t_task_buf = NULL;
 		return;
 	}
 
@@ -3863,11 +3856,11 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
 		return;
 
-	if (!(cmd->t_task->t_tasks_se_num))
+	if (!(cmd->t_task.t_tasks_se_num))
 		return;
 
 	list_for_each_entry_safe(se_mem, se_mem_tmp,
-			cmd->t_task->t_mem_list, se_list) {
+			cmd->t_task.t_mem_list, se_list) {
 		/*
 		 * We only release call __free_page(struct se_mem->se_page) when
 		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
@@ -3879,9 +3872,9 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 		kmem_cache_free(se_mem_cache, se_mem);
 	}
 
-	if (cmd->t_task->t_mem_bidi_list && cmd->t_task->t_tasks_se_bidi_num) {
+	if (cmd->t_task.t_mem_bidi_list && cmd->t_task.t_tasks_se_bidi_num) {
 		list_for_each_entry_safe(se_mem, se_mem_tmp,
-				cmd->t_task->t_mem_bidi_list, se_list) {
+				cmd->t_task.t_mem_bidi_list, se_list) {
 			/*
 			 * We only release call __free_page(struct se_mem->se_page) when
 			 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
@@ -3894,11 +3887,11 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 		}
 	}
 
-	kfree(cmd->t_task->t_mem_bidi_list);
-	cmd->t_task->t_mem_bidi_list = NULL;
-	kfree(cmd->t_task->t_mem_list);
-	cmd->t_task->t_mem_list = NULL;
-	cmd->t_task->t_tasks_se_num = 0;
+	kfree(cmd->t_task.t_mem_bidi_list);
+	cmd->t_task.t_mem_bidi_list = NULL;
+	kfree(cmd->t_task.t_mem_list);
+	cmd->t_task.t_mem_list = NULL;
+	cmd->t_task.t_tasks_se_num = 0;
 }
 
 static inline void transport_release_tasks(struct se_cmd *cmd)
@@ -3910,23 +3903,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	if (atomic_read(&cmd->t_task->t_fe_count)) {
-		if (!(atomic_dec_and_test(&cmd->t_task->t_fe_count))) {
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	if (atomic_read(&cmd->t_task.t_fe_count)) {
+		if (!(atomic_dec_and_test(&cmd->t_task.t_fe_count))) {
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
 					flags);
 			return 1;
 		}
 	}
 
-	if (atomic_read(&cmd->t_task->t_se_count)) {
-		if (!(atomic_dec_and_test(&cmd->t_task->t_se_count))) {
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
+	if (atomic_read(&cmd->t_task.t_se_count)) {
+		if (!(atomic_dec_and_test(&cmd->t_task.t_se_count))) {
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
 					flags);
 			return 1;
 		}
 	}
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	return 0;
 }
@@ -3938,14 +3931,14 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
 	if (transport_dec_and_check(cmd))
 		return;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_task->transport_dev_active))) {
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		goto free_pages;
 	}
-	atomic_set(&cmd->t_task->transport_dev_active, 0);
+	atomic_set(&cmd->t_task.transport_dev_active, 0);
 	transport_all_task_dev_remove_state(cmd);
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	transport_release_tasks(cmd);
 free_pages:
@@ -3961,33 +3954,30 @@ static int transport_generic_remove(
 {
 	unsigned long flags;
 
-	if (!(cmd->t_task))
-		goto release_cmd;
-
 	if (transport_dec_and_check(cmd)) {
 		if (session_reinstatement) {
-			spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 			transport_all_task_dev_remove_state(cmd);
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
 					flags);
 		}
 		return 1;
 	}
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_task->transport_dev_active))) {
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		goto free_pages;
 	}
-	atomic_set(&cmd->t_task->transport_dev_active, 0);
+	atomic_set(&cmd->t_task.transport_dev_active, 0);
 	transport_all_task_dev_remove_state(cmd);
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	transport_release_tasks(cmd);
+
 free_pages:
 	transport_free_pages(cmd);
 
-release_cmd:
 	if (release_to_pool) {
 		transport_release_cmd_to_pool(cmd);
 	} else {
@@ -4032,8 +4022,8 @@ int transport_generic_map_mem_to_cmd(
 			return -ENOSYS;
 		}
 
-		cmd->t_task->t_mem_list = (struct list_head *)mem;
-		cmd->t_task->t_tasks_se_num = sg_mem_num;
+		cmd->t_task.t_mem_list = (struct list_head *)mem;
+		cmd->t_task.t_tasks_se_num = sg_mem_num;
 		cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
 		return 0;
 	}
@@ -4048,36 +4038,36 @@ int transport_generic_map_mem_to_cmd(
 		 * processed into a TCM struct se_subsystem_dev, we do the mapping
 		 * from the passed physical memory to struct se_mem->se_page here.
 		 */
-		cmd->t_task->t_mem_list = transport_init_se_mem_list();
-		if (!(cmd->t_task->t_mem_list))
+		cmd->t_task.t_mem_list = transport_init_se_mem_list();
+		if (!(cmd->t_task.t_mem_list))
 			return -ENOMEM;
 
 		ret = transport_map_sg_to_mem(cmd,
-			cmd->t_task->t_mem_list, mem, &se_mem_cnt_out);
+			cmd->t_task.t_mem_list, mem, &se_mem_cnt_out);
 		if (ret < 0)
 			return -ENOMEM;
 
-		cmd->t_task->t_tasks_se_num = se_mem_cnt_out;
+		cmd->t_task.t_tasks_se_num = se_mem_cnt_out;
 		/*
 		 * Setup BIDI READ list of struct se_mem elements
 		 */
 		if ((mem_bidi_in) && (sg_mem_bidi_num)) {
-			cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list();
-			if (!(cmd->t_task->t_mem_bidi_list)) {
-				kfree(cmd->t_task->t_mem_list);
+			cmd->t_task.t_mem_bidi_list = transport_init_se_mem_list();
+			if (!(cmd->t_task.t_mem_bidi_list)) {
+				kfree(cmd->t_task.t_mem_list);
 				return -ENOMEM;
 			}
 			se_mem_cnt_out = 0;
 
 			ret = transport_map_sg_to_mem(cmd,
-				cmd->t_task->t_mem_bidi_list, mem_bidi_in,
+				cmd->t_task.t_mem_bidi_list, mem_bidi_in,
 				&se_mem_cnt_out);
 			if (ret < 0) {
-				kfree(cmd->t_task->t_mem_list);
+				kfree(cmd->t_task.t_mem_list);
 				return -ENOMEM;
 			}
 
-			cmd->t_task->t_tasks_se_bidi_num = se_mem_cnt_out;
+			cmd->t_task.t_tasks_se_bidi_num = se_mem_cnt_out;
 		}
 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 
@@ -4097,7 +4087,7 @@ int transport_generic_map_mem_to_cmd(
 		 * struct scatterlist format.
 		 */
 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
-		cmd->t_task->t_task_pt_sgl = mem;
+		cmd->t_task.t_task_pt_sgl = mem;
 	}
 
 	return 0;
@@ -4114,19 +4104,19 @@ static int transport_get_sectors(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_lun->lun_se_dev;
 
-	cmd->t_task->t_tasks_sectors =
+	cmd->t_task.t_tasks_sectors =
 		(cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
-	if (!(cmd->t_task->t_tasks_sectors))
-		cmd->t_task->t_tasks_sectors = 1;
+	if (!(cmd->t_task.t_tasks_sectors))
+		cmd->t_task.t_tasks_sectors = 1;
 
 	if (dev->transport->get_device_type(dev) != TYPE_DISK)
 		return 0;
 
-	if ((cmd->t_task->t_task_lba + cmd->t_task->t_tasks_sectors) >
+	if ((cmd->t_task.t_task_lba + cmd->t_task.t_tasks_sectors) >
 	     transport_dev_end_lba(dev)) {
 		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
 			" transport_dev_end_lba(): %llu\n",
-			cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors,
+			cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors,
 			transport_dev_end_lba(dev));
 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
@@ -4143,21 +4133,21 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 
 	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
 		task_cdbs++;
-		cmd->t_task->t_task_cdbs++;
+		cmd->t_task.t_task_cdbs++;
 	} else {
 		int set_counts = 1;
 
 		/*
 		 * Setup any BIDI READ tasks and memory from
-		 * cmd->t_task->t_mem_bidi_list so the READ struct se_tasks
+		 * cmd->t_task.t_mem_bidi_list so the READ struct se_tasks
 		 * are queued first for the non pSCSI passthrough case.
 		 */
-		if ((cmd->t_task->t_mem_bidi_list != NULL) &&
+		if ((cmd->t_task.t_mem_bidi_list != NULL) &&
 		    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
 			rc = transport_generic_get_cdb_count(cmd,
-				cmd->t_task->t_task_lba,
-				cmd->t_task->t_tasks_sectors,
-				DMA_FROM_DEVICE, cmd->t_task->t_mem_bidi_list,
+				cmd->t_task.t_task_lba,
+				cmd->t_task.t_tasks_sectors,
+				DMA_FROM_DEVICE, cmd->t_task.t_mem_bidi_list,
 				set_counts);
 			if (!(rc)) {
 				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4168,13 +4158,13 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 			set_counts = 0;
 		}
 		/*
-		 * Setup the tasks and memory from cmd->t_task->t_mem_list
+		 * Setup the tasks and memory from cmd->t_task.t_mem_list
 		 * Note for BIDI transfers this will contain the WRITE payload
 		 */
 		task_cdbs = transport_generic_get_cdb_count(cmd,
-				cmd->t_task->t_task_lba,
-				cmd->t_task->t_tasks_sectors,
-				cmd->data_direction, cmd->t_task->t_mem_list,
+				cmd->t_task.t_task_lba,
+				cmd->t_task.t_tasks_sectors,
+				cmd->data_direction, cmd->t_task.t_mem_list,
 				set_counts);
 		if (!(task_cdbs)) {
 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4182,19 +4172,19 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			return PYX_TRANSPORT_LU_COMM_FAILURE;
 		}
-		cmd->t_task->t_task_cdbs += task_cdbs;
+		cmd->t_task.t_task_cdbs += task_cdbs;
 
 #if 0
 		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
 			" %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
-			cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors,
-			cmd->t_task->t_task_cdbs);
+			cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors,
+			cmd->t_task.t_task_cdbs);
 #endif
 	}
 
-	atomic_set(&cmd->t_task->t_task_cdbs_left, task_cdbs);
-	atomic_set(&cmd->t_task->t_task_cdbs_ex_left, task_cdbs);
-	atomic_set(&cmd->t_task->t_task_cdbs_timeout_left, task_cdbs);
+	atomic_set(&cmd->t_task.t_task_cdbs_left, task_cdbs);
+	atomic_set(&cmd->t_task.t_task_cdbs_ex_left, task_cdbs);
+	atomic_set(&cmd->t_task.t_task_cdbs_timeout_left, task_cdbs);
 	return 0;
 }
 
@@ -4218,8 +4208,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 	unsigned char *buf;
 	struct se_mem *se_mem;
 
-	cmd->t_task->t_mem_list = transport_init_se_mem_list();
-	if (!(cmd->t_task->t_mem_list))
+	cmd->t_task.t_mem_list = transport_init_se_mem_list();
+	if (!(cmd->t_task.t_mem_list))
 		return -ENOMEM;
 
 	/*
@@ -4231,10 +4221,10 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 	/*
 	 * Setup BIDI-COMMAND READ list of struct se_mem elements
 	 */
-	if (cmd->t_task->t_tasks_bidi) {
-		cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list();
-		if (!(cmd->t_task->t_mem_bidi_list)) {
-			kfree(cmd->t_task->t_mem_list);
+	if (cmd->t_task.t_tasks_bidi) {
+		cmd->t_task.t_mem_bidi_list = transport_init_se_mem_list();
+		if (!(cmd->t_task.t_mem_bidi_list)) {
+			kfree(cmd->t_task.t_mem_list);
 			return -ENOMEM;
 		}
 	}
@@ -4263,8 +4253,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 		memset(buf, 0, se_mem->se_len);
 		kunmap_atomic(buf, KM_IRQ0);
 
-		list_add_tail(&se_mem->se_list, cmd->t_task->t_mem_list);
-		cmd->t_task->t_tasks_se_num++;
+		list_add_tail(&se_mem->se_list, cmd->t_task.t_mem_list);
+		cmd->t_task.t_tasks_se_num++;
 
 		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
 			" Offset(%u)\n", se_mem->se_page, se_mem->se_len,
@@ -4274,7 +4264,7 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 	}
 
 	DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
-			cmd->t_task->t_tasks_se_num);
+			cmd->t_task.t_tasks_se_num);
 
 	return 0;
 out:
@@ -4306,7 +4296,7 @@ int transport_init_task_sg(
 				sg_length = se_mem->se_len;
 
 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task->t_mem_list)))
+						se_cmd->t_task.t_mem_list)))
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 			} else {
@@ -4326,7 +4316,7 @@ int transport_init_task_sg(
 				sg_length = (se_mem->se_len - task_offset);
 
 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task->t_mem_list)))
+						se_cmd->t_task.t_mem_list)))
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 			}
@@ -4367,7 +4357,7 @@ next:
 	 * Setup task->task_sg_bidi for SCSI READ payload for
 	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
 	 */
-	if ((se_cmd->t_task->t_mem_bidi_list != NULL) &&
+	if ((se_cmd->t_task.t_mem_bidi_list != NULL) &&
 	    (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
 		task->task_sg_bidi = kzalloc(task_sg_num_padded *
 				sizeof(struct scatterlist), GFP_KERNEL);
@@ -4551,7 +4541,7 @@ int transport_map_mem_to_sg(
 				sg->length = se_mem->se_len;
 
 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task->t_mem_list))) {
+						se_cmd->t_task.t_mem_list))) {
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 					(*se_mem_cnt)++;
@@ -4587,7 +4577,7 @@ int transport_map_mem_to_sg(
 				sg->length = (se_mem->se_len - *task_offset);
 
 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task->t_mem_list))) {
+						se_cmd->t_task.t_mem_list))) {
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 					(*se_mem_cnt)++;
@@ -4645,7 +4635,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	 * Walk the struct se_task list and setup scatterlist chains
 	 * for each contiguosly allocated struct se_task->task_sg[].
 	 */
-	list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
+	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
 		if (!(task->task_sg) || !(task->task_padded_sg))
 			continue;
 
@@ -4656,7 +4646,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 			 * Either add chain or mark end of scatterlist
 			 */
 			if (!(list_is_last(&task->t_list,
-					&cmd->t_task->t_task_list))) {
+					&cmd->t_task.t_task_list))) {
 				/*
 				 * Clear existing SGL termination bit set in
 				 * transport_init_task_sg(), see sg_mark_end()
@@ -4682,7 +4672,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 		/*
 		 * Check for single task..
 		 */
-		if (!(list_is_last(&task->t_list, &cmd->t_task->t_task_list))) {
+		if (!(list_is_last(&task->t_list, &cmd->t_task.t_task_list))) {
 			/*
 			 * Clear existing SGL termination bit set in
 			 * transport_init_task_sg(), see sg_mark_end()
@@ -4700,15 +4690,15 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	 * Setup the starting pointer and total t_tasks_sg_linked_no including
 	 * padding SGs for linking and to mark the end.
 	 */
-	cmd->t_task->t_tasks_sg_chained = sg_first;
-	cmd->t_task->t_tasks_sg_chained_no = sg_count;
+	cmd->t_task.t_tasks_sg_chained = sg_first;
+	cmd->t_task.t_tasks_sg_chained_no = sg_count;
 
-	DEBUG_CMD_M("Setup cmd: %p cmd->t_task->t_tasks_sg_chained: %p and"
-		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task->t_tasks_sg_chained,
-		cmd->t_task->t_tasks_sg_chained_no);
+	DEBUG_CMD_M("Setup cmd: %p cmd->t_task.t_tasks_sg_chained: %p and"
+		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task.t_tasks_sg_chained,
+		cmd->t_task.t_tasks_sg_chained_no);
 
-	for_each_sg(cmd->t_task->t_tasks_sg_chained, sg,
-			cmd->t_task->t_tasks_sg_chained_no, i) {
+	for_each_sg(cmd->t_task.t_tasks_sg_chained, sg,
+			cmd->t_task.t_tasks_sg_chained_no, i) {
 
 		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
 			i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
@@ -4741,7 +4731,7 @@ static int transport_do_se_mem_map(
 				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
 				task_offset_in);
 		if (ret == 0)
-			task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
+			task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
 
 		return ret;
 	}
@@ -4810,10 +4800,10 @@ static u32 transport_generic_get_cdb_count(
 	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
 	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
 	 */
-	if ((cmd->t_task->t_mem_bidi_list != NULL) &&
-	    !(list_empty(cmd->t_task->t_mem_bidi_list)) &&
+	if ((cmd->t_task.t_mem_bidi_list != NULL) &&
+	    !(list_empty(cmd->t_task.t_mem_bidi_list)) &&
 	    (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
-		se_mem_bidi = list_entry(cmd->t_task->t_mem_bidi_list->next,
+		se_mem_bidi = list_entry(cmd->t_task.t_mem_bidi_list->next,
 					struct se_mem, se_list);
 
 	while (sectors) {
@@ -4836,15 +4826,15 @@ static u32 transport_generic_get_cdb_count(
 
 		cdb = dev->transport->get_cdb(task);
 		if ((cdb)) {
-			memcpy(cdb, cmd->t_task->t_task_cdb,
-				scsi_command_size(cmd->t_task->t_task_cdb));
+			memcpy(cdb, cmd->t_task.t_task_cdb,
+				scsi_command_size(cmd->t_task.t_task_cdb));
 			cmd->transport_split_cdb(task->task_lba,
 					&task->task_sectors, cdb);
 		}
 
 		/*
 		 * Perform the SE OBJ plugin and/or Transport plugin specific
-		 * mapping for cmd->t_task->t_mem_list. And setup the
+		 * mapping for cmd->t_task.t_mem_list. And setup the
 		 * task->task_sg and if necessary task->task_sg_bidi
 		 */
 		ret = transport_do_se_mem_map(dev, task, mem_list,
@@ -4855,7 +4845,7 @@ static u32 transport_generic_get_cdb_count(
 
 		se_mem = se_mem_lout;
 		/*
-		 * Setup the cmd->t_task->t_mem_bidi_list -> task->task_sg_bidi
+		 * Setup the cmd->t_task.t_mem_bidi_list -> task->task_sg_bidi
 		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
 		 *
 		 * Note that the first call to transport_do_se_mem_map() above will
@@ -4865,7 +4855,7 @@ static u32 transport_generic_get_cdb_count(
 		 */
 		if (task->task_sg_bidi != NULL) {
 			ret = transport_do_se_mem_map(dev, task,
-				cmd->t_task->t_mem_bidi_list, NULL,
+				cmd->t_task.t_mem_bidi_list, NULL,
 				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
 				&task_offset_in);
 			if (ret < 0)
@@ -4888,8 +4878,8 @@ static u32 transport_generic_get_cdb_count(
 	}
 
 	if (set_counts) {
-		atomic_inc(&cmd->t_task->t_fe_count);
-		atomic_inc(&cmd->t_task->t_se_count);
+		atomic_inc(&cmd->t_task.t_fe_count);
+		atomic_inc(&cmd->t_task.t_se_count);
 	}
 
 	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
@@ -4915,26 +4905,26 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
 
 	cdb = dev->transport->get_cdb(task);
 	if (cdb)
-		memcpy(cdb, cmd->t_task->t_task_cdb,
-			scsi_command_size(cmd->t_task->t_task_cdb));
+		memcpy(cdb, cmd->t_task.t_task_cdb,
+			scsi_command_size(cmd->t_task.t_task_cdb));
 
 	task->task_size = cmd->data_length;
 	task->task_sg_num =
 		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
 
-	atomic_inc(&cmd->t_task->t_fe_count);
-	atomic_inc(&cmd->t_task->t_se_count);
+	atomic_inc(&cmd->t_task.t_fe_count);
+	atomic_inc(&cmd->t_task.t_se_count);
 
 	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
 		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
 		u32 se_mem_cnt = 0, task_offset = 0;
 
-		if (!list_empty(cmd->t_task->t_mem_list))
-			se_mem = list_entry(cmd->t_task->t_mem_list->next,
+		if (!list_empty(cmd->t_task.t_mem_list))
+			se_mem = list_entry(cmd->t_task.t_mem_list->next,
 					struct se_mem, se_list);
 
 		ret = transport_do_se_mem_map(dev, task,
-				cmd->t_task->t_mem_list, NULL, se_mem,
+				cmd->t_task.t_mem_list, NULL, se_mem,
 				&se_mem_lout, &se_mem_cnt, &task_offset);
 		if (ret < 0)
 			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
@@ -4976,7 +4966,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
 	 * Determine is the TCM fabric module has already allocated physical
 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
 	 * to setup beforehand the linked list of physical memory at
-	 * cmd->t_task->t_mem_list of struct se_mem->se_page
+	 * cmd->t_task.t_mem_list of struct se_mem->se_page
 	 */
 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
 		ret = transport_allocate_resources(cmd);
@@ -5005,7 +4995,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
 	}
 
 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-		list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
+		list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
 			if (atomic_read(&task->task_sent))
 				continue;
 			if (!dev->transport->map_task_SG)
@@ -5052,9 +5042,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
 	 * original EDTL
 	 */
 	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
-		if (!cmd->t_task->t_tasks_se_num) {
+		if (!cmd->t_task.t_tasks_se_num) {
 			unsigned char *dst, *buf =
-				(unsigned char *)cmd->t_task->t_task_buf;
+				(unsigned char *)cmd->t_task.t_task_buf;
 
 			dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
 			if (!(dst)) {
@@ -5066,15 +5056,15 @@ void transport_generic_process_write(struct se_cmd *cmd)
 			}
 			memcpy(dst, buf, cmd->cmd_spdtl);
 
-			kfree(cmd->t_task->t_task_buf);
-			cmd->t_task->t_task_buf = dst;
+			kfree(cmd->t_task.t_task_buf);
+			cmd->t_task.t_task_buf = dst;
 		} else {
 			struct scatterlist *sg =
-				(struct scatterlist *sg)cmd->t_task->t_task_buf;
+				(struct scatterlist *sg)cmd->t_task.t_task_buf;
 			struct scatterlist *orig_sg;
 
 			orig_sg = kzalloc(sizeof(struct scatterlist) *
-					cmd->t_task->t_tasks_se_num,
+					cmd->t_task.t_tasks_se_num,
 					GFP_KERNEL))) {
 			if (!(orig_sg)) {
 				printk(KERN_ERR "Unable to allocate memory"
@@ -5084,9 +5074,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
 				return;
 			}
 
-			memcpy(orig_sg, cmd->t_task->t_task_buf,
+			memcpy(orig_sg, cmd->t_task.t_task_buf,
 					sizeof(struct scatterlist) *
-					cmd->t_task->t_tasks_se_num);
+					cmd->t_task.t_tasks_se_num);
 
 			cmd->data_length = cmd->cmd_spdtl;
 			/*
@@ -5117,22 +5107,22 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	cmd->t_state = TRANSPORT_WRITE_PENDING;
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 	/*
 	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
 	 * from the passed Linux/SCSI struct scatterlist located at
-	 * se_cmd->t_task->t_task_pt_buf to the contiguous buffer at
-	 * se_cmd->t_task->t_task_buf.
+	 * se_cmd->t_task.t_task_pt_buf to the contiguous buffer at
+	 * se_cmd->t_task.t_task_buf.
 	 */
 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
 		transport_memcpy_read_contig(cmd,
-				cmd->t_task->t_task_buf,
-				cmd->t_task->t_task_pt_sgl);
+				cmd->t_task.t_task_buf,
+				cmd->t_task.t_task_pt_sgl);
 	/*
 	 * Clear the se_cmd for WRITE_PENDING status in order to set
-	 * cmd->t_task->t_transport_active=0 so that transport_generic_handle_data
+	 * cmd->t_task.t_transport_active=0 so that transport_generic_handle_data
 	 * can be called from HW target mode interrupt code.  This is safe
 	 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
 	 * because the se_cmd->se_lun pointer is not being cleared.
@@ -5156,7 +5146,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
  */
 void transport_release_cmd_to_pool(struct se_cmd *cmd)
 {
-	BUG_ON(!cmd->t_task);
 	BUG_ON(!cmd->se_tfo);
 
 	transport_free_se_cmd(cmd);
@@ -5174,7 +5163,7 @@ void transport_generic_free_cmd(
 	int release_to_pool,
 	int session_reinstatement)
 {
-	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !cmd->t_task)
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
 		transport_release_cmd_to_pool(cmd);
 	else {
 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
@@ -5220,28 +5209,28 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	 * If the frontend has already requested this struct se_cmd to
 	 * be stopped, we can safely ignore this struct se_cmd.
 	 */
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	if (atomic_read(&cmd->t_task->t_transport_stop)) {
-		atomic_set(&cmd->t_task->transport_lun_stop, 0);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	if (atomic_read(&cmd->t_task.t_transport_stop)) {
+		atomic_set(&cmd->t_task.transport_lun_stop, 0);
 		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
 			" TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		transport_cmd_check_stop(cmd, 1, 0);
 		return -EPERM;
 	}
-	atomic_set(&cmd->t_task->transport_lun_fe_stop, 1);
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	atomic_set(&cmd->t_task.transport_lun_fe_stop, 1);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq);
 
 	ret = transport_stop_tasks_for_cmd(cmd);
 
 	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
-			" %d\n", cmd, cmd->t_task->t_task_cdbs, ret);
+			" %d\n", cmd, cmd->t_task.t_task_cdbs, ret);
 	if (!ret) {
 		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
-		wait_for_completion(&cmd->t_task->transport_lun_stop_comp);
+		wait_for_completion(&cmd->t_task.transport_lun_stop_comp);
 		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
 	}
@@ -5271,26 +5260,19 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 			struct se_cmd, se_lun_list);
 		list_del(&cmd->se_lun_list);
 
-		if (!(cmd->t_task)) {
-			printk(KERN_ERR "ITT: 0x%08x, cmd->t_task = NULL"
-				"[i,t]_state: %u/%u\n",
-				cmd->se_tfo->get_task_tag(cmd),
-				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
-			BUG();
-		}
-		atomic_set(&cmd->t_task->transport_lun_active, 0);
+		atomic_set(&cmd->t_task.transport_lun_active, 0);
 		/*
 		 * This will notify iscsi_target_transport.c:
 		 * transport_cmd_check_stop() that a LUN shutdown is in
 		 * progress for the iscsi_cmd_t.
 		 */
-		spin_lock(&cmd->t_task->t_state_lock);
-		DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task->transport"
+		spin_lock(&cmd->t_task.t_state_lock);
+		DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task.transport"
 			"_lun_stop for  ITT: 0x%08x\n",
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
-		atomic_set(&cmd->t_task->transport_lun_stop, 1);
-		spin_unlock(&cmd->t_task->t_state_lock);
+		atomic_set(&cmd->t_task.transport_lun_stop, 1);
+		spin_unlock(&cmd->t_task.t_state_lock);
 
 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
 
@@ -5318,14 +5300,14 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
 
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags);
-		if (!(atomic_read(&cmd->t_task->transport_dev_active))) {
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags);
+		if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
 			goto check_cond;
 		}
-		atomic_set(&cmd->t_task->transport_dev_active, 0);
+		atomic_set(&cmd->t_task.transport_dev_active, 0);
 		transport_all_task_dev_remove_state(cmd);
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
 
 		transport_free_dev_tasks(cmd);
 		/*
@@ -5342,24 +5324,24 @@ check_cond:
 		 * be released, notify the waiting thread now that LU has
 		 * finished accessing it.
 		 */
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags);
-		if (atomic_read(&cmd->t_task->transport_lun_fe_stop)) {
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags);
+		if (atomic_read(&cmd->t_task.transport_lun_fe_stop)) {
 			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
 				" struct se_cmd: %p ITT: 0x%08x\n",
 				lun->unpacked_lun,
 				cmd, cmd->se_tfo->get_task_tag(cmd));
 
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
 					cmd_flags);
 			transport_cmd_check_stop(cmd, 1, 0);
-			complete(&cmd->t_task->transport_lun_fe_stop_comp);
+			complete(&cmd->t_task.transport_lun_fe_stop_comp);
 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 			continue;
 		}
 		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
 			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
 
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
 		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 	}
 	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -5405,15 +5387,15 @@ static void transport_generic_wait_for_tasks(
 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
 		return;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	/*
 	 * If we are already stopped due to an external event (ie: LUN shutdown)
 	 * sleep until the connection can have the passed struct se_cmd back.
-	 * The cmd->t_task->transport_lun_stopped_sem will be upped by
+	 * The cmd->t_task.transport_lun_stopped_sem will be upped by
 	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
 	 * has completed its operation on the struct se_cmd.
 	 */
-	if (atomic_read(&cmd->t_task->transport_lun_stop)) {
+	if (atomic_read(&cmd->t_task.transport_lun_stop)) {
 
 		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
 			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
@@ -5426,10 +5408,10 @@ static void transport_generic_wait_for_tasks(
 		 * We go ahead and up transport_lun_stop_comp just to be sure
 		 * here.
 		 */
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
-		complete(&cmd->t_task->transport_lun_stop_comp);
-		wait_for_completion(&cmd->t_task->transport_lun_fe_stop_comp);
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		complete(&cmd->t_task.transport_lun_stop_comp);
+		wait_for_completion(&cmd->t_task.transport_lun_fe_stop_comp);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 
 		transport_all_task_dev_remove_state(cmd);
 		/*
@@ -5442,13 +5424,13 @@ static void transport_generic_wait_for_tasks(
 			"stop_comp); for ITT: 0x%08x\n",
 			cmd->se_tfo->get_task_tag(cmd));
 
-		atomic_set(&cmd->t_task->transport_lun_stop, 0);
+		atomic_set(&cmd->t_task.transport_lun_stop, 0);
 	}
-	if (!atomic_read(&cmd->t_task->t_transport_active) ||
-	     atomic_read(&cmd->t_task->t_transport_aborted))
+	if (!atomic_read(&cmd->t_task.t_transport_active) ||
+	     atomic_read(&cmd->t_task.t_transport_aborted))
 		goto remove;
 
-	atomic_set(&cmd->t_task->t_transport_stop, 1);
+	atomic_set(&cmd->t_task.t_transport_stop, 1);
 
 	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
 		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
@@ -5456,21 +5438,21 @@ static void transport_generic_wait_for_tasks(
 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
 		cmd->deferred_t_state);
 
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq);
 
-	wait_for_completion(&cmd->t_task->t_transport_stop_comp);
+	wait_for_completion(&cmd->t_task.t_transport_stop_comp);
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	atomic_set(&cmd->t_task->t_transport_active, 0);
-	atomic_set(&cmd->t_task->t_transport_stop, 0);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	atomic_set(&cmd->t_task.t_transport_active, 0);
+	atomic_set(&cmd->t_task.t_transport_stop, 0);
 
 	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
-		"&cmd->t_task->t_transport_stop_comp) for ITT: 0x%08x\n",
+		"&cmd->t_task.t_transport_stop_comp) for ITT: 0x%08x\n",
 		cmd->se_tfo->get_task_tag(cmd));
 remove:
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 	if (!remove_cmd)
 		return;
 
@@ -5509,13 +5491,13 @@ int transport_send_check_condition_and_sense(
 	int offset;
 	u8 asc = 0, ascq = 0;
 
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		return 0;
 	}
 	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
 	if (!reason && from_transport)
 		goto after_reason;
@@ -5674,14 +5656,14 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 {
 	int ret = 0;
 
-	if (atomic_read(&cmd->t_task->t_transport_aborted) != 0) {
+	if (atomic_read(&cmd->t_task.t_transport_aborted) != 0) {
 		if (!(send_status) ||
 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
 			return 1;
 #if 0
 		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
 			" status for CDB: 0x%02x ITT: 0x%08x\n",
-			cmd->t_task->t_task_cdb[0],
+			cmd->t_task.t_task_cdb[0],
 			cmd->se_tfo->get_task_tag(cmd));
 #endif
 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
@@ -5702,7 +5684,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	 */
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-			atomic_inc(&cmd->t_task->t_transport_aborted);
+			atomic_inc(&cmd->t_task.t_transport_aborted);
 			smp_mb__after_atomic_inc();
 			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 			transport_new_cmd_failure(cmd);
@@ -5712,7 +5694,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 #if 0
 	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
-		" ITT: 0x%08x\n", cmd->t_task->t_task_cdb[0],
+		" ITT: 0x%08x\n", cmd->t_task.t_task_cdb[0],
 		cmd->se_tfo->get_task_tag(cmd));
 #endif
 	cmd->se_tfo->queue_status(cmd);
@@ -5803,15 +5785,9 @@ static void transport_processing_shutdown(struct se_device *dev)
 		}
 		cmd = task->task_se_cmd;
 
-		if (!cmd->t_task) {
-			printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:"
-				" %p ITT: 0x%08x\n", task, cmd,
-				cmd->se_tfo->get_task_tag(cmd));
-			continue;
-		}
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 
 		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
 			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
@@ -5819,22 +5795,22 @@ static void transport_processing_shutdown(struct se_device *dev)
 			cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn,
 			cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state,
 			cmd->t_state, cmd->deferred_t_state,
-			cmd->t_task->t_task_cdb[0]);
+			cmd->t_task.t_task_cdb[0]);
 		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
 			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
-			cmd->t_task->t_task_cdbs,
-			atomic_read(&cmd->t_task->t_task_cdbs_left),
-			atomic_read(&cmd->t_task->t_task_cdbs_sent),
-			atomic_read(&cmd->t_task->t_transport_active),
-			atomic_read(&cmd->t_task->t_transport_stop),
-			atomic_read(&cmd->t_task->t_transport_sent));
+			cmd->t_task.t_task_cdbs,
+			atomic_read(&cmd->t_task.t_task_cdbs_left),
+			atomic_read(&cmd->t_task.t_task_cdbs_sent),
+			atomic_read(&cmd->t_task.t_transport_active),
+			atomic_read(&cmd->t_task.t_transport_stop),
+			atomic_read(&cmd->t_task.t_transport_sent));
 
 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
 			spin_unlock_irqrestore(
-				&cmd->t_task->t_state_lock, flags);
+				&cmd->t_task.t_state_lock, flags);
 
 			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
 				" %p\n", task, dev);
@@ -5842,8 +5818,8 @@ static void transport_processing_shutdown(struct se_device *dev)
 			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
 				task, dev);
 
-			spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-			atomic_dec(&cmd->t_task->t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+			atomic_dec(&cmd->t_task.t_task_cdbs_left);
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
@@ -5853,25 +5829,25 @@ static void transport_processing_shutdown(struct se_device *dev)
 		}
 		__transport_stop_task_timer(task, &flags);
 
-		if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) {
+		if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) {
 			spin_unlock_irqrestore(
-					&cmd->t_task->t_state_lock, flags);
+					&cmd->t_task.t_state_lock, flags);
 
 			DEBUG_DO("Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
-				atomic_read(&cmd->t_task->t_task_cdbs_ex_left));
+				atomic_read(&cmd->t_task.t_task_cdbs_ex_left));
 
 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
 
-		if (atomic_read(&cmd->t_task->t_transport_active)) {
+		if (atomic_read(&cmd->t_task.t_transport_active)) {
 			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
 					" %p\n", task, dev);
 
-			if (atomic_read(&cmd->t_task->t_fe_count)) {
+			if (atomic_read(&cmd->t_task.t_fe_count)) {
 				spin_unlock_irqrestore(
-					&cmd->t_task->t_state_lock, flags);
+					&cmd->t_task.t_state_lock, flags);
 				transport_send_check_condition_and_sense(
 					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
 					0);
@@ -5882,7 +5858,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 				transport_cmd_check_stop(cmd, 1, 0);
 			} else {
 				spin_unlock_irqrestore(
-					&cmd->t_task->t_state_lock, flags);
+					&cmd->t_task.t_state_lock, flags);
 
 				transport_remove_cmd_from_queue(cmd,
 					&cmd->se_lun->lun_se_dev->dev_queue_obj);
@@ -5899,9 +5875,9 @@ static void transport_processing_shutdown(struct se_device *dev)
 		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
 				task, dev);
 
-		if (atomic_read(&cmd->t_task->t_fe_count)) {
+		if (atomic_read(&cmd->t_task.t_fe_count)) {
 			spin_unlock_irqrestore(
-				&cmd->t_task->t_state_lock, flags);
+				&cmd->t_task.t_state_lock, flags);
 			transport_send_check_condition_and_sense(cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 			transport_remove_cmd_from_queue(cmd,
@@ -5911,7 +5887,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 			transport_cmd_check_stop(cmd, 1, 0);
 		} else {
 			spin_unlock_irqrestore(
-				&cmd->t_task->t_state_lock, flags);
+				&cmd->t_task.t_state_lock, flags);
 
 			transport_remove_cmd_from_queue(cmd,
 				&cmd->se_lun->lun_se_dev->dev_queue_obj);
@@ -5935,7 +5911,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
 				cmd, state);
 
-		if (atomic_read(&cmd->t_task->t_fe_count)) {
+		if (atomic_read(&cmd->t_task.t_fe_count)) {
 			transport_send_check_condition_and_sense(cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 16f41d1..33a96e3 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -270,7 +270,7 @@ void core_scsi3_ua_for_check_condition(
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
 		(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
 		"Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
-		cmd->orig_fe_lun, cmd->t_task->t_task_cdb[0], *asc, *ascq);
+		cmd->orig_fe_lun, cmd->t_task.t_task_cdb[0], *asc, *ascq);
 }
 
 int core_scsi3_ua_clear_for_request_sense(
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index a913e84..b98f986 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -72,7 +72,7 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 		caller, cmd, cmd->cdb);
 	printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
 
-	task = se_cmd->t_task;
+	task = &se_cmd->t_task;
 	printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
 	       caller, cmd, task, task->t_tasks_se_num,
 	       task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
@@ -262,9 +262,9 @@ int ft_write_pending(struct se_cmd *se_cmd)
 				 * TCM/LIO target
 				 */
 				transport_do_task_sg_chain(se_cmd);
-				cmd->sg = se_cmd->t_task->t_tasks_sg_chained;
+				cmd->sg = se_cmd->t_task.t_tasks_sg_chained;
 				cmd->sg_cnt =
-					se_cmd->t_task->t_tasks_sg_chained_no;
+					se_cmd->t_task.t_tasks_sg_chained_no;
 			}
 			if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
 						    cmd->sg, cmd->sg_cnt))
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 47efcfb..e21d839 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -90,8 +90,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);
 
-	task = se_cmd->t_task;
-	BUG_ON(!task);
+	task = &se_cmd->t_task;
 	remaining = se_cmd->data_length;
 
 	/*
@@ -236,8 +235,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	u32 f_ctl;
 	void *buf;
 
-	task = se_cmd->t_task;
-	BUG_ON(!task);
+	task = &se_cmd->t_task;
 
 	fh = fc_frame_header_get(fp);
 	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index b0b83ed..bdee755 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -534,9 +534,7 @@ struct se_cmd {
 	/* Only used for internal passthrough and legacy TCM fabric modules */
 	struct se_session	*se_sess;
 	struct se_tmr_req	*se_tmr_req;
-	/* t_task is setup to t_task_backstore in transport_init_se_cmd() */
-	struct se_transport_task *t_task;
-	struct se_transport_task t_task_backstore;
+	struct se_transport_task t_task;
 	struct target_core_fabric_ops *se_tfo;
 	int (*transport_emulate_cdb)(struct se_cmd *);
 	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
-- 
1.7.6



* [PATCH 033/103] target: Handle functions returning "-2"
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 032/103] target: Make t_task a member of se_cmd, not a pointer Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 034/103] target: Use cmd->se_dev over cmd->se_lun->lun_se_dev Nicholas A. Bellinger
                   ` (27 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

These functions escaped the first round of fixing retvals to use
errno values. It also looks like some callers check the error return
value and act on it differently, so I replaced all -2s with -EINVAL and
fixed up the callsites accordingly. This may have introduced some bugs,
but hopefully not.
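
A simplified, self-contained sketch (illustrative names only, not the
fabric code itself) of the caller-side dispatch that the distinct errno
values enable; the real callers are in the tcm_loop and tcm_fc hunks
below:

#include <errno.h>
#include <stdio.h>

enum outcome { SEND_LU_COMM_FAILURE, SEND_CHECK_CONDITION, PROCEED };

/* Branch on the errno from task allocation, as the fabric callers do. */
static enum outcome dispatch_alloc_tasks_ret(int ret)
{
	if (ret == -ENOMEM)		/* out of resources */
		return SEND_LU_COMM_FAILURE;
	if (ret == -EINVAL)		/* invalid CDB or reservation conflict */
		return SEND_CHECK_CONDITION;
	return PROCEED;
}

int main(void)
{
	printf("%d\n", dispatch_alloc_tasks_ret(-EINVAL));
	return 0;
}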

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/loopback/tcm_loop.c     |    4 ++--
 drivers/target/target_core_transport.c |   12 ++++++------
 drivers/target/tcm_fc/tfc_cmd.c        |    4 ++--
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index ee344c4..38e936b 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -150,10 +150,10 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	 * Allocate the necessary tasks to complete the received CDB+data
 	 */
 	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
-	if (ret == -1) {
+	if (ret == -ENOMEM) {
 		/* Out of Resources */
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
-	} else if (ret == -2) {
+	} else if (ret == -EINVAL) {
 		/*
 		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
 		 */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d7ee5603..4caab19 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1852,7 +1852,7 @@ int transport_generic_allocate_tasks(
 	if (transport_check_alloc_task_attr(cmd) < 0) {
 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-		return -2;
+		return -EINVAL;
 	}
 	spin_lock(&cmd->se_lun->lun_sep_lock);
 	if (cmd->se_lun->lun_sep)
@@ -2951,7 +2951,7 @@ transport_handle_reservation_conflict(struct se_cmd *cmd)
 		core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
 			cmd->orig_fe_lun, 0x2C,
 			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-	return -2;
+	return -EINVAL;
 }
 
 /*	transport_generic_cmd_sequencer():
@@ -2982,7 +2982,7 @@ static int transport_generic_cmd_sequencer(
 				&transport_nop_wait_for_tasks;
 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
-		return -2;
+		return -EINVAL;
 	}
 	/*
 	 * Check status of Asymmetric Logical Unit Assignment port
@@ -3004,7 +3004,7 @@ static int transport_generic_cmd_sequencer(
 			transport_set_sense_codes(cmd, 0x04, alua_ascq);
 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
-			return -2;
+			return -EINVAL;
 		}
 		goto out_invalid_cdb_field;
 	}
@@ -3553,11 +3553,11 @@ static int transport_generic_cmd_sequencer(
 out_unsupported_cdb:
 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-	return -2;
+	return -EINVAL;
 out_invalid_cdb_field:
 	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-	return -2;
+	return -EINVAL;
 }
 
 static inline void transport_release_tasks(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index b98f986..161e3c9 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -650,13 +650,13 @@ static void ft_send_cmd(struct ft_cmd *cmd)
 	FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
 	ft_dump_cmd(cmd, __func__);
 
-	if (ret == -1) {
+	if (ret == -ENOMEM) {
 		transport_send_check_condition_and_sense(se_cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 		transport_generic_free_cmd(se_cmd, 0, 1, 0);
 		return;
 	}
-	if (ret == -2) {
+	if (ret == -EINVAL) {
 		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
 			ft_queue_status(se_cmd);
 		else
-- 
1.7.6



* [PATCH 034/103] target: Use cmd->se_dev over cmd->se_lun->lun_se_dev
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 032/103] target: Make t_task a member of se_cmd, not a pointer Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 033/103] target: Handle functions returning "-2" Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 035/103] target: Embed qr in struct se_cmd Nicholas A. Bellinger
                   ` (26 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Once transport_device_setup_cmd() is called, cmd->se_dev has the same
value as cmd->se_lun->lun_se_dev, so code that needs the se_dev from the
cmd can use the shorter cmd->se_dev instead of the longer chain.
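
A minimal, self-contained sketch of that invariant, using simplified
stand-ins for the kernel structs and for what transport_device_setup_cmd()
is described as doing (the helper name below is made up):

#include <assert.h>

struct se_device { int id; };
struct se_lun { struct se_device *lun_se_dev; };
struct se_cmd {
	struct se_lun    *se_lun;
	struct se_device *se_dev;
};

/* Stand-in for the setup step that populates cmd->se_dev from the LUN. */
static void setup_cmd_device(struct se_cmd *cmd)
{
	cmd->se_dev = cmd->se_lun->lun_se_dev;
}

int main(void)
{
	struct se_device dev = { .id = 1 };
	struct se_lun lun = { .lun_se_dev = &dev };
	struct se_cmd cmd = { .se_lun = &lun };

	setup_cmd_device(&cmd);
	/* After setup, the short form is equivalent to the long chain. */
	assert(cmd.se_dev == cmd.se_lun->lun_se_dev);
	return 0;
}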

(nab: Fix virtual lun0 se_cmd->se_orig_obj_ptr initialization)

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_alua.c      |    4 +-
 drivers/target/target_core_cdb.c       |   28 +++---
 drivers/target/target_core_device.c    |  141 ++++++++++++++-----------------
 drivers/target/target_core_file.c      |    2 +-
 drivers/target/target_core_iblock.c    |    4 +-
 drivers/target/target_core_pr.c        |   64 +++++++-------
 drivers/target/target_core_rd.c        |    2 +-
 drivers/target/target_core_transport.c |   86 ++++++++++----------
 drivers/target/target_core_ua.c        |    2 +-
 9 files changed, 160 insertions(+), 173 deletions(-)

diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 6dbceb4..76abd86 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -61,7 +61,7 @@ struct t10_alua_lu_gp *default_lu_gp;
  */
 int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_subsystem_dev *su_dev = cmd->se_lun->lun_se_dev->se_sub_dev;
+	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
@@ -151,7 +151,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
  */
 int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 6908f8a..95195d7 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -65,7 +65,7 @@ static int
 target_emulate_inquiry_std(struct se_cmd *cmd)
 {
 	struct se_lun *lun = cmd->se_lun;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	unsigned char *buf = cmd->t_task.t_task_buf;
 
 	/*
@@ -128,7 +128,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 	 * Registered Extended LUN WWN has been set via ConfigFS
 	 * during device creation/restart.
 	 */
-	if (cmd->se_lun->lun_se_dev->se_sub_dev->su_dev_flags &
+	if (cmd->se_dev->se_sub_dev->su_dev_flags &
 			SDF_EMULATED_VPD_UNIT_SERIAL) {
 		buf[3] = 3;
 		buf[5] = 0x80;
@@ -143,7 +143,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	u16 len = 0;
 
 	buf[1] = 0x80;
@@ -176,7 +176,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_lun *lun = cmd->se_lun;
 	struct se_port *port = NULL;
 	struct se_portal_group *tpg = NULL;
@@ -477,7 +477,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 	buf[5] = 0x07;
 
 	/* If WriteCache emulation is enabled, set V_SUP */
-	if (cmd->se_lun->lun_se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+	if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
 		buf[6] = 0x01;
 	return 0;
 }
@@ -486,7 +486,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	int have_tp = 0;
 
 	/*
@@ -568,7 +568,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 
 	/*
 	 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
@@ -620,7 +620,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 static int
 target_emulate_inquiry(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	unsigned char *buf = cmd->t_task.t_task_buf;
 	unsigned char *cdb = cmd->t_task.t_task_cdb;
 
@@ -665,7 +665,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
 static int
 target_emulate_readcapacity(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	unsigned char *buf = cmd->t_task.t_task_buf;
 	unsigned long long blocks_long = dev->transport->get_blocks(dev);
 	u32 blocks;
@@ -695,7 +695,7 @@ target_emulate_readcapacity(struct se_cmd *cmd)
 static int
 target_emulate_readcapacity_16(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	unsigned char *buf = cmd->t_task.t_task_buf;
 	unsigned long long blocks = dev->transport->get_blocks(dev);
 
@@ -830,7 +830,7 @@ target_modesense_dpofua(unsigned char *buf, int type)
 static int
 target_emulate_modesense(struct se_cmd *cmd, int ten)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	char *cdb = cmd->t_task.t_task_cdb;
 	unsigned char *rbuf = cmd->t_task.t_task_buf;
 	int type = dev->transport->get_device_type(dev);
@@ -964,7 +964,7 @@ static int
 target_emulate_unmap(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL;
 	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
 	sector_t lba;
@@ -1011,7 +1011,7 @@ static int
 target_emulate_write_same(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	sector_t lba = cmd->t_task.t_task_lba;
 	unsigned int range;
 	int ret;
@@ -1036,7 +1036,7 @@ int
 transport_emulate_control_cdb(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	unsigned short service_action;
 	int ret = 0;
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8e8d625..c7b9ef7 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -61,11 +61,10 @@ struct se_device *g_lun0_dev;
 
 int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 {
-	struct se_dev_entry *deve;
 	struct se_lun *se_lun = NULL;
 	struct se_session *se_sess = se_cmd->se_sess;
+	struct se_device *dev;
 	unsigned long flags;
-	int read_only = 0;
 
 	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
 		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
@@ -74,91 +73,84 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 	}
 
 	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
-	deve = se_cmd->se_deve =
-			&se_sess->se_node_acl->device_list[unpacked_lun];
-	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
-		if (se_cmd) {
-			deve->total_cmds++;
-			deve->total_bytes += se_cmd->data_length;
-
-			if (se_cmd->data_direction == DMA_TO_DEVICE) {
-				if (deve->lun_flags &
-						TRANSPORT_LUNFLAGS_READ_ONLY) {
-					read_only = 1;
-					goto out;
-				}
-				deve->write_bytes += se_cmd->data_length;
-			} else if (se_cmd->data_direction ==
-				   DMA_FROM_DEVICE) {
-				deve->read_bytes += se_cmd->data_length;
-			}
+	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		struct se_dev_entry *deve = se_cmd->se_deve;
+
+		deve->total_cmds++;
+		deve->total_bytes += se_cmd->data_length;
+
+		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08x\n",
+				se_cmd->se_tfo->get_fabric_name(),
+				unpacked_lun);
+			spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
+			return -EACCES;
 		}
+
+		if (se_cmd->data_direction == DMA_TO_DEVICE)
+			deve->write_bytes += se_cmd->data_length;
+		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+			deve->read_bytes += se_cmd->data_length;
+
 		deve->deve_cmds++;
 
-		se_lun = se_cmd->se_lun = deve->se_lun;
+		se_lun = deve->se_lun;
+		se_cmd->se_lun = deve->se_lun;
 		se_cmd->pr_res_key = deve->pr_res_key;
 		se_cmd->orig_fe_lun = unpacked_lun;
 		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 	}
-out:
 	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
 
 	if (!se_lun) {
-		if (read_only) {
-			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+		/*
+		 * Use the se_portal_group->tpg_virt_lun0 to allow for
+		 * REPORT_LUNS, et al to be returned when no active
+		 * MappedLUN=0 exists for this Initiator Port.
+		 */
+		if (unpacked_lun != 0) {
+			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+			printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 				" Access for 0x%08x\n",
 				se_cmd->se_tfo->get_fabric_name(),
 				unpacked_lun);
+			return -ENODEV;
+		}
+		/*
+		 * Force WRITE PROTECT for virtual LUN 0
+		 */
+		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+		    (se_cmd->data_direction != DMA_NONE)) {
+			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			return -EACCES;
-		} else {
-			/*
-			 * Use the se_portal_group->tpg_virt_lun0 to allow for
-			 * REPORT_LUNS, et al to be returned when no active
-			 * MappedLUN=0 exists for this Initiator Port.
-			 */
-			if (unpacked_lun != 0) {
-				se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
-				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-				printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
-					" Access for 0x%08x\n",
-					se_cmd->se_tfo->get_fabric_name(),
-					unpacked_lun);
-				return -ENODEV;
-			}
-			/*
-			 * Force WRITE PROTECT for virtual LUN 0
-			 */
-			if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
-			    (se_cmd->data_direction != DMA_NONE)) {
-				se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
-				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-				return -EACCES;
-			}
-#if 0
-			printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
-				se_cmd->se_tfo->get_fabric_name());
-#endif
-			se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
-			se_cmd->orig_fe_lun = 0;
-			se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
-			se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 		}
+
+		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+		se_cmd->orig_fe_lun = 0;
+		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 	}
 	/*
 	 * Determine if the struct se_lun is online.
+	 * FIXME: Check for LUN_RESET + UNIT Attention
 	 */
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
 	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
 		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		return -ENODEV;
 	}
 
-	{
-	struct se_device *dev = se_lun->lun_se_dev;
+	/* TODO: get rid of this and use atomics for stats */
+	dev = se_lun->lun_se_dev;
 	spin_lock_irq(&dev->stats_lock);
 	dev->num_cmds++;
 	if (se_cmd->data_direction == DMA_TO_DEVICE)
@@ -166,7 +158,6 @@ out:
 	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
 		dev->read_bytes += se_cmd->data_length;
 	spin_unlock_irq(&dev->stats_lock);
-	}
 
 	/*
 	 * Add the iscsi_cmd_t to the struct se_lun's cmd list.  This list is used
@@ -175,10 +166,6 @@ out:
 	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
 	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
 	atomic_set(&se_cmd->t_task.transport_lun_active, 1);
-#if 0
-	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
-		se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun);
-#endif
 	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
 
 	return 0;
@@ -187,7 +174,6 @@ EXPORT_SYMBOL(transport_lookup_cmd_lun);
 
 int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 {
-	struct se_device *dev = NULL;
 	struct se_dev_entry *deve;
 	struct se_lun *se_lun = NULL;
 	struct se_session *se_sess = se_cmd->se_sess;
@@ -200,15 +186,17 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 	}
 
 	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
-	deve = se_cmd->se_deve =
-			&se_sess->se_node_acl->device_list[unpacked_lun];
+	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+	deve = se_cmd->se_deve;
+
 	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
-		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
-		dev = se_lun->lun_se_dev;
+		se_tmr->tmr_lun = deve->se_lun;
+		se_cmd->se_lun = deve->se_lun;
+		se_lun = deve->se_lun;
+		se_tmr->tmr_dev = se_lun->lun_se_dev;
 		se_cmd->pr_res_key = deve->pr_res_key;
 		se_cmd->orig_fe_lun = unpacked_lun;
-		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
-/*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
 	}
 	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
 
@@ -222,17 +210,16 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 	}
 	/*
 	 * Determine if the struct se_lun is online.
+	 * FIXME: Check for LUN_RESET + UNIT Attention
 	 */
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
 	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
 		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		return -ENODEV;
 	}
-	se_tmr->tmr_dev = dev;
 
-	spin_lock(&dev->se_tmr_lock);
-	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
-	spin_unlock(&dev->se_tmr_lock);
+	spin_lock(&se_tmr->tmr_dev->se_tmr_lock);
+	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+	spin_unlock(&se_tmr->tmr_dev->se_tmr_lock);
 
 	return 0;
 }
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index bd094ba..2e7ea74 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -279,7 +279,7 @@ fd_alloc_task(struct se_cmd *cmd)
 		return NULL;
 	}
 
-	fd_req->fd_dev = cmd->se_lun->lun_se_dev->dev_ptr;
+	fd_req->fd_dev = cmd->se_dev->dev_ptr;
 
 	return &fd_req->fd_task;
 }
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f2c8c73..c73baef 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -240,7 +240,7 @@ iblock_alloc_task(struct se_cmd *cmd)
 		return NULL;
 	}
 
-	ib_req->ib_dev = cmd->se_lun->lun_se_dev->dev_ptr;
+	ib_req->ib_dev = cmd->se_dev->dev_ptr;
 	atomic_set(&ib_req->ib_bio_cnt, 0);
 	return &ib_req->ib_task;
 }
@@ -608,7 +608,7 @@ static struct bio *iblock_get_bio(
 static int iblock_map_task_SG(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
 	struct iblock_req *ib_req = IBLOCK_REQ(task);
 	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 06f9afb..19406a3 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1471,7 +1471,7 @@ static int core_scsi3_decode_spec_i_port(
 	int all_tg_pt,
 	int aptpl)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_port *tmp_port;
 	struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
 	struct se_session *se_sess = cmd->se_sess;
@@ -1509,7 +1509,7 @@ static int core_scsi3_decode_spec_i_port(
 	tidh_new->dest_node_acl = se_sess->se_node_acl;
 	tidh_new->dest_se_deve = local_se_deve;
 
-	local_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev,
+	local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
 				se_sess->se_node_acl, local_se_deve, l_isid,
 				sa_res_key, all_tg_pt, aptpl);
 	if (!(local_pr_reg)) {
@@ -1741,7 +1741,7 @@ static int core_scsi3_decode_spec_i_port(
 		 * and then call __core_scsi3_add_registration() in the
 		 * 2nd loop which will never fail.
 		 */
-		dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev,
+		dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
 				dest_node_acl, dest_se_deve, iport_ptr,
 				sa_res_key, all_tg_pt, aptpl);
 		if (!(dest_pr_reg)) {
@@ -1787,7 +1787,7 @@ static int core_scsi3_decode_spec_i_port(
 		prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
 						PR_REG_ISID_ID_LEN);
 
-		__core_scsi3_add_registration(cmd->se_lun->lun_se_dev, dest_node_acl,
+		__core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
 					dest_pr_reg, 0, 0);
 
 		printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
@@ -2071,7 +2071,7 @@ static int core_scsi3_emulate_pro_register(
 	int ignore_key)
 {
 	struct se_session *se_sess = cmd->se_sess;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_dev_entry *se_deve;
 	struct se_lun *se_lun = cmd->se_lun;
 	struct se_portal_group *se_tpg;
@@ -2117,7 +2117,7 @@ static int core_scsi3_emulate_pro_register(
 			 * Port Endpoint that the PRO was received from on the
 			 * Logical Unit of the SCSI device server.
 			 */
-			ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev,
+			ret = core_scsi3_alloc_registration(cmd->se_dev,
 					se_sess->se_node_acl, se_deve, isid_ptr,
 					sa_res_key, all_tg_pt, aptpl,
 					ignore_key, 0);
@@ -2145,7 +2145,7 @@ static int core_scsi3_emulate_pro_register(
 		 */
 		if (!(aptpl)) {
 			pr_tmpl->pr_aptpl_active = 0;
-			core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0);
+			core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
 			printk("SPC-3 PR: Set APTPL Bit Deactivated for"
 					" REGISTER\n");
 			return 0;
@@ -2155,10 +2155,10 @@ static int core_scsi3_emulate_pro_register(
 		 * update the APTPL metadata information using its
 		 * preallocated *pr_reg->pr_aptpl_buf.
 		 */
-		pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev,
+		pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
 				se_sess->se_node_acl, se_sess);
 
-		ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&pr_reg->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
 		if (!(ret)) {
@@ -2223,7 +2223,7 @@ static int core_scsi3_emulate_pro_register(
 		 */
 		if (!(sa_res_key)) {
 			pr_holder = core_scsi3_check_implict_release(
-					cmd->se_lun->lun_se_dev, pr_reg);
+					cmd->se_dev, pr_reg);
 			if (pr_holder < 0) {
 				kfree(pr_aptpl_buf);
 				core_scsi3_put_pr_reg(pr_reg);
@@ -2260,7 +2260,7 @@ static int core_scsi3_emulate_pro_register(
 			/*
 			 * Release the calling I_T Nexus registration now..
 			 */
-			__core_scsi3_free_registration(cmd->se_lun->lun_se_dev, pr_reg,
+			__core_scsi3_free_registration(cmd->se_dev, pr_reg,
 							NULL, 1);
 			/*
 			 * From spc4r17, section 5.7.11.3 Unregistering
@@ -2315,7 +2315,7 @@ static int core_scsi3_emulate_pro_register(
 			 * READ_KEYS service action.
 			 */
 			pr_reg->pr_res_generation = core_scsi3_pr_generation(
-							cmd->se_lun->lun_se_dev);
+							cmd->se_dev);
 			pr_reg->pr_res_key = sa_res_key;
 			printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
 				" Key for %s to: 0x%016Lx PRgeneration:"
@@ -2398,7 +2398,7 @@ static int core_scsi3_pro_reserve(
 	/*
 	 * Locate the existing *pr_reg via struct se_node_acl pointers
 	 */
-	pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl,
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
 				se_sess);
 	if (!(pr_reg)) {
 		printk(KERN_ERR "SPC-3 PR: Unable to locate"
@@ -2527,7 +2527,7 @@ static int core_scsi3_pro_reserve(
 	spin_unlock(&dev->dev_reservation_lock);
 
 	if (pr_tmpl->pr_aptpl_active) {
-		ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&pr_reg->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
 		if (!(ret))
@@ -2758,7 +2758,7 @@ static int core_scsi3_emulate_pro_release(
 
 write_aptpl:
 	if (pr_tmpl->pr_aptpl_active) {
-		ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&pr_reg->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
 		if (!(ret))
@@ -2783,7 +2783,7 @@ static int core_scsi3_emulate_pro_clear(
 	/*
 	 * Locate the existing *pr_reg via struct se_node_acl pointers
 	 */
-	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev,
+	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
 			se_sess->se_node_acl, se_sess);
 	if (!(pr_reg_n)) {
 		printk(KERN_ERR "SPC-3 PR: Unable to locate"
@@ -2849,7 +2849,7 @@ static int core_scsi3_emulate_pro_clear(
 		cmd->se_tfo->get_fabric_name());
 
 	if (pr_tmpl->pr_aptpl_active) {
-		core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0);
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
 		printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
 				" for CLEAR\n");
 	}
@@ -2954,7 +2954,7 @@ static int core_scsi3_pro_preempt(
 	u64 sa_res_key,
 	int abort)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_dev_entry *se_deve;
 	struct se_node_acl *pr_reg_nacl;
 	struct se_session *se_sess = cmd->se_sess;
@@ -2969,7 +2969,7 @@ static int core_scsi3_pro_preempt(
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 
 	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
-	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl,
+	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
 				se_sess);
 	if (!(pr_reg_n)) {
 		printk(KERN_ERR "SPC-3 PR: Unable to locate"
@@ -3111,7 +3111,7 @@ static int core_scsi3_pro_preempt(
 		spin_unlock(&dev->dev_reservation_lock);
 
 		if (pr_tmpl->pr_aptpl_active) {
-			ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+			ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 					&pr_reg_n->pr_aptpl_buf[0],
 					pr_tmpl->pr_aptpl_buf_len);
 			if (!(ret))
@@ -3121,7 +3121,7 @@ static int core_scsi3_pro_preempt(
 		}
 
 		core_scsi3_put_pr_reg(pr_reg_n);
-		core_scsi3_pr_generation(cmd->se_lun->lun_se_dev);
+		core_scsi3_pr_generation(cmd->se_dev);
 		return 0;
 	}
 	/*
@@ -3247,7 +3247,7 @@ static int core_scsi3_pro_preempt(
 	}
 
 	if (pr_tmpl->pr_aptpl_active) {
-		ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&pr_reg_n->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
 		if (!(ret))
@@ -3256,7 +3256,7 @@ static int core_scsi3_pro_preempt(
 	}
 
 	core_scsi3_put_pr_reg(pr_reg_n);
-	core_scsi3_pr_generation(cmd->se_lun->lun_se_dev);
+	core_scsi3_pr_generation(cmd->se_dev);
 	return 0;
 }
 
@@ -3298,7 +3298,7 @@ static int core_scsi3_emulate_pro_register_and_move(
 	int unreg)
 {
 	struct se_session *se_sess = cmd->se_sess;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_dev_entry *se_deve, *dest_se_deve = NULL;
 	struct se_lun *se_lun = cmd->se_lun;
 	struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
@@ -3330,7 +3330,7 @@ static int core_scsi3_emulate_pro_register_and_move(
 	 *
 	 * Locate the existing *pr_reg via struct se_node_acl pointers
 	 */
-	pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl,
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
 				se_sess);
 	if (!(pr_reg)) {
 		printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
@@ -3612,7 +3612,7 @@ after_iport_check:
 	dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
 					iport_ptr);
 	if (!(dest_pr_reg)) {
-		ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_alloc_registration(cmd->se_dev,
 				dest_node_acl, dest_se_deve, iport_ptr,
 				sa_res_key, 0, aptpl, 2, 1);
 		if (ret != 0) {
@@ -3683,12 +3683,12 @@ after_iport_check:
 	 */
 	if (!(aptpl)) {
 		pr_tmpl->pr_aptpl_active = 0;
-		core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0);
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
 		printk("SPC-3 PR: Set APTPL Bit Deactivated for"
 				" REGISTER_AND_MOVE\n");
 	} else {
 		pr_tmpl->pr_aptpl_active = 1;
-		ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev,
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&dest_pr_reg->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
 		if (!(ret))
@@ -3827,7 +3827,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
  */
 static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
 {
-	struct se_device *se_dev = cmd->se_lun->lun_se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
 	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
@@ -3882,7 +3882,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
  */
 static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
 {
-	struct se_device *se_dev = cmd->se_lun->lun_se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
 	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
@@ -3963,7 +3963,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
  */
 static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
 	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
 	u16 add_len = 8; /* Hardcoded to 8. */
@@ -4014,7 +4014,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
  */
 static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 {
-	struct se_device *se_dev = cmd->se_lun->lun_se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct se_node_acl *se_nacl;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct se_portal_group *se_tpg;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4db4ac3..4215f5f 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -336,7 +336,7 @@ rd_alloc_task(struct se_cmd *cmd)
 		printk(KERN_ERR "Unable to allocate struct rd_request\n");
 		return NULL;
 	}
-	rd_req->rd_dev = cmd->se_lun->lun_se_dev->dev_ptr;
+	rd_req->rd_dev = cmd->se_dev->dev_ptr;
 
 	return &rd_req->rd_task;
 }
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4caab19..8a71ad8 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -734,7 +734,7 @@ check_lun:
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
-	transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj);
+	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
 	transport_lun_remove_cmd(cmd);
 
 	if (transport_cmd_check_stop_to_fabric(cmd))
@@ -745,7 +745,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 
 void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
 {
-	transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj);
+	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
 
 	if (transport_cmd_check_stop_to_fabric(cmd))
 		return;
@@ -1083,7 +1083,7 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 
 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_task *task, *task_prev = NULL;
 	unsigned long flags;
 
@@ -1681,7 +1681,7 @@ transport_generic_get_task(struct se_cmd *cmd,
 		enum dma_data_direction data_direction)
 {
 	struct se_task *task;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	unsigned long flags;
 
 	task = dev->transport->alloc_task(cmd);
@@ -1753,7 +1753,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
 	 * Check if SAM Task Attribute emulation is enabled for this
 	 * struct se_device storage object
 	 */
-	if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
 		return 0;
 
 	if (cmd->sam_task_attr == MSG_ACA_TAG) {
@@ -1765,7 +1765,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
 	 * Used to determine when ORDERED commands should go from
 	 * Dormant to Active status.
 	 */
-	cmd->se_ordered_id = atomic_inc_return(&cmd->se_lun->lun_se_dev->dev_ordered_id);
+	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
 	smp_mb__after_atomic_inc();
 	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
 			cmd->se_ordered_id, cmd->sam_task_attr,
@@ -2384,14 +2384,14 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
  */
 static inline int transport_execute_task_attr(struct se_cmd *cmd)
 {
-	if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
 		return 1;
 	/*
 	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
 	 * to allow the passed struct se_cmd list of tasks to the front of the list.
 	 */
 	 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-		atomic_inc(&cmd->se_lun->lun_se_dev->dev_hoq_count);
+		atomic_inc(&cmd->se_dev->dev_hoq_count);
 		smp_mb__after_atomic_inc();
 		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
 			" 0x%02x, se_ordered_id: %u\n",
@@ -2399,12 +2399,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 			cmd->se_ordered_id);
 		return 1;
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-		spin_lock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock);
+		spin_lock(&cmd->se_dev->ordered_cmd_lock);
 		list_add_tail(&cmd->se_ordered_list,
-				&cmd->se_lun->lun_se_dev->ordered_cmd_list);
-		spin_unlock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock);
+				&cmd->se_dev->ordered_cmd_list);
+		spin_unlock(&cmd->se_dev->ordered_cmd_lock);
 
-		atomic_inc(&cmd->se_lun->lun_se_dev->dev_ordered_sync);
+		atomic_inc(&cmd->se_dev->dev_ordered_sync);
 		smp_mb__after_atomic_inc();
 
 		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
@@ -2416,13 +2416,13 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 		 * no other older commands exist that need to be
 		 * completed first.
 		 */
-		if (!(atomic_read(&cmd->se_lun->lun_se_dev->simple_cmds)))
+		if (!(atomic_read(&cmd->se_dev->simple_cmds)))
 			return 1;
 	} else {
 		/*
 		 * For SIMPLE and UNTAGGED Task Attribute commands
 		 */
-		atomic_inc(&cmd->se_lun->lun_se_dev->simple_cmds);
+		atomic_inc(&cmd->se_dev->simple_cmds);
 		smp_mb__after_atomic_inc();
 	}
 	/*
@@ -2430,16 +2430,16 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 	 * add the dormant task(s) built for the passed struct se_cmd to the
 	 * execution queue and become in Active state for this struct se_device.
 	 */
-	if (atomic_read(&cmd->se_lun->lun_se_dev->dev_ordered_sync) != 0) {
+	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
 		/*
 		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
 		 * will be drained upon completion of HEAD_OF_QUEUE task.
 		 */
-		spin_lock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock);
+		spin_lock(&cmd->se_dev->delayed_cmd_lock);
 		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
 		list_add_tail(&cmd->se_delayed_list,
-				&cmd->se_lun->lun_se_dev->delayed_cmd_list);
-		spin_unlock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock);
+				&cmd->se_dev->delayed_cmd_list);
+		spin_unlock(&cmd->se_dev->delayed_cmd_lock);
 
 		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
 			" delayed CMD list, se_ordered_id: %u\n",
@@ -2498,7 +2498,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 	 * storage object.
 	 */
 execute_tasks:
-	__transport_execute_tasks(cmd->se_lun->lun_se_dev);
+	__transport_execute_tasks(cmd->se_dev);
 	return 0;
 }
 
@@ -2631,7 +2631,7 @@ static inline u32 transport_get_sectors_6(
 	struct se_cmd *cmd,
 	int *ret)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 
 	/*
 	 * Assume TYPE_DISK for non struct se_device objects.
@@ -2659,7 +2659,7 @@ static inline u32 transport_get_sectors_10(
 	struct se_cmd *cmd,
 	int *ret)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 
 	/*
 	 * Assume TYPE_DISK for non struct se_device objects.
@@ -2689,7 +2689,7 @@ static inline u32 transport_get_sectors_12(
 	struct se_cmd *cmd,
 	int *ret)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 
 	/*
 	 * Assume TYPE_DISK for non struct se_device objects.
@@ -2719,7 +2719,7 @@ static inline u32 transport_get_sectors_16(
 	struct se_cmd *cmd,
 	int *ret)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 
 	/*
 	 * Assume TYPE_DISK for non struct se_device objects.
@@ -2761,7 +2761,7 @@ static inline u32 transport_get_size(
 	unsigned char *cdb,
 	struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 
 	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
 		if (cdb[1] & 1) { /* sectors */
@@ -2968,7 +2968,7 @@ static int transport_generic_cmd_sequencer(
 	struct se_cmd *cmd,
 	unsigned char *cdb)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	int ret = 0, sector_ret = 0, passthrough;
 	u32 sectors = 0, size = 0, pr_reg_type = 0;
@@ -3292,7 +3292,7 @@ static int transport_generic_cmd_sequencer(
 		 * Do implict HEAD_OF_QUEUE processing for INQUIRY.
 		 * See spc4r17 section 5.3
 		 */
-		if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
 			cmd->sam_task_attr = MSG_HEAD_TAG;
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
 		break;
@@ -3500,7 +3500,7 @@ static int transport_generic_cmd_sequencer(
 		 * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
 		 * See spc4r17 section 5.3
 		 */
-		if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
 			cmd->sam_task_attr = MSG_HEAD_TAG;
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
 		break;
@@ -3655,7 +3655,7 @@ static void transport_memcpy_se_mem_read_contig(
  */
 static void transport_complete_task_attr(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_cmd *cmd_p, *cmd_tmp;
 	int new_active_tasks = 0;
 
@@ -3726,7 +3726,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
 	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
 	 * Attribute.
 	 */
-	if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
 		transport_complete_task_attr(cmd);
 	/*
 	 * Check if we need to retrieve a sense buffer from
@@ -4102,7 +4102,7 @@ static inline long long transport_dev_end_lba(struct se_device *dev)
 
 static int transport_get_sectors(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 
 	cmd->t_task.t_tasks_sectors =
 		(cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
@@ -4128,7 +4128,7 @@ static int transport_get_sectors(struct se_cmd *cmd)
 
 static int transport_new_cmd_obj(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	u32 task_cdbs = 0, rc;
 
 	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
@@ -4280,7 +4280,7 @@ int transport_init_task_sg(
 	u32 task_offset)
 {
 	struct se_cmd *se_cmd = task->task_se_cmd;
-	struct se_device *se_dev = se_cmd->se_lun->lun_se_dev;
+	struct se_device *se_dev = se_cmd->se_dev;
 	struct se_mem *se_mem = in_se_mem;
 	struct target_core_fabric_ops *tfo = se_cmd->se_tfo;
 	u32 sg_length, task_size = task->task_size, task_sg_num_padded;
@@ -4781,7 +4781,7 @@ static u32 transport_generic_get_cdb_count(
 	struct se_task *task;
 	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
 	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	int max_sectors_set = 0, ret;
 	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
 
@@ -4894,7 +4894,7 @@ out:
 static int
 transport_map_control_cmd_to_task(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	unsigned char *cdb;
 	struct se_task *task;
 	int ret;
@@ -4959,7 +4959,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
 {
 	struct se_portal_group *se_tpg;
 	struct se_task *task;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	int ret = 0;
 
 	/*
@@ -5221,7 +5221,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	atomic_set(&cmd->t_task.transport_lun_fe_stop, 1);
 	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
-	wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq);
+	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
 
 	ret = transport_stop_tasks_for_cmd(cmd);
 
@@ -5234,7 +5234,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
 	}
-	transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj);
+	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
 
 	return 0;
 }
@@ -5440,7 +5440,7 @@ static void transport_generic_wait_for_tasks(
 
 	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 
-	wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq);
+	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
 
 	wait_for_completion(&cmd->t_task.t_transport_stop_comp);
 
@@ -5707,7 +5707,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 int transport_generic_do_tmr(struct se_cmd *cmd)
 {
 	struct se_cmd *ref_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_tmr_req *tmr = cmd->se_tmr_req;
 	int ret;
 
@@ -5852,7 +5852,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
 					0);
 				transport_remove_cmd_from_queue(cmd,
-					&cmd->se_lun->lun_se_dev->dev_queue_obj);
+					&cmd->se_dev->dev_queue_obj);
 
 				transport_lun_remove_cmd(cmd);
 				transport_cmd_check_stop(cmd, 1, 0);
@@ -5861,7 +5861,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 					&cmd->t_task.t_state_lock, flags);
 
 				transport_remove_cmd_from_queue(cmd,
-					&cmd->se_lun->lun_se_dev->dev_queue_obj);
+					&cmd->se_dev->dev_queue_obj);
 
 				transport_lun_remove_cmd(cmd);
 
@@ -5881,7 +5881,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 			transport_send_check_condition_and_sense(cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 			transport_remove_cmd_from_queue(cmd,
-				&cmd->se_lun->lun_se_dev->dev_queue_obj);
+				&cmd->se_dev->dev_queue_obj);
 
 			transport_lun_remove_cmd(cmd);
 			transport_cmd_check_stop(cmd, 1, 0);
@@ -5890,7 +5890,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 				&cmd->t_task.t_state_lock, flags);
 
 			transport_remove_cmd_from_queue(cmd,
-				&cmd->se_lun->lun_se_dev->dev_queue_obj);
+				&cmd->se_dev->dev_queue_obj);
 			transport_lun_remove_cmd(cmd);
 
 			if (transport_cmd_check_stop(cmd, 1, 0))
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 33a96e3..3b8b02c 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition(
 	u8 *asc,
 	u8 *ascq)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_dev_entry *deve;
 	struct se_session *sess = cmd->se_sess;
 	struct se_node_acl *nacl;
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 035/103] target: Embed qr in struct se_cmd
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (2 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 034/103] target: Use cmd->se_dev over cmd->se_lun->lun_se_dev Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 036/103] target: Replace embedded struct se_queue_req with a list_head Nicholas A. Bellinger
                   ` (25 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

This saves a call to kmalloc, and means we don't need to remember to
free the struct se_queue_req.

This also allows transport_add_cmd_to_queue to stop returning an error
code, which is nice since its return value wasn't being checked anyway.

Removed an incorrect comment above get_qr_from_queue; cmd_queue_lock
is acquired in the function now, not before.
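
A minimal, self-contained sketch (simplified types, not the kernel
structs) of what embedding buys: the separately allocated request needs
a kmalloc/kfree pair that can fail or be forgotten, while the embedded
member simply shares the command's lifetime:

#include <errno.h>
#include <stdlib.h>

struct queue_req { int state; };

struct cmd_old { struct queue_req *qr; };	/* before: allocated on demand */
struct cmd_new { struct queue_req qr;  };	/* after: embedded in the command */

/* Before: queueing can fail and the request must be freed later. */
static int queue_old(struct cmd_old *cmd, int state)
{
	cmd->qr = malloc(sizeof(*cmd->qr));
	if (!cmd->qr)
		return -ENOMEM;
	cmd->qr->state = state;
	return 0;
}

/* After: nothing to allocate, nothing to free, no error to return. */
static void queue_new(struct cmd_new *cmd, int state)
{
	cmd->qr.state = state;
}

int main(void)
{
	struct cmd_old o = { 0 };
	struct cmd_new n;

	if (queue_old(&o, 1) == 0)
		free(o.qr);
	queue_new(&n, 1);
	return 0;
}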

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   25 +++++--------------------
 include/target/target_core_base.h      |    1 +
 2 files changed, 6 insertions(+), 20 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 8a71ad8..969cbe2 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -753,25 +753,18 @@ void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
 	transport_generic_remove(cmd, 0, 0);
 }
 
-static int transport_add_cmd_to_queue(
+static void transport_add_cmd_to_queue(
 	struct se_cmd *cmd,
 	int t_state)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
-	struct se_queue_req *qr;
 	unsigned long flags;
 
-	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
-	if (!(qr)) {
-		printk(KERN_ERR "Unable to allocate memory for"
-				" struct se_queue_req\n");
-		return -ENOMEM;
-	}
-	INIT_LIST_HEAD(&qr->qr_list);
+	INIT_LIST_HEAD(&cmd->se_qr.qr_list);
 
-	qr->cmd = cmd;
-	qr->state = t_state;
+	cmd->se_qr.cmd = cmd;
+	cmd->se_qr.state = t_state;
 
 	if (t_state) {
 		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
@@ -781,18 +774,14 @@ static int transport_add_cmd_to_queue(
 	}
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	list_add_tail(&qr->qr_list, &qobj->qobj_list);
+	list_add_tail(&cmd->se_qr.qr_list, &qobj->qobj_list);
 	atomic_inc(&cmd->t_task.t_transport_queue_active);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 	atomic_inc(&qobj->queue_cnt);
 	wake_up_interruptible(&qobj->thread_wq);
-	return 0;
 }
 
-/*
- * Called with struct se_queue_obj->cmd_queue_lock held.
- */
 static struct se_queue_req *
 transport_get_qr_from_queue(struct se_queue_obj *qobj)
 {
@@ -837,7 +826,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 		atomic_dec(&qr->cmd->t_task.t_transport_queue_active);
 		atomic_dec(&qobj->queue_cnt);
 		list_del(&qr->qr_list);
-		kfree(qr);
 	}
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
@@ -1193,7 +1181,6 @@ static void transport_release_all_cmds(struct se_device *dev)
 		cmd = qr->cmd;
 		t_state = qr->state;
 		list_del(&qr->qr_list);
-		kfree(qr);
 		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
 				flags);
 
@@ -5906,7 +5893,6 @@ static void transport_processing_shutdown(struct se_device *dev)
 	while ((qr = transport_get_qr_from_queue(&dev->dev_queue_obj))) {
 		cmd = qr->cmd;
 		state = qr->state;
-		kfree(qr);
 
 		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
 				cmd, state);
@@ -5962,7 +5948,6 @@ get_cmd:
 
 		cmd = qr->cmd;
 		t_state = qr->state;
-		kfree(qr);
 
 		switch (t_state) {
 		case TRANSPORT_NEW_CMD_MAP:
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index bdee755..e4818cd 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -535,6 +535,7 @@ struct se_cmd {
 	struct se_session	*se_sess;
 	struct se_tmr_req	*se_tmr_req;
 	struct se_transport_task t_task;
+	struct se_queue_req	se_qr;
 	struct target_core_fabric_ops *se_tfo;
 	int (*transport_emulate_cdb)(struct se_cmd *);
 	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
-- 
1.7.6



* [PATCH 036/103] target: Replace embedded struct se_queue_req with a list_head
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (3 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 035/103] target: Embed qr in struct se_cmd Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 037/103] target: Rename list_heads that are nodes in struct se_cmd to "*_node" Nicholas A. Bellinger
                   ` (24 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

We don't actually need the additional members of the se_queue_req struct.
cmd can be found by container_of, and t_state is identical to cmd->t_state.
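
For reference, a small userspace sketch (not the kernel code; the types
are simplified) of the container_of pattern that makes the extra
members unnecessary:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next; };

struct cmd {
	int t_state;
	struct list_node queue_node;	/* embedded, like se_cmd->se_queue_node */
};

int main(void)
{
	struct cmd c = { .t_state = 7 };
	struct list_node *node = &c.queue_node;	/* what the queue list stores */

	/* Recover the command (and its t_state) straight from the node. */
	struct cmd *cp = container_of(node, struct cmd, queue_node);
	printf("t_state=%d\n", cp->t_state);
	return 0;
}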

Also, rename transport_get_qr_from_queue to transport_get_cmd_from_queue,
and modify callers accordingly.

(Roland: Fix se_queue_req removal leftovers OOPs)

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_tmr.c       |   27 ++---------
 drivers/target/target_core_transport.c |   76 ++++++++++++--------------------
 include/target/target_core_base.h      |    2 +-
 3 files changed, 35 insertions(+), 70 deletions(-)

diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 50f49f2..e1f99f7 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -113,15 +113,14 @@ int core_tmr_lun_reset(
 	struct list_head *preempt_and_abort_list,
 	struct se_cmd *prout_cmd)
 {
-	struct se_cmd *cmd;
-	struct se_queue_req *qr, *qr_tmp;
+	struct se_cmd *cmd, *tcmd;
 	struct se_node_acl *tmr_nacl = NULL;
 	struct se_portal_group *tmr_tpg = NULL;
 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
 	struct se_tmr_req *tmr_p, *tmr_pp;
 	struct se_task *task, *task_tmp;
 	unsigned long flags;
-	int fe_count, state, tas;
+	int fe_count, tas;
 	/*
 	 * TASK_ABORTED status bit, this is configurable via ConfigFS
 	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
@@ -331,20 +330,7 @@ int core_tmr_lun_reset(
 	 * reference, otherwise the struct se_cmd is released.
 	 */
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
-		cmd = (struct se_cmd *)qr->cmd;
-		if (!(cmd)) {
-			/*
-			 * Skip these for non PREEMPT_AND_ABORT usage..
-			 */
-			if (preempt_and_abort_list != NULL)
-				continue;
-
-			atomic_dec(&qobj->queue_cnt);
-			list_del(&qr->qr_list);
-			kfree(qr);
-			continue;
-		}
+	list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
 		/*
 		 * For PREEMPT_AND_ABORT usage, only process commands
 		 * with a matching reservation key.
@@ -361,15 +347,12 @@ int core_tmr_lun_reset(
 
 		atomic_dec(&cmd->t_task.t_transport_queue_active);
 		atomic_dec(&qobj->queue_cnt);
-		list_del(&qr->qr_list);
+		list_del(&cmd->se_queue_node);
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
-		state = qr->state;
-		kfree(qr);
-
 		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
 			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
-			"Preempt" : "", cmd, state,
+			"Preempt" : "", cmd, cmd->t_state,
 			atomic_read(&cmd->t_task.t_fe_count));
 		/*
 		 * Signal that the command has failed via cmd->se_cmd_flags,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 969cbe2..9b76d33 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -761,10 +761,7 @@ static void transport_add_cmd_to_queue(
 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
 	unsigned long flags;
 
-	INIT_LIST_HEAD(&cmd->se_qr.qr_list);
-
-	cmd->se_qr.cmd = cmd;
-	cmd->se_qr.state = t_state;
+	INIT_LIST_HEAD(&cmd->se_queue_node);
 
 	if (t_state) {
 		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
@@ -774,7 +771,7 @@ static void transport_add_cmd_to_queue(
 	}
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	list_add_tail(&cmd->se_qr.qr_list, &qobj->qobj_list);
+	list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
 	atomic_inc(&cmd->t_task.t_transport_queue_active);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
@@ -782,10 +779,10 @@ static void transport_add_cmd_to_queue(
 	wake_up_interruptible(&qobj->thread_wq);
 }
 
-static struct se_queue_req *
-transport_get_qr_from_queue(struct se_queue_obj *qobj)
+static struct se_cmd *
+transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 {
-	struct se_queue_req *qr;
+	struct se_cmd *cmd;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -793,24 +790,21 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj)
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 		return NULL;
 	}
+	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
 
-	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
-		break;
-
-	if (qr->cmd)
-		atomic_dec(&qr->cmd->t_task.t_transport_queue_active);
+	atomic_dec(&cmd->t_task.t_transport_queue_active);
 
-	list_del(&qr->qr_list);
+	list_del(&cmd->se_queue_node);
 	atomic_dec(&qobj->queue_cnt);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
-	return qr;
+	return cmd;
 }
 
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 		struct se_queue_obj *qobj)
 {
-	struct se_queue_req *qr = NULL, *qr_p = NULL;
+	struct se_cmd *t;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -819,14 +813,13 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 		return;
 	}
 
-	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
-		if (qr->cmd != cmd)
-			continue;
-
-		atomic_dec(&qr->cmd->t_task.t_transport_queue_active);
-		atomic_dec(&qobj->queue_cnt);
-		list_del(&qr->qr_list);
-	}
+	list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
+		if (t == cmd) {
+			atomic_dec(&cmd->t_task.t_transport_queue_active);
+			atomic_dec(&qobj->queue_cnt);
+			list_del(&cmd->se_queue_node);
+			break;
+		}
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 	if (atomic_read(&cmd->t_task.t_transport_queue_active)) {
@@ -1169,18 +1162,15 @@ void transport_dump_dev_state(
  */
 static void transport_release_all_cmds(struct se_device *dev)
 {
-	struct se_cmd *cmd = NULL;
-	struct se_queue_req *qr = NULL, *qr_p = NULL;
+	struct se_cmd *cmd, *tcmd;
 	int bug_out = 0, t_state;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
-	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj.qobj_list,
-				qr_list) {
-
-		cmd = qr->cmd;
-		t_state = qr->state;
-		list_del(&qr->qr_list);
+	list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
+				se_queue_node) {
+		t_state = cmd->t_state;
+		list_del(&cmd->se_queue_node);
 		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
 				flags);
 
@@ -5757,9 +5747,7 @@ transport_get_task_from_state_list(struct se_device *dev)
 static void transport_processing_shutdown(struct se_device *dev)
 {
 	struct se_cmd *cmd;
-	struct se_queue_req *qr;
 	struct se_task *task;
-	u8 state;
 	unsigned long flags;
 	/*
 	 * Empty the struct se_device's struct se_task state list.
@@ -5890,12 +5878,10 @@ static void transport_processing_shutdown(struct se_device *dev)
 	/*
 	 * Empty the struct se_device's struct se_cmd list.
 	 */
-	while ((qr = transport_get_qr_from_queue(&dev->dev_queue_obj))) {
-		cmd = qr->cmd;
-		state = qr->state;
+	while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
 
 		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
-				cmd, state);
+				cmd, cmd->t_state);
 
 		if (atomic_read(&cmd->t_task.t_fe_count)) {
 			transport_send_check_condition_and_sense(cmd,
@@ -5917,10 +5903,9 @@ static void transport_processing_shutdown(struct se_device *dev)
  */
 static int transport_processing_thread(void *param)
 {
-	int ret, t_state;
+	int ret;
 	struct se_cmd *cmd;
 	struct se_device *dev = (struct se_device *) param;
-	struct se_queue_req *qr;
 
 	set_user_nice(current, -20);
 
@@ -5942,14 +5927,11 @@ static int transport_processing_thread(void *param)
 get_cmd:
 		__transport_execute_tasks(dev);
 
-		qr = transport_get_qr_from_queue(&dev->dev_queue_obj);
-		if (!(qr))
+		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
+		if (!cmd)
 			continue;
 
-		cmd = qr->cmd;
-		t_state = qr->state;
-
-		switch (t_state) {
+		switch (cmd->t_state) {
 		case TRANSPORT_NEW_CMD_MAP:
 			if (!(cmd->se_tfo->new_cmd_map)) {
 				printk(KERN_ERR "cmd->se_tfo->new_cmd_map is"
@@ -6000,7 +5982,7 @@ get_cmd:
 		default:
 			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
 				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
-				" %u\n", t_state, cmd->deferred_t_state,
+				" %u\n", cmd->t_state, cmd->deferred_t_state,
 				cmd->se_tfo->get_task_tag(cmd),
 				cmd->se_tfo->get_cmd_state(cmd),
 				cmd->se_lun->unpacked_lun);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index e4818cd..67d490f 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -535,7 +535,7 @@ struct se_cmd {
 	struct se_session	*se_sess;
 	struct se_tmr_req	*se_tmr_req;
 	struct se_transport_task t_task;
-	struct se_queue_req	se_qr;
+	struct list_head	se_queue_node;
 	struct target_core_fabric_ops *se_tfo;
 	int (*transport_emulate_cdb)(struct se_cmd *);
 	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 037/103] target: Rename list_heads that are nodes in struct se_cmd to "*_node"
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (4 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 036/103] target: Replace embedded struct se_queue_req with a list_head Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 038/103] target: Fold transport_device_setup_cmd() into lookup_{tmr,cmd}_lun() Nicholas A. Bellinger
                   ` (23 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

This just helps make it clear that se_cmd is being placed on lots of lists;
it doesn't have any list heads of its own.
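
A minimal sketch of the head/node distinction this naming reflects, using
names from the hunks below (struct layouts abbreviated to the list-related
members only):

/* the list owner keeps the head ... */
struct se_lun {
        struct list_head lun_cmd_list;          /* head of the per-LUN command list */
        /* other members omitted */
};

/* ... while se_cmd only embeds nodes that sit on other objects' lists */
struct se_cmd {
        struct list_head se_lun_node;           /* node on a LUN's lun_cmd_list */
        /* other members omitted */
};

list_add_tail(&cmd->se_lun_node, &lun->lun_cmd_list);
list_del(&cmd->se_lun_node);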

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_device.c    |    2 +-
 drivers/target/target_core_transport.c |   26 +++++++++++++-------------
 include/target/target_core_base.h      |    6 +++---
 3 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index c7b9ef7..c8d4654 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -164,7 +164,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 	 * for tracking state of struct se_cmds during LUN shutdown events.
 	 */
 	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
-	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
+	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
 	atomic_set(&se_cmd->t_task.transport_lun_active, 1);
 	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9b76d33..bd445ff 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -722,7 +722,7 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 check_lun:
 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
 	if (atomic_read(&cmd->t_task.transport_lun_active)) {
-		list_del(&cmd->se_lun_list);
+		list_del(&cmd->se_lun_node);
 		atomic_set(&cmd->t_task.transport_lun_active, 0);
 #if 0
 		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
@@ -1704,9 +1704,9 @@ void transport_init_se_cmd(
 	int task_attr,
 	unsigned char *sense_buffer)
 {
-	INIT_LIST_HEAD(&cmd->se_lun_list);
-	INIT_LIST_HEAD(&cmd->se_delayed_list);
-	INIT_LIST_HEAD(&cmd->se_ordered_list);
+	INIT_LIST_HEAD(&cmd->se_lun_node);
+	INIT_LIST_HEAD(&cmd->se_delayed_node);
+	INIT_LIST_HEAD(&cmd->se_ordered_node);
 
 	INIT_LIST_HEAD(&cmd->t_task.t_task_list);
 	init_completion(&cmd->t_task.transport_lun_fe_stop_comp);
@@ -2377,7 +2377,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 		return 1;
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
 		spin_lock(&cmd->se_dev->ordered_cmd_lock);
-		list_add_tail(&cmd->se_ordered_list,
+		list_add_tail(&cmd->se_ordered_node,
 				&cmd->se_dev->ordered_cmd_list);
 		spin_unlock(&cmd->se_dev->ordered_cmd_lock);
 
@@ -2414,7 +2414,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 		 */
 		spin_lock(&cmd->se_dev->delayed_cmd_lock);
 		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
-		list_add_tail(&cmd->se_delayed_list,
+		list_add_tail(&cmd->se_delayed_node,
 				&cmd->se_dev->delayed_cmd_list);
 		spin_unlock(&cmd->se_dev->delayed_cmd_lock);
 
@@ -3652,7 +3652,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
 		spin_lock(&dev->ordered_cmd_lock);
-		list_del(&cmd->se_ordered_list);
+		list_del(&cmd->se_ordered_node);
 		atomic_dec(&dev->dev_ordered_sync);
 		smp_mb__after_atomic_dec();
 		spin_unlock(&dev->ordered_cmd_lock);
@@ -3668,9 +3668,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 	 */
 	spin_lock(&dev->delayed_cmd_lock);
 	list_for_each_entry_safe(cmd_p, cmd_tmp,
-			&dev->delayed_cmd_list, se_delayed_list) {
+			&dev->delayed_cmd_list, se_delayed_node) {
 
-		list_del(&cmd_p->se_delayed_list);
+		list_del(&cmd_p->se_delayed_node);
 		spin_unlock(&dev->delayed_cmd_lock);
 
 		DEBUG_STA("Calling add_tasks() for"
@@ -5232,10 +5232,10 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 	 * Initiator Port.
 	 */
 	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-	while (!list_empty_careful(&lun->lun_cmd_list)) {
-		cmd = list_entry(lun->lun_cmd_list.next,
-			struct se_cmd, se_lun_list);
-		list_del(&cmd->se_lun_list);
+	while (!list_empty(&lun->lun_cmd_list)) {
+		cmd = list_first_entry(&lun->lun_cmd_list,
+		       struct se_cmd, se_lun_node);
+		list_del(&cmd->se_lun_node);
 
 		atomic_set(&cmd->t_task.transport_lun_active, 0);
 		/*
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 67d490f..c84afc3 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -523,9 +523,9 @@ struct se_cmd {
 	atomic_t                transport_sent;
 	/* Used for sense data */
 	void			*sense_buffer;
-	struct list_head	se_delayed_list;
-	struct list_head	se_ordered_list;
-	struct list_head	se_lun_list;
+	struct list_head	se_delayed_node;
+	struct list_head	se_ordered_node;
+	struct list_head	se_lun_node;
 	struct se_device      *se_dev;
 	struct se_dev_entry   *se_deve;
 	struct se_device	*se_obj_ptr;
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 038/103] target: Fold transport_device_setup_cmd() into lookup_{tmr,cmd}_lun()
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (5 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 037/103] target: Rename list_heads that are nodes in struct se_cmd to "*_node" Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 039/103] target: Make t_mem_list and t_mem_list_bidi members of t_task Nicholas A. Bellinger
                   ` (22 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

The function just sets cmd->se_dev based on the lun's se_dev. Since
lookup_tmr_lun is the function that finds the se_lun for a cmd, we can
just set se_dev there.
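
For clarity, a rough sketch of the fabric call flow before and after this
change (calls taken from the hunks below; error handling omitted):

/* before: the fabric had to pair the two calls itself */
transport_lookup_cmd_lun(se_cmd, lun);
transport_device_setup_cmd(se_cmd);     /* only did: cmd->se_dev = cmd->se_lun->lun_se_dev */

/* after: the lookup associates the command with its se_dev directly */
transport_lookup_cmd_lun(se_cmd, lun);  /* also sets se_cmd->se_dev = se_lun->lun_se_dev */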

I've done a lot of checking of the fabric modules to make sure this is OK,
but it should also follow from the fact that cmd->se_lun gets set in the
lookup functions: all the old callers of transport_device_setup_cmd() must
already have been calling the lookup first anyway.

(nab: Fix transport_lookup_tmr_lun failure case)

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/loopback/tcm_loop.c     |    1 -
 drivers/target/target_core_device.c    |    8 +++++++-
 drivers/target/target_core_transport.c |    8 --------
 include/target/target_core_transport.h |    1 -
 4 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 38e936b..cd73940 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -128,7 +128,6 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 		return NULL;
 	}
 
-	transport_device_setup_cmd(se_cmd);
 	return se_cmd;
 }
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index c8d4654..ea92f75 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -149,6 +149,9 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		return -ENODEV;
 	}
 
+	/* Directly associate cmd with se_dev */
+	se_cmd->se_dev = se_lun->lun_se_dev;
+
 	/* TODO: get rid of this and use atomics for stats */
 	dev = se_lun->lun_se_dev;
 	spin_lock_irq(&dev->stats_lock);
@@ -193,7 +196,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		se_tmr->tmr_lun = deve->se_lun;
 		se_cmd->se_lun = deve->se_lun;
 		se_lun = deve->se_lun;
-		se_tmr->tmr_dev = se_lun->lun_se_dev;
 		se_cmd->pr_res_key = deve->pr_res_key;
 		se_cmd->orig_fe_lun = unpacked_lun;
 		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
@@ -217,6 +219,10 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		return -ENODEV;
 	}
 
+	/* Directly associate cmd with se_dev */
+	se_cmd->se_dev = se_lun->lun_se_dev;
+	se_tmr->tmr_dev = se_lun->lun_se_dev;
+
 	spin_lock(&se_tmr->tmr_dev->se_tmr_lock);
 	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
 	spin_unlock(&se_tmr->tmr_dev->se_tmr_lock);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index bd445ff..646649a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1685,12 +1685,6 @@ transport_generic_get_task(struct se_cmd *cmd,
 
 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
 
-void transport_device_setup_cmd(struct se_cmd *cmd)
-{
-	cmd->se_dev = cmd->se_lun->lun_se_dev;
-}
-EXPORT_SYMBOL(transport_device_setup_cmd);
-
 /*
  * Used by fabric modules containing a local struct se_cmd within their
  * fabric dependent per I/O descriptor.
@@ -1782,7 +1776,6 @@ int transport_generic_allocate_tasks(
 	 */
 	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
 
-	transport_device_setup_cmd(cmd);
 	/*
 	 * Ensure that the received CDB is less than the max (252 + 8) bytes
 	 * for VARIABLE_LENGTH_CMD
@@ -1917,7 +1910,6 @@ int transport_generic_handle_tmr(
 	 * This is needed for early exceptions.
 	 */
 	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
-	transport_device_setup_cmd(cmd);
 
 	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
 	return 0;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 1dd4d18..acd5914 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -159,7 +159,6 @@ extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
 					struct se_subsystem_dev *, u32,
 					void *, struct se_dev_limits *,
 					const char *, const char *);
-extern void transport_device_setup_cmd(struct se_cmd *);
 extern void transport_init_se_cmd(struct se_cmd *,
 					struct target_core_fabric_ops *,
 					struct se_session *, u32, int, int,
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 039/103] target: Make t_mem_list and t_mem_list_bidi members of t_task
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (6 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 038/103] target: Fold transport_device_setup_cmd() into lookup_{tmr,cmd}_lun() Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 040/103] target: Add comment & cleanup transport_map_sg_to_mem() Nicholas A. Bellinger
                   ` (21 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

This lets us eliminate 2 allocations and associated bookkeeping and error
checking. Remove transport_init_se_mem_list().
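
Concretely, the two eliminated allocations are the list heads themselves;
a before/after sketch based on the transport_generic_get_mem() and
transport_init_se_cmd() hunks below:

/* before: each list head was a separately kzalloc()ed object that could fail */
cmd->t_task.t_mem_list = transport_init_se_mem_list();
if (!cmd->t_task.t_mem_list)
        return -ENOMEM;

/* after: the heads are embedded in t_task and only need initializing */
INIT_LIST_HEAD(&cmd->t_task.t_mem_list);
INIT_LIST_HEAD(&cmd->t_task.t_mem_bidi_list);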

Change instances of list_entry(foo->next) to list_first_entry(foo).

Change names of variables that count sg entries to contain "count", as is
the common kernel convention.

Remove SCF_PASSTHROUGH_SG_TO_MEM flag, not needed.

Use pointers to struct scatterlist instead of void *.

Minor other cleanups.

(nab: Fix list_empty(&cmd->t_task.t_mem_bidi_list) inversion bugs)

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/loopback/tcm_loop.c     |   37 +++-----
 drivers/target/target_core_rd.c        |    4 +-
 drivers/target/target_core_transport.c |  156 ++++++++++----------------------
 drivers/target/tcm_fc/tfc_cmd.c        |   12 ++--
 drivers/target/tcm_fc/tfc_io.c         |    4 +-
 include/target/target_core_base.h      |    6 +-
 6 files changed, 74 insertions(+), 145 deletions(-)

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index cd73940..e0e8aaa 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -142,13 +142,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
-	void *mem_ptr, *mem_bidi_ptr = NULL;
-	u32 sg_no_bidi = 0;
+	struct scatterlist *sgl_bidi = NULL;
+	u32 sgl_bidi_count = 0;
 	int ret;
 	/*
 	 * Allocate the necessary tasks to complete the received CDB+data
 	 */
-	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
+	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
 	if (ret == -ENOMEM) {
 		/* Out of Resources */
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -164,35 +164,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 		 */
 		return PYX_TRANSPORT_USE_SENSE_REASON;
 	}
+
 	/*
-	 * Setup the struct scatterlist memory from the received
-	 * struct scsi_cmnd.
+	 * For BIDI commands, pass in the extra READ buffer
+	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (scsi_sg_count(sc)) {
-		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
-		mem_ptr = scsi_sglist(sc);
-		/*
-		 * For BIDI commands, pass in the extra READ buffer
-		 * to transport_generic_map_mem_to_cmd() below..
-		 */
-		if (se_cmd->t_task.t_tasks_bidi) {
-			struct scsi_data_buffer *sdb = scsi_in(sc);
+	if (se_cmd->t_task.t_tasks_bidi) {
+		struct scsi_data_buffer *sdb = scsi_in(sc);
 
-			mem_bidi_ptr = sdb->table.sgl;
-			sg_no_bidi = sdb->table.nents;
-		}
-	} else {
-		/*
-		 * Used for DMA_NONE
-		 */
-		mem_ptr = NULL;
+		sgl_bidi = sdb->table.sgl;
+		sgl_bidi_count = sdb->table.nents;
 	}
+
 	/*
 	 * Map the SG memory into struct se_mem->page linked list using the same
 	 * physical memory at sg->page_link.
 	 */
-	ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
-			scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
 	if (ret < 0)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4215f5f..384a8e2 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -880,14 +880,14 @@ static int rd_DIRECT_do_se_mem_map(
 	 * across multiple struct se_task->task_sg[].
 	 */
 	ret = transport_init_task_sg(task,
-			list_entry(cmd->t_task.t_mem_list->next,
+			list_first_entry(&cmd->t_task.t_mem_list,
 				   struct se_mem, se_list),
 			task_offset);
 	if (ret <= 0)
 		return ret;
 
 	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
-			list_entry(cmd->t_task.t_mem_list->next,
+			list_first_entry(&cmd->t_task.t_mem_list,
 				   struct se_mem, se_list),
 			out_se_mem, se_mem_cnt, task_offset_in);
 }
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 646649a..44f9afe 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -215,9 +215,8 @@ static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
 static int transport_generic_remove(struct se_cmd *cmd,
 		int release_to_pool, int session_reinstatement);
 static int transport_get_sectors(struct se_cmd *cmd);
-static struct list_head *transport_init_se_mem_list(void);
 static int transport_map_sg_to_mem(struct se_cmd *cmd,
-		struct list_head *se_mem_list, void *in_mem,
+		struct list_head *se_mem_list, struct scatterlist *sgl,
 		u32 *se_mem_cnt);
 static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
 		unsigned char *dst, struct list_head *se_mem_list);
@@ -1702,6 +1701,8 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_ordered_node);
 
+	INIT_LIST_HEAD(&cmd->t_task.t_mem_list);
+	INIT_LIST_HEAD(&cmd->t_task.t_mem_bidi_list);
 	INIT_LIST_HEAD(&cmd->t_task.t_task_list);
 	init_completion(&cmd->t_task.transport_lun_fe_stop_comp);
 	init_completion(&cmd->t_task.transport_lun_stop_comp);
@@ -2801,14 +2802,14 @@ static void transport_xor_callback(struct se_cmd *cmd)
 	 * Copy the scatterlist WRITE buffer located at cmd->t_task.t_mem_list
 	 * into the locally allocated *buf
 	 */
-	transport_memcpy_se_mem_read_contig(cmd, buf, cmd->t_task.t_mem_list);
+	transport_memcpy_se_mem_read_contig(cmd, buf, &cmd->t_task.t_mem_list);
 	/*
 	 * Now perform the XOR against the BIDI read memory located at
 	 * cmd->t_task.t_mem_bidi_list
 	 */
 
 	offset = 0;
-	list_for_each_entry(se_mem, cmd->t_task.t_mem_bidi_list, se_list) {
+	list_for_each_entry(se_mem, &cmd->t_task.t_mem_bidi_list, se_list) {
 		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
 		if (!(addr))
 			goto out;
@@ -3754,7 +3755,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
 		/*
 		 * Check if we need to send READ payload for BIDI-COMMAND
 		 */
-		if (cmd->t_task.t_mem_bidi_list != NULL) {
+		if (!list_empty(&cmd->t_task.t_mem_bidi_list)) {
 			spin_lock(&cmd->se_lun->lun_sep_lock);
 			if (cmd->se_lun->lun_sep) {
 				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
@@ -3829,7 +3830,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 		return;
 
 	list_for_each_entry_safe(se_mem, se_mem_tmp,
-			cmd->t_task.t_mem_list, se_list) {
+			&cmd->t_task.t_mem_list, se_list) {
 		/*
 		 * We only release call __free_page(struct se_mem->se_page) when
 		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
@@ -3841,9 +3842,9 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 		kmem_cache_free(se_mem_cache, se_mem);
 	}
 
-	if (cmd->t_task.t_mem_bidi_list && cmd->t_task.t_tasks_se_bidi_num) {
+	if (!list_empty(&cmd->t_task.t_mem_bidi_list) && cmd->t_task.t_tasks_se_bidi_num) {
 		list_for_each_entry_safe(se_mem, se_mem_tmp,
-				cmd->t_task.t_mem_bidi_list, se_list) {
+				&cmd->t_task.t_mem_bidi_list, se_list) {
 			/*
 			 * We only release call __free_page(struct se_mem->se_page) when
 			 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
@@ -3856,10 +3857,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 		}
 	}
 
-	kfree(cmd->t_task.t_mem_bidi_list);
-	cmd->t_task.t_mem_bidi_list = NULL;
-	kfree(cmd->t_task.t_mem_list);
-	cmd->t_task.t_mem_list = NULL;
 	cmd->t_task.t_tasks_se_num = 0;
 }
 
@@ -3970,35 +3967,19 @@ free_pages:
  */
 int transport_generic_map_mem_to_cmd(
 	struct se_cmd *cmd,
-	struct scatterlist *mem,
-	u32 sg_mem_num,
-	struct scatterlist *mem_bidi_in,
-	u32 sg_mem_bidi_num)
+	struct scatterlist *sgl,
+	u32 sgl_count,
+	struct scatterlist *sgl_bidi,
+	u32 sgl_bidi_count)
 {
-	u32 se_mem_cnt_out = 0;
+	u32 mapped_sg_count = 0;
 	int ret;
 
-	if (!(mem) || !(sg_mem_num))
+	if (!sgl || !sgl_count)
 		return 0;
-	/*
-	 * Passed *mem will contain a list_head containing preformatted
-	 * struct se_mem elements...
-	 */
-	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
-		if ((mem_bidi_in) || (sg_mem_bidi_num)) {
-			printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
-				" with BIDI-COMMAND\n");
-			return -ENOSYS;
-		}
 
-		cmd->t_task.t_mem_list = (struct list_head *)mem;
-		cmd->t_task.t_tasks_se_num = sg_mem_num;
-		cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
-		return 0;
-	}
 	/*
-	 * Otherwise, assume the caller is passing a struct scatterlist
-	 * array from include/linux/scatterlist.h
+	 * Convert sgls (sgl, sgl_bidi) to list of se_mems
 	 */
 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
@@ -4007,41 +3988,29 @@ int transport_generic_map_mem_to_cmd(
 		 * processed into a TCM struct se_subsystem_dev, we do the mapping
 		 * from the passed physical memory to struct se_mem->se_page here.
 		 */
-		cmd->t_task.t_mem_list = transport_init_se_mem_list();
-		if (!(cmd->t_task.t_mem_list))
-			return -ENOMEM;
-
 		ret = transport_map_sg_to_mem(cmd,
-			cmd->t_task.t_mem_list, mem, &se_mem_cnt_out);
+			&cmd->t_task.t_mem_list, sgl, &mapped_sg_count);
 		if (ret < 0)
 			return -ENOMEM;
 
-		cmd->t_task.t_tasks_se_num = se_mem_cnt_out;
+		cmd->t_task.t_tasks_se_num = mapped_sg_count;
 		/*
 		 * Setup BIDI READ list of struct se_mem elements
 		 */
-		if ((mem_bidi_in) && (sg_mem_bidi_num)) {
-			cmd->t_task.t_mem_bidi_list = transport_init_se_mem_list();
-			if (!(cmd->t_task.t_mem_bidi_list)) {
-				kfree(cmd->t_task.t_mem_list);
-				return -ENOMEM;
-			}
-			se_mem_cnt_out = 0;
-
+		if (sgl_bidi && sgl_bidi_count) {
+			mapped_sg_count = 0;
 			ret = transport_map_sg_to_mem(cmd,
-				cmd->t_task.t_mem_bidi_list, mem_bidi_in,
-				&se_mem_cnt_out);
-			if (ret < 0) {
-				kfree(cmd->t_task.t_mem_list);
+				&cmd->t_task.t_mem_bidi_list, sgl_bidi,
+				&mapped_sg_count);
+			if (ret < 0)
 				return -ENOMEM;
-			}
 
-			cmd->t_task.t_tasks_se_bidi_num = se_mem_cnt_out;
+			cmd->t_task.t_tasks_se_bidi_num = mapped_sg_count;
 		}
 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 
 	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
-		if (mem_bidi_in || sg_mem_bidi_num) {
+		if (sgl_bidi || sgl_bidi_count) {
 			printk(KERN_ERR "BIDI-Commands not supported using "
 				"SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
 			return -ENOSYS;
@@ -4056,7 +4025,8 @@ int transport_generic_map_mem_to_cmd(
 		 * struct scatterlist format.
 		 */
 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
-		cmd->t_task.t_task_pt_sgl = mem;
+		cmd->t_task.t_task_pt_sgl = sgl;
+		/* don't need sgl count? We assume it contains cmd->data_length data */
 	}
 
 	return 0;
@@ -4111,12 +4081,12 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 		 * cmd->t_task.t_mem_bidi_list so the READ struct se_tasks
 		 * are queued first for the non pSCSI passthrough case.
 		 */
-		if ((cmd->t_task.t_mem_bidi_list != NULL) &&
+		if (!list_empty(&cmd->t_task.t_mem_bidi_list) &&
 		    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
 			rc = transport_generic_get_cdb_count(cmd,
 				cmd->t_task.t_task_lba,
 				cmd->t_task.t_tasks_sectors,
-				DMA_FROM_DEVICE, cmd->t_task.t_mem_bidi_list,
+				DMA_FROM_DEVICE, &cmd->t_task.t_mem_bidi_list,
 				set_counts);
 			if (!(rc)) {
 				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4133,7 +4103,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 		task_cdbs = transport_generic_get_cdb_count(cmd,
 				cmd->t_task.t_task_lba,
 				cmd->t_task.t_tasks_sectors,
-				cmd->data_direction, cmd->t_task.t_mem_list,
+				cmd->data_direction, &cmd->t_task.t_mem_list,
 				set_counts);
 		if (!(task_cdbs)) {
 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4157,47 +4127,18 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 	return 0;
 }
 
-static struct list_head *transport_init_se_mem_list(void)
-{
-	struct list_head *se_mem_list;
-
-	se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
-	if (!(se_mem_list)) {
-		printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
-		return NULL;
-	}
-	INIT_LIST_HEAD(se_mem_list);
-
-	return se_mem_list;
-}
-
 static int
 transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 {
 	unsigned char *buf;
 	struct se_mem *se_mem;
 
-	cmd->t_task.t_mem_list = transport_init_se_mem_list();
-	if (!(cmd->t_task.t_mem_list))
-		return -ENOMEM;
-
 	/*
 	 * If the device uses memory mapping this is enough.
 	 */
 	if (cmd->se_dev->transport->do_se_mem_map)
 		return 0;
 
-	/*
-	 * Setup BIDI-COMMAND READ list of struct se_mem elements
-	 */
-	if (cmd->t_task.t_tasks_bidi) {
-		cmd->t_task.t_mem_bidi_list = transport_init_se_mem_list();
-		if (!(cmd->t_task.t_mem_bidi_list)) {
-			kfree(cmd->t_task.t_mem_list);
-			return -ENOMEM;
-		}
-	}
-
 	while (length) {
 		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
 		if (!(se_mem)) {
@@ -4222,7 +4163,7 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 		memset(buf, 0, se_mem->se_len);
 		kunmap_atomic(buf, KM_IRQ0);
 
-		list_add_tail(&se_mem->se_list, cmd->t_task.t_mem_list);
+		list_add_tail(&se_mem->se_list, &cmd->t_task.t_mem_list);
 		cmd->t_task.t_tasks_se_num++;
 
 		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
@@ -4265,7 +4206,7 @@ int transport_init_task_sg(
 				sg_length = se_mem->se_len;
 
 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task.t_mem_list)))
+						&se_cmd->t_task.t_mem_list)))
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 			} else {
@@ -4285,7 +4226,7 @@ int transport_init_task_sg(
 				sg_length = (se_mem->se_len - task_offset);
 
 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task.t_mem_list)))
+						&se_cmd->t_task.t_mem_list)))
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 			}
@@ -4326,7 +4267,7 @@ next:
 	 * Setup task->task_sg_bidi for SCSI READ payload for
 	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
 	 */
-	if ((se_cmd->t_task.t_mem_bidi_list != NULL) &&
+	if (!list_empty(&se_cmd->t_task.t_mem_bidi_list) &&
 	    (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
 		task->task_sg_bidi = kzalloc(task_sg_num_padded *
 				sizeof(struct scatterlist), GFP_KERNEL);
@@ -4417,19 +4358,19 @@ static inline int transport_set_tasks_sectors(
 				max_sectors_set);
 }
 
+/*
+ * Convert a sgl into a linked list of se_mems.
+ */
 static int transport_map_sg_to_mem(
 	struct se_cmd *cmd,
 	struct list_head *se_mem_list,
-	void *in_mem,
+	struct scatterlist *sg,
 	u32 *se_mem_cnt)
 {
 	struct se_mem *se_mem;
-	struct scatterlist *sg;
 	u32 sg_count = 1, cmd_size = cmd->data_length;
 
-	WARN_ON(!in_mem);
-
-	sg = (struct scatterlist *)in_mem;
+	WARN_ON(!sg);
 
 	while (cmd_size) {
 		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
@@ -4510,7 +4451,7 @@ int transport_map_mem_to_sg(
 				sg->length = se_mem->se_len;
 
 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task.t_mem_list))) {
+						&se_cmd->t_task.t_mem_list))) {
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 					(*se_mem_cnt)++;
@@ -4546,7 +4487,7 @@ int transport_map_mem_to_sg(
 				sg->length = (se_mem->se_len - *task_offset);
 
 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task.t_mem_list))) {
+						&se_cmd->t_task.t_mem_list))) {
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 					(*se_mem_cnt)++;
@@ -4764,15 +4705,14 @@ static u32 transport_generic_get_cdb_count(
 	 * mem_list will ever be empty at this point.
 	 */
 	if (!(list_empty(mem_list)))
-		se_mem = list_entry(mem_list->next, struct se_mem, se_list);
+		se_mem = list_first_entry(mem_list, struct se_mem, se_list);
 	/*
 	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
 	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
 	 */
-	if ((cmd->t_task.t_mem_bidi_list != NULL) &&
-	    !(list_empty(cmd->t_task.t_mem_bidi_list)) &&
+	if (!list_empty(&cmd->t_task.t_mem_bidi_list) &&
 	    (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
-		se_mem_bidi = list_entry(cmd->t_task.t_mem_bidi_list->next,
+		se_mem_bidi = list_first_entry(&cmd->t_task.t_mem_bidi_list,
 					struct se_mem, se_list);
 
 	while (sectors) {
@@ -4824,7 +4764,7 @@ static u32 transport_generic_get_cdb_count(
 		 */
 		if (task->task_sg_bidi != NULL) {
 			ret = transport_do_se_mem_map(dev, task,
-				cmd->t_task.t_mem_bidi_list, NULL,
+				&cmd->t_task.t_mem_bidi_list, NULL,
 				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
 				&task_offset_in);
 			if (ret < 0)
@@ -4888,12 +4828,12 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
 		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
 		u32 se_mem_cnt = 0, task_offset = 0;
 
-		if (!list_empty(cmd->t_task.t_mem_list))
-			se_mem = list_entry(cmd->t_task.t_mem_list->next,
+		if (!list_empty(&cmd->t_task.t_mem_list))
+			se_mem = list_first_entry(&cmd->t_task.t_mem_list,
 					struct se_mem, se_list);
 
 		ret = transport_do_se_mem_map(dev, task,
-				cmd->t_task.t_mem_list, NULL, se_mem,
+				&cmd->t_task.t_mem_list, NULL, se_mem,
 				&se_mem_lout, &se_mem_cnt, &task_offset);
 		if (ret < 0)
 			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 161e3c9..6d9553b 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -76,12 +76,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 	printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
 	       caller, cmd, task, task->t_tasks_se_num,
 	       task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
-	if (task->t_mem_list)
-		list_for_each_entry(mem, task->t_mem_list, se_list)
-			printk(KERN_INFO "%s: cmd %p mem %p page %p "
-			       "len 0x%x off 0x%x\n",
-			       caller, cmd, mem,
-			       mem->se_page, mem->se_len, mem->se_off);
+
+	list_for_each_entry(mem, &task->t_mem_list, se_list)
+		printk(KERN_INFO "%s: cmd %p mem %p page %p "
+		       "len 0x%x off 0x%x\n",
+		       caller, cmd, mem,
+		       mem->se_page, mem->se_len, mem->se_off);
 	sp = cmd->seq;
 	if (sp) {
 		ep = fc_seq_exch(sp);
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index e21d839..f18af6e 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -97,7 +97,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 	 * Setup to use first mem list entry if any.
 	 */
 	if (task->t_tasks_se_num) {
-		mem = list_first_entry(task->t_mem_list,
+		mem = list_first_entry(&task->t_mem_list,
 			 struct se_mem, se_list);
 		mem_len = mem->se_len;
 		mem_off = mem->se_off;
@@ -313,7 +313,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	 * Setup to use first mem list entry if any.
 	 */
 	if (task->t_tasks_se_num) {
-		mem = list_first_entry(task->t_mem_list,
+		mem = list_first_entry(&task->t_mem_list,
 				       struct se_mem, se_list);
 		mem_len = mem->se_len;
 		mem_off = mem->se_off;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c84afc3..94c838d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -123,7 +123,7 @@ enum se_cmd_flags_table {
 	SCF_SENT_DELAYED_TAS		= 0x00020000,
 	SCF_ALUA_NON_OPTIMIZED		= 0x00040000,
 	SCF_DELAYED_CMD_FROM_SAM_ATTR	= 0x00080000,
-	SCF_PASSTHROUGH_SG_TO_MEM	= 0x00100000,
+	SCF_UNUSED			= 0x00100000,
 	SCF_PASSTHROUGH_CONTIG_TO_SG	= 0x00200000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
 	SCF_EMULATE_SYNC_CACHE		= 0x00800000,
@@ -452,9 +452,9 @@ struct se_transport_task {
 	 * and other HW target mode fabric modules.
 	 */
 	struct scatterlist	*t_task_pt_sgl;
-	struct list_head	*t_mem_list;
+	struct list_head	t_mem_list;
 	/* Used for BIDI READ */
-	struct list_head	*t_mem_bidi_list;
+	struct list_head	t_mem_bidi_list;
 	struct list_head	t_task_list;
 } ____cacheline_aligned;
 
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 040/103] target: Add comment & cleanup transport_map_sg_to_mem()
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (7 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 039/103] target: Make t_mem_list and t_mem_list_bidi members of t_task Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 041/103] target: Remove unneeded checks in transport_free_pages() Nicholas A. Bellinger
                   ` (20 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

I initially freaked out and wrote a patch that ensured this function
didn't return a partially allocated list (possible if a later mem
allocation failed), and that cleaned up if the second call to
transport_map_sg_to_mem() failed. But it isn't necessary -- on error we
eventually end up calling transport_free_pages(), which walks both lists
and frees all se_mem entries.
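
For reference, the cleanup that makes the early -ENOMEM returns safe is the
walk in transport_free_pages(); roughly (the conditional page freeing is
elided here):

list_for_each_entry_safe(se_mem, se_mem_tmp,
                &cmd->t_task.t_mem_list, se_list) {
        list_del(&se_mem->se_list);
        kmem_cache_free(se_mem_cache, se_mem);
}
/* ... followed by the same walk over t_mem_bidi_list */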

I added a comment to explain this, and also removed a second counter that
was incremented on every loop iteration, because the two counts could never
differ.

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   26 +++++++++++++-------------
 1 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 44f9afe..f2849ad 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -4365,14 +4365,19 @@ static int transport_map_sg_to_mem(
 	struct se_cmd *cmd,
 	struct list_head *se_mem_list,
 	struct scatterlist *sg,
-	u32 *se_mem_cnt)
+	u32 *sg_count)
 {
 	struct se_mem *se_mem;
-	u32 sg_count = 1, cmd_size = cmd->data_length;
+	u32 cmd_size = cmd->data_length;
 
 	WARN_ON(!sg);
 
 	while (cmd_size) {
+		/*
+		 * NOTE: it is safe to return -ENOMEM at any time in creating this
+		 * list because transport_free_pages() will eventually be called, and is
+		 * smart enough to deallocate all list items for sg and sg_bidi lists.
+		 */
 		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
 		if (!(se_mem)) {
 			printk(KERN_ERR "Unable to allocate struct se_mem\n");
@@ -4389,26 +4394,21 @@ static int transport_map_sg_to_mem(
 		if (cmd_size > sg->length) {
 			se_mem->se_len = sg->length;
 			sg = sg_next(sg);
-			sg_count++;
 		} else
 			se_mem->se_len = cmd_size;
 
 		cmd_size -= se_mem->se_len;
+		(*sg_count)++;
 
-		DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
-				*se_mem_cnt, cmd_size);
+		DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n",
+				sg_count, cmd_size);
 		DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
 				se_mem->se_page, se_mem->se_off, se_mem->se_len);
 
 		list_add_tail(&se_mem->se_list, se_mem_list);
-		(*se_mem_cnt)++;
 	}
 
-	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
-		" struct se_mem\n", sg_count, *se_mem_cnt);
-
-	if (sg_count != *se_mem_cnt)
-		BUG();
+	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count);
 
 	return 0;
 }
@@ -4610,8 +4610,8 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	for_each_sg(cmd->t_task.t_tasks_sg_chained, sg,
 			cmd->t_task.t_tasks_sg_chained_no, i) {
 
-		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
-			i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
+		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n",
+			i, sg, sg_page(sg), sg->length, sg->offset);
 		if (sg_is_chain(sg))
 			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
 		if (sg_is_last(sg))
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 041/103] target: Remove unneeded checks in transport_free_pages()
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (8 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 040/103] target: Add comment & cleanup transport_map_sg_to_mem() Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 042/103] target: Fix WRITE_SAME_16 t_task_lba assignment bug Nicholas A. Bellinger
                   ` (19 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

If t_mem_bidi_list is empty, iterating over it is still correct even
without first checking that it is non-empty or that t_tasks_se_bidi_num
is nonzero. The same applies to checking t_tasks_se_num before looping
over t_mem_list.
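
This works because the list_for_each_entry*() macros simply never execute
their body for an empty list; a minimal illustration:

LIST_HEAD(empty);
struct se_mem *se_mem, *se_mem_tmp;

list_for_each_entry_safe(se_mem, se_mem_tmp, &empty, se_list) {
        /* never reached: the walk terminates immediately on an empty list */
        __free_page(se_mem->se_page);
        list_del(&se_mem->se_list);
        kmem_cache_free(se_mem_cache, se_mem);
}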

Zero out t_tasks_se_bidi_num to match handling of t_tasks_se_num.

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   29 ++++++++++++-----------------
 1 files changed, 12 insertions(+), 17 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index f2849ad..d0cd601 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3826,9 +3826,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
 		return;
 
-	if (!(cmd->t_task.t_tasks_se_num))
-		return;
-
 	list_for_each_entry_safe(se_mem, se_mem_tmp,
 			&cmd->t_task.t_mem_list, se_list) {
 		/*
@@ -3841,23 +3838,21 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 		list_del(&se_mem->se_list);
 		kmem_cache_free(se_mem_cache, se_mem);
 	}
+	cmd->t_task.t_tasks_se_num = 0;
 
-	if (!list_empty(&cmd->t_task.t_mem_bidi_list) && cmd->t_task.t_tasks_se_bidi_num) {
-		list_for_each_entry_safe(se_mem, se_mem_tmp,
-				&cmd->t_task.t_mem_bidi_list, se_list) {
-			/*
-			 * We only release call __free_page(struct se_mem->se_page) when
-			 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
-			 */
-			if (free_page)
-				__free_page(se_mem->se_page);
+	list_for_each_entry_safe(se_mem, se_mem_tmp,
+				 &cmd->t_task.t_mem_bidi_list, se_list) {
+		/*
+		 * We only release call __free_page(struct se_mem->se_page) when
+		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
+		 */
+		if (free_page)
+			__free_page(se_mem->se_page);
 
-			list_del(&se_mem->se_list);
-			kmem_cache_free(se_mem_cache, se_mem);
-		}
+		list_del(&se_mem->se_list);
+		kmem_cache_free(se_mem_cache, se_mem);
 	}
-
-	cmd->t_task.t_tasks_se_num = 0;
+	cmd->t_task.t_tasks_se_bidi_num = 0;
 }
 
 static inline void transport_release_tasks(struct se_cmd *cmd)
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 042/103] target: Fix WRITE_SAME_16 t_task_lba assignment bug
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (9 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 041/103] target: Remove unneeded checks in transport_free_pages() Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 043/103] target: Fix WRITE_SAME_[16,32] number of blocks=0 case Nicholas A. Bellinger
                   ` (18 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Nicholas Bellinger

From: Nicholas Bellinger <nab@linux-iscsi.org>

This patch fixes a bug in the assignment of cmd->t_task.t_task_lba with
WRITE_SAME_16 to correctly use get_unaligned_be64() for the 64-bit LBA.
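
For reference, the relevant WRITE SAME (16) CDB layout and the corrected
read (the layout summary is from SBC, not from this patch):

/*
 * WRITE SAME (16), opcode 0x93:
 *   byte  0       opcode
 *   byte  1       flags
 *   bytes 2..9    LOGICAL BLOCK ADDRESS (64-bit, big-endian)
 *   bytes 10..13  NUMBER OF LOGICAL BLOCKS (32-bit, big-endian)
 */
cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[2]);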

Reported-by: Chris Greiveldinger <chris.greiveldinger@rnanetworks.com>
Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d0cd601..a29f6d3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3417,7 +3417,7 @@ static int transport_generic_cmd_sequencer(
 		if (sector_ret)
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
-		cmd->t_task.t_task_lba = get_unaligned_be16(&cdb[2]);
+		cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[2]);
 		passthrough = (dev->transport->transport_type ==
 				TRANSPORT_PLUGIN_PHBA_PDEV);
 		/*
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 043/103] target: Fix WRITE_SAME_[16,32] number of blocks=0 case
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (10 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 042/103] target: Fix WRITE_SAME_16 t_task_lba assignment bug Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 044/103] tcm_fc: Makefile cleanups Nicholas A. Bellinger
                   ` (17 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Nicholas Bellinger

From: Nicholas Bellinger <nab@linux-iscsi.org>

This patch fixes the handling of WRITE_SAME_[16,32] emulation where a
WRITE_SAME_* CDB with number of blocks=0 was being rejected by SCSI
expected data transfer length overflow checking in target core.

It changes both CDB cases in transport_generic_cmd_sequencer() to use
dev->se_sub_dev->se_dev_attrib.block_size as the expected transfer length,
matching what sg_write_same sends us with --num=0.  It also fixes
target_emulate_write_same() to properly determine num_blocks in the
--num=0 case and calculate the remaining range for
dev->transport->do_discard().
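
The resulting selection in target_emulate_write_same() boils down to the
following (restating the hunk below; a zero NUMBER OF LOGICAL BLOCKS is
taken to mean "from the starting LBA to the end of the device"):

if (num_blocks != 0)
        range = num_blocks;                             /* explicit range from the CDB */
else
        range = dev->transport->get_blocks(dev) - lba;  /* rest of the LUN */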

Reported-by: Chris Greiveldinger <chris.greiveldinger@rnanetworks.com>
Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_cdb.c       |   28 ++++++++++++++++++++--------
 drivers/target/target_core_transport.c |   14 ++++++++++++--
 2 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 95195d7..8d5a0fc 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -1008,18 +1008,30 @@ target_emulate_unmap(struct se_task *task)
  * Note this is not used for TCM/pSCSI passthrough
  */
 static int
-target_emulate_write_same(struct se_task *task)
+target_emulate_write_same(struct se_task *task, int write_same32)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	sector_t lba = cmd->t_task.t_task_lba;
-	unsigned int range;
+	sector_t range, lba = cmd->t_task.t_task_lba;
+	unsigned int num_blocks;
 	int ret;
+	/*
+	 * Extract num_blocks from the WRITE_SAME_* CDB.  Then use the explict
+	 * range when non zero is supplied, otherwise calculate the remaining
+	 * range based on ->get_blocks() - starting LBA.
+	 */
+	if (write_same32)
+		num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[28]);
+	else
+		num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[10]);
 
-	range = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
+	if (num_blocks != 0)
+		range = num_blocks;
+	else
+		range = (dev->transport->get_blocks(dev) - lba);
 
-	printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
-			 (unsigned long long)lba, range);
+	printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
+		 (unsigned long long)lba, (unsigned long long)range);
 
 	ret = dev->transport->do_discard(dev, lba, range);
 	if (ret < 0) {
@@ -1081,7 +1093,7 @@ transport_emulate_control_cdb(struct se_task *task)
 					" for: %s\n", dev->transport->name);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
-		ret = target_emulate_write_same(task);
+		ret = target_emulate_write_same(task, 0);
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action =
@@ -1094,7 +1106,7 @@ transport_emulate_control_cdb(struct se_task *task)
 					dev->transport->name);
 				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 			}
-			ret = target_emulate_write_same(task);
+			ret = target_emulate_write_same(task, 1);
 			break;
 		default:
 			printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index a29f6d3..bf401da 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3132,7 +3132,12 @@ static int transport_generic_cmd_sequencer(
 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
 			if (sector_ret)
 				goto out_unsupported_cdb;
-			size = transport_get_size(sectors, cdb, cmd);
+
+			if (sectors != 0)
+				size = transport_get_size(sectors, cdb, cmd);
+			else
+				size = dev->se_sub_dev->se_dev_attrib.block_size;
+
 			cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[12]);
 			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 
@@ -3416,7 +3421,12 @@ static int transport_generic_cmd_sequencer(
 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
 		if (sector_ret)
 			goto out_unsupported_cdb;
-		size = transport_get_size(sectors, cdb, cmd);
+
+		if (sectors != 0)
+			size = transport_get_size(sectors, cdb, cmd);
+		else
+			size = dev->se_sub_dev->se_dev_attrib.block_size;
+
 		cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[2]);
 		passthrough = (dev->transport->transport_type ==
 				TRANSPORT_PLUGIN_PHBA_PDEV);
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 044/103] tcm_fc: Makefile cleanups
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (11 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 043/103] target: Fix WRITE_SAME_[16,32] number of blocks=0 case Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 045/103] tcm_fc: Convert to wake_up_process and schedule_timeout_interruptible Nicholas A. Bellinger
                   ` (16 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Nicholas Bellinger

From: Nicholas Bellinger <nab@linux-iscsi.org>

This patch removes the unnecessary EXTRA_CFLAGS includes, and drops the
unused -DTCM_FC_DEBUG define.

Reported-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>
---
 drivers/target/tcm_fc/Makefile |   17 ++++-------------
 1 files changed, 4 insertions(+), 13 deletions(-)

diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
index 7a5c2b6..20b14bb 100644
--- a/drivers/target/tcm_fc/Makefile
+++ b/drivers/target/tcm_fc/Makefile
@@ -1,15 +1,6 @@
-EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
-		-I$(srctree)/drivers/scsi/ \
-		-I$(srctree)/include/scsi/ \
-		-I$(srctree)/drivers/target/tcm_fc/
-
-tcm_fc-y +=	tfc_cmd.o \
-		tfc_conf.o \
-		tfc_io.o \
-		tfc_sess.o
+tcm_fc-y +=		tfc_cmd.o \
+			tfc_conf.o \
+			tfc_io.o \
+			tfc_sess.o
 
 obj-$(CONFIG_TCM_FC)	+= tcm_fc.o
-
-ifdef CONFIGFS_TCM_FC_DEBUG
-EXTRA_CFLAGS	+= -DTCM_FC_DEBUG
-endif
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 045/103] tcm_fc: Convert to wake_up_process and schedule_timeout_interruptible
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (12 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 044/103] tcm_fc: Makefile cleanups Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 046/103] target: Replace custom sg<->buf functions with lib funcs Nicholas A. Bellinger
                   ` (15 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Nicholas Bellinger

From: Nicholas Bellinger <nab@linux-iscsi.org>

This patch converts ft_queue_cmd() to use wake_up_process() and
ft_thread() to use schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT)
instead of wait_event_interruptible().  This fixes a potential race in
the wait_event_interruptible() condition on qobj->queue_cnt in
ft_thread().
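
A condensed sketch of the resulting producer/consumer pairing (taken from
the hunks below; locking and error handling trimmed):

/* producer: ft_queue_cmd() queues the command and kicks the thread directly */
list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
atomic_inc(&qobj->queue_cnt);
wake_up_process(tpg->thread);

/* consumer: ft_thread() sleeps until woken or told to stop */
while (!kthread_should_stop()) {
        schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
        if (kthread_should_stop())
                break;
        cmd = ft_dequeue_cmd(qobj);
        if (cmd)
                ft_exec_req(cmd);
}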

This patch also drops the unnecessary set_user_nice(current, -20) in
ft_thread(), and drops extra () around two if (!(acl)) conditionals in
tfc_conf.c.

Reported-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>
---
 drivers/target/tcm_fc/tfc_cmd.c  |   17 ++++++++---------
 drivers/target/tcm_fc/tfc_conf.c |    4 ++--
 2 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 6d9553b..9c99402 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -96,15 +96,17 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 
 static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
 {
-	struct se_queue_obj *qobj;
+	struct ft_tpg *tpg = sess->tport->tpg;
+	struct se_queue_obj *qobj = &tpg->qobj;
 	unsigned long flags;
 
 	qobj = &sess->tport->tpg->qobj;
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 	atomic_inc(&qobj->queue_cnt);
-	wake_up_interruptible(&qobj->thread_wq);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	wake_up_process(tpg->thread);
 }
 
 static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
@@ -696,15 +698,12 @@ int ft_thread(void *arg)
 	struct ft_tpg *tpg = arg;
 	struct se_queue_obj *qobj = &tpg->qobj;
 	struct ft_cmd *cmd;
-	int ret;
-
-	set_user_nice(current, -20);
 
 	while (!kthread_should_stop()) {
-		ret = wait_event_interruptible(qobj->thread_wq,
-			atomic_read(&qobj->queue_cnt) || kthread_should_stop());
-		if (ret < 0 || kthread_should_stop())
+		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+		if (kthread_should_stop())
 			goto out;
+
 		cmd = ft_dequeue_cmd(qobj);
 		if (cmd)
 			ft_exec_req(cmd);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 58e4745..e3176f5 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -223,7 +223,7 @@ static struct se_node_acl *ft_add_acl(
 		return ERR_PTR(-EINVAL);
 
 	acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
-	if (!(acl))
+	if (!acl)
 		return ERR_PTR(-ENOMEM);
 	acl->node_auth.port_name = wwpn;
 
@@ -280,7 +280,7 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
 	struct ft_node_acl *acl;
 
 	acl = kzalloc(sizeof(*acl), GFP_KERNEL);
-	if (!(acl)) {
+	if (!acl) {
 		printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
 		return NULL;
 	}
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 046/103] target: Replace custom sg<->buf functions with lib funcs
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (13 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 045/103] tcm_fc: Convert to wake_up_process and schedule_timeout_interruptible Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:07 ` [PATCH 047/103] target: Simplify sector limiting code Nicholas A. Bellinger
                   ` (14 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Use sg_copy_to/from_buffer instead of transport_memcpy_read/write_contig.
They are the same, except we also need to track the number of sg entries,
which we do by adding a count to se_transport_task.
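
For reference, the two scatterlist library helpers as they are used in the
hunks below, annotated with what each argument is:

/* contiguous buffer -> SGL (replaces transport_memcpy_write_contig) */
sg_copy_from_buffer(cmd->t_task.t_task_pt_sgl,          /* destination SGL */
                    cmd->t_task.t_task_pt_sgl_num,      /* number of SG entries */
                    cmd->t_task.t_task_buf,             /* contiguous source */
                    cmd->data_length);                  /* bytes to copy */

/* SGL -> contiguous buffer (replaces transport_memcpy_read_contig) */
sg_copy_to_buffer(cmd->t_task.t_task_pt_sgl,            /* source SGL */
                  cmd->t_task.t_task_pt_sgl_num,
                  cmd->t_task.t_task_buf,               /* contiguous destination */
                  cmd->data_length);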

Also, fix some comment typos.

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   80 ++++---------------------------
 include/target/target_core_base.h      |    1 +
 2 files changed, 12 insertions(+), 69 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index bf401da..887314f 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3542,66 +3542,6 @@ out_invalid_cdb_field:
 
 static inline void transport_release_tasks(struct se_cmd *);
 
-/*
- * This function will copy a contiguous *src buffer into a destination
- * struct scatterlist array.
- */
-static void transport_memcpy_write_contig(
-	struct se_cmd *cmd,
-	struct scatterlist *sg_d,
-	unsigned char *src)
-{
-	u32 i = 0, length = 0, total_length = cmd->data_length;
-	void *dst;
-
-	while (total_length) {
-		length = sg_d[i].length;
-
-		if (length > total_length)
-			length = total_length;
-
-		dst = sg_virt(&sg_d[i]);
-
-		memcpy(dst, src, length);
-
-		if (!(total_length -= length))
-			return;
-
-		src += length;
-		i++;
-	}
-}
-
-/*
- * This function will copy a struct scatterlist array *sg_s into a destination
- * contiguous *dst buffer.
- */
-static void transport_memcpy_read_contig(
-	struct se_cmd *cmd,
-	unsigned char *dst,
-	struct scatterlist *sg_s)
-{
-	u32 i = 0, length = 0, total_length = cmd->data_length;
-	void *src;
-
-	while (total_length) {
-		length = sg_s[i].length;
-
-		if (length > total_length)
-			length = total_length;
-
-		src = sg_virt(&sg_s[i]);
-
-		memcpy(dst, src, length);
-
-		if (!(total_length -= length))
-			return;
-
-		dst += length;
-		i++;
-	}
-}
-
 static void transport_memcpy_se_mem_read_contig(
 	struct se_cmd *cmd,
 	unsigned char *dst,
@@ -3744,14 +3684,15 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
 		}
 		spin_unlock(&cmd->se_lun->lun_sep_lock);
 		/*
-		 * If enabled by TCM fabirc module pre-registered SGL
+		 * If enabled by TCM fabric module pre-registered SGL
 		 * memory, perform the memcpy() from the TCM internal
-		 * contigious buffer back to the original SGL.
+		 * contiguous buffer back to the original SGL.
 		 */
 		if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
-			transport_memcpy_write_contig(cmd,
-				 cmd->t_task.t_task_pt_sgl,
-				 cmd->t_task.t_task_buf);
+			sg_copy_from_buffer(cmd->t_task.t_task_pt_sgl,
+					    cmd->t_task.t_task_pt_sgl_num,
+					    cmd->t_task.t_task_buf,
+					    cmd->data_length);
 
 		cmd->se_tfo->queue_data_in(cmd);
 		break;
@@ -4031,7 +3972,7 @@ int transport_generic_map_mem_to_cmd(
 		 */
 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
 		cmd->t_task.t_task_pt_sgl = sgl;
-		/* don't need sgl count? We assume it contains cmd->data_length data */
+		cmd->t_task.t_task_pt_sgl_num = sgl_count;
 	}
 
 	return 0;
@@ -5031,9 +4972,10 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 	 * se_cmd->t_task.t_task_buf.
 	 */
 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
-		transport_memcpy_read_contig(cmd,
-				cmd->t_task.t_task_buf,
-				cmd->t_task.t_task_pt_sgl);
+		sg_copy_to_buffer(cmd->t_task.t_task_pt_sgl,
+				    cmd->t_task.t_task_pt_sgl_num,
+				    cmd->t_task.t_task_buf,
+				    cmd->data_length);
 	/*
 	 * Clear the se_cmd for WRITE_PENDING status in order to set
 	 * cmd->t_task.t_transport_active=0 so that transport_generic_handle_data
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 94c838d..ff1b461 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -452,6 +452,7 @@ struct se_transport_task {
 	 * and other HW target mode fabric modules.
 	 */
 	struct scatterlist	*t_task_pt_sgl;
+	u32			t_task_pt_sgl_num;
 	struct list_head	t_mem_list;
 	/* Used for BIDI READ */
 	struct list_head	t_mem_bidi_list;
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 047/103] target: Simplify sector limiting code
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (14 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 046/103] target: Replace custom sg<->buf functions with lib funcs Nicholas A. Bellinger
@ 2011-07-21  7:07 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 048/103] target: get_cdb should never return NULL Nicholas A. Bellinger
                   ` (13 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:07 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

The code that checked/limited sectors could be simplified, since it
boils down to two constraints: 1) the request can't go beyond the LUN's
last LBA, and 2) it is capped at the underlying device's max_sectors
attribute.

Give each local variable in transport_generic_get_cdb_count its own line.

BUG_ON(!mem_list), since it should never be NULL now.

Replace the max_sectors_set check with a test of whether
transport_limit_task_sectors() returned 0.
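
As a worked example of the new clamping, under assumed numbers for a
disk-type device (max_sectors = 1024, last LBA = 99999, a request for
4096 sectors starting at LBA 99000):

	#include <linux/kernel.h>
	#include <linux/types.h>

	static sector_t limit_example(void)
	{
		unsigned long long lba = 99000;
		unsigned long long end_lba = 99999;	/* last addressable LBA */
		sector_t sectors = 4096;
		sector_t max_sectors = 1024;

		/* 2) cap at the device's max_sectors attribute */
		sectors = min_t(sector_t, sectors, max_sectors);  /* -> 1024 */

		/* 1) don't run past the LUN's last LBA */
		if ((lba + sectors) > end_lba)			  /* 100024 > 99999 */
			sectors = (end_lba - lba) + 1;		  /* -> 1000 */

		return sectors;
	}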

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   98 +++++++++----------------------
 1 files changed, 29 insertions(+), 69 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 887314f..d5df995 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -4249,59 +4249,19 @@ next:
 	return task->task_sg_num;
 }
 
-static inline int transport_set_tasks_sectors_disk(
-	struct se_task *task,
+/* Reduce sectors if they are too long for the device */
+static inline sector_t transport_limit_task_sectors(
 	struct se_device *dev,
 	unsigned long long lba,
-	u32 sectors,
-	int *max_sectors_set)
+	sector_t sectors)
 {
-	if ((lba + sectors) > transport_dev_end_lba(dev)) {
-		task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
-
-		if (task->task_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
-			task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
-			*max_sectors_set = 1;
-		}
-	} else {
-		if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
-			task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
-			*max_sectors_set = 1;
-		} else
-			task->task_sectors = sectors;
-	}
-
-	return 0;
-}
+	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
 
-static inline int transport_set_tasks_sectors_non_disk(
-	struct se_task *task,
-	struct se_device *dev,
-	unsigned long long lba,
-	u32 sectors,
-	int *max_sectors_set)
-{
-	if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
-		task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
-		*max_sectors_set = 1;
-	} else
-		task->task_sectors = sectors;
-
-	return 0;
-}
+	if (dev->transport->get_device_type(dev) == TYPE_DISK)
+		if ((lba + sectors) > transport_dev_end_lba(dev))
+			sectors = ((transport_dev_end_lba(dev) - lba) + 1);
 
-static inline int transport_set_tasks_sectors(
-	struct se_task *task,
-	struct se_device *dev,
-	unsigned long long lba,
-	u32 sectors,
-	int *max_sectors_set)
-{
-	return (dev->transport->get_device_type(dev) == TYPE_DISK) ?
-		transport_set_tasks_sectors_disk(task, dev, lba, sectors,
-				max_sectors_set) :
-		transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
-				max_sectors_set);
+	return sectors;
 }
 
 /*
@@ -4625,6 +4585,9 @@ static int transport_do_se_mem_map(
 				task_offset_in);
 }
 
+/*
+ * Break up cmd into chunks transport can handle
+ */
 static u32 transport_generic_get_cdb_count(
 	struct se_cmd *cmd,
 	unsigned long long lba,
@@ -4635,17 +4598,18 @@ static u32 transport_generic_get_cdb_count(
 {
 	unsigned char *cdb = NULL;
 	struct se_task *task;
-	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
-	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
+	struct se_mem *se_mem = NULL;
+	struct se_mem *se_mem_lout = NULL;
+	struct se_mem *se_mem_bidi = NULL;
+	struct se_mem *se_mem_bidi_lout = NULL;
 	struct se_device *dev = cmd->se_dev;
-	int max_sectors_set = 0, ret;
-	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
+	int ret;
+	u32 task_offset_in = 0;
+	u32 se_mem_cnt = 0;
+	u32 se_mem_bidi_cnt = 0;
+	u32 task_cdbs = 0;
 
-	if (!mem_list) {
-		printk(KERN_ERR "mem_list is NULL in transport_generic_get"
-				"_cdb_count()\n");
-		return 0;
-	}
+	BUG_ON(!mem_list);
 	/*
 	 * While using RAMDISK_DR backstores is the only case where
 	 * mem_list will ever be empty at this point.
@@ -4662,18 +4626,22 @@ static u32 transport_generic_get_cdb_count(
 					struct se_mem, se_list);
 
 	while (sectors) {
+		sector_t limited_sectors;
+
 		DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
 			cmd->se_tfo->get_task_tag(cmd), lba, sectors,
 			transport_dev_end_lba(dev));
 
+		limited_sectors = transport_limit_task_sectors(dev, lba, sectors);
+		if (!limited_sectors)
+			break;
+
 		task = transport_generic_get_task(cmd, data_direction);
-		if (!(task))
+		if (!task)
 			goto out;
 
-		transport_set_tasks_sectors(task, dev, lba, sectors,
-				&max_sectors_set);
-
 		task->task_lba = lba;
+		task->task_sectors = limited_sectors;
 		lba += task->task_sectors;
 		sectors -= task->task_sectors;
 		task->task_size = (task->task_sectors *
@@ -4722,14 +4690,6 @@ static u32 transport_generic_get_cdb_count(
 
 		DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
 				task_cdbs, task->task_sg_num);
-
-		if (max_sectors_set) {
-			max_sectors_set = 0;
-			continue;
-		}
-
-		if (!sectors)
-			break;
 	}
 
 	if (set_counts) {
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 048/103] target: get_cdb should never return NULL
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (15 preceding siblings ...)
  2011-07-21  7:07 ` [PATCH 047/103] target: Simplify sector limiting code Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 049/103] target: Simplify transport_memcpy_se_mem_read_contig Nicholas A. Bellinger
                   ` (12 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

dev->transport->get_cdb(task) should just be returning a pointer to
the cdb space that's already allocated in the task, so it should never
be NULL.
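
As a sketch of why that holds, a hypothetical backend's get_cdb() simply
hands back storage that is embedded in (or allocated together with) the
task, so a NULL return would be a programming error rather than a
runtime condition (struct example_task and its cdb[] member are made up
here):

	#include <linux/kernel.h>
	#include <target/target_core_base.h>

	struct example_task {
		struct se_task se_task;
		unsigned char cdb[16];
	};

	static unsigned char *example_get_cdb(struct se_task *task)
	{
		struct example_task *et =
			container_of(task, struct example_task, se_task);

		return et->cdb;		/* always valid once the task exists */
	}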

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   21 ++++++++++++---------
 1 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d5df995..c01a81b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -4648,12 +4648,15 @@ static u32 transport_generic_get_cdb_count(
 				   dev->se_sub_dev->se_dev_attrib.block_size);
 
 		cdb = dev->transport->get_cdb(task);
-		if ((cdb)) {
-			memcpy(cdb, cmd->t_task.t_task_cdb,
-				scsi_command_size(cmd->t_task.t_task_cdb));
-			cmd->transport_split_cdb(task->task_lba,
-					&task->task_sectors, cdb);
-		}
+		/* Should be part of task, can't fail */
+		BUG_ON(!cdb);
+
+		memcpy(cdb, cmd->t_task.t_task_cdb,
+		       scsi_command_size(cmd->t_task.t_task_cdb));
+
+		/* Update new cdb with updated lba/sectors */
+		cmd->transport_split_cdb(task->task_lba,
+					 &task->task_sectors, cdb);
 
 		/*
 		 * Perform the SE OBJ plugin and/or Transport plugin specific
@@ -4719,9 +4722,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
 		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
 
 	cdb = dev->transport->get_cdb(task);
-	if (cdb)
-		memcpy(cdb, cmd->t_task.t_task_cdb,
-			scsi_command_size(cmd->t_task.t_task_cdb));
+	BUG_ON(!cdb);
+	memcpy(cdb, cmd->t_task.t_task_cdb,
+	       scsi_command_size(cmd->t_task.t_task_cdb));
 
 	task->task_size = cmd->data_length;
 	task->task_sg_num =
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 049/103] target: Simplify transport_memcpy_se_mem_read_contig
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (16 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 048/103] target: get_cdb should never return NULL Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 050/103] target: Use assignment rather than increment for t_task_cdbs Nicholas A. Bellinger
                   ` (11 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

transport_memcpy_se_mem_read_contig() doesn't need to take a *cmd
parameter; it only needs the data_length, so pass that directly.

Also use min_t for great justice.
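
(For anyone unfamiliar with the helper: min_t(), from <linux/kernel.h>,
casts both operands to the given type before comparing, which avoids
signed/unsigned mismatch surprises. A trivial sketch:)

	#include <linux/kernel.h>
	#include <linux/types.h>

	static u32 example_min(void)
	{
		return min_t(u32, 8192, 4096);	/* compared as u32 -> 4096 */
	}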

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   27 +++++++++++----------------
 1 files changed, 11 insertions(+), 16 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c01a81b..df9a9a2 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -218,8 +218,8 @@ static int transport_get_sectors(struct se_cmd *cmd);
 static int transport_map_sg_to_mem(struct se_cmd *cmd,
 		struct list_head *se_mem_list, struct scatterlist *sgl,
 		u32 *se_mem_cnt);
-static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
-		unsigned char *dst, struct list_head *se_mem_list);
+static void transport_memcpy_se_mem_read_contig(unsigned char *dst,
+		struct list_head *se_mem_list, u32 len);
 static void transport_release_fe_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 		struct se_queue_obj *qobj);
@@ -2802,7 +2802,8 @@ static void transport_xor_callback(struct se_cmd *cmd)
 	 * Copy the scatterlist WRITE buffer located at cmd->t_task.t_mem_list
 	 * into the locally allocated *buf
 	 */
-	transport_memcpy_se_mem_read_contig(cmd, buf, &cmd->t_task.t_mem_list);
+	transport_memcpy_se_mem_read_contig(buf, &cmd->t_task.t_mem_list,
+					    cmd->data_length);
 	/*
 	 * Now perform the XOR against the BIDI read memory located at
 	 * cmd->t_task.t_mem_bidi_list
@@ -3543,27 +3544,21 @@ out_invalid_cdb_field:
 static inline void transport_release_tasks(struct se_cmd *);
 
 static void transport_memcpy_se_mem_read_contig(
-	struct se_cmd *cmd,
 	unsigned char *dst,
-	struct list_head *se_mem_list)
+	struct list_head *se_mem_list,
+	u32 tot_len)
 {
 	struct se_mem *se_mem;
 	void *src;
-	u32 length = 0, total_length = cmd->data_length;
+	u32 length;
 
 	list_for_each_entry(se_mem, se_mem_list, se_list) {
-		length = se_mem->se_len;
-
-		if (length > total_length)
-			length = total_length;
-
+		length = min_t(u32, se_mem->se_len, tot_len);
 		src = page_address(se_mem->se_page) + se_mem->se_off;
-
 		memcpy(dst, src, length);
-
-		if (!(total_length -= length))
-			return;
-
+		tot_len -= length;
+		if (!tot_len)
+			break;
 		dst += length;
 	}
 }
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 050/103] target: Use assignment rather than increment for t_task_cdbs
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (17 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 049/103] target: Simplify transport_memcpy_se_mem_read_contig Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 051/103] target: Don't pass dma_size to generic_get_mem Nicholas A. Bellinger
                   ` (10 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Using an increment makes it seem like t_task_cdbs could already hold a
nonzero value that we want to preserve. That never appears to be the
case, since there is no looping in transport_new_cmd_obj.

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |    9 +++++----
 1 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index df9a9a2..da6bd74 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -4009,11 +4009,12 @@ static int transport_get_sectors(struct se_cmd *cmd)
 static int transport_new_cmd_obj(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	u32 task_cdbs = 0, rc;
+	u32 task_cdbs;
+	u32 rc;
 
 	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
-		task_cdbs++;
-		cmd->t_task.t_task_cdbs++;
+		task_cdbs = 1;
+		cmd->t_task.t_task_cdbs = 1;
 	} else {
 		int set_counts = 1;
 
@@ -4052,7 +4053,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			return PYX_TRANSPORT_LU_COMM_FAILURE;
 		}
-		cmd->t_task.t_task_cdbs += task_cdbs;
+		cmd->t_task.t_task_cdbs = task_cdbs;
 
 #if 0
 		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 051/103] target: Don't pass dma_size to generic_get_mem
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (18 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 050/103] target: Use assignment rather than increment for t_task_cdbs Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 052/103] target: Pass sg with type scatterlist in transport_map_sg_to_mem Nicholas A. Bellinger
                   ` (9 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Since the caller always passes PAGE_SIZE for dma_size, we can use
PAGE_SIZE directly in the function (we are allocating pages, after all)
and omit the parameter.

(hch: Add __GFP_ZERO usage to zero page)
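
A minimal sketch of the allocation change (hypothetical helper name; a
single order-0 page is assumed, as in the code below):

	#include <linux/gfp.h>

	/*
	 * __GFP_ZERO makes the allocator hand back an already-zeroed page,
	 * replacing the old alloc_pages() + kmap_atomic() + memset() +
	 * kunmap_atomic() sequence.
	 */
	static struct page *example_alloc_zeroed_page(void)
	{
		return alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
	}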

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   20 +++++---------------
 1 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index da6bd74..9bbc626 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -210,8 +210,7 @@ static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
 		unsigned long long starting_lba, u32 sectors,
 		enum dma_data_direction data_direction,
 		struct list_head *mem_list, int set_counts);
-static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
-		u32 dma_size);
+static int transport_generic_get_mem(struct se_cmd *cmd, u32 length);
 static int transport_generic_remove(struct se_cmd *cmd,
 		int release_to_pool, int session_reinstatement);
 static int transport_get_sectors(struct se_cmd *cmd);
@@ -2896,7 +2895,7 @@ static int transport_allocate_resources(struct se_cmd *cmd)
 
 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
-		return transport_generic_get_mem(cmd, length, PAGE_SIZE);
+		return transport_generic_get_mem(cmd, length);
 	else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
 		return transport_generic_allocate_buf(cmd, length);
 	else
@@ -4070,9 +4069,8 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 }
 
 static int
-transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
+transport_generic_get_mem(struct se_cmd *cmd, u32 length)
 {
-	unsigned char *buf;
 	struct se_mem *se_mem;
 
 	/*
@@ -4089,22 +4087,14 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 		}
 
 /* #warning FIXME Allocate contigous pages for struct se_mem elements */
-		se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
+		se_mem->se_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 		if (!(se_mem->se_page)) {
 			printk(KERN_ERR "alloc_pages() failed\n");
 			goto out;
 		}
 
-		buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
-		if (!(buf)) {
-			printk(KERN_ERR "kmap_atomic() failed\n");
-			goto out;
-		}
 		INIT_LIST_HEAD(&se_mem->se_list);
-		se_mem->se_len = (length > dma_size) ? dma_size : length;
-		memset(buf, 0, se_mem->se_len);
-		kunmap_atomic(buf, KM_IRQ0);
-
+		se_mem->se_len = min_t(u32, length, PAGE_SIZE);
 		list_add_tail(&se_mem->se_list, &cmd->t_task.t_mem_list);
 		cmd->t_task.t_tasks_se_num++;
 
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 052/103] target: Pass sg with type scatterlist in transport_map_sg_to_mem
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (19 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 051/103] target: Don't pass dma_size to generic_get_mem Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 053/103] target: Move task_sg_num next to task_sg in struct se_task Nicholas A. Bellinger
                   ` (8 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Instead of passing the buffer as a void * and then casting it to a
scatterlist, just pass it as a struct scatterlist *.

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |    3 +--
 include/target/target_core_transport.h |    2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9bbc626..ccc5319 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -4312,7 +4312,7 @@ static int transport_map_sg_to_mem(
 int transport_map_mem_to_sg(
 	struct se_task *task,
 	struct list_head *se_mem_list,
-	void *in_mem,
+	struct scatterlist *sg,
 	struct se_mem *in_se_mem,
 	struct se_mem **out_se_mem,
 	u32 *se_mem_cnt,
@@ -4320,7 +4320,6 @@ int transport_map_mem_to_sg(
 {
 	struct se_cmd *se_cmd = task->task_se_cmd;
 	struct se_mem *se_mem = in_se_mem;
-	struct scatterlist *sg = (struct scatterlist *)in_mem;
 	u32 task_size = task->task_size, sg_no = 0;
 
 	if (!sg) {
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index acd5914..4bbd88b 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -184,7 +184,7 @@ extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
 extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
 extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
 extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
-					void *, struct se_mem *,
+					struct scatterlist *, struct se_mem *,
 					struct se_mem **, u32 *, u32 *);
 extern void transport_do_task_sg_chain(struct se_cmd *);
 extern void transport_generic_process_write(struct se_cmd *);
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 053/103] target: Move task_sg_num next to task_sg in struct se_task
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (20 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 052/103] target: Pass sg with type scatterlist in transport_map_sg_to_mem Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 054/103] target: inline struct se_transport_task into struct se_cmd Nicholas A. Bellinger
                   ` (7 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Also, remove task_sg_offset, which is unused.

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 include/target/target_core_base.h |    3 +--
 1 files changed, 1 insertions(+), 2 deletions(-)

diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index ff1b461..77f570d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -462,6 +462,7 @@ struct se_transport_task {
 struct se_task {
 	unsigned char	task_sense;
 	struct scatterlist *task_sg;
+	u32		task_sg_num;
 	struct scatterlist *task_sg_bidi;
 	u8		task_scsi_status;
 	u8		task_flags;
@@ -472,8 +473,6 @@ struct se_task {
 	u32		task_no;
 	u32		task_sectors;
 	u32		task_size;
-	u32		task_sg_num;
-	u32		task_sg_offset;
 	enum dma_data_direction	task_data_direction;
 	struct se_cmd *task_se_cmd;
 	struct se_device	*se_dev;
-- 
1.7.6


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH 054/103] target: inline struct se_transport_task into struct se_cmd
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (21 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 053/103] target: Move task_sg_num next to task_sg in struct se_task Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 055/103] target: Change name & semantics of transport_get_sectors() Nicholas A. Bellinger
                   ` (6 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Continuing the previous patch, which made se_transport_task a member
of se_cmd, this removes the nested se_transport_task struct entirely.
There doesn't appear to be any need for a second struct, and combining
the two may expose redundancies between them that we can then
eliminate.
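
Schematically, with hypothetical struct names and a one-member subset
rather than the real definitions:

	/*
	 * Before: per-command transport state was reached through a nested
	 * struct, e.g. cmd->t_task.t_task_cdb.
	 */
	struct cmd_layout_before {
		struct {			/* was struct se_transport_task */
			unsigned char *t_task_cdb;
		} t_task;
	};

	/*
	 * After: the same field lives directly in the command,
	 * e.g. cmd->t_task_cdb.
	 */
	struct cmd_layout_after {
		unsigned char *t_task_cdb;
	};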

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/loopback/tcm_loop.c     |    4 +-
 drivers/target/target_core_alua.c      |    4 +-
 drivers/target/target_core_cdb.c       |   39 +-
 drivers/target/target_core_device.c    |    6 +-
 drivers/target/target_core_file.c      |    8 +-
 drivers/target/target_core_iblock.c    |    4 +-
 drivers/target/target_core_pr.c        |   22 +-
 drivers/target/target_core_pscsi.c     |   10 +-
 drivers/target/target_core_rd.c        |    8 +-
 drivers/target/target_core_tmr.c       |   56 ++--
 drivers/target/target_core_transport.c |  710 ++++++++++++++++----------------
 drivers/target/target_core_ua.c        |    2 +-
 drivers/target/tcm_fc/tfc_cmd.c        |   14 +-
 drivers/target/tcm_fc/tfc_io.c         |   21 +-
 include/target/target_core_base.h      |  106 +++---
 15 files changed, 500 insertions(+), 514 deletions(-)

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index e0e8aaa..f6f78f1 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -118,7 +118,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		se_cmd->t_task.t_tasks_bidi = 1;
+		se_cmd->t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
@@ -169,7 +169,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	 * For BIDI commands, pass in the extra READ buffer
 	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (se_cmd->t_task.t_tasks_bidi) {
+	if (se_cmd->t_tasks_bidi) {
 		struct scsi_data_buffer *sdb = scsi_in(sc);
 
 		sgl_bidi = sdb->table.sgl;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 76abd86..76d506f 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -65,7 +65,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
 				    Target port group descriptor */
 
@@ -157,7 +157,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
 	u32 len = 4; /* Skip over RESERVED area in header */
 	int alua_access_state, primary = 0, rc;
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 8d5a0fc..09ef3f8 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -66,7 +66,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *buf = cmd->t_task_buf;
 
 	/*
 	 * Make sure we at least have 6 bytes of INQUIRY response
@@ -621,8 +621,8 @@ static int
 target_emulate_inquiry(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
-	unsigned char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *buf = cmd->t_task_buf;
+	unsigned char *cdb = cmd->t_task_cdb;
 
 	if (!(cdb[1] & 0x1))
 		return target_emulate_inquiry_std(cmd);
@@ -666,7 +666,7 @@ static int
 target_emulate_readcapacity(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *buf = cmd->t_task_buf;
 	unsigned long long blocks_long = dev->transport->get_blocks(dev);
 	u32 blocks;
 
@@ -696,7 +696,7 @@ static int
 target_emulate_readcapacity_16(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *buf = cmd->t_task_buf;
 	unsigned long long blocks = dev->transport->get_blocks(dev);
 
 	buf[0] = (blocks >> 56) & 0xff;
@@ -831,8 +831,8 @@ static int
 target_emulate_modesense(struct se_cmd *cmd, int ten)
 {
 	struct se_device *dev = cmd->se_dev;
-	char *cdb = cmd->t_task.t_task_cdb;
-	unsigned char *rbuf = cmd->t_task.t_task_buf;
+	char *cdb = cmd->t_task_cdb;
+	unsigned char *rbuf = cmd->t_task_buf;
 	int type = dev->transport->get_device_type(dev);
 	int offset = (ten) ? 8 : 4;
 	int length = 0;
@@ -903,8 +903,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
 static int
 target_emulate_request_sense(struct se_cmd *cmd)
 {
-	unsigned char *cdb = cmd->t_task.t_task_cdb;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *cdb = cmd->t_task_cdb;
+	unsigned char *buf = cmd->t_task_buf;
 	u8 ua_asc = 0, ua_ascq = 0;
 
 	if (cdb[1] & 0x01) {
@@ -965,8 +965,8 @@ target_emulate_unmap(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL;
-	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
+	unsigned char *buf = cmd->t_task_buf, *ptr = NULL;
+	unsigned char *cdb = &cmd->t_task_cdb[0];
 	sector_t lba;
 	unsigned int size = cmd->data_length, range;
 	int ret, offset;
@@ -1012,7 +1012,8 @@ target_emulate_write_same(struct se_task *task, int write_same32)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	sector_t range, lba = cmd->t_task.t_task_lba;
+	sector_t range;
+	sector_t lba = cmd->t_task_lba;
 	unsigned int num_blocks;
 	int ret;
 	/*
@@ -1021,9 +1022,9 @@ target_emulate_write_same(struct se_task *task, int write_same32)
 	 * range based on ->get_blocks() - starting LBA.
 	 */
 	if (write_same32)
-		num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[28]);
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
 	else
-		num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[10]);
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
 
 	if (num_blocks != 0)
 		range = num_blocks;
@@ -1052,7 +1053,7 @@ transport_emulate_control_cdb(struct se_task *task)
 	unsigned short service_action;
 	int ret = 0;
 
-	switch (cmd->t_task.t_task_cdb[0]) {
+	switch (cmd->t_task_cdb[0]) {
 	case INQUIRY:
 		ret = target_emulate_inquiry(cmd);
 		break;
@@ -1066,13 +1067,13 @@ transport_emulate_control_cdb(struct se_task *task)
 		ret = target_emulate_modesense(cmd, 1);
 		break;
 	case SERVICE_ACTION_IN:
-		switch (cmd->t_task.t_task_cdb[1] & 0x1f) {
+		switch (cmd->t_task_cdb[1] & 0x1f) {
 		case SAI_READ_CAPACITY_16:
 			ret = target_emulate_readcapacity_16(cmd);
 			break;
 		default:
 			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
-				cmd->t_task.t_task_cdb[1] & 0x1f);
+				cmd->t_task_cdb[1] & 0x1f);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
 		break;
@@ -1097,7 +1098,7 @@ transport_emulate_control_cdb(struct se_task *task)
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action =
-			get_unaligned_be16(&cmd->t_task.t_task_cdb[8]);
+			get_unaligned_be16(&cmd->t_task_cdb[8]);
 		switch (service_action) {
 		case WRITE_SAME_32:
 			if (!dev->transport->do_discard) {
@@ -1136,7 +1137,7 @@ transport_emulate_control_cdb(struct se_task *task)
 		break;
 	default:
 		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
-			cmd->t_task.t_task_cdb[0], dev->transport->name);
+			cmd->t_task_cdb[0], dev->transport->name);
 		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 	}
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ea92f75..c674a5d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -168,7 +168,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 	 */
 	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
 	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
-	atomic_set(&se_cmd->t_task.transport_lun_active, 1);
+	atomic_set(&se_cmd->transport_lun_active, 1);
 	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
 
 	return 0;
@@ -656,10 +656,10 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
 	struct se_lun *se_lun;
 	struct se_session *se_sess = se_cmd->se_sess;
 	struct se_task *se_task;
-	unsigned char *buf = se_cmd->t_task.t_task_buf;
+	unsigned char *buf = se_cmd->t_task_buf;
 	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 
-	list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list)
+	list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
 		break;
 
 	if (!(se_task)) {
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 2e7ea74..5c47f42 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -377,7 +377,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task_cdb[1] & 0x2);
 	loff_t start, end;
 	int ret;
 
@@ -391,11 +391,11 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	/*
 	 * Determine if we will be flushing the entire device.
 	 */
-	if (cmd->t_task.t_task_lba == 0 && cmd->data_length == 0) {
+	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
 		start = 0;
 		end = LLONG_MAX;
 	} else {
-		start = cmd->t_task.t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
 		if (cmd->data_length)
 			end = start + cmd->data_length;
 		else
@@ -475,7 +475,7 @@ static int fd_do_task(struct se_task *task)
 		if (ret > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-		    cmd->t_task.t_tasks_fua) {
+		    cmd->t_tasks_fua) {
 			/*
 			 * We might need to be a bit smarter here
 			 * and return some sense data to let the initiator
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c73baef..814a85b 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -331,7 +331,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task_cdb[1] & 0x2);
 	sector_t error_sector;
 	int ret;
 
@@ -400,7 +400,7 @@ static int iblock_do_task(struct se_task *task)
 		 */
 		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
 		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-		     task->task_se_cmd->t_task.t_tasks_fua))
+		     task->task_se_cmd->t_tasks_fua))
 			rw = WRITE_FUA;
 		else
 			rw = WRITE;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 19406a3..4fdede8 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
 	struct se_session *sess = cmd->se_sess;
 	struct se_portal_group *tpg = sess->se_tpg;
 
-	if ((cmd->t_task.t_task_cdb[1] & 0x01) &&
-	    (cmd->t_task.t_task_cdb[1] & 0x02)) {
+	if ((cmd->t_task_cdb[1] & 0x01) &&
+	    (cmd->t_task_cdb[1] & 0x02)) {
 		printk(KERN_ERR "LongIO and Obselete Bits set, returning"
 				" ILLEGAL_REQUEST\n");
 		return PYX_TRANSPORT_ILLEGAL_REQUEST;
@@ -216,7 +216,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
 	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
 	struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
-	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
+	unsigned char *cdb = &cmd->t_task_cdb[0];
 	int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
 	int conflict = 0;
 
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port(
 	struct list_head tid_dest_list;
 	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
 	struct target_core_fabric_ops *tmp_tf_ops;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
 	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
 	u32 tpdl, tid_len = 0;
@@ -3307,7 +3307,7 @@ static int core_scsi3_emulate_pro_register_and_move(
 	struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
 	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	unsigned char *initiator_str;
 	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
 	u32 tid_len, tmp_tid_len;
@@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
  */
 static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
 {
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u64 res_key, sa_res_key;
 	int sa, scope, type, aptpl;
 	int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
@@ -3830,7 +3830,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
 	struct se_device *se_dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u32 add_len = 0, off = 8;
 
 	if (cmd->data_length < 8) {
@@ -3885,7 +3885,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
 	struct se_device *se_dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u64 pr_res_key;
 	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
 
@@ -3965,7 +3965,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u16 add_len = 8; /* Hardcoded to 8. */
 
 	if (cmd->data_length < 6) {
@@ -4020,7 +4020,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 	struct se_portal_group *se_tpg;
 	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
 	struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
 	u32 off = 8; /* off into first Full Status descriptor */
 	int format_code = 0;
@@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
 
 int core_scsi3_emulate_pr(struct se_cmd *cmd)
 {
-	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
+	unsigned char *cdb = &cmd->t_task_cdb[0];
 	struct se_device *dev = cmd->se_dev;
 	/*
 	 * Following spc2r20 5.5.1 Reservations overview:
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ecfe889..a62350d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -697,7 +697,7 @@ static int pscsi_transport_complete(struct se_task *task)
 
 		if (task->task_se_cmd->se_deve->lun_flags &
 				TRANSPORT_LUNFLAGS_READ_ONLY) {
-			unsigned char *buf = task->task_se_cmd->t_task.t_task_buf;
+			unsigned char *buf = task->task_se_cmd->t_task_buf;
 
 			if (cdb[0] == MODE_SENSE_10) {
 				if (!(buf[3] & 0x80))
@@ -763,7 +763,7 @@ static struct se_task *
 pscsi_alloc_task(struct se_cmd *cmd)
 {
 	struct pscsi_plugin_task *pt;
-	unsigned char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *cdb = cmd->t_task_cdb;
 
 	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
 	if (!pt) {
@@ -776,7 +776,7 @@ pscsi_alloc_task(struct se_cmd *cmd)
 	 * allocate the extended CDB buffer for per struct se_task context
 	 * pt->pscsi_cdb now.
 	 */
-	if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) {
+	if (cmd->t_task_cdb != cmd->__t_task_cdb) {
 
 		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
 		if (!(pt->pscsi_cdb)) {
@@ -889,7 +889,7 @@ static void pscsi_free_task(struct se_task *task)
 	 * Release the extended CDB allocation from pscsi_alloc_task()
 	 * if one exists.
 	 */
-	if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb)
+	if (cmd->t_task_cdb != cmd->__t_task_cdb)
 		kfree(pt->pscsi_cdb);
 	/*
 	 * We do not release the bio(s) here associated with this task, as
@@ -1266,7 +1266,7 @@ static int pscsi_map_task_non_SG(struct se_task *task)
 		return 0;
 
 	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
-			pt->pscsi_req, cmd->t_task.t_task_buf,
+			pt->pscsi_req, cmd->t_task_buf,
 			task->task_size, GFP_KERNEL);
 	if (ret < 0) {
 		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 384a8e2..4f9416d 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -737,7 +737,7 @@ check_eot:
 	}
 
 out:
-	task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
 #ifdef DEBUG_RAMDISK_DR
 	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
 			*se_mem_cnt);
@@ -819,7 +819,7 @@ static int rd_DIRECT_without_offset(
 	}
 
 out:
-	task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
 #ifdef DEBUG_RAMDISK_DR
 	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
 			*se_mem_cnt);
@@ -880,14 +880,14 @@ static int rd_DIRECT_do_se_mem_map(
 	 * across multiple struct se_task->task_sg[].
 	 */
 	ret = transport_init_task_sg(task,
-			list_first_entry(&cmd->t_task.t_mem_list,
+			list_first_entry(&cmd->t_mem_list,
 				   struct se_mem, se_list),
 			task_offset);
 	if (ret <= 0)
 		return ret;
 
 	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
-			list_first_entry(&cmd->t_task.t_mem_list,
+			list_first_entry(&cmd->t_mem_list,
 				   struct se_mem, se_list),
 			out_se_mem, se_mem_cnt, task_offset_in);
 }
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e1f99f7..4235e72 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -178,14 +178,14 @@ int core_tmr_lun_reset(
 			continue;
 		spin_unlock(&dev->se_tmr_lock);
 
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-		if (!(atomic_read(&cmd->t_task.t_transport_active))) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		if (!(atomic_read(&cmd->t_transport_active))) {
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			spin_lock(&dev->se_tmr_lock);
 			continue;
 		}
 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			spin_lock(&dev->se_tmr_lock);
 			continue;
 		}
@@ -193,7 +193,7 @@ int core_tmr_lun_reset(
 			" Response: 0x%02x, t_state: %d\n",
 			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
 			tmr_p->function, tmr_p->response, cmd->t_state);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		transport_cmd_finish_abort_tmr(cmd);
 		spin_lock(&dev->se_tmr_lock);
@@ -247,38 +247,38 @@ int core_tmr_lun_reset(
 		atomic_set(&task->task_state_active, 0);
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
 			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
 			"def_t_state: %d/%d cdb: 0x%02x\n",
 			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
 			cmd->se_tfo->get_task_tag(cmd), 0,
 			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
-			cmd->deferred_t_state, cmd->t_task.t_task_cdb[0]);
+			cmd->deferred_t_state, cmd->t_task_cdb[0]);
 		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
 			" t_task_cdbs: %d t_task_cdbs_left: %d"
 			" t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
-			cmd->t_task.t_task_cdbs,
-			atomic_read(&cmd->t_task.t_task_cdbs_left),
-			atomic_read(&cmd->t_task.t_task_cdbs_sent),
-			atomic_read(&cmd->t_task.t_transport_active),
-			atomic_read(&cmd->t_task.t_transport_stop),
-			atomic_read(&cmd->t_task.t_transport_sent));
+			cmd->t_task_cdbs,
+			atomic_read(&cmd->t_task_cdbs_left),
+			atomic_read(&cmd->t_task_cdbs_sent),
+			atomic_read(&cmd->t_transport_active),
+			atomic_read(&cmd->t_transport_stop),
+			atomic_read(&cmd->t_transport_sent));
 
 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
 			spin_unlock_irqrestore(
-				&cmd->t_task.t_state_lock, flags);
+				&cmd->t_state_lock, flags);
 
 			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
 				" for dev: %p\n", task, dev);
 			wait_for_completion(&task->task_stop_comp);
 			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
 				" dev: %p\n", task, dev);
-			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-			atomic_dec(&cmd->t_task.t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			atomic_dec(&cmd->t_task_cdbs_left);
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
@@ -288,24 +288,24 @@ int core_tmr_lun_reset(
 		}
 		__transport_stop_task_timer(task, &flags);
 
-		if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) {
+		if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
 			spin_unlock_irqrestore(
-					&cmd->t_task.t_state_lock, flags);
+					&cmd->t_state_lock, flags);
 			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
-				atomic_read(&cmd->t_task.t_task_cdbs_ex_left));
+				atomic_read(&cmd->t_task_cdbs_ex_left));
 
 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
-		fe_count = atomic_read(&cmd->t_task.t_fe_count);
+		fe_count = atomic_read(&cmd->t_fe_count);
 
-		if (atomic_read(&cmd->t_task.t_transport_active)) {
+		if (atomic_read(&cmd->t_transport_active)) {
 			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
 				" task: %p, t_fe_count: %d dev: %p\n", task,
 				fe_count, dev);
-			atomic_set(&cmd->t_task.t_transport_aborted, 1);
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+			atomic_set(&cmd->t_transport_aborted, 1);
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 						flags);
 			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
@@ -314,8 +314,8 @@ int core_tmr_lun_reset(
 		}
 		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
 			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
-		atomic_set(&cmd->t_task.t_transport_aborted, 1);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		atomic_set(&cmd->t_transport_aborted, 1);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
 		spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -345,7 +345,7 @@ int core_tmr_lun_reset(
 		if (prout_cmd == cmd)
 			continue;
 
-		atomic_dec(&cmd->t_task.t_transport_queue_active);
+		atomic_dec(&cmd->t_transport_queue_active);
 		atomic_dec(&qobj->queue_cnt);
 		list_del(&cmd->se_queue_node);
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -353,7 +353,7 @@ int core_tmr_lun_reset(
 		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
 			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
 			"Preempt" : "", cmd, cmd->t_state,
-			atomic_read(&cmd->t_task.t_fe_count));
+			atomic_read(&cmd->t_fe_count));
 		/*
 		 * Signal that the command has failed via cmd->se_cmd_flags,
 		 * and call TFO->new_cmd_failure() to wakeup any fabric
@@ -365,7 +365,7 @@ int core_tmr_lun_reset(
 		transport_new_cmd_failure(cmd);
 
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
-				atomic_read(&cmd->t_task.t_fe_count));
+				atomic_read(&cmd->t_fe_count));
 		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	}
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ccc5319..343dfb0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -572,7 +572,7 @@ void transport_deregister_session(struct se_session *se_sess)
 EXPORT_SYMBOL(transport_deregister_session);
 
 /*
- * Called with cmd->t_task.t_state_lock held.
+ * Called with cmd->t_state_lock held.
  */
 static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 {
@@ -580,7 +580,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 	struct se_task *task;
 	unsigned long flags;
 
-	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		dev = task->se_dev;
 		if (!(dev))
 			continue;
@@ -598,7 +598,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
 		atomic_set(&task->task_state_active, 0);
-		atomic_dec(&cmd->t_task.t_task_cdbs_ex_left);
+		atomic_dec(&cmd->t_task_cdbs_ex_left);
 	}
 }
 
@@ -617,32 +617,32 @@ static int transport_cmd_check_stop(
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	/*
 	 * Determine if IOCTL context caller in requesting the stopping of this
 	 * command for LUN shutdown purposes.
 	 */
-	if (atomic_read(&cmd->t_task.transport_lun_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->t_task.transport_lun_stop)"
+	if (atomic_read(&cmd->transport_lun_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)"
 			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));
 
 		cmd->deferred_t_state = cmd->t_state;
 		cmd->t_state = TRANSPORT_DEFERRED_CMD;
-		atomic_set(&cmd->t_task.t_transport_active, 0);
+		atomic_set(&cmd->t_transport_active, 0);
 		if (transport_off == 2)
 			transport_all_task_dev_remove_state(cmd);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-		complete(&cmd->t_task.transport_lun_stop_comp);
+		complete(&cmd->transport_lun_stop_comp);
 		return 1;
 	}
 	/*
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
 	 */
-	if (atomic_read(&cmd->t_task.t_transport_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->t_task.t_transport_stop) =="
+	if (atomic_read(&cmd->t_transport_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) =="
 			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));
 
@@ -657,13 +657,13 @@ static int transport_cmd_check_stop(
 		 */
 		if (transport_off == 2)
 			cmd->se_lun = NULL;
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-		complete(&cmd->t_task.t_transport_stop_comp);
+		complete(&cmd->t_transport_stop_comp);
 		return 1;
 	}
 	if (transport_off) {
-		atomic_set(&cmd->t_task.t_transport_active, 0);
+		atomic_set(&cmd->t_transport_active, 0);
 		if (transport_off == 2) {
 			transport_all_task_dev_remove_state(cmd);
 			/*
@@ -678,18 +678,18 @@ static int transport_cmd_check_stop(
 			 */
 			if (cmd->se_tfo->check_stop_free != NULL) {
 				spin_unlock_irqrestore(
-					&cmd->t_task.t_state_lock, flags);
+					&cmd->t_state_lock, flags);
 
 				cmd->se_tfo->check_stop_free(cmd);
 				return 1;
 			}
 		}
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		return 0;
 	} else if (t_state)
 		cmd->t_state = t_state;
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	return 0;
 }
@@ -707,21 +707,21 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 	if (!lun)
 		return;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (!(atomic_read(&cmd->transport_dev_active))) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		goto check_lun;
 	}
-	atomic_set(&cmd->t_task.transport_dev_active, 0);
+	atomic_set(&cmd->transport_dev_active, 0);
 	transport_all_task_dev_remove_state(cmd);
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 
 check_lun:
 	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
-	if (atomic_read(&cmd->t_task.transport_lun_active)) {
+	if (atomic_read(&cmd->transport_lun_active)) {
 		list_del(&cmd->se_lun_node);
-		atomic_set(&cmd->t_task.transport_lun_active, 0);
+		atomic_set(&cmd->transport_lun_active, 0);
 #if 0
 		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
 			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
@@ -762,15 +762,15 @@ static void transport_add_cmd_to_queue(
 	INIT_LIST_HEAD(&cmd->se_queue_node);
 
 	if (t_state) {
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		cmd->t_state = t_state;
-		atomic_set(&cmd->t_task.t_transport_active, 1);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		atomic_set(&cmd->t_transport_active, 1);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	}
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-	atomic_inc(&cmd->t_task.t_transport_queue_active);
+	atomic_inc(&cmd->t_transport_queue_active);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 	atomic_inc(&qobj->queue_cnt);
@@ -790,7 +790,7 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 	}
 	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
 
-	atomic_dec(&cmd->t_task.t_transport_queue_active);
+	atomic_dec(&cmd->t_transport_queue_active);
 
 	list_del(&cmd->se_queue_node);
 	atomic_dec(&qobj->queue_cnt);
@@ -806,24 +806,24 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 	unsigned long flags;
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	if (!(atomic_read(&cmd->t_task.t_transport_queue_active))) {
+	if (!(atomic_read(&cmd->t_transport_queue_active))) {
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 		return;
 	}
 
 	list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
 		if (t == cmd) {
-			atomic_dec(&cmd->t_task.t_transport_queue_active);
+			atomic_dec(&cmd->t_transport_queue_active);
 			atomic_dec(&qobj->queue_cnt);
 			list_del(&cmd->se_queue_node);
 			break;
 		}
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
-	if (atomic_read(&cmd->t_task.t_transport_queue_active)) {
+	if (atomic_read(&cmd->t_transport_queue_active)) {
 		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
-			atomic_read(&cmd->t_task.t_transport_queue_active));
+			atomic_read(&cmd->t_transport_queue_active));
 	}
 }
 
@@ -833,7 +833,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
  */
 void transport_complete_sync_cache(struct se_cmd *cmd, int good)
 {
-	struct se_task *task = list_entry(cmd->t_task.t_task_list.next,
+	struct se_task *task = list_entry(cmd->t_task_list.next,
 				struct se_task, t_list);
 
 	if (good) {
@@ -863,12 +863,12 @@ void transport_complete_task(struct se_task *task, int success)
 	unsigned long flags;
 #if 0
 	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-			cmd->t_task.t_task_cdb[0], dev);
+			cmd->t_task_cdb[0], dev);
 #endif
 	if (dev)
 		atomic_inc(&dev->depth_left);
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	atomic_set(&task->task_active, 0);
 
 	/*
@@ -890,14 +890,14 @@ void transport_complete_task(struct se_task *task, int success)
 	 */
 	if (atomic_read(&task->task_stop)) {
 		/*
-		 * Decrement cmd->t_task.t_se_count if this task had
+		 * Decrement cmd->t_se_count if this task had
 		 * previously thrown its timeout exception handler.
 		 */
 		if (atomic_read(&task->task_timeout)) {
-			atomic_dec(&cmd->t_task.t_se_count);
+			atomic_dec(&cmd->t_se_count);
 			atomic_set(&task->task_timeout, 0);
 		}
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		complete(&task->task_stop_comp);
 		return;
@@ -909,33 +909,33 @@ void transport_complete_task(struct se_task *task, int success)
 	 */
 	if (atomic_read(&task->task_timeout)) {
 		if (!(atomic_dec_and_test(
-				&cmd->t_task.t_task_cdbs_timeout_left))) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+				&cmd->t_task_cdbs_timeout_left))) {
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 				flags);
 			return;
 		}
 		t_state = TRANSPORT_COMPLETE_TIMEOUT;
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		transport_add_cmd_to_queue(cmd, t_state);
 		return;
 	}
-	atomic_dec(&cmd->t_task.t_task_cdbs_timeout_left);
+	atomic_dec(&cmd->t_task_cdbs_timeout_left);
 
 	/*
 	 * Decrement the outstanding t_task_cdbs_left count.  The last
 	 * struct se_task from struct se_cmd will complete itself into the
 	 * device queue depending upon int success.
 	 */
-	if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) {
+	if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
 		if (!success)
-			cmd->t_task.t_tasks_failed = 1;
+			cmd->t_tasks_failed = 1;
 
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
 
-	if (!success || cmd->t_task.t_tasks_failed) {
+	if (!success || cmd->t_tasks_failed) {
 		t_state = TRANSPORT_COMPLETE_FAILURE;
 		if (!task->task_error_status) {
 			task->task_error_status =
@@ -944,10 +944,10 @@ void transport_complete_task(struct se_task *task, int success)
 				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
 	} else {
-		atomic_set(&cmd->t_task.t_transport_complete, 1);
+		atomic_set(&cmd->t_transport_complete, 1);
 		t_state = TRANSPORT_COMPLETE_OK;
 	}
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	transport_add_cmd_to_queue(cmd, t_state);
 }
@@ -1040,8 +1040,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 	struct se_task *task;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		dev = task->se_dev;
 
 		if (atomic_read(&task->task_state_active))
@@ -1057,7 +1057,7 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 
 		spin_unlock(&dev->execute_task_lock);
 	}
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
@@ -1067,7 +1067,7 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		if (atomic_read(&task->task_execute_queue))
 			continue;
 		/*
@@ -1669,14 +1669,14 @@ transport_generic_get_task(struct se_cmd *cmd,
 	INIT_LIST_HEAD(&task->t_execute_list);
 	INIT_LIST_HEAD(&task->t_state_list);
 	init_completion(&task->task_stop_comp);
-	task->task_no = cmd->t_task.t_tasks_no++;
+	task->task_no = cmd->t_tasks_no++;
 	task->task_se_cmd = cmd;
 	task->se_dev = dev;
 	task->task_data_direction = data_direction;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	list_add_tail(&task->t_list, &cmd->t_task.t_task_list);
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	list_add_tail(&task->t_list, &cmd->t_task_list);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	return task;
 }
@@ -1700,14 +1700,14 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_ordered_node);
 
-	INIT_LIST_HEAD(&cmd->t_task.t_mem_list);
-	INIT_LIST_HEAD(&cmd->t_task.t_mem_bidi_list);
-	INIT_LIST_HEAD(&cmd->t_task.t_task_list);
-	init_completion(&cmd->t_task.transport_lun_fe_stop_comp);
-	init_completion(&cmd->t_task.transport_lun_stop_comp);
-	init_completion(&cmd->t_task.t_transport_stop_comp);
-	spin_lock_init(&cmd->t_task.t_state_lock);
-	atomic_set(&cmd->t_task.transport_dev_active, 1);
+	INIT_LIST_HEAD(&cmd->t_mem_list);
+	INIT_LIST_HEAD(&cmd->t_mem_bidi_list);
+	INIT_LIST_HEAD(&cmd->t_task_list);
+	init_completion(&cmd->transport_lun_fe_stop_comp);
+	init_completion(&cmd->transport_lun_stop_comp);
+	init_completion(&cmd->t_transport_stop_comp);
+	spin_lock_init(&cmd->t_state_lock);
+	atomic_set(&cmd->transport_dev_active, 1);
 
 	cmd->se_tfo = tfo;
 	cmd->se_sess = se_sess;
@@ -1752,8 +1752,8 @@ void transport_free_se_cmd(
 	/*
 	 * Check and free any extended CDB buffer that was allocated
 	 */
-	if (se_cmd->t_task.t_task_cdb != se_cmd->t_task.__t_task_cdb)
-		kfree(se_cmd->t_task.t_task_cdb);
+	if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
+		kfree(se_cmd->t_task_cdb);
 }
 EXPORT_SYMBOL(transport_free_se_cmd);
 
@@ -1791,26 +1791,26 @@ int transport_generic_allocate_tasks(
 	 * allocate the additional extended CDB buffer now..  Otherwise
 	 * setup the pointer from __t_task_cdb to t_task_cdb.
 	 */
-	if (scsi_command_size(cdb) > sizeof(cmd->t_task.__t_task_cdb)) {
-		cmd->t_task.t_task_cdb = kzalloc(scsi_command_size(cdb),
+	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
+		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
 						GFP_KERNEL);
-		if (!(cmd->t_task.t_task_cdb)) {
-			printk(KERN_ERR "Unable to allocate cmd->t_task.t_task_cdb"
-				" %u > sizeof(cmd->t_task.__t_task_cdb): %lu ops\n",
+		if (!(cmd->t_task_cdb)) {
+			printk(KERN_ERR "Unable to allocate cmd->t_task_cdb"
+				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
 				scsi_command_size(cdb),
-				(unsigned long)sizeof(cmd->t_task.__t_task_cdb));
+				(unsigned long)sizeof(cmd->__t_task_cdb));
 			return -ENOMEM;
 		}
 	} else
-		cmd->t_task.t_task_cdb = &cmd->t_task.__t_task_cdb[0];
+		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
 	/*
-	 * Copy the original CDB into cmd->t_task.
+	 * Copy the original CDB into cmd->t_task_cdb.
 	 */
-	memcpy(cmd->t_task.t_task_cdb, cdb, scsi_command_size(cdb));
+	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
 	/*
 	 * Setup the received CDB based on SCSI defined opcodes and
 	 * perform unit attention, persistent reservations and ALUA
-	 * checks for virtual device backends.  The cmd->t_task.t_task_cdb
+	 * checks for virtual device backends.  The cmd->t_task_cdb
 	 * pointer is expected to be setup before we reach this point.
 	 */
 	ret = transport_generic_cmd_sequencer(cmd, cdb);
@@ -1935,9 +1935,9 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 	/*
 	 * No tasks remain in the execution queue
 	 */
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task.t_task_list, t_list) {
+				&cmd->t_task_list, t_list) {
 		DEBUG_TS("task_no[%d] - Processing task %p\n",
 				task->task_no, task);
 		/*
@@ -1946,14 +1946,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 		 */
 		if (!atomic_read(&task->task_sent) &&
 		    !atomic_read(&task->task_active)) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 			transport_remove_task_from_execute_queue(task,
 					task->se_dev);
 
 			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
 				task->task_no);
-			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			continue;
 		}
 
@@ -1963,7 +1963,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 		 */
 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 
 			DEBUG_TS("task_no[%d] - Waiting to complete\n",
@@ -1972,8 +1972,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 			DEBUG_TS("task_no[%d] - Stopped successfully\n",
 				task->task_no);
 
-			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-			atomic_dec(&cmd->t_task.t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			atomic_dec(&cmd->t_task_cdbs_left);
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
@@ -1984,7 +1984,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 
 		__transport_stop_task_timer(task, &flags);
 	}
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	return ret;
 }
@@ -2000,7 +2000,7 @@ static void transport_generic_request_failure(
 {
 	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
-		cmd->t_task.t_task_cdb[0]);
+		cmd->t_task_cdb[0]);
 	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
 		" %d/%d transport_error_status: %d\n",
 		cmd->se_tfo->get_cmd_state(cmd),
@@ -2009,13 +2009,13 @@ static void transport_generic_request_failure(
 	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
 		" t_transport_active: %d t_transport_stop: %d"
-		" t_transport_sent: %d\n", cmd->t_task.t_task_cdbs,
-		atomic_read(&cmd->t_task.t_task_cdbs_left),
-		atomic_read(&cmd->t_task.t_task_cdbs_sent),
-		atomic_read(&cmd->t_task.t_task_cdbs_ex_left),
-		atomic_read(&cmd->t_task.t_transport_active),
-		atomic_read(&cmd->t_task.t_transport_stop),
-		atomic_read(&cmd->t_task.t_transport_sent));
+		" t_transport_sent: %d\n", cmd->t_task_cdbs,
+		atomic_read(&cmd->t_task_cdbs_left),
+		atomic_read(&cmd->t_task_cdbs_sent),
+		atomic_read(&cmd->t_task_cdbs_ex_left),
+		atomic_read(&cmd->t_transport_active),
+		atomic_read(&cmd->t_transport_stop),
+		atomic_read(&cmd->t_transport_sent));
 
 	transport_stop_all_task_timers(cmd);
 
@@ -2097,7 +2097,7 @@ static void transport_generic_request_failure(
 		break;
 	default:
 		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
-			cmd->t_task.t_task_cdb[0],
+			cmd->t_task_cdb[0],
 			cmd->transport_error_status);
 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 		break;
@@ -2118,19 +2118,19 @@ static void transport_direct_request_timeout(struct se_cmd *cmd)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_task.t_transport_timeout))) {
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (!(atomic_read(&cmd->t_transport_timeout))) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
-	if (atomic_read(&cmd->t_task.t_task_cdbs_timeout_left)) {
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
 
-	atomic_sub(atomic_read(&cmd->t_task.t_transport_timeout),
-		   &cmd->t_task.t_se_count);
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	atomic_sub(atomic_read(&cmd->t_transport_timeout),
+		   &cmd->t_se_count);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
 static void transport_generic_request_timeout(struct se_cmd *cmd)
@@ -2138,16 +2138,16 @@ static void transport_generic_request_timeout(struct se_cmd *cmd)
 	unsigned long flags;
 
 	/*
-	 * Reset cmd->t_task.t_se_count to allow transport_generic_remove()
+	 * Reset cmd->t_se_count to allow transport_generic_remove()
 	 * to allow last call to free memory resources.
 	 */
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	if (atomic_read(&cmd->t_task.t_transport_timeout) > 1) {
-		int tmp = (atomic_read(&cmd->t_task.t_transport_timeout) - 1);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (atomic_read(&cmd->t_transport_timeout) > 1) {
+		int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
 
-		atomic_sub(tmp, &cmd->t_task.t_se_count);
+		atomic_sub(tmp, &cmd->t_se_count);
 	}
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	transport_generic_remove(cmd, 0, 0);
 }
@@ -2163,8 +2163,8 @@ transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
 		return -ENOMEM;
 	}
 
-	cmd->t_task.t_tasks_se_num = 0;
-	cmd->t_task.t_task_buf = buf;
+	cmd->t_tasks_se_num = 0;
+	cmd->t_task_buf = buf;
 
 	return 0;
 }
@@ -2206,9 +2206,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
 	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
-	spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
 /*
@@ -2222,9 +2222,9 @@ static void transport_task_timeout_handler(unsigned long data)
 
 	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (task->task_flags & TF_STOP) {
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
 	task->task_flags &= ~TF_RUNNING;
@@ -2235,13 +2235,13 @@ static void transport_task_timeout_handler(unsigned long data)
 	if (!(atomic_read(&task->task_active))) {
 		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
 				" == 0\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
 
-	atomic_inc(&cmd->t_task.t_se_count);
-	atomic_inc(&cmd->t_task.t_transport_timeout);
-	cmd->t_task.t_tasks_failed = 1;
+	atomic_inc(&cmd->t_se_count);
+	atomic_inc(&cmd->t_transport_timeout);
+	cmd->t_tasks_failed = 1;
 
 	atomic_set(&task->task_timeout, 1);
 	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
@@ -2250,28 +2250,28 @@ static void transport_task_timeout_handler(unsigned long data)
 	if (atomic_read(&task->task_stop)) {
 		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
 				" == 1\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		complete(&task->task_stop_comp);
 		return;
 	}
 
-	if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) {
+	if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
 		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
 				" t_task_cdbs_left\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
 	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
 			task, cmd);
 
 	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
 }
 
 /*
- * Called with cmd->t_task.t_state_lock held.
+ * Called with cmd->t_state_lock held.
  */
 static void transport_start_task_timer(struct se_task *task)
 {
@@ -2301,7 +2301,7 @@ static void transport_start_task_timer(struct se_task *task)
 }
 
 /*
- * Called with spin_lock_irq(&cmd->t_task.t_state_lock) held.
+ * Called with spin_lock_irq(&cmd->t_state_lock) held.
  */
 void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
 {
@@ -2311,11 +2311,11 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
 		return;
 
 	task->task_flags |= TF_STOP;
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, *flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
 
 	del_timer_sync(&task->task_timer);
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, *flags);
+	spin_lock_irqsave(&cmd->t_state_lock, *flags);
 	task->task_flags &= ~TF_RUNNING;
 	task->task_flags &= ~TF_STOP;
 }
@@ -2325,11 +2325,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd)
 	struct se_task *task = NULL, *task_tmp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task.t_task_list, t_list)
+				&cmd->t_task_list, t_list)
 		__transport_stop_task_timer(task, &flags);
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
 static inline int transport_tcq_window_closed(struct se_device *dev)
@@ -2364,7 +2364,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 		smp_mb__after_atomic_inc();
 		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
 			" 0x%02x, se_ordered_id: %u\n",
-			cmd->t_task->t_task_cdb[0],
+			cmd->t_task_cdb[0],
 			cmd->se_ordered_id);
 		return 1;
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -2378,7 +2378,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 
 		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
 				" list, se_ordered_id: %u\n",
-				cmd->t_task.t_task_cdb[0],
+				cmd->t_task_cdb[0],
 				cmd->se_ordered_id);
 		/*
 		 * Add ORDERED command to tail of execution queue if
@@ -2412,7 +2412,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 
 		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
 			" delayed CMD list, se_ordered_id: %u\n",
-			cmd->t_task.t_task_cdb[0], cmd->sam_task_attr,
+			cmd->t_task_cdb[0], cmd->sam_task_attr,
 			cmd->se_ordered_id);
 		/*
 		 * Return zero to let transport_execute_tasks() know
@@ -2486,7 +2486,7 @@ static int __transport_execute_tasks(struct se_device *dev)
 
 	/*
 	 * Check if there is enough room in the device and HBA queue to send
-	 * struct se_transport_task's to the selected transport.
+	 * struct se_task's to the selected transport.
 	 */
 check_depth:
 	if (!atomic_read(&dev->depth_left))
@@ -2510,17 +2510,17 @@ check_depth:
 
 	cmd = task->task_se_cmd;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	atomic_set(&task->task_active, 1);
 	atomic_set(&task->task_sent, 1);
-	atomic_inc(&cmd->t_task.t_task_cdbs_sent);
+	atomic_inc(&cmd->t_task_cdbs_sent);
 
-	if (atomic_read(&cmd->t_task.t_task_cdbs_sent) ==
-	    cmd->t_task.t_task_cdbs)
+	if (atomic_read(&cmd->t_task_cdbs_sent) ==
+	    cmd->t_task_cdbs)
 		atomic_set(&cmd->transport_sent, 1);
 
 	transport_start_task_timer(task);
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	/*
 	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
 	 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
@@ -2585,10 +2585,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
 	 * Any unsolicited data will get dumped for failed command inside of
 	 * the fabric plugin
 	 */
-	spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
 	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
 	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-	spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
 	se_cmd->se_tfo->new_cmd_failure(se_cmd);
 }
@@ -2798,18 +2798,18 @@ static void transport_xor_callback(struct se_cmd *cmd)
 		return;
 	}
 	/*
-	 * Copy the scatterlist WRITE buffer located at cmd->t_task.t_mem_list
+	 * Copy the scatterlist WRITE buffer located at cmd->t_mem_list
 	 * into the locally allocated *buf
 	 */
-	transport_memcpy_se_mem_read_contig(buf, &cmd->t_task.t_mem_list,
+	transport_memcpy_se_mem_read_contig(buf, &cmd->t_mem_list,
 					    cmd->data_length);
 	/*
 	 * Now perform the XOR against the BIDI read memory located at
-	 * cmd->t_task.t_mem_bidi_list
+	 * cmd->t_mem_bidi_list
 	 */
 
 	offset = 0;
-	list_for_each_entry(se_mem, &cmd->t_task.t_mem_bidi_list, se_list) {
+	list_for_each_entry(se_mem, &cmd->t_mem_bidi_list, se_list) {
 		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
 		if (!(addr))
 			goto out;
@@ -2837,14 +2837,14 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 
 	WARN_ON(!cmd->se_lun);
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return 0;
 	}
 
 	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task.t_task_list, t_list) {
+				&cmd->t_task_list, t_list) {
 
 		if (!task->task_sense)
 			continue;
@@ -2866,7 +2866,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 				cmd->se_tfo->get_task_tag(cmd), task->task_no);
 			continue;
 		}
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
 				TRANSPORT_SENSE_BUFFER);
@@ -2884,7 +2884,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 				cmd->scsi_status);
 		return 0;
 	}
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	return -1;
 }
@@ -2999,7 +2999,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_6;
-		cmd->t_task.t_task_lba = transport_lba_21(cdb);
+		cmd->t_task_lba = transport_lba_21(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case READ_10:
@@ -3008,7 +3008,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_10;
-		cmd->t_task.t_task_lba = transport_lba_32(cdb);
+		cmd->t_task_lba = transport_lba_32(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case READ_12:
@@ -3017,7 +3017,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_12;
-		cmd->t_task.t_task_lba = transport_lba_32(cdb);
+		cmd->t_task_lba = transport_lba_32(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case READ_16:
@@ -3026,7 +3026,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_16;
-		cmd->t_task.t_task_lba = transport_lba_64(cdb);
+		cmd->t_task_lba = transport_lba_64(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_6:
@@ -3035,7 +3035,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_6;
-		cmd->t_task.t_task_lba = transport_lba_21(cdb);
+		cmd->t_task_lba = transport_lba_21(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_10:
@@ -3044,8 +3044,8 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_10;
-		cmd->t_task.t_task_lba = transport_lba_32(cdb);
-		cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task_lba = transport_lba_32(cdb);
+		cmd->t_tasks_fua = (cdb[1] & 0x8);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_12:
@@ -3054,8 +3054,8 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_12;
-		cmd->t_task.t_task_lba = transport_lba_32(cdb);
-		cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task_lba = transport_lba_32(cdb);
+		cmd->t_tasks_fua = (cdb[1] & 0x8);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_16:
@@ -3064,20 +3064,20 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_16;
-		cmd->t_task.t_task_lba = transport_lba_64(cdb);
-		cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task_lba = transport_lba_64(cdb);
+		cmd->t_tasks_fua = (cdb[1] & 0x8);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case XDWRITEREAD_10:
 		if ((cmd->data_direction != DMA_TO_DEVICE) ||
-		    !(cmd->t_task.t_tasks_bidi))
+		    !(cmd->t_tasks_bidi))
 			goto out_invalid_cdb_field;
 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
 		if (sector_ret)
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->transport_split_cdb = &split_cdb_XX_10;
-		cmd->t_task.t_task_lba = transport_lba_32(cdb);
+		cmd->t_task_lba = transport_lba_32(cdb);
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		passthrough = (dev->transport->transport_type ==
 				TRANSPORT_PLUGIN_PHBA_PDEV);
@@ -3090,7 +3090,7 @@ static int transport_generic_cmd_sequencer(
 		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
 		 */
 		cmd->transport_complete_callback = &transport_xor_callback;
-		cmd->t_task.t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_tasks_fua = (cdb[1] & 0x8);
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action = get_unaligned_be16(&cdb[8]);
@@ -3112,7 +3112,7 @@ static int transport_generic_cmd_sequencer(
 			 * XDWRITE_READ_32 logic.
 			 */
 			cmd->transport_split_cdb = &split_cdb_XX_32;
-			cmd->t_task.t_task_lba = transport_lba_64_ext(cdb);
+			cmd->t_task_lba = transport_lba_64_ext(cdb);
 			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 
 			/*
@@ -3126,7 +3126,7 @@ static int transport_generic_cmd_sequencer(
 			 * transport_generic_complete_ok()
 			 */
 			cmd->transport_complete_callback = &transport_xor_callback;
-			cmd->t_task.t_tasks_fua = (cdb[10] & 0x8);
+			cmd->t_tasks_fua = (cdb[10] & 0x8);
 			break;
 		case WRITE_SAME_32:
 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3138,7 +3138,7 @@ static int transport_generic_cmd_sequencer(
 			else
 				size = dev->se_sub_dev->se_dev_attrib.block_size;
 
-			cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[12]);
+			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
 			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 
 			/*
@@ -3373,10 +3373,10 @@ static int transport_generic_cmd_sequencer(
 		 */
 		if (cdb[0] == SYNCHRONIZE_CACHE) {
 			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
-			cmd->t_task.t_task_lba = transport_lba_32(cdb);
+			cmd->t_task_lba = transport_lba_32(cdb);
 		} else {
 			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
-			cmd->t_task.t_task_lba = transport_lba_64(cdb);
+			cmd->t_task_lba = transport_lba_64(cdb);
 		}
 		if (sector_ret)
 			goto out_unsupported_cdb;
@@ -3427,7 +3427,7 @@ static int transport_generic_cmd_sequencer(
 		else
 			size = dev->se_sub_dev->se_dev_attrib.block_size;
 
-		cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[2]);
+		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
 		passthrough = (dev->transport->transport_type ==
 				TRANSPORT_PLUGIN_PHBA_PDEV);
 		/*
@@ -3683,9 +3683,9 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
 		 * contiguous buffer back to the original SGL.
 		 */
 		if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
-			sg_copy_from_buffer(cmd->t_task.t_task_pt_sgl,
-					    cmd->t_task.t_task_pt_sgl_num,
-					    cmd->t_task.t_task_buf,
+			sg_copy_from_buffer(cmd->t_task_pt_sgl,
+					    cmd->t_task_pt_sgl_num,
+					    cmd->t_task_buf,
 					    cmd->data_length);
 
 		cmd->se_tfo->queue_data_in(cmd);
@@ -3700,7 +3700,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
 		/*
 		 * Check if we need to send READ payload for BIDI-COMMAND
 		 */
-		if (!list_empty(&cmd->t_task.t_mem_bidi_list)) {
+		if (!list_empty(&cmd->t_mem_bidi_list)) {
 			spin_lock(&cmd->se_lun->lun_sep_lock);
 			if (cmd->se_lun->lun_sep) {
 				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
@@ -3727,9 +3727,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
 	struct se_task *task, *task_tmp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task.t_task_list, t_list) {
+				&cmd->t_task_list, t_list) {
 		if (atomic_read(&task->task_active))
 			continue;
 
@@ -3738,15 +3738,15 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
 
 		list_del(&task->t_list);
 
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		if (task->se_dev)
 			task->se_dev->transport->free_task(task);
 		else
 			printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
 				task->task_no);
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
 	}
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
 static inline void transport_free_pages(struct se_cmd *cmd)
@@ -3759,9 +3759,9 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	if (cmd->se_dev->transport->do_se_mem_map)
 		free_page = 0;
 
-	if (cmd->t_task.t_task_buf) {
-		kfree(cmd->t_task.t_task_buf);
-		cmd->t_task.t_task_buf = NULL;
+	if (cmd->t_task_buf) {
+		kfree(cmd->t_task_buf);
+		cmd->t_task_buf = NULL;
 		return;
 	}
 
@@ -3772,7 +3772,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 		return;
 
 	list_for_each_entry_safe(se_mem, se_mem_tmp,
-			&cmd->t_task.t_mem_list, se_list) {
+			&cmd->t_mem_list, se_list) {
 		/*
 		 * We only release call __free_page(struct se_mem->se_page) when
 		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
@@ -3783,10 +3783,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 		list_del(&se_mem->se_list);
 		kmem_cache_free(se_mem_cache, se_mem);
 	}
-	cmd->t_task.t_tasks_se_num = 0;
+	cmd->t_tasks_se_num = 0;
 
 	list_for_each_entry_safe(se_mem, se_mem_tmp,
-				 &cmd->t_task.t_mem_bidi_list, se_list) {
+				 &cmd->t_mem_bidi_list, se_list) {
 		/*
 		 * We only release call __free_page(struct se_mem->se_page) when
 		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
@@ -3797,7 +3797,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 		list_del(&se_mem->se_list);
 		kmem_cache_free(se_mem_cache, se_mem);
 	}
-	cmd->t_task.t_tasks_se_bidi_num = 0;
+	cmd->t_tasks_se_bidi_num = 0;
 }
 
 static inline void transport_release_tasks(struct se_cmd *cmd)
@@ -3809,23 +3809,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	if (atomic_read(&cmd->t_task.t_fe_count)) {
-		if (!(atomic_dec_and_test(&cmd->t_task.t_fe_count))) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (atomic_read(&cmd->t_fe_count)) {
+		if (!(atomic_dec_and_test(&cmd->t_fe_count))) {
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 			return 1;
 		}
 	}
 
-	if (atomic_read(&cmd->t_task.t_se_count)) {
-		if (!(atomic_dec_and_test(&cmd->t_task.t_se_count))) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+	if (atomic_read(&cmd->t_se_count)) {
+		if (!(atomic_dec_and_test(&cmd->t_se_count))) {
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 			return 1;
 		}
 	}
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	return 0;
 }
@@ -3837,14 +3837,14 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
 	if (transport_dec_and_check(cmd))
 		return;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (!(atomic_read(&cmd->transport_dev_active))) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		goto free_pages;
 	}
-	atomic_set(&cmd->t_task.transport_dev_active, 0);
+	atomic_set(&cmd->transport_dev_active, 0);
 	transport_all_task_dev_remove_state(cmd);
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	transport_release_tasks(cmd);
 free_pages:
@@ -3862,22 +3862,22 @@ static int transport_generic_remove(
 
 	if (transport_dec_and_check(cmd)) {
 		if (session_reinstatement) {
-			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			transport_all_task_dev_remove_state(cmd);
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 		}
 		return 1;
 	}
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (!(atomic_read(&cmd->transport_dev_active))) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		goto free_pages;
 	}
-	atomic_set(&cmd->t_task.transport_dev_active, 0);
+	atomic_set(&cmd->transport_dev_active, 0);
 	transport_all_task_dev_remove_state(cmd);
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	transport_release_tasks(cmd);
 
@@ -3929,23 +3929,23 @@ int transport_generic_map_mem_to_cmd(
 		 * from the passed physical memory to struct se_mem->se_page here.
 		 */
 		ret = transport_map_sg_to_mem(cmd,
-			&cmd->t_task.t_mem_list, sgl, &mapped_sg_count);
+			&cmd->t_mem_list, sgl, &mapped_sg_count);
 		if (ret < 0)
 			return -ENOMEM;
 
-		cmd->t_task.t_tasks_se_num = mapped_sg_count;
+		cmd->t_tasks_se_num = mapped_sg_count;
 		/*
 		 * Setup BIDI READ list of struct se_mem elements
 		 */
 		if (sgl_bidi && sgl_bidi_count) {
 			mapped_sg_count = 0;
 			ret = transport_map_sg_to_mem(cmd,
-				&cmd->t_task.t_mem_bidi_list, sgl_bidi,
+				&cmd->t_mem_bidi_list, sgl_bidi,
 				&mapped_sg_count);
 			if (ret < 0)
 				return -ENOMEM;
 
-			cmd->t_task.t_tasks_se_bidi_num = mapped_sg_count;
+			cmd->t_tasks_se_bidi_num = mapped_sg_count;
 		}
 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 
@@ -3965,8 +3965,8 @@ int transport_generic_map_mem_to_cmd(
 		 * struct scatterlist format.
 		 */
 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
-		cmd->t_task.t_task_pt_sgl = sgl;
-		cmd->t_task.t_task_pt_sgl_num = sgl_count;
+		cmd->t_task_pt_sgl = sgl;
+		cmd->t_task_pt_sgl_num = sgl_count;
 	}
 
 	return 0;
@@ -3983,19 +3983,19 @@ static int transport_get_sectors(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 
-	cmd->t_task.t_tasks_sectors =
+	cmd->t_tasks_sectors =
 		(cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
-	if (!(cmd->t_task.t_tasks_sectors))
-		cmd->t_task.t_tasks_sectors = 1;
+	if (!(cmd->t_tasks_sectors))
+		cmd->t_tasks_sectors = 1;
 
 	if (dev->transport->get_device_type(dev) != TYPE_DISK)
 		return 0;
 
-	if ((cmd->t_task.t_task_lba + cmd->t_task.t_tasks_sectors) >
+	if ((cmd->t_task_lba + cmd->t_tasks_sectors) >
 	     transport_dev_end_lba(dev)) {
 		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
 			" transport_dev_end_lba(): %llu\n",
-			cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors,
+			cmd->t_task_lba, cmd->t_tasks_sectors,
 			transport_dev_end_lba(dev));
 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
@@ -4013,21 +4013,21 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 
 	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
 		task_cdbs = 1;
-		cmd->t_task.t_task_cdbs = 1;
+		cmd->t_task_cdbs = 1;
 	} else {
 		int set_counts = 1;
 
 		/*
 		 * Setup any BIDI READ tasks and memory from
-		 * cmd->t_task.t_mem_bidi_list so the READ struct se_tasks
+		 * cmd->t_mem_bidi_list so the READ struct se_tasks
 		 * are queued first for the non pSCSI passthrough case.
 		 */
-		if (!list_empty(&cmd->t_task.t_mem_bidi_list) &&
+		if (!list_empty(&cmd->t_mem_bidi_list) &&
 		    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
 			rc = transport_generic_get_cdb_count(cmd,
-				cmd->t_task.t_task_lba,
-				cmd->t_task.t_tasks_sectors,
-				DMA_FROM_DEVICE, &cmd->t_task.t_mem_bidi_list,
+				cmd->t_task_lba,
+				cmd->t_tasks_sectors,
+				DMA_FROM_DEVICE, &cmd->t_mem_bidi_list,
 				set_counts);
 			if (!(rc)) {
 				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4038,13 +4038,13 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 			set_counts = 0;
 		}
 		/*
-		 * Setup the tasks and memory from cmd->t_task.t_mem_list
+		 * Setup the tasks and memory from cmd->t_mem_list
 		 * Note for BIDI transfers this will contain the WRITE payload
 		 */
 		task_cdbs = transport_generic_get_cdb_count(cmd,
-				cmd->t_task.t_task_lba,
-				cmd->t_task.t_tasks_sectors,
-				cmd->data_direction, &cmd->t_task.t_mem_list,
+				cmd->t_task_lba,
+				cmd->t_tasks_sectors,
+				cmd->data_direction, &cmd->t_mem_list,
 				set_counts);
 		if (!(task_cdbs)) {
 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4052,19 +4052,19 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			return PYX_TRANSPORT_LU_COMM_FAILURE;
 		}
-		cmd->t_task.t_task_cdbs = task_cdbs;
+		cmd->t_task_cdbs = task_cdbs;
 
 #if 0
 		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
 			" %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
-			cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors,
-			cmd->t_task.t_task_cdbs);
+			cmd->t_task_lba, cmd->t_tasks_sectors,
+			cmd->t_task_cdbs);
 #endif
 	}
 
-	atomic_set(&cmd->t_task.t_task_cdbs_left, task_cdbs);
-	atomic_set(&cmd->t_task.t_task_cdbs_ex_left, task_cdbs);
-	atomic_set(&cmd->t_task.t_task_cdbs_timeout_left, task_cdbs);
+	atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
+	atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
+	atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
 	return 0;
 }
 
@@ -4095,8 +4095,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length)
 
 		INIT_LIST_HEAD(&se_mem->se_list);
 		se_mem->se_len = min_t(u32, length, PAGE_SIZE);
-		list_add_tail(&se_mem->se_list, &cmd->t_task.t_mem_list);
-		cmd->t_task.t_tasks_se_num++;
+		list_add_tail(&se_mem->se_list, &cmd->t_mem_list);
+		cmd->t_tasks_se_num++;
 
 		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
 			" Offset(%u)\n", se_mem->se_page, se_mem->se_len,
@@ -4106,7 +4106,7 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length)
 	}
 
 	DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
-			cmd->t_task.t_tasks_se_num);
+			cmd->t_tasks_se_num);
 
 	return 0;
 out:
@@ -4138,7 +4138,7 @@ int transport_init_task_sg(
 				sg_length = se_mem->se_len;
 
 				if (!(list_is_last(&se_mem->se_list,
-						&se_cmd->t_task.t_mem_list)))
+						&se_cmd->t_mem_list)))
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 			} else {
@@ -4158,7 +4158,7 @@ int transport_init_task_sg(
 				sg_length = (se_mem->se_len - task_offset);
 
 				if (!(list_is_last(&se_mem->se_list,
-						&se_cmd->t_task.t_mem_list)))
+						&se_cmd->t_mem_list)))
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 			}
@@ -4199,7 +4199,7 @@ next:
 	 * Setup task->task_sg_bidi for SCSI READ payload for
 	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
 	 */
-	if (!list_empty(&se_cmd->t_task.t_mem_bidi_list) &&
+	if (!list_empty(&se_cmd->t_mem_bidi_list) &&
 	    (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
 		task->task_sg_bidi = kzalloc(task_sg_num_padded *
 				sizeof(struct scatterlist), GFP_KERNEL);
@@ -4342,7 +4342,7 @@ int transport_map_mem_to_sg(
 				sg->length = se_mem->se_len;
 
 				if (!(list_is_last(&se_mem->se_list,
-						&se_cmd->t_task.t_mem_list))) {
+						&se_cmd->t_mem_list))) {
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 					(*se_mem_cnt)++;
@@ -4378,7 +4378,7 @@ int transport_map_mem_to_sg(
 				sg->length = (se_mem->se_len - *task_offset);
 
 				if (!(list_is_last(&se_mem->se_list,
-						&se_cmd->t_task.t_mem_list))) {
+						&se_cmd->t_mem_list))) {
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 					(*se_mem_cnt)++;
@@ -4436,7 +4436,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	 * Walk the struct se_task list and setup scatterlist chains
 	 * for each contiguosly allocated struct se_task->task_sg[].
 	 */
-	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		if (!(task->task_sg) || !(task->task_padded_sg))
 			continue;
 
@@ -4447,7 +4447,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 			 * Either add chain or mark end of scatterlist
 			 */
 			if (!(list_is_last(&task->t_list,
-					&cmd->t_task.t_task_list))) {
+					&cmd->t_task_list))) {
 				/*
 				 * Clear existing SGL termination bit set in
 				 * transport_init_task_sg(), see sg_mark_end()
@@ -4473,7 +4473,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 		/*
 		 * Check for single task..
 		 */
-		if (!(list_is_last(&task->t_list, &cmd->t_task.t_task_list))) {
+		if (!(list_is_last(&task->t_list, &cmd->t_task_list))) {
 			/*
 			 * Clear existing SGL termination bit set in
 			 * transport_init_task_sg(), see sg_mark_end()
@@ -4491,15 +4491,15 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	 * Setup the starting pointer and total t_tasks_sg_linked_no including
 	 * padding SGs for linking and to mark the end.
 	 */
-	cmd->t_task.t_tasks_sg_chained = sg_first;
-	cmd->t_task.t_tasks_sg_chained_no = sg_count;
+	cmd->t_tasks_sg_chained = sg_first;
+	cmd->t_tasks_sg_chained_no = sg_count;
 
-	DEBUG_CMD_M("Setup cmd: %p cmd->t_task.t_tasks_sg_chained: %p and"
-		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task.t_tasks_sg_chained,
-		cmd->t_task.t_tasks_sg_chained_no);
+	DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
+		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
+		cmd->t_tasks_sg_chained_no);
 
-	for_each_sg(cmd->t_task.t_tasks_sg_chained, sg,
-			cmd->t_task.t_tasks_sg_chained_no, i) {
+	for_each_sg(cmd->t_tasks_sg_chained, sg,
+			cmd->t_tasks_sg_chained_no, i) {
 
 		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n",
 			i, sg, sg_page(sg), sg->length, sg->offset);
@@ -4532,7 +4532,7 @@ static int transport_do_se_mem_map(
 				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
 				task_offset_in);
 		if (ret == 0)
-			task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
+			task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
 
 		return ret;
 	}
@@ -4605,9 +4605,9 @@ static u32 transport_generic_get_cdb_count(
 	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
 	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
 	 */
-	if (!list_empty(&cmd->t_task.t_mem_bidi_list) &&
+	if (!list_empty(&cmd->t_mem_bidi_list) &&
 	    (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
-		se_mem_bidi = list_first_entry(&cmd->t_task.t_mem_bidi_list,
+		se_mem_bidi = list_first_entry(&cmd->t_mem_bidi_list,
 					struct se_mem, se_list);
 
 	while (sectors) {
@@ -4636,8 +4636,8 @@ static u32 transport_generic_get_cdb_count(
 		/* Should be part of task, can't fail */
 		BUG_ON(!cdb);
 
-		memcpy(cdb, cmd->t_task.t_task_cdb,
-		       scsi_command_size(cmd->t_task.t_task_cdb));
+		memcpy(cdb, cmd->t_task_cdb,
+		       scsi_command_size(cmd->t_task_cdb));
 
 		/* Update new cdb with updated lba/sectors */
 		cmd->transport_split_cdb(task->task_lba,
@@ -4645,7 +4645,7 @@ static u32 transport_generic_get_cdb_count(
 
 		/*
 		 * Perform the SE OBJ plugin and/or Transport plugin specific
-		 * mapping for cmd->t_task.t_mem_list. And setup the
+		 * mapping for cmd->t_mem_list. And setup the
 		 * task->task_sg and if necessary task->task_sg_bidi
 		 */
 		ret = transport_do_se_mem_map(dev, task, mem_list,
@@ -4656,7 +4656,7 @@ static u32 transport_generic_get_cdb_count(
 
 		se_mem = se_mem_lout;
 		/*
-		 * Setup the cmd->t_task.t_mem_bidi_list -> task->task_sg_bidi
+		 * Setup the cmd->t_mem_bidi_list -> task->task_sg_bidi
 		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
 		 *
 		 * Note that the first call to transport_do_se_mem_map() above will
@@ -4666,7 +4666,7 @@ static u32 transport_generic_get_cdb_count(
 		 */
 		if (task->task_sg_bidi != NULL) {
 			ret = transport_do_se_mem_map(dev, task,
-				&cmd->t_task.t_mem_bidi_list, NULL,
+				&cmd->t_mem_bidi_list, NULL,
 				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
 				&task_offset_in);
 			if (ret < 0)
@@ -4681,8 +4681,8 @@ static u32 transport_generic_get_cdb_count(
 	}
 
 	if (set_counts) {
-		atomic_inc(&cmd->t_task.t_fe_count);
-		atomic_inc(&cmd->t_task.t_se_count);
+		atomic_inc(&cmd->t_fe_count);
+		atomic_inc(&cmd->t_se_count);
 	}
 
 	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
@@ -4708,26 +4708,26 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
 
 	cdb = dev->transport->get_cdb(task);
 	BUG_ON(!cdb);
-	memcpy(cdb, cmd->t_task.t_task_cdb,
-	       scsi_command_size(cmd->t_task.t_task_cdb));
+	memcpy(cdb, cmd->t_task_cdb,
+	       scsi_command_size(cmd->t_task_cdb));
 
 	task->task_size = cmd->data_length;
 	task->task_sg_num =
 		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
 
-	atomic_inc(&cmd->t_task.t_fe_count);
-	atomic_inc(&cmd->t_task.t_se_count);
+	atomic_inc(&cmd->t_fe_count);
+	atomic_inc(&cmd->t_se_count);
 
 	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
 		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
 		u32 se_mem_cnt = 0, task_offset = 0;
 
-		if (!list_empty(&cmd->t_task.t_mem_list))
-			se_mem = list_first_entry(&cmd->t_task.t_mem_list,
+		if (!list_empty(&cmd->t_mem_list))
+			se_mem = list_first_entry(&cmd->t_mem_list,
 					struct se_mem, se_list);
 
 		ret = transport_do_se_mem_map(dev, task,
-				&cmd->t_task.t_mem_list, NULL, se_mem,
+				&cmd->t_mem_list, NULL, se_mem,
 				&se_mem_lout, &se_mem_cnt, &task_offset);
 		if (ret < 0)
 			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
@@ -4769,7 +4769,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
 	 * Determine is the TCM fabric module has already allocated physical
 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
 	 * to setup beforehand the linked list of physical memory at
-	 * cmd->t_task.t_mem_list of struct se_mem->se_page
+	 * cmd->t_mem_list of struct se_mem->se_page
 	 */
 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
 		ret = transport_allocate_resources(cmd);
@@ -4798,7 +4798,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
 	}
 
 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-		list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
+		list_for_each_entry(task, &cmd->t_task_list, t_list) {
 			if (atomic_read(&task->task_sent))
 				continue;
 			if (!dev->transport->map_task_SG)
@@ -4845,9 +4845,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
 	 * original EDTL
 	 */
 	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
-		if (!cmd->t_task.t_tasks_se_num) {
+		if (!cmd->t_tasks_se_num) {
 			unsigned char *dst, *buf =
-				(unsigned char *)cmd->t_task.t_task_buf;
+				(unsigned char *)cmd->t_task_buf;
 
 			dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
 			if (!(dst)) {
@@ -4859,15 +4859,15 @@ void transport_generic_process_write(struct se_cmd *cmd)
 			}
 			memcpy(dst, buf, cmd->cmd_spdtl);
 
-			kfree(cmd->t_task.t_task_buf);
-			cmd->t_task.t_task_buf = dst;
+			kfree(cmd->t_task_buf);
+			cmd->t_task_buf = dst;
 		} else {
 			struct scatterlist *sg =
-				(struct scatterlist *sg)cmd->t_task.t_task_buf;
+				(struct scatterlist *sg)cmd->t_task_buf;
 			struct scatterlist *orig_sg;
 
 			orig_sg = kzalloc(sizeof(struct scatterlist) *
-					cmd->t_task.t_tasks_se_num,
+					cmd->t_tasks_se_num,
 					GFP_KERNEL))) {
 			if (!(orig_sg)) {
 				printk(KERN_ERR "Unable to allocate memory"
@@ -4877,9 +4877,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
 				return;
 			}
 
-			memcpy(orig_sg, cmd->t_task.t_task_buf,
+			memcpy(orig_sg, cmd->t_task_buf,
 					sizeof(struct scatterlist) *
-					cmd->t_task.t_tasks_se_num);
+					cmd->t_tasks_se_num);
 
 			cmd->data_length = cmd->cmd_spdtl;
 			/*
@@ -4910,23 +4910,23 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	cmd->t_state = TRANSPORT_WRITE_PENDING;
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	/*
 	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
 	 * from the passed Linux/SCSI struct scatterlist located at
-	 * se_cmd->t_task.t_task_pt_buf to the contiguous buffer at
-	 * se_cmd->t_task.t_task_buf.
+	 * se_cmd->t_task_pt_buf to the contiguous buffer at
+	 * se_cmd->t_task_buf.
 	 */
 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
-		sg_copy_to_buffer(cmd->t_task.t_task_pt_sgl,
-				    cmd->t_task.t_task_pt_sgl_num,
-				    cmd->t_task.t_task_buf,
+		sg_copy_to_buffer(cmd->t_task_pt_sgl,
+				    cmd->t_task_pt_sgl_num,
+				    cmd->t_task_buf,
 				    cmd->data_length);
 	/*
 	 * Clear the se_cmd for WRITE_PENDING status in order to set
-	 * cmd->t_task.t_transport_active=0 so that transport_generic_handle_data
+	 * cmd->t_transport_active=0 so that transport_generic_handle_data
 	 * can be called from HW target mode interrupt code.  This is safe
 	 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
 	 * because the se_cmd->se_lun pointer is not being cleared.
@@ -5013,28 +5013,28 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	 * If the frontend has already requested this struct se_cmd to
 	 * be stopped, we can safely ignore this struct se_cmd.
 	 */
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	if (atomic_read(&cmd->t_task.t_transport_stop)) {
-		atomic_set(&cmd->t_task.transport_lun_stop, 0);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (atomic_read(&cmd->t_transport_stop)) {
+		atomic_set(&cmd->transport_lun_stop, 0);
 		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
 			" TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		transport_cmd_check_stop(cmd, 1, 0);
 		return -EPERM;
 	}
-	atomic_set(&cmd->t_task.transport_lun_fe_stop, 1);
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	atomic_set(&cmd->transport_lun_fe_stop, 1);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
 
 	ret = transport_stop_tasks_for_cmd(cmd);
 
 	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
-			" %d\n", cmd, cmd->t_task.t_task_cdbs, ret);
+			" %d\n", cmd, cmd->t_task_cdbs, ret);
 	if (!ret) {
 		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
-		wait_for_completion(&cmd->t_task.transport_lun_stop_comp);
+		wait_for_completion(&cmd->transport_lun_stop_comp);
 		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
 	}
@@ -5064,19 +5064,19 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 		       struct se_cmd, se_lun_node);
 		list_del(&cmd->se_lun_node);
 
-		atomic_set(&cmd->t_task.transport_lun_active, 0);
+		atomic_set(&cmd->transport_lun_active, 0);
 		/*
 		 * This will notify iscsi_target_transport.c:
 		 * transport_cmd_check_stop() that a LUN shutdown is in
 		 * progress for the iscsi_cmd_t.
 		 */
-		spin_lock(&cmd->t_task.t_state_lock);
-		DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task.transport"
+		spin_lock(&cmd->t_state_lock);
+		DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport"
 			"_lun_stop for  ITT: 0x%08x\n",
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
-		atomic_set(&cmd->t_task.transport_lun_stop, 1);
-		spin_unlock(&cmd->t_task.t_state_lock);
+		atomic_set(&cmd->transport_lun_stop, 1);
+		spin_unlock(&cmd->t_state_lock);
 
 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
 
@@ -5104,14 +5104,14 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
 
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags);
-		if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
+		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+		if (!(atomic_read(&cmd->transport_dev_active))) {
+			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 			goto check_cond;
 		}
-		atomic_set(&cmd->t_task.transport_dev_active, 0);
+		atomic_set(&cmd->transport_dev_active, 0);
 		transport_all_task_dev_remove_state(cmd);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 
 		transport_free_dev_tasks(cmd);
 		/*
@@ -5128,24 +5128,24 @@ check_cond:
 		 * be released, notify the waiting thread now that LU has
 		 * finished accessing it.
 		 */
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags);
-		if (atomic_read(&cmd->t_task.transport_lun_fe_stop)) {
+		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+		if (atomic_read(&cmd->transport_lun_fe_stop)) {
 			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
 				" struct se_cmd: %p ITT: 0x%08x\n",
 				lun->unpacked_lun,
 				cmd, cmd->se_tfo->get_task_tag(cmd));
 
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 					cmd_flags);
 			transport_cmd_check_stop(cmd, 1, 0);
-			complete(&cmd->t_task.transport_lun_fe_stop_comp);
+			complete(&cmd->transport_lun_fe_stop_comp);
 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 			continue;
 		}
 		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
 			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
 
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 	}
 	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -5191,15 +5191,15 @@ static void transport_generic_wait_for_tasks(
 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
 		return;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	/*
 	 * If we are already stopped due to an external event (ie: LUN shutdown)
 	 * sleep until the connection can have the passed struct se_cmd back.
-	 * The cmd->t_task.transport_lun_stopped_sem will be upped by
+	 * The cmd->transport_lun_stopped_sem will be upped by
 	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
 	 * has completed its operation on the struct se_cmd.
 	 */
-	if (atomic_read(&cmd->t_task.transport_lun_stop)) {
+	if (atomic_read(&cmd->transport_lun_stop)) {
 
 		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
 			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
@@ -5212,10 +5212,10 @@ static void transport_generic_wait_for_tasks(
 		 * We go ahead and up transport_lun_stop_comp just to be sure
 		 * here.
 		 */
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
-		complete(&cmd->t_task.transport_lun_stop_comp);
-		wait_for_completion(&cmd->t_task.transport_lun_fe_stop_comp);
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		complete(&cmd->transport_lun_stop_comp);
+		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
 
 		transport_all_task_dev_remove_state(cmd);
 		/*
@@ -5228,13 +5228,13 @@ static void transport_generic_wait_for_tasks(
 			"stop_comp); for ITT: 0x%08x\n",
 			cmd->se_tfo->get_task_tag(cmd));
 
-		atomic_set(&cmd->t_task.transport_lun_stop, 0);
+		atomic_set(&cmd->transport_lun_stop, 0);
 	}
-	if (!atomic_read(&cmd->t_task.t_transport_active) ||
-	     atomic_read(&cmd->t_task.t_transport_aborted))
+	if (!atomic_read(&cmd->t_transport_active) ||
+	     atomic_read(&cmd->t_transport_aborted))
 		goto remove;
 
-	atomic_set(&cmd->t_task.t_transport_stop, 1);
+	atomic_set(&cmd->t_transport_stop, 1);
 
 	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
 		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
@@ -5242,21 +5242,21 @@ static void transport_generic_wait_for_tasks(
 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
 		cmd->deferred_t_state);
 
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
 
-	wait_for_completion(&cmd->t_task.t_transport_stop_comp);
+	wait_for_completion(&cmd->t_transport_stop_comp);
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-	atomic_set(&cmd->t_task.t_transport_active, 0);
-	atomic_set(&cmd->t_task.t_transport_stop, 0);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	atomic_set(&cmd->t_transport_active, 0);
+	atomic_set(&cmd->t_transport_stop, 0);
 
 	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
-		"&cmd->t_task.t_transport_stop_comp) for ITT: 0x%08x\n",
+		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
 		cmd->se_tfo->get_task_tag(cmd));
 remove:
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	if (!remove_cmd)
 		return;
 
@@ -5295,13 +5295,13 @@ int transport_send_check_condition_and_sense(
 	int offset;
 	u8 asc = 0, ascq = 0;
 
-	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return 0;
 	}
 	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
-	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	if (!reason && from_transport)
 		goto after_reason;
@@ -5460,14 +5460,14 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 {
 	int ret = 0;
 
-	if (atomic_read(&cmd->t_task.t_transport_aborted) != 0) {
+	if (atomic_read(&cmd->t_transport_aborted) != 0) {
 		if (!(send_status) ||
 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
 			return 1;
 #if 0
 		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
 			" status for CDB: 0x%02x ITT: 0x%08x\n",
-			cmd->t_task.t_task_cdb[0],
+			cmd->t_task_cdb[0],
 			cmd->se_tfo->get_task_tag(cmd));
 #endif
 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
@@ -5488,7 +5488,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	 */
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-			atomic_inc(&cmd->t_task.t_transport_aborted);
+			atomic_inc(&cmd->t_transport_aborted);
 			smp_mb__after_atomic_inc();
 			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 			transport_new_cmd_failure(cmd);
@@ -5498,7 +5498,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 #if 0
 	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
-		" ITT: 0x%08x\n", cmd->t_task.t_task_cdb[0],
+		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
 		cmd->se_tfo->get_task_tag(cmd));
 #endif
 	cmd->se_tfo->queue_status(cmd);
@@ -5589,7 +5589,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
 
 		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
 			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
@@ -5597,22 +5597,22 @@ static void transport_processing_shutdown(struct se_device *dev)
 			cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn,
 			cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state,
 			cmd->t_state, cmd->deferred_t_state,
-			cmd->t_task.t_task_cdb[0]);
+			cmd->t_task_cdb[0]);
 		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
 			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
-			cmd->t_task.t_task_cdbs,
-			atomic_read(&cmd->t_task.t_task_cdbs_left),
-			atomic_read(&cmd->t_task.t_task_cdbs_sent),
-			atomic_read(&cmd->t_task.t_transport_active),
-			atomic_read(&cmd->t_task.t_transport_stop),
-			atomic_read(&cmd->t_task.t_transport_sent));
+			cmd->t_task_cdbs,
+			atomic_read(&cmd->t_task_cdbs_left),
+			atomic_read(&cmd->t_task_cdbs_sent),
+			atomic_read(&cmd->t_transport_active),
+			atomic_read(&cmd->t_transport_stop),
+			atomic_read(&cmd->t_transport_sent));
 
 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
 			spin_unlock_irqrestore(
-				&cmd->t_task.t_state_lock, flags);
+				&cmd->t_state_lock, flags);
 
 			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
 				" %p\n", task, dev);
@@ -5620,8 +5620,8 @@ static void transport_processing_shutdown(struct se_device *dev)
 			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
 				task, dev);
 
-			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-			atomic_dec(&cmd->t_task.t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			atomic_dec(&cmd->t_task_cdbs_left);
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
@@ -5631,25 +5631,25 @@ static void transport_processing_shutdown(struct se_device *dev)
 		}
 		__transport_stop_task_timer(task, &flags);
 
-		if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) {
+		if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
 			spin_unlock_irqrestore(
-					&cmd->t_task.t_state_lock, flags);
+					&cmd->t_state_lock, flags);
 
 			DEBUG_DO("Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
-				atomic_read(&cmd->t_task.t_task_cdbs_ex_left));
+				atomic_read(&cmd->t_task_cdbs_ex_left));
 
 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
 
-		if (atomic_read(&cmd->t_task.t_transport_active)) {
+		if (atomic_read(&cmd->t_transport_active)) {
 			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
 					" %p\n", task, dev);
 
-			if (atomic_read(&cmd->t_task.t_fe_count)) {
+			if (atomic_read(&cmd->t_fe_count)) {
 				spin_unlock_irqrestore(
-					&cmd->t_task.t_state_lock, flags);
+					&cmd->t_state_lock, flags);
 				transport_send_check_condition_and_sense(
 					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
 					0);
@@ -5660,7 +5660,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 				transport_cmd_check_stop(cmd, 1, 0);
 			} else {
 				spin_unlock_irqrestore(
-					&cmd->t_task.t_state_lock, flags);
+					&cmd->t_state_lock, flags);
 
 				transport_remove_cmd_from_queue(cmd,
 					&cmd->se_dev->dev_queue_obj);
@@ -5677,9 +5677,9 @@ static void transport_processing_shutdown(struct se_device *dev)
 		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
 				task, dev);
 
-		if (atomic_read(&cmd->t_task.t_fe_count)) {
+		if (atomic_read(&cmd->t_fe_count)) {
 			spin_unlock_irqrestore(
-				&cmd->t_task.t_state_lock, flags);
+				&cmd->t_state_lock, flags);
 			transport_send_check_condition_and_sense(cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 			transport_remove_cmd_from_queue(cmd,
@@ -5689,7 +5689,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 			transport_cmd_check_stop(cmd, 1, 0);
 		} else {
 			spin_unlock_irqrestore(
-				&cmd->t_task.t_state_lock, flags);
+				&cmd->t_state_lock, flags);
 
 			transport_remove_cmd_from_queue(cmd,
 				&cmd->se_dev->dev_queue_obj);
@@ -5710,7 +5710,7 @@ static void transport_processing_shutdown(struct se_device *dev)
 		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
 				cmd, cmd->t_state);
 
-		if (atomic_read(&cmd->t_task.t_fe_count)) {
+		if (atomic_read(&cmd->t_fe_count)) {
 			transport_send_check_condition_and_sense(cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 3b8b02c..d28e9c4 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -270,7 +270,7 @@ void core_scsi3_ua_for_check_condition(
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
 		(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
 		"Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
-		cmd->orig_fe_lun, cmd->t_task.t_task_cdb[0], *asc, *ascq);
+		cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
 }
 
 int core_scsi3_ua_clear_for_request_sense(
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 9c99402..62b1226 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -60,7 +60,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 	struct fc_seq *sp;
 	struct se_cmd *se_cmd;
 	struct se_mem *mem;
-	struct se_transport_task *task;
 
 	if (!(ft_debug_logging & FT_DEBUG_IO))
 		return;
@@ -72,12 +71,11 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 		caller, cmd, cmd->cdb);
 	printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
 
-	task = &se_cmd->t_task;
-	printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
-	       caller, cmd, task, task->t_tasks_se_num,
-	       task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
+	printk(KERN_INFO "%s: cmd %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
+	       caller, cmd, se_cmd->t_tasks_se_num,
+	       se_cmd->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
 
-	list_for_each_entry(mem, &task->t_mem_list, se_list)
+	list_for_each_entry(mem, &se_cmd->t_mem_list, se_list)
 		printk(KERN_INFO "%s: cmd %p mem %p page %p "
 		       "len 0x%x off 0x%x\n",
 		       caller, cmd, mem,
@@ -264,9 +262,9 @@ int ft_write_pending(struct se_cmd *se_cmd)
 				 * TCM/LIO target
 				 */
 				transport_do_task_sg_chain(se_cmd);
-				cmd->sg = se_cmd->t_task.t_tasks_sg_chained;
+				cmd->sg = se_cmd->t_tasks_sg_chained;
 				cmd->sg_cnt =
-					se_cmd->t_task.t_tasks_sg_chained_no;
+					se_cmd->t_tasks_sg_chained_no;
 			}
 			if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
 						    cmd->sg, cmd->sg_cnt))
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index f18af6e..8560182 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -65,7 +65,6 @@
 int ft_queue_data_in(struct se_cmd *se_cmd)
 {
 	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-	struct se_transport_task *task;
 	struct fc_frame *fp = NULL;
 	struct fc_exch *ep;
 	struct fc_lport *lport;
@@ -90,14 +89,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);
 
-	task = &se_cmd->t_task;
 	remaining = se_cmd->data_length;
 
 	/*
 	 * Setup to use first mem list entry if any.
 	 */
-	if (task->t_tasks_se_num) {
-		mem = list_first_entry(&task->t_mem_list,
+	if (se_cmd->t_tasks_se_num) {
+		mem = list_first_entry(&se_cmd->t_mem_list,
 			 struct se_mem, se_list);
 		mem_len = mem->se_len;
 		mem_off = mem->se_off;
@@ -148,8 +146,8 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 
 		if (use_sg) {
 			if (!mem) {
-				BUG_ON(!task->t_task_buf);
-				page_addr = task->t_task_buf + mem_off;
+				BUG_ON(!se_cmd->t_task_buf);
+				page_addr = se_cmd->t_task_buf + mem_off;
 				/*
 				 * In this case, offset is 'offset_in_page' of
 				 * (t_task_buf + mem_off) instead of 'mem_off'.
@@ -180,7 +178,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 			kunmap_atomic(page_addr, KM_SOFTIRQ0);
 			to += tlen;
 		} else {
-			from = task->t_task_buf + mem_off;
+			from = se_cmd->t_task_buf + mem_off;
 			memcpy(to, from, tlen);
 			to += tlen;
 		}
@@ -220,7 +218,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	struct fc_seq *seq = cmd->seq;
 	struct fc_exch *ep;
 	struct fc_lport *lport;
-	struct se_transport_task *task;
 	struct fc_frame_header *fh;
 	struct se_mem *mem;
 	u32 mem_off;
@@ -235,8 +232,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	u32 f_ctl;
 	void *buf;
 
-	task = &se_cmd->t_task;
-
 	fh = fc_frame_header_get(fp);
 	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
 		goto drop;
@@ -312,8 +307,8 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	/*
 	 * Setup to use first mem list entry if any.
 	 */
-	if (task->t_tasks_se_num) {
-		mem = list_first_entry(&task->t_mem_list,
+	if (se_cmd->t_tasks_se_num) {
+		mem = list_first_entry(&se_cmd->t_mem_list,
 				       struct se_mem, se_list);
 		mem_len = mem->se_len;
 		mem_off = mem->se_off;
@@ -355,7 +350,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 			memcpy(to, from, tlen);
 			kunmap_atomic(page_addr, KM_SOFTIRQ0);
 		} else {
-			to = task->t_task_buf + mem_off;
+			to = se_cmd->t_task_buf + mem_off;
 			memcpy(to, from, tlen);
 		}
 		from += tlen;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 77f570d..58e5c17 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -403,62 +403,6 @@ struct se_queue_obj {
 	wait_queue_head_t	thread_wq;
 } ____cacheline_aligned;
 
-/*
- * Used one per struct se_cmd to hold all extra struct se_task
- * metadata.  This structure is setup and allocated in
- * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
- */
-struct se_transport_task {
-	unsigned char		*t_task_cdb;
-	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
-	unsigned long long	t_task_lba;
-	int			t_tasks_failed;
-	int			t_tasks_fua;
-	bool			t_tasks_bidi;
-	u32			t_task_cdbs;
-	u32			t_tasks_check;
-	u32			t_tasks_no;
-	u32			t_tasks_sectors;
-	u32			t_tasks_se_num;
-	u32			t_tasks_se_bidi_num;
-	u32			t_tasks_sg_chained_no;
-	atomic_t		t_fe_count;
-	atomic_t		t_se_count;
-	atomic_t		t_task_cdbs_left;
-	atomic_t		t_task_cdbs_ex_left;
-	atomic_t		t_task_cdbs_timeout_left;
-	atomic_t		t_task_cdbs_sent;
-	atomic_t		t_transport_aborted;
-	atomic_t		t_transport_active;
-	atomic_t		t_transport_complete;
-	atomic_t		t_transport_queue_active;
-	atomic_t		t_transport_sent;
-	atomic_t		t_transport_stop;
-	atomic_t		t_transport_timeout;
-	atomic_t		transport_dev_active;
-	atomic_t		transport_lun_active;
-	atomic_t		transport_lun_fe_stop;
-	atomic_t		transport_lun_stop;
-	spinlock_t		t_state_lock;
-	struct completion	t_transport_stop_comp;
-	struct completion	transport_lun_fe_stop_comp;
-	struct completion	transport_lun_stop_comp;
-	struct scatterlist	*t_tasks_sg_chained;
-	struct scatterlist	t_tasks_sg_bounce;
-	void			*t_task_buf;
-	/*
-	 * Used for pre-registered fabric SGL passthrough WRITE and READ
-	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
-	 * and other HW target mode fabric modules.
-	 */
-	struct scatterlist	*t_task_pt_sgl;
-	u32			t_task_pt_sgl_num;
-	struct list_head	t_mem_list;
-	/* Used for BIDI READ */
-	struct list_head	t_mem_bidi_list;
-	struct list_head	t_task_list;
-} ____cacheline_aligned;
-
 struct se_task {
 	unsigned char	task_sense;
 	struct scatterlist *task_sg;
@@ -534,13 +478,61 @@ struct se_cmd {
 	/* Only used for internal passthrough and legacy TCM fabric modules */
 	struct se_session	*se_sess;
 	struct se_tmr_req	*se_tmr_req;
-	struct se_transport_task t_task;
 	struct list_head	se_queue_node;
 	struct target_core_fabric_ops *se_tfo;
 	int (*transport_emulate_cdb)(struct se_cmd *);
 	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
 	void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
 	void (*transport_complete_callback)(struct se_cmd *);
+	unsigned char		*t_task_cdb;
+	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+	unsigned long long	t_task_lba;
+	int			t_tasks_failed;
+	int			t_tasks_fua;
+	bool			t_tasks_bidi;
+	u32			t_task_cdbs;
+	u32			t_tasks_check;
+	u32			t_tasks_no;
+	u32			t_tasks_sectors;
+	u32			t_tasks_se_num;
+	u32			t_tasks_se_bidi_num;
+	u32			t_tasks_sg_chained_no;
+	atomic_t		t_fe_count;
+	atomic_t		t_se_count;
+	atomic_t		t_task_cdbs_left;
+	atomic_t		t_task_cdbs_ex_left;
+	atomic_t		t_task_cdbs_timeout_left;
+	atomic_t		t_task_cdbs_sent;
+	atomic_t		t_transport_aborted;
+	atomic_t		t_transport_active;
+	atomic_t		t_transport_complete;
+	atomic_t		t_transport_queue_active;
+	atomic_t		t_transport_sent;
+	atomic_t		t_transport_stop;
+	atomic_t		t_transport_timeout;
+	atomic_t		transport_dev_active;
+	atomic_t		transport_lun_active;
+	atomic_t		transport_lun_fe_stop;
+	atomic_t		transport_lun_stop;
+	spinlock_t		t_state_lock;
+	struct completion	t_transport_stop_comp;
+	struct completion	transport_lun_fe_stop_comp;
+	struct completion	transport_lun_stop_comp;
+	struct scatterlist	*t_tasks_sg_chained;
+	struct scatterlist	t_tasks_sg_bounce;
+	void			*t_task_buf;
+	/*
+	 * Used for pre-registered fabric SGL passthrough WRITE and READ
+	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
+	 * and other HW target mode fabric modules.
+	 */
+	struct scatterlist	*t_task_pt_sgl;
+	u32			t_task_pt_sgl_num;
+	struct list_head	t_mem_list;
+	/* Used for BIDI READ */
+	struct list_head	t_mem_bidi_list;
+	struct list_head	t_task_list;
+
 } ____cacheline_aligned;
 
 struct se_tmr_req {
-- 
1.7.6



* [PATCH 055/103] target: Change name & semantics of transport_get_sectors()
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (22 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 054/103] target: inline struct se_transport_task into struct se_cmd Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 056/103] target: Remove unused members of se_cmd Nicholas A. Bellinger
                   ` (5 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

transport_get_sectors() used to set cmd->t_tasks_sectors as a side effect.
Remove the need for this member by making transport_get_sectors() into
transport_cmd_get_valid_sectors(), a side-effect-free function that returns
the number of sectors, or 0 if invalid. Command creation will still error
out properly with this change.
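
A minimal caller sketch under the new semantics (illustrative only, not
part of the patch; the real call sites are in the hunks below):

	/*
	 * Sketch only: the check is now a pure query.  A return of 0 is
	 * treated as an invalid request, and no cmd->t_tasks_sectors
	 * member is updated as a side effect.
	 */
	int sectors = transport_cmd_get_valid_sectors(cmd);
	if (!sectors)
		goto out_invalid_cdb_field;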

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   32 ++++++++++++--------------------
 include/target/target_core_base.h      |    1 -
 2 files changed, 12 insertions(+), 21 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 343dfb0..537d8f5 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -213,7 +213,7 @@ static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
 static int transport_generic_get_mem(struct se_cmd *cmd, u32 length);
 static int transport_generic_remove(struct se_cmd *cmd,
 		int release_to_pool, int session_reinstatement);
-static int transport_get_sectors(struct se_cmd *cmd);
+static int transport_cmd_get_valid_sectors(struct se_cmd *cmd);
 static int transport_map_sg_to_mem(struct se_cmd *cmd,
 		struct list_head *se_mem_list, struct scatterlist *sgl,
 		u32 *se_mem_cnt);
@@ -3398,7 +3398,7 @@ static int transport_generic_cmd_sequencer(
 		 * Check to ensure that LBA + Range does not exceed past end of
 		 * device.
 		 */
-		if (transport_get_sectors(cmd) < 0)
+		if (!transport_cmd_get_valid_sectors(cmd))
 			goto out_invalid_cdb_field;
 		break;
 	case UNMAP:
@@ -3979,30 +3979,26 @@ static inline long long transport_dev_end_lba(struct se_device *dev)
 	return dev->transport->get_blocks(dev) + 1;
 }
 
-static int transport_get_sectors(struct se_cmd *cmd)
+static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-
-	cmd->t_tasks_sectors =
-		(cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
-	if (!(cmd->t_tasks_sectors))
-		cmd->t_tasks_sectors = 1;
+	u32 sectors;
 
 	if (dev->transport->get_device_type(dev) != TYPE_DISK)
 		return 0;
 
-	if ((cmd->t_task_lba + cmd->t_tasks_sectors) >
+	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
+
+	if ((cmd->t_task_lba + sectors) >
 	     transport_dev_end_lba(dev)) {
 		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
 			" transport_dev_end_lba(): %llu\n",
-			cmd->t_task_lba, cmd->t_tasks_sectors,
+			cmd->t_task_lba, sectors,
 			transport_dev_end_lba(dev));
-		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
-		return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
+		return 0;
 	}
 
-	return 0;
+	return sectors;
 }
 
 static int transport_new_cmd_obj(struct se_cmd *cmd)
@@ -4026,7 +4022,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 		    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
 			rc = transport_generic_get_cdb_count(cmd,
 				cmd->t_task_lba,
-				cmd->t_tasks_sectors,
+				transport_cmd_get_valid_sectors(cmd),
 				DMA_FROM_DEVICE, &cmd->t_mem_bidi_list,
 				set_counts);
 			if (!(rc)) {
@@ -4043,7 +4039,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 		 */
 		task_cdbs = transport_generic_get_cdb_count(cmd,
 				cmd->t_task_lba,
-				cmd->t_tasks_sectors,
+				transport_cmd_get_valid_sectors(cmd),
 				cmd->data_direction, &cmd->t_mem_list,
 				set_counts);
 		if (!(task_cdbs)) {
@@ -4777,10 +4773,6 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
 			return ret;
 	}
 
-	ret = transport_get_sectors(cmd);
-	if (ret < 0)
-		return ret;
-
 	ret = transport_new_cmd_obj(cmd);
 	if (ret < 0)
 		return ret;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 58e5c17..3bac5e0 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -493,7 +493,6 @@ struct se_cmd {
 	u32			t_task_cdbs;
 	u32			t_tasks_check;
 	u32			t_tasks_no;
-	u32			t_tasks_sectors;
 	u32			t_tasks_se_num;
 	u32			t_tasks_se_bidi_num;
 	u32			t_tasks_sg_chained_no;
-- 
1.7.6



* [PATCH 056/103] target: Remove unused members of se_cmd
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (23 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 055/103] target: Change name & semantics of transport_get_sectors() Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 057/103] target: Rename se_cmd.t_task_cdbs to t_task_list_num Nicholas A. Bellinger
                   ` (4 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

t_tasks_check
t_tasks_no (only incremented, never used)

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |    1 -
 include/target/target_core_base.h      |    2 --
 2 files changed, 0 insertions(+), 3 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 537d8f5..afb453c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1669,7 +1669,6 @@ transport_generic_get_task(struct se_cmd *cmd,
 	INIT_LIST_HEAD(&task->t_execute_list);
 	INIT_LIST_HEAD(&task->t_state_list);
 	init_completion(&task->task_stop_comp);
-	task->task_no = cmd->t_tasks_no++;
 	task->task_se_cmd = cmd;
 	task->se_dev = dev;
 	task->task_data_direction = data_direction;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 3bac5e0..d2b1067 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -491,8 +491,6 @@ struct se_cmd {
 	int			t_tasks_fua;
 	bool			t_tasks_bidi;
 	u32			t_task_cdbs;
-	u32			t_tasks_check;
-	u32			t_tasks_no;
 	u32			t_tasks_se_num;
 	u32			t_tasks_se_bidi_num;
 	u32			t_tasks_sg_chained_no;
-- 
1.7.6



* [PATCH 057/103] target: Rename se_cmd.t_task_cdbs to t_task_list_num
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (24 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 056/103] target: Remove unused members of se_cmd Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 058/103] target: Fix some spelling Nicholas A. Bellinger
                   ` (3 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Since it tracks the number of entries in t_task_list, give it a name that
reflects this.
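
For reference, the main consumer of the renamed counter, taken verbatim
from the hunk below; it compares tasks sent so far against the length of
t_task_list:

	if (atomic_read(&cmd->t_task_cdbs_sent) ==
	    cmd->t_task_list_num)
		atomic_set(&cmd->transport_sent, 1);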

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_tmr.c       |    2 +-
 drivers/target/target_core_transport.c |    6 +++---
 include/target/target_core_base.h      |    2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4235e72..6667e39 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -260,7 +260,7 @@ int core_tmr_lun_reset(
 			" t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
-			cmd->t_task_cdbs,
+			cmd->t_task_list_num,
 			atomic_read(&cmd->t_task_cdbs_left),
 			atomic_read(&cmd->t_task_cdbs_sent),
 			atomic_read(&cmd->t_transport_active),
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index afb453c..39e4b29 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2515,7 +2515,7 @@ check_depth:
 	atomic_inc(&cmd->t_task_cdbs_sent);
 
 	if (atomic_read(&cmd->t_task_cdbs_sent) ==
-	    cmd->t_task_cdbs)
+	    cmd->t_task_list_num)
 		atomic_set(&cmd->transport_sent, 1);
 
 	transport_start_task_timer(task);
@@ -4008,7 +4008,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 
 	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
 		task_cdbs = 1;
-		cmd->t_task_cdbs = 1;
+		cmd->t_task_list_num = 1;
 	} else {
 		int set_counts = 1;
 
@@ -4047,7 +4047,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			return PYX_TRANSPORT_LU_COMM_FAILURE;
 		}
-		cmd->t_task_cdbs = task_cdbs;
+		cmd->t_task_list_num = task_cdbs;
 
 #if 0
 		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index d2b1067..71c96ce 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -490,7 +490,6 @@ struct se_cmd {
 	int			t_tasks_failed;
 	int			t_tasks_fua;
 	bool			t_tasks_bidi;
-	u32			t_task_cdbs;
 	u32			t_tasks_se_num;
 	u32			t_tasks_se_bidi_num;
 	u32			t_tasks_sg_chained_no;
@@ -529,6 +528,7 @@ struct se_cmd {
 	/* Used for BIDI READ */
 	struct list_head	t_mem_bidi_list;
 	struct list_head	t_task_list;
+	u32			t_task_list_num;
 
 } ____cacheline_aligned;
 
-- 
1.7.6



* [PATCH 058/103] target: Fix some spelling
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (25 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 057/103] target: Rename se_cmd.t_task_cdbs to t_task_list_num Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 059/103] target: Remove unused var from transport_generic_do_tmr Nicholas A. Bellinger
                   ` (2 subsequent siblings)
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Fix several misspellings of "contiguous" in comments, and a comment that
referred to t_task_pt_buf instead of t_task_pt_sgl.

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |    8 ++++----
 1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 39e4b29..33e5275 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3955,7 +3955,7 @@ int transport_generic_map_mem_to_cmd(
 			return -ENOSYS;
 		}
 		/*
-		 * For incoming CDBs using a contiguous buffer internall with TCM,
+		 * For incoming CDBs using a contiguous buffer internal with TCM,
 		 * save the passed struct scatterlist memory.  After TCM storage object
 		 * processing has completed for this struct se_cmd, TCM core will call
 		 * transport_memcpy_[write,read]_contig() as necessary from
@@ -4325,7 +4325,7 @@ int transport_map_mem_to_sg(
 
 	while (task_size != 0) {
 		/*
-		 * Setup the contigious array of scatterlists for
+		 * Setup the contiguous array of scatterlists for
 		 * this struct se_task.
 		 */
 		sg_assign_page(sg, se_mem->se_page);
@@ -4429,7 +4429,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	}
 	/*
 	 * Walk the struct se_task list and setup scatterlist chains
-	 * for each contiguosly allocated struct se_task->task_sg[].
+	 * for each contiguously allocated struct se_task->task_sg[].
 	 */
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		if (!(task->task_sg) || !(task->task_padded_sg))
@@ -4907,7 +4907,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 	/*
 	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
 	 * from the passed Linux/SCSI struct scatterlist located at
-	 * se_cmd->t_task_pt_buf to the contiguous buffer at
+	 * se_cmd->t_task_pt_sgl to the contiguous buffer at
 	 * se_cmd->t_task_buf.
 	 */
 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
-- 
1.7.6



* [PATCH 059/103] target: Remove unused var from transport_generic_do_tmr
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (26 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 058/103] target: Fix some spelling Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 060/103] target: map_sg_to_mem: return sg_count in return value Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 061/103] target/pscsi: Use min_t for sector limits Nicholas A. Bellinger
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |    2 --
 1 files changed, 0 insertions(+), 2 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 33e5275..54fa0a4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -5501,14 +5501,12 @@ void transport_send_task_abort(struct se_cmd *cmd)
  */
 int transport_generic_do_tmr(struct se_cmd *cmd)
 {
-	struct se_cmd *ref_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct se_tmr_req *tmr = cmd->se_tmr_req;
 	int ret;
 
 	switch (tmr->function) {
 	case TMR_ABORT_TASK:
-		ref_cmd = tmr->ref_cmd;
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
 	case TMR_ABORT_TASK_SET:
-- 
1.7.6



* [PATCH 060/103] target: map_sg_to_mem: return sg_count in return value
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (27 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 059/103] target: Remove unused var from transport_generic_do_tmr Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  2011-07-21  7:08 ` [PATCH 061/103] target/pscsi: Use min_t for sector limits Nicholas A. Bellinger
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Return the mapped segment count directly instead of via an out parameter;
this is simpler for callers.
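
The resulting change in calling convention, condensed from the hunks
below (errors are still returned as negative values):

	/* Before: the count came back through an out parameter. */
	u32 mapped_sg_count = 0;
	ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_list, sgl,
			&mapped_sg_count);
	if (ret < 0)
		return -ENOMEM;
	cmd->t_tasks_se_num = mapped_sg_count;

	/* After: a non-negative return value is the mapped segment count. */
	ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_list, sgl);
	if (ret < 0)
		return -ENOMEM;
	cmd->t_tasks_se_num = ret;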

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_transport.c |   24 +++++++++---------------
 1 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 54fa0a4..c5a541d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -215,8 +215,7 @@ static int transport_generic_remove(struct se_cmd *cmd,
 		int release_to_pool, int session_reinstatement);
 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd);
 static int transport_map_sg_to_mem(struct se_cmd *cmd,
-		struct list_head *se_mem_list, struct scatterlist *sgl,
-		u32 *se_mem_cnt);
+		struct list_head *se_mem_list, struct scatterlist *sgl);
 static void transport_memcpy_se_mem_read_contig(unsigned char *dst,
 		struct list_head *se_mem_list, u32 len);
 static void transport_release_fe_cmd(struct se_cmd *cmd);
@@ -3911,7 +3910,6 @@ int transport_generic_map_mem_to_cmd(
 	struct scatterlist *sgl_bidi,
 	u32 sgl_bidi_count)
 {
-	u32 mapped_sg_count = 0;
 	int ret;
 
 	if (!sgl || !sgl_count)
@@ -3927,24 +3925,20 @@ int transport_generic_map_mem_to_cmd(
 		 * processed into a TCM struct se_subsystem_dev, we do the mapping
 		 * from the passed physical memory to struct se_mem->se_page here.
 		 */
-		ret = transport_map_sg_to_mem(cmd,
-			&cmd->t_mem_list, sgl, &mapped_sg_count);
+		ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_list, sgl);
 		if (ret < 0)
 			return -ENOMEM;
 
-		cmd->t_tasks_se_num = mapped_sg_count;
+		cmd->t_tasks_se_num = ret;
 		/*
 		 * Setup BIDI READ list of struct se_mem elements
 		 */
 		if (sgl_bidi && sgl_bidi_count) {
-			mapped_sg_count = 0;
-			ret = transport_map_sg_to_mem(cmd,
-				&cmd->t_mem_bidi_list, sgl_bidi,
-				&mapped_sg_count);
+			ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_bidi_list, sgl_bidi);
 			if (ret < 0)
 				return -ENOMEM;
 
-			cmd->t_tasks_se_bidi_num = mapped_sg_count;
+			cmd->t_tasks_se_bidi_num = ret;
 		}
 		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 
@@ -4251,11 +4245,11 @@ static inline sector_t transport_limit_task_sectors(
 static int transport_map_sg_to_mem(
 	struct se_cmd *cmd,
 	struct list_head *se_mem_list,
-	struct scatterlist *sg,
-	u32 *sg_count)
+	struct scatterlist *sg)
 {
 	struct se_mem *se_mem;
 	u32 cmd_size = cmd->data_length;
+	int sg_count = 0;
 
 	WARN_ON(!sg);
 
@@ -4285,7 +4279,7 @@ static int transport_map_sg_to_mem(
 			se_mem->se_len = cmd_size;
 
 		cmd_size -= se_mem->se_len;
-		(*sg_count)++;
+		sg_count++;
 
 		DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n",
 				sg_count, cmd_size);
@@ -4297,7 +4291,7 @@ static int transport_map_sg_to_mem(
 
 	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count);
 
-	return 0;
+	return sg_count;
 }
 
 /*	transport_map_mem_to_sg():
-- 
1.7.6



* [PATCH 061/103] target/pscsi: Use min_t for sector limits
  2011-07-21  7:07 [PATCH 031/103] target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun Nicholas A. Bellinger
                   ` (28 preceding siblings ...)
  2011-07-21  7:08 ` [PATCH 060/103] target: map_sg_to_mem: return sg_count in return value Nicholas A. Bellinger
@ 2011-07-21  7:08 ` Nicholas A. Bellinger
  29 siblings, 0 replies; 31+ messages in thread
From: Nicholas A. Bellinger @ 2011-07-21  7:08 UTC (permalink / raw)
  To: target-devel; +Cc: linux-scsi, Andy Grover

From: Andy Grover <agrover@redhat.com>

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
---
 drivers/target/target_core_pscsi.c |    6 ++----
 1 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index a62350d..5df329c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -328,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list(
 	q = sd->request_queue;
 	limits = &dev_limits.limits;
 	limits->logical_block_size = sd->sector_size;
-	limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
-				  queue_max_hw_sectors(q) : sd->host->max_sectors;
-	limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
-				  queue_max_sectors(q) : sd->host->max_sectors;
+	limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+	limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
 	dev_limits.hw_queue_depth = sd->queue_depth;
 	dev_limits.queue_depth = sd->queue_depth;
 	/*
-- 
1.7.6



