* [RFC PATCH 11/19] Target_Core_Mod RAMDISK_DR and RAMDISK_MCP subsystem plugin
@ 2009-09-12  1:57 Nicholas A. Bellinger
From: Nicholas A. Bellinger @ 2009-09-12  1:57 UTC (permalink / raw)
  To: LKML, linux-scsi
  Cc: Andrew Morton, Greg KH, Douglas Gilbert, James Bottomley,
	Hannes Reinecke, FUJITA Tomonori, Mike Christie, Joel Becker,
	Martin K. Petersen, Christoph Hellwig, Linus Torvalds,
	Alan Stern, Boaz Harrosh, Florian Haas, Philipp Reisner

[RFC PATCH 11/19] Target_Core_Mod RAMDISK_DR and RAMDISK_MCP subsystem plugin

This patch adds the RAMDISK_DR and RAMDISK_MCP subsystem plugins, which back
ramdisk storage with struct page memory allocated from the upstream Linux/MM code.
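
Both the MEMCPY and DIRECT paths carve the ramdisk into PAGE_SIZE pages held in
scatterlist tables and translate a starting LBA into a (page, offset) pair with
the same arithmetic. The following is a rough standalone sketch of that mapping
only (rd_map_lba is a hypothetical helper name and a 4 KiB page size is assumed;
the real math lives in rd_MEMCPY_do_task() and rd_DIRECT_do_se_mem_map() below):

    #include <stdio.h>

    #define PAGE_SIZE 4096u  /* assumed page size for this example */

    static void rd_map_lba(unsigned long long lba, unsigned int block_size,
                           unsigned int *page, unsigned int *offset)
    {
        /* Index of the ramdisk page holding the first byte of the LBA. */
        *page = (lba * block_size) / PAGE_SIZE;
        /* Byte offset of that LBA within the page. */
        *offset = (lba % (PAGE_SIZE / block_size)) * block_size;
    }

    int main(void)
    {
        unsigned int page, offset;

        rd_map_lba(9, 512, &page, &offset);
        printf("page %u offset %u\n", page, offset); /* page 1 offset 512 */
        return 0;
    }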

Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>

diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 0000000..6e223f9
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,1424 @@
+/*******************************************************************************
+ * Filename:  target_core_rd.c
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2009 Rising Tide Software, Inc.
+ * Copyright (c) 2008-2009 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+
+#define TARGET_CORE_RD_C
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_rd.h>
+
+#undef TARGET_CORE_RD_C
+
+/* #define DEBUG_RAMDISK_MCP */
+/* #define DEBUG_RAMDISK_DR */
+
+/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_attach_hba(se_hba_t *hba, u32 host_id)
+{
+	rd_host_t *rd_host;
+
+	rd_host = kzalloc(sizeof(rd_host_t), GFP_KERNEL);
+	if (!(rd_host)) {
+		printk(KERN_ERR "Unable to allocate memory for rd_host_t\n");
+		return -ENOMEM;
+	}
+
+	rd_host->rd_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) rd_host;
+	hba->transport = (hba->type == RAMDISK_DR) ?
+			&rd_dr_template : &rd_mcp_template;
+
+	printk(KERN_INFO "CORE_HBA[%d] - %s Ramdisk HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		PYX_ISCSI_VENDOR, RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
+		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
+		RD_MAX_SECTORS);
+
+	return 0;
+}
+
+/*	rd_detach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_detach_hba(se_hba_t *hba)
+{
+	rd_host_t *rd_host;
+
+	if (!hba->hba_ptr) {
+		printk(KERN_ERR "hba->hba_ptr is NULL!\n");
+		return -1;
+	}
+
+	rd_host = (rd_host_t *) hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
+
+	kfree(rd_host);
+	hba->hba_ptr = NULL;
+
+	return 0;
+}
+
+/*	rd_release_device_space():
+ *
+ *
+ */
+void rd_release_device_space(rd_dev_t *rd_dev)
+{
+	u32 i, j, page_count = 0, sg_per_table;
+	rd_dev_sg_table_t *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	sg_table = rd_dev->sg_table_array;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg = sg_table[i].sg_table;
+		sg_per_table = sg_table[i].rd_sg_count;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = sg_page(&sg[j]);
+			if ((pg)) {
+				__free_page(pg);
+				page_count++;
+			}
+		}
+
+		kfree(sg);
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	kfree(sg_table);
+	rd_dev->sg_table_array = NULL;
+	rd_dev->sg_table_count = 0;
+}
+
+
+/*	rd_build_device_space():
+ *
+ *
+ */
+static int rd_build_device_space(rd_dev_t *rd_dev)
+{
+	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	rd_dev_sg_table_t *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (rd_dev->rd_page_count <= 0) {
+		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+			rd_dev->rd_page_count);
+		return -1;
+	}
+	total_sg_needed = rd_dev->rd_page_count;
+
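+	/* The +1 rounds the table count up; an exact multiple of
+	 * max_sg_per_table leaves one allocated but unused (all-zero)
+	 * trailing table entry. */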
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(rd_dev_sg_table_t), GFP_KERNEL);
+	if (!(sg_table)) {
+		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+			" scatterlist tables\n");
+		return -1;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	while (total_sg_needed) {
+		sg_per_table = (total_sg_needed > max_sg_per_table) ?
+			max_sg_per_table : total_sg_needed;
+
+		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+				GFP_KERNEL);
+		if (!(sg)) {
+			printk(KERN_ERR "Unable to allocate scatterlist array"
+				" for rd_dev_t\n");
+			return -1;
+		}
+
+		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+
+		sg_table[i].sg_table = sg;
+		sg_table[i].rd_sg_count = sg_per_table;
+		sg_table[i].page_start_offset = page_offset;
+		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
+						- 1;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = (struct page *) alloc_pages(
+					GFP_KERNEL, 0);
+			if (!(pg)) {
+				printk(KERN_ERR "Unable to allocate scatterlist"
+					" pages for rd_dev_sg_table_t\n");
+				return -1;
+			}
+			sg_assign_page(&sg[j], pg);
+			sg[j].length = PAGE_SIZE;
+		}
+
+		page_offset += sg_per_table;
+		total_sg_needed -= sg_per_table;
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count);
+
+	return 0;
+}
+
+static void *rd_allocate_virtdevice(
+	se_hba_t *hba,
+	const char *name,
+	int rd_direct)
+{
+	rd_dev_t *rd_dev;
+	rd_host_t *rd_host = (rd_host_t *) hba->hba_ptr;
+
+	rd_dev = kzalloc(sizeof(rd_dev_t), GFP_KERNEL);
+	if (!(rd_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for rd_dev_t\n");
+		return NULL;
+	}
+
+	rd_dev->rd_host = rd_host;
+	rd_dev->rd_direct = rd_direct;
+
+	return rd_dev;
+}
+
+void *rd_DIRECT_allocate_virtdevice(se_hba_t *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 1);
+}
+
+void *rd_MEMCPY_allocate_virtdevice(se_hba_t *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 0);
+}
+
+/*	rd_create_virtdevice():
+ *
+ *
+ */
+static se_device_t *rd_create_virtdevice(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	void *p,
+	int rd_direct)
+{
+	se_device_t *dev;
+	rd_dev_t *rd_dev = (rd_dev_t *) p;
+	rd_host_t *rd_host = (rd_host_t *) hba->hba_ptr;
+	int dev_flags = 0;
+
+	if (rd_dev->rd_direct)
+		dev_flags |= DF_TRANSPORT_DMA_ALLOC;
+
+	if (rd_build_device_space(rd_dev) < 0)
+		goto fail;
+
+	dev = transport_add_device_to_core_hba(hba,
+			(rd_dev->rd_direct) ? &rd_dr_template :
+			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev);
+	if (!(dev))
+		goto fail;
+
+	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
+	rd_dev->rd_queue_depth = dev->queue_depth;
+
+	printk(KERN_INFO "CORE_RD[%u] - Added LIO %s Ramdisk Device ID: %u of"
+		" %u pages in %u tables, %lu total bytes\n",
+		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
+		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count,
+		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
+
+	return dev;
+
+fail:
+	rd_release_device_space(rd_dev);
+	return NULL;
+}
+
+se_device_t *rd_DIRECT_create_virtdevice(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 1);
+}
+
+se_device_t *rd_MEMCPY_create_virtdevice(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 0);
+}
+
+/*	rd_activate_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_activate_device(se_device_t *dev)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) dev->dev_ptr;
+	rd_host_t *rd_host = rd_dev->rd_host;
+
+	printk(KERN_INFO "CORE_RD[%u] - Activating Device with TCQ: %d at"
+		" Ramdisk Device ID: %d\n", rd_host->rd_host_id,
+		rd_dev->rd_queue_depth, rd_dev->rd_dev_id);
+
+	return 0;
+}
+
+/*	rd_deactivate_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+void rd_deactivate_device(se_device_t *dev)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) dev->dev_ptr;
+	rd_host_t *rd_host = rd_dev->rd_host;
+
+	printk(KERN_INFO "CORE_RD[%u] - Deactivating Device with TCQ: %d at"
+		" Ramdisk Device ID: %d\n", rd_host->rd_host_id,
+		rd_dev->rd_queue_depth, rd_dev->rd_dev_id);
+}
+
+/*	rd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+void rd_free_device(void *p)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) p;
+
+	rd_release_device_space(rd_dev);
+	kfree(rd_dev);
+}
+
+/*	rd_transport_complete(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_transport_complete(se_task_t *task)
+{
+	return 0;
+}
+
+/*	rd_allocate_request(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+void *rd_allocate_request(
+	se_task_t *task,
+	se_device_t *dev)
+{
+	rd_request_t *rd_req;
+
+	rd_req = kzalloc(sizeof(rd_request_t), GFP_KERNEL);
+	if (!(rd_req)) {
+		printk(KERN_ERR "Unable to allocate rd_request_t\n");
+		return NULL;
+	}
+	rd_req->rd_dev = (rd_dev_t *) dev->dev_ptr;
+
+	return (void *)rd_req;
+}
+
+/*	rd_emulate_inquiry():
+ *
+ *
+ */
+static int rd_emulate_inquiry(se_task_t *task)
+{
+	unsigned char prod[64], se_location[128];
+	rd_dev_t *rd_dev = (rd_dev_t *) task->se_dev->dev_ptr;
+	se_cmd_t *cmd = TASK_CMD(task);
+	se_hba_t *hba = task->se_dev->se_hba;
+
+	memset(prod, 0, 64);
+	memset(se_location, 0, 128);
+
+	sprintf(prod, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
+	sprintf(se_location, "%u_%u", hba->hba_id, rd_dev->rd_dev_id);
+
+	return transport_generic_emulate_inquiry(cmd, TYPE_DISK, prod,
+			(hba->transport->do_se_mem_map) ? RD_DR_VERSION :
+			RD_MCP_VERSION, se_location);
+}
+
+/*	rd_emulate_read_cap():
+ *
+ *
+ */
+static int rd_emulate_read_cap(se_task_t *task)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) task->se_dev->dev_ptr;
+	u32 blocks = ((rd_dev->rd_page_count * PAGE_SIZE) /
+		       DEV_ATTRIB(task->se_dev)->block_size) - 1;
+
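+	/* READ CAPACITY(10) can report at most 0xffffffff blocks; larger
+	 * devices are clamped so initiators fall back to READ CAPACITY(16). */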
+	if ((((rd_dev->rd_page_count * PAGE_SIZE) /
+	       DEV_ATTRIB(task->se_dev)->block_size) - 1) >= 0x00000000ffffffff)
+		blocks = 0xffffffff;
+
+	return transport_generic_emulate_readcapacity(TASK_CMD(task), blocks);
+}
+
+static int rd_emulate_read_cap16(se_task_t *task)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) task->se_dev->dev_ptr;
+	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
+				   DEV_ATTRIB(task->se_dev)->block_size) - 1;
+
+	return transport_generic_emulate_readcapacity_16(TASK_CMD(task),
+				blocks_long);
+}
+
+/*	rd_emulate_scsi_cdb():
+ *
+ *
+ */
+static int rd_emulate_scsi_cdb(se_task_t *task)
+{
+	int ret;
+	se_cmd_t *cmd = TASK_CMD(task);
+	rd_request_t *rd_req = (rd_request_t *) task->transport_req;
+
+	switch (rd_req->rd_scsi_cdb[0]) {
+	case INQUIRY:
+		if (rd_emulate_inquiry(task) < 0)
+			return PYX_TRANSPORT_INVALID_CDB_FIELD;
+		break;
+	case READ_CAPACITY:
+		ret = rd_emulate_read_cap(task);
+		if (ret < 0)
+			return ret;
+		break;
+	case MODE_SENSE:
+		ret = transport_generic_emulate_modesense(TASK_CMD(task),
+				rd_req->rd_scsi_cdb, rd_req->rd_buf, 0,
+				TYPE_DISK);
+		if (ret < 0)
+			return ret;
+		break;
+	case MODE_SENSE_10:
+		ret = transport_generic_emulate_modesense(TASK_CMD(task),
+				rd_req->rd_scsi_cdb, rd_req->rd_buf, 1,
+				TYPE_DISK);
+		if (ret < 0)
+			return ret;
+		break;
+	case SERVICE_ACTION_IN:
+		if ((T_TASK(cmd)->t_task_cdb[1] & 0x1f) !=
+		     SAI_READ_CAPACITY_16) {
+			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
+					T_TASK(cmd)->t_task_cdb[1] & 0x1f);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = rd_emulate_read_cap16(task);
+		if (ret < 0)
+			return ret;
+		break;
+	case REQUEST_SENSE:
+		ret = transport_generic_emulate_request_sense(cmd,
+				T_TASK(cmd)->t_task_cdb);
+		if (ret < 0)
+			return ret;
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case ERASE:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case SPACE:
+	case START_STOP:
+	case SYNCHRONIZE_CACHE:
+	case TEST_UNIT_READY:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+	case RESERVE:
+	case RESERVE_10:
+	case RELEASE:
+	case RELEASE_10:
+		break;
+	default:
+		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for"
+			" RAMDISKs\n", rd_req->rd_scsi_cdb[0]);
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_get_sg_table():
+ *
+ *
+ */
+static rd_dev_sg_table_t *rd_get_sg_table(rd_dev_t *rd_dev, u32 page)
+{
+	u32 i;
+	rd_dev_sg_table_t *sg_table;
+
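+	/* Linear scan for the table whose page range contains 'page'. */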
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg_table = &rd_dev->sg_table_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		    (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	printk(KERN_ERR "Unable to locate rd_dev_sg_table_t for page: %u\n",
+			page);
+
+	return NULL;
+}
+
+/*	rd_MEMCPY_read():
+ *
+ *
+ */
+static int rd_MEMCPY_read(rd_request_t *req)
+{
+	rd_dev_t *dev = req->rd_dev;
+	rd_dev_sg_table_t *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = (struct scatterlist *) req->rd_buf;
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+		" %u\n", dev->rd_dev_id, req->rd_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	src_offset = rd_offset;
+
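+	/* Copy in chunks bounded by whichever of the current destination or
+	 * source segment has fewer bytes remaining. */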
+	while (req->rd_size) {
+		if ((sg_d[i].length - dst_offset) <
+		    (sg_s[j].length - src_offset)) {
+			length = (sg_d[i].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
+				sg_s[j].length);
+			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src = sg_virt(&sg_s[j]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst_offset = 0;
+			src_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_s[j].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset,
+				j, sg_s[j].length);
+			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			if (sg_d[i].length == length) {
+				i++;
+				dst_offset = 0;
+			} else
+				dst_offset = length;
+
+			src = sg_virt(&sg_s[j++]) + src_offset;
+			if (!src)
+				BUG();
+
+			src_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_write():
+ *
+ *
+ */
+static int rd_MEMCPY_write(rd_request_t *req)
+{
+	rd_dev_t *dev = req->rd_dev;
+	rd_dev_sg_table_t *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
+	sg_s = (struct scatterlist *) req->rd_buf;
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+		" Offset: %u\n", dev->rd_dev_id, req->rd_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	dst_offset = rd_offset;
+
+	while (req->rd_size) {
+		if ((sg_s[i].length - src_offset) <
+		    (sg_d[j].length - dst_offset)) {
+			length = (sg_s[i].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i++]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst = sg_virt(&sg_d[j]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src_offset = 0;
+			dst_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_d[j].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i]) + src_offset;
+			if (!src)
+				BUG();
+
+			if (sg_s[i].length == length) {
+				i++;
+				src_offset = 0;
+			} else
+				src_offset = length;
+
+			dst = sg_virt(&sg_d[j++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			dst_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_d = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_MEMCPY_do_task(se_task_t *task)
+{
+	se_device_t *dev = task->se_dev;
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+	int ret;
+
+	if (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))
+		return rd_emulate_scsi_cdb(task);
+
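+	/* Translate the starting LBA into a ramdisk page index and an
+	 * intra-page byte offset. */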
+	req->rd_lba = task->task_lba;
+	req->rd_page = (req->rd_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+	req->rd_offset = (req->rd_lba %
+			 (PAGE_SIZE / DEV_ATTRIB(dev)->block_size)) *
+			  DEV_ATTRIB(dev)->block_size;
+	req->rd_size = task->task_size;
+
+	if (req->rd_data_direction == RD_DATA_READ)
+		ret = rd_MEMCPY_read(req);
+	else
+		ret = rd_MEMCPY_write(req);
+
+	if (ret != 0)
+		return ret;
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_DIRECT_with_offset():
+ *
+ *
+ */
+static int rd_DIRECT_with_offset(
+	se_task_t *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	rd_request_t *req = (rd_request_t *)task->transport_req;
+	rd_dev_t *dev = req->rd_dev;
+	rd_dev_sg_table_t *table;
+	se_mem_t *se_mem;
+	struct scatterlist *sg_s;
+	u32 j = 0, set_offset = 1;
+	u32 get_next_table = 0, offset_length, table_sg_end;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+		(req->rd_data_direction != RD_DATA_READ) ? "Write" : "Read",
+		req->rd_lba, req->rd_size, req->rd_page, req->rd_offset);
+#endif
+	while (req->rd_size) {
+		se_mem = kzalloc(sizeof(se_mem_t), GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate se_mem_t\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
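+		/* Segments alternate between the tail of one page starting at
+		 * rd_offset and the head (up to rd_offset bytes) of the next. */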
+		if (set_offset) {
+			offset_length = sg_s[j].length - req->rd_offset;
+			if (offset_length > req->rd_size)
+				offset_length = req->rd_size;
+
+			se_mem->se_page = sg_page(&sg_s[j++]);
+			se_mem->se_off = req->rd_offset;
+			se_mem->se_len = offset_length;
+
+			set_offset = 0;
+			get_next_table = (j > table_sg_end);
+			goto check_eot;
+		}
+
+		offset_length = (req->rd_size < req->rd_offset) ?
+			req->rd_size : req->rd_offset;
+
+		se_mem->se_page = sg_page(&sg_s[j]);
+		se_mem->se_len = offset_length;
+
+		set_offset = 1;
+
+check_eot:
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
+			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
+			req->rd_page, req->rd_size, offset_length, j, se_mem,
+			se_mem->se_page, se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= offset_length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (!set_offset && !get_next_table)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk(KERN_INFO "page: %u in same page table\n",
+					req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_task_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u se_mem_t segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_without_offset():
+ *
+ *
+ */
+static int rd_DIRECT_without_offset(
+	se_task_t *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	rd_request_t *req = (rd_request_t *)task->transport_req;
+	rd_dev_t *dev = req->rd_dev;
+	rd_dev_sg_table_t *table;
+	se_mem_t *se_mem;
+	struct scatterlist *sg_s;
+	u32 length, j = 0;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
+		(req->rd_data_direction != RD_DATA_READ) ? "Write" : "Read",
+		req->rd_lba, req->rd_size, req->rd_page);
+#endif
+	while (req->rd_size) {
+		se_mem = kzalloc(sizeof(se_mem_t), GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate se_mem_t\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
+		length = (req->rd_size < sg_s[j].length) ?
+			req->rd_size : sg_s[j].length;
+
+		se_mem->se_page = sg_page(&sg_s[j++]);
+		se_mem->se_len = length;
+
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
+			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
+			req->rd_size, j, se_mem, se_mem->se_page,
+			se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk("page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_task_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u se_mem_t segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_do_se_mem_map():
+ *
+ *
+ */
+int rd_DIRECT_do_se_mem_map(
+	se_task_t *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	se_mem_t *in_se_mem,
+	se_mem_t **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+	int ret;
+
+	req->rd_lba = task->task_lba;
+	req->rd_req_flags = RRF_GOT_LBA;
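+	/* Same LBA -> (page, offset) translation as rd_MEMCPY_do_task(). */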
+	req->rd_page = ((req->rd_lba * DEV_ATTRIB(task->se_dev)->block_size) /
+			PAGE_SIZE);
+	req->rd_offset = (req->rd_lba %
+			(PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size)) *
+			DEV_ATTRIB(task->se_dev)->block_size;
+	req->rd_size = task->task_size;
+
+	if (req->rd_offset)
+		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
+				task_offset);
+	else
+		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
+				task_offset);
+
+	return ret;
+}
+
+/*	rd_DIRECT_free_DMA():
+ *
+ *
+ */
+void rd_DIRECT_free_DMA(se_cmd_t *cmd)
+{
+	se_mem_t *se_mem, *se_mem_tmp;
+
+	/*
+	 * The scatterlists in the RAMDISK DIRECT case are using the pages
+	 * from the rd_dev_t's scatterlist table. They are referencing
+	 * valid memory that is held within the RD transport plugin, so we
+	 * only free the se_mem_t elements.
+	 */
+	list_for_each_entry_safe(se_mem, se_mem_tmp, T_TASK(cmd)->t_mem_list,
+				se_list) {
+		 list_del(&se_mem->se_list);
+		 kfree(se_mem);
+	}
+	kfree(T_TASK(cmd)->t_mem_list);
+	T_TASK(cmd)->t_mem_list = NULL;
+	T_TASK(cmd)->t_task_se_num = 0;
+}
+
+/*	rd_DIRECT_allocate_DMA():
+ *
+ *	Note that rd_DIRECT_do_se_mem_map() actually does the real work.
+ */
+int rd_DIRECT_allocate_DMA(se_cmd_t *cmd, u32 length, u32 dma_size)
+{
+	T_TASK(cmd)->t_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (!(T_TASK(cmd)->t_mem_list)) {
+		printk(KERN_ERR "Unable to allocate memory for T_TASK(cmd)"
+				"->t_mem_list\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(T_TASK(cmd)->t_mem_list);
+
+	return 0;
+}
+
+/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_DIRECT_do_task(se_task_t *task)
+{
+	if (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))
+		return rd_emulate_scsi_cdb(task);
+
+	/*
+	 * At this point the locally allocated RD tables have been mapped
+	 * to se_mem_t elements in rd_DIRECT_do_se_mem_map().
+	 */
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+void rd_free_task(se_task_t *task)
+{
+	rd_request_t *req;
+	req = (rd_request_t *) task->transport_req;
+
+	kfree(req);
+}
+
+ssize_t rd_set_configfs_dev_params(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) se_dev->se_dev_su_ptr;
+	char *buf, *cur, *ptr, *ptr2;
+	unsigned long rd_pages;
+	int params = 0, ret;
+
+	buf = kzalloc(count, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate memory for temporary buffer\n");
+		return 0;
+	}
+	memcpy(buf, page, count);
+	cur = buf;
+
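+	/* Walk the 'key=value' parameter list; only rd_pages= is recognized. */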
+	while (cur) {
+		ptr = strstr(cur, "=");
+		if (!(ptr))
+			goto out;
+
+		*ptr = '\0';
+		ptr++;
+
+		ptr2 = strstr(cur, "rd_pages");
+		if ((ptr2)) {
+			transport_check_dev_params_delim(ptr, &cur);
+			ret = strict_strtoul(ptr, 0, &rd_pages);
+			if (ret < 0) {
+				printk(KERN_ERR "strict_strtoul() failed for"
+					" rd_pages=\n");
+				break;
+			}
+			rd_dev->rd_page_count = (u32)rd_pages;
+			printk(KERN_INFO "RAMDISK: Referencing Page"
+				" Count: %u\n", rd_dev->rd_page_count);
+			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
+			params++;
+		} else
+			cur = NULL;
+	}
+
+out:
+	kfree(buf);
+	return (params) ? count : -EINVAL;
+}
+
+ssize_t rd_check_configfs_dev_params(se_hba_t *hba, se_subsystem_dev_t *se_dev)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) se_dev->se_dev_su_ptr;
+
+	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+		printk(KERN_INFO "Missing rd_pages= parameter\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+ssize_t rd_show_configfs_dev_params(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	char *page)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) se_dev->se_dev_su_ptr;
+	int bl = 0;
+
+	 __rd_get_dev_info(rd_dev, page, &bl);
+	return (ssize_t)bl;
+}
+
+void rd_dr_get_plugin_info(void *p, char *b, int *bl)
+{
+	*bl += sprintf(b + *bl, "%s RAMDISK_DR Plugin %s\n",
+			PYX_ISCSI_VENDOR, RD_DR_VERSION);
+}
+
+void rd_mcp_get_plugin_info(void *p, char *b, int *bl)
+{
+	*bl += sprintf(b + *bl, "%s RAMDISK_MCP Plugin %s\n",
+			PYX_ISCSI_VENDOR, RD_MCP_VERSION);
+}
+
+void rd_get_hba_info(se_hba_t *hba, char *b, int *bl)
+{
+	rd_host_t *rd_host = (rd_host_t *)hba->hba_ptr;
+
+	*bl += sprintf(b + *bl, "SE Host ID: %u  RD Host ID: %u\n",
+		hba->hba_id, rd_host->rd_host_id);
+	*bl += sprintf(b + *bl, "        LIO RamDisk HBA\n");
+}
+
+void rd_get_dev_info(se_device_t *dev, char *b, int *bl)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) dev->dev_ptr;
+
+	__rd_get_dev_info(rd_dev, b, bl);
+}
+
+void __rd_get_dev_info(rd_dev_t *rd_dev, char *b, int *bl)
+{
+	*bl += sprintf(b + *bl, "LIO RamDisk ID: %u  RamDisk Makeup: %s\n",
+			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
+			"rd_direct" : "rd_mcp");
+	*bl += sprintf(b + *bl, "        PAGES/PAGE_SIZE: %u*%lu"
+			"  SG_table_count: %u\n", rd_dev->rd_page_count,
+			PAGE_SIZE, rd_dev->sg_table_count);
+
+	return;
+}
+
+/*	rd_map_task_non_SG():
+ *
+ *
+ */
+void rd_map_task_non_SG(se_task_t *task)
+{
+	se_cmd_t *cmd = TASK_CMD(task);
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_bufflen		= task->task_size;
+	req->rd_buf		= (void *) T_TASK(cmd)->t_task_buf;
+	req->rd_sg_count	= 0;
+}
+
+/*	rd_map_task_SG():
+ *
+ *
+ */
+void rd_map_task_SG(se_task_t *task)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_bufflen		= task->task_size;
+	req->rd_buf		= task->task_sg;
+	req->rd_sg_count	= task->task_sg_num;
+}
+
+/*	rd_CDB_inquiry():
+ *
+ *
+ */
+int rd_CDB_inquiry(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction  = RD_DATA_READ;
+
+	rd_map_task_non_SG(task);
+	return 0;
+}
+
+/*      rd_CDB_none():
+ *
+ *
+ */
+int rd_CDB_none(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction	= RD_DATA_NONE;
+	req->rd_bufflen		= 0;
+	req->rd_sg_count	= 0;
+	req->rd_buf		= NULL;
+
+	return 0;
+}
+
+/*	rd_CDB_read_non_SG():
+ *
+ *
+ */
+int rd_CDB_read_non_SG(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction = RD_DATA_READ;
+	rd_map_task_non_SG(task);
+
+	return 0;
+}
+
+/*	rd_CDB_read_SG():
+ *
+ *
+ */
+int rd_CDB_read_SG(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction = RD_DATA_READ;
+	rd_map_task_SG(task);
+
+	return req->rd_sg_count;
+}
+
+/*	rd_CDB_write_non_SG():
+ *
+ *
+ */
+int rd_CDB_write_non_SG(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction = RD_DATA_WRITE;
+	rd_map_task_non_SG(task);
+
+	return 0;
+}
+
+/*	rd_CDB_write_SG():
+ *
+ *
+ */
+int rd_CDB_write_SG(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction = RD_DATA_WRITE;
+	rd_map_task_SG(task);
+
+	return req->rd_sg_count;
+}
+
+/*	rd_DIRECT_check_lba():
+ *
+ *
+ */
+int rd_DIRECT_check_lba(unsigned long long lba, se_device_t *dev)
+{
+	return ((lba % (PAGE_SIZE / DEV_ATTRIB(dev)->block_size)) *
+		 DEV_ATTRIB(dev)->block_size) ? 1 : 0;
+}
+
+/*	rd_MEMCPY_check_lba():
+ *
+ *
+ */
+int rd_MEMCPY_check_lba(unsigned long long lba, se_device_t *dev)
+{
+	return 0;
+}
+
+/*	rd_check_for_SG(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_check_for_SG(se_task_t *task)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	return req->rd_sg_count;
+}
+
+/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+unsigned char *rd_get_cdb(se_task_t *task)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	return req->rd_scsi_cdb;
+}
+
+/*	rd_get_blocksize(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+u32 rd_get_blocksize(se_device_t *dev)
+{
+	return RD_BLOCKSIZE;
+}
+
+u32 rd_get_device_rev(se_device_t *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+u32 rd_get_device_type(se_device_t *dev)
+{
+	return TYPE_DISK;
+}
+
+/*	rd_get_dma_length(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+u32 rd_get_dma_length(u32 task_size, se_device_t *dev)
+{
+	return PAGE_SIZE;
+}
+
+/*	rd_get_max_sectors(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+u32 rd_get_max_sectors(se_device_t *dev)
+{
+	return RD_MAX_SECTORS;
+}
+
+/*	rd_get_queue_depth(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+u32 rd_get_queue_depth(se_device_t *dev)
+{
+	return RD_DEVICE_QUEUE_DEPTH;
+}
+
+u32 rd_get_max_queue_depth(se_device_t *dev)
+{
+	return RD_MAX_DEVICE_QUEUE_DEPTH;
+}
diff --git a/include/target/target_core_rd.h b/include/target/target_core_rd.h
new file mode 100644
index 0000000..67168fd
--- /dev/null
+++ b/include/target/target_core_rd.h
@@ -0,0 +1,257 @@
+/*******************************************************************************
+ * Filename:  target_core_rd.h
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport specific
+ * definitions and prototypes.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2009 Rising Tide Software, Inc.
+ * Copyright (c) 2008-2009 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+
+#ifndef TARGET_CORE_RD_H
+#define TARGET_CORE_RD_H
+
+#define RD_HBA_VERSION		"v3.1"
+#define RD_DR_VERSION		"3.1"
+#define RD_MCP_VERSION		"3.1"
+
+/* Largest piece of memory kmalloc can allocate */
+#define RD_MAX_ALLOCATION_SIZE	65536
+/* Maximum queuedepth for the Ramdisk HBA */
+#define RD_HBA_QUEUE_DEPTH	256
+#define RD_DEVICE_QUEUE_DEPTH	32
+#define RD_MAX_DEVICE_QUEUE_DEPTH 128
+#define RD_BLOCKSIZE		512
+#define RD_MAX_SECTORS		1024
+
+#define RD_DATA_READ		1
+#define RD_DATA_WRITE		2
+#define RD_DATA_NONE		3
+
+extern se_global_t *se_global;
+
+#ifndef RD_INCLUDE_STRUCTS
+extern int rd_CDB_inquiry(se_task_t *, u32);
+extern int rd_CDB_none(se_task_t *, u32);
+extern int rd_CDB_read_non_SG(se_task_t *, u32);
+extern int rd_CDB_read_SG(se_task_t *, u32);
+extern int rd_CDB_write_non_SG(se_task_t *, u32);
+extern int rd_CDB_write_SG(se_task_t *, u32);
+
+extern int rd_attach_hba(se_hba_t *, u32);
+extern int rd_detach_hba(se_hba_t *);
+extern void *rd_DIRECT_allocate_virtdevice(se_hba_t *, const char *);
+extern void *rd_MEMCPY_allocate_virtdevice(se_hba_t *, const char *);
+extern se_device_t *rd_DIRECT_create_virtdevice(se_hba_t *,
+				se_subsystem_dev_t *, void *);
+extern se_device_t *rd_MEMCPY_create_virtdevice(se_hba_t *,
+				se_subsystem_dev_t *, void *);
+extern int rd_activate_device(se_device_t *);
+extern void rd_deactivate_device(se_device_t *);
+extern void rd_free_device(void *);
+extern int rd_transport_complete(se_task_t *);
+extern void *rd_allocate_request(se_task_t *, se_device_t *);
+extern int rd_DIRECT_do_task(se_task_t *);
+extern int rd_MEMCPY_do_task(se_task_t *);
+extern int rd_DIRECT_allocate_DMA(se_cmd_t *, u32, u32);
+extern int rd_DIRECT_do_se_mem_map(struct se_task_s *, struct list_head *,
+				void *, struct se_mem_s *, struct se_mem_s **,
+				u32 *, u32 *);
+extern void rd_DIRECT_free_DMA(se_cmd_t *);
+extern void rd_free_task(se_task_t *);
+extern ssize_t rd_set_configfs_dev_params(se_hba_t *, se_subsystem_dev_t *,
+						const char *, ssize_t);
+extern ssize_t rd_check_configfs_dev_params(se_hba_t *, se_subsystem_dev_t *);
+extern ssize_t rd_show_configfs_dev_params(se_hba_t *, se_subsystem_dev_t *,
+						char *);
+extern void rd_dr_get_plugin_info(void *, char *, int *);
+extern void rd_mcp_get_plugin_info(void *, char *, int *);
+extern void rd_get_hba_info(se_hba_t *, char *, int *);
+extern void rd_get_dev_info(se_device_t *, char *, int *);
+extern int rd_DIRECT_check_lba(unsigned long long, se_device_t *);
+extern int rd_MEMCPY_check_lba(unsigned long long, se_device_t *);
+extern int rd_check_for_SG(se_task_t *);
+extern unsigned char *rd_get_cdb(se_task_t *);
+extern u32 rd_get_blocksize(se_device_t *);
+extern u32 rd_get_device_rev(se_device_t *);
+extern u32 rd_get_device_type(se_device_t *);
+extern u32 rd_get_dma_length(u32, se_device_t *);
+extern u32 rd_get_max_sectors(se_device_t *);
+extern u32 rd_get_queue_depth(se_device_t *);
+extern u32 rd_get_max_queue_depth(se_device_t *);
+#endif /* ! RD_INCLUDE_STRUCTS */
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+typedef struct rd_request_s {
+	/* SCSI CDB from iSCSI Command PDU */
+	unsigned char	rd_scsi_cdb[SCSI_CDB_SIZE];
+	/* Data Direction */
+	u8		rd_data_direction;
+	/* Total length of request */
+	u32		rd_bufflen;
+	/* RD request flags */
+	u32		rd_req_flags;
+	/* Offset from start of page */
+	u32		rd_offset;
+	/* Starting page in Ramdisk for request */
+	u32		rd_page;
+	/* Total number of pages needed for request */
+	u32		rd_page_count;
+	/* Scatterlist count */
+	u32		rd_sg_count;
+	u32		rd_size;
+	/* Logical Block Address */
+	unsigned long long	rd_lba;
+	 /* Data buffer containing scatterlist(s) or
+	  * contiguous memory segments */
+	void		*rd_buf;
+	/* Ramdisk device */
+	struct rd_dev_s	*rd_dev;
+} ____cacheline_aligned rd_request_t;
+
+typedef struct rd_dev_sg_table_s {
+	u32		page_start_offset;
+	u32		page_end_offset;
+	u32		rd_sg_count;
+	struct scatterlist *sg_table;
+} ____cacheline_aligned rd_dev_sg_table_t;
+
+#define RDF_HAS_PAGE_COUNT	0x01
+
+typedef struct rd_dev_s {
+	int		rd_direct;
+	u32		rd_flags;
+	/* Unique Ramdisk Device ID in Ramdisk HBA */
+	u32		rd_dev_id;
+	/* Total page count for ramdisk device */
+	u32		rd_page_count;
+	/* Number of SG tables in sg_table_array */
+	u32		sg_table_count;
+	u32		rd_queue_depth;
+	/* Array of rd_dev_sg_table_t containing scatterlists */
+	rd_dev_sg_table_t *sg_table_array;
+	/* Ramdisk HBA device is connected to */
+	struct rd_host_s *rd_host;
+	/* Next RD Device entry in list */
+	struct rd_dev_s *next;
+} ____cacheline_aligned rd_dev_t;
+
+extern void __rd_get_dev_info(rd_dev_t *, char *, int *);
+
+typedef struct rd_host_s {
+	u32		rd_host_dev_id_count;
+	u32		rd_host_id;		/* Unique Ramdisk Host ID */
+} ____cacheline_aligned rd_host_t;
+
+#ifndef RD_INCLUDE_STRUCTS
+/*
+ * We use the generic command sequencer, so we must setup
+ * se_subsystem_spc_t.
+ */
+se_subsystem_spc_t rd_template_spc = {
+	.inquiry		= rd_CDB_inquiry,
+	.none			= rd_CDB_none,
+	.read_non_SG		= rd_CDB_read_non_SG,
+	.read_SG		= rd_CDB_read_SG,
+	.write_non_SG		= rd_CDB_write_non_SG,
+	.write_SG		= rd_CDB_write_SG,
+};
+
+se_subsystem_api_t rd_dr_template = {
+	.name			= "rd_dr",
+	.type			= RAMDISK_DR,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
+	.create_virtdevice	= rd_DIRECT_create_virtdevice,
+	.activate_device	= rd_activate_device,
+	.deactivate_device	= rd_deactivate_device,
+	.free_device		= rd_free_device,
+	.transport_complete	= rd_transport_complete,
+	.allocate_DMA		= rd_DIRECT_allocate_DMA,
+	.free_DMA		= rd_DIRECT_free_DMA,
+	.allocate_request	= rd_allocate_request,
+	.do_task		= rd_DIRECT_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_plugin_info	= rd_dr_get_plugin_info,
+	.get_hba_info		= rd_get_hba_info,
+	.get_dev_info		= rd_get_dev_info,
+	.check_lba		= rd_DIRECT_check_lba,
+	.check_for_SG		= rd_check_for_SG,
+	.get_cdb		= rd_get_cdb,
+	.get_blocksize		= rd_get_blocksize,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_dma_length		= rd_get_dma_length,
+	.get_max_sectors	= rd_get_max_sectors,
+	.get_queue_depth	= rd_get_queue_depth,
+	.get_max_queue_depth	= rd_get_max_queue_depth,
+	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
+	.write_pending		= NULL,
+	.spc			= &rd_template_spc,
+};
+
+se_subsystem_api_t rd_mcp_template = {
+	.name			= "rd_mcp",
+	.type			= RAMDISK_MCP,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
+	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
+	.activate_device	= rd_activate_device,
+	.deactivate_device	= rd_deactivate_device,
+	.free_device		= rd_free_device,
+	.transport_complete	= rd_transport_complete,
+	.allocate_request	= rd_allocate_request,
+	.do_task		= rd_MEMCPY_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_plugin_info	= rd_mcp_get_plugin_info,
+	.get_hba_info		= rd_get_hba_info,
+	.get_dev_info		= rd_get_dev_info,
+	.check_lba		= rd_MEMCPY_check_lba,
+	.check_for_SG		= rd_check_for_SG,
+	.get_cdb		= rd_get_cdb,
+	.get_blocksize		= rd_get_blocksize,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_dma_length		= rd_get_dma_length,
+	.get_max_sectors	= rd_get_max_sectors,
+	.get_queue_depth	= rd_get_queue_depth,
+	.get_max_queue_depth	= rd_get_max_queue_depth,
+	.write_pending		= NULL,
+	.spc			= &rd_template_spc,
+};
+
+#endif /* ! RD_INCLUDE_STRUCTS */
+
+#endif /* TARGET_CORE_RD_H */




* [RFC PATCH 11/19] Target_Core_Mod RAMDISK_DR and RAMDISK_MCP subsystem plugin
@ 2009-09-18 22:08 Nicholas A. Bellinger
From: Nicholas A. Bellinger @ 2009-09-18 22:08 UTC (permalink / raw)
  To: LKML, linux-scsi
  Cc: Andrew Morton, Greg KH, Douglas Gilbert, James Bottomley,
	Hannes Reinecke, FUJITA Tomonori, Mike Christie, Joel Becker,
	Martin K. Petersen, Christoph Hellwig, Linus Torvalds,
	Alan Stern, Boaz Harrosh, Florian Haas, Philipp Reisner,
	Lars Ellenberg, Daniel Walker

[RFC PATCH 11/19] Target_Core_Mod RAMDISK_DR and RAMDISK_MCP subsystem plugin

This patch adds the RAMDISK_DR and RAMDISK_MCP subsystem plugins, which back
ramdisk storage with struct page memory allocated from the upstream Linux/MM code.

Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>

 
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 0000000..68ebd43
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,1422 @@
+/*******************************************************************************
+ * Filename:  target_core_rd.c
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2009 Rising Tide Software, Inc.
+ * Copyright (c) 2008-2009 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+
+#define TARGET_CORE_RD_C
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_rd.h>
+
+#undef TARGET_CORE_RD_C
+
+/* #define DEBUG_RAMDISK_MCP */
+/* #define DEBUG_RAMDISK_DR */
+
+/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_attach_hba(se_hba_t *hba, u32 host_id)
+{
+	rd_host_t *rd_host;
+
+	rd_host = kzalloc(sizeof(rd_host_t), GFP_KERNEL);
+	if (!(rd_host)) {
+		printk(KERN_ERR "Unable to allocate memory for rd_host_t\n");
+		return -ENOMEM;
+	}
+
+	rd_host->rd_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) rd_host;
+	hba->transport = (hba->type == RAMDISK_DR) ?
+			&rd_dr_template : &rd_mcp_template;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
+		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
+		RD_MAX_SECTORS);
+
+	return 0;
+}
+
+/*	rd_detach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_detach_hba(se_hba_t *hba)
+{
+	rd_host_t *rd_host;
+
+	if (!hba->hba_ptr) {
+		printk(KERN_ERR "hba->hba_ptr is NULL!\n");
+		return -1;
+	}
+
+	rd_host = (rd_host_t *) hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
+
+	kfree(rd_host);
+	hba->hba_ptr = NULL;
+
+	return 0;
+}
+
+/*	rd_release_device_space():
+ *
+ *
+ */
+void rd_release_device_space(rd_dev_t *rd_dev)
+{
+	u32 i, j, page_count = 0, sg_per_table;
+	rd_dev_sg_table_t *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	sg_table = rd_dev->sg_table_array;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg = sg_table[i].sg_table;
+		sg_per_table = sg_table[i].rd_sg_count;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = sg_page(&sg[j]);
+			if ((pg)) {
+				__free_page(pg);
+				page_count++;
+			}
+		}
+
+		kfree(sg);
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	kfree(sg_table);
+	rd_dev->sg_table_array = NULL;
+	rd_dev->sg_table_count = 0;
+}
+
+
+/*	rd_build_device_space():
+ *
+ *
+ */
+static int rd_build_device_space(rd_dev_t *rd_dev)
+{
+	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	rd_dev_sg_table_t *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (rd_dev->rd_page_count <= 0) {
+		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+			rd_dev->rd_page_count);
+		return -1;
+	}
+	total_sg_needed = rd_dev->rd_page_count;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(rd_dev_sg_table_t), GFP_KERNEL);
+	if (!(sg_table)) {
+		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+			" scatterlist tables\n");
+		return -1;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	while (total_sg_needed) {
+		sg_per_table = (total_sg_needed > max_sg_per_table) ?
+			max_sg_per_table : total_sg_needed;
+
+		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+				GFP_KERNEL);
+		if (!(sg)) {
+			printk(KERN_ERR "Unable to allocate scatterlist array"
+				" for rd_dev_t\n");
+			return -1;
+		}
+
+		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+
+		sg_table[i].sg_table = sg;
+		sg_table[i].rd_sg_count = sg_per_table;
+		sg_table[i].page_start_offset = page_offset;
+		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
+						- 1;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = (struct page *) alloc_pages(
+					GFP_KERNEL, 0);
+			if (!(pg)) {
+				printk(KERN_ERR "Unable to allocate scatterlist"
+					" pages for rd_dev_sg_table_t\n");
+				return -1;
+			}
+			sg_assign_page(&sg[j], pg);
+			sg[j].length = PAGE_SIZE;
+		}
+
+		page_offset += sg_per_table;
+		total_sg_needed -= sg_per_table;
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count);
+
+	return 0;
+}
+
+static void *rd_allocate_virtdevice(
+	se_hba_t *hba,
+	const char *name,
+	int rd_direct)
+{
+	rd_dev_t *rd_dev;
+	rd_host_t *rd_host = (rd_host_t *) hba->hba_ptr;
+
+	rd_dev = kzalloc(sizeof(rd_dev_t), GFP_KERNEL);
+	if (!(rd_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for rd_dev_t\n");
+		return NULL;
+	}
+
+	rd_dev->rd_host = rd_host;
+	rd_dev->rd_direct = rd_direct;
+
+	return rd_dev;
+}
+
+void *rd_DIRECT_allocate_virtdevice(se_hba_t *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 1);
+}
+
+void *rd_MEMCPY_allocate_virtdevice(se_hba_t *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 0);
+}
+
+/*	rd_create_virtdevice():
+ *
+ *
+ */
+static se_device_t *rd_create_virtdevice(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	void *p,
+	int rd_direct)
+{
+	se_device_t *dev;
+	rd_dev_t *rd_dev = (rd_dev_t *) p;
+	rd_host_t *rd_host = (rd_host_t *) hba->hba_ptr;
+	int dev_flags = 0;
+
+	if (rd_dev->rd_direct)
+		dev_flags |= DF_TRANSPORT_DMA_ALLOC;
+
+	if (rd_build_device_space(rd_dev) < 0)
+		goto fail;
+
+	dev = transport_add_device_to_core_hba(hba,
+			(rd_dev->rd_direct) ? &rd_dr_template :
+			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev);
+	if (!(dev))
+		goto fail;
+
+	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
+	rd_dev->rd_queue_depth = dev->queue_depth;
+
+	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+		" %u pages in %u tables, %lu total bytes\n",
+		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
+		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count,
+		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
+
+	return dev;
+
+fail:
+	rd_release_device_space(rd_dev);
+	return NULL;
+}
+
+se_device_t *rd_DIRECT_create_virtdevice(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 1);
+}
+
+se_device_t *rd_MEMCPY_create_virtdevice(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 0);
+}
+
+/*	rd_activate_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_activate_device(se_device_t *dev)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) dev->dev_ptr;
+	rd_host_t *rd_host = rd_dev->rd_host;
+
+	printk(KERN_INFO "CORE_RD[%u] - Activating Device with TCQ: %d at"
+		" Ramdisk Device ID: %d\n", rd_host->rd_host_id,
+		rd_dev->rd_queue_depth, rd_dev->rd_dev_id);
+
+	return 0;
+}
+
+/*	rd_deactivate_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+void rd_deactivate_device(se_device_t *dev)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) dev->dev_ptr;
+	rd_host_t *rd_host = rd_dev->rd_host;
+
+	printk(KERN_INFO "CORE_RD[%u] - Deactivating Device with TCQ: %d at"
+		" Ramdisk Device ID: %d\n", rd_host->rd_host_id,
+		rd_dev->rd_queue_depth, rd_dev->rd_dev_id);
+}
+
+/*	rd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+void rd_free_device(void *p)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) p;
+
+	rd_release_device_space(rd_dev);
+	kfree(rd_dev);
+}
+
+/*	rd_transport_complete(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_transport_complete(se_task_t *task)
+{
+	return 0;
+}
+
+/*	rd_allocate_request(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+void *rd_allocate_request(
+	se_task_t *task,
+	se_device_t *dev)
+{
+	rd_request_t *rd_req;
+
+	rd_req = kzalloc(sizeof(rd_request_t), GFP_KERNEL);
+	if (!(rd_req)) {
+		printk(KERN_ERR "Unable to allocate rd_request_t\n");
+		return NULL;
+	}
+	rd_req->rd_dev = (rd_dev_t *) dev->dev_ptr;
+
+	return (void *)rd_req;
+}
+
+/*	rd_emulate_inquiry():
+ *
+ *
+ */
+static int rd_emulate_inquiry(se_task_t *task)
+{
+	unsigned char prod[64], se_location[128];
+	rd_dev_t *rd_dev = (rd_dev_t *) task->se_dev->dev_ptr;
+	se_cmd_t *cmd = TASK_CMD(task);
+	se_hba_t *hba = task->se_dev->se_hba;
+
+	memset(prod, 0, 64);
+	memset(se_location, 0, 128);
+
+	sprintf(prod, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
+	sprintf(se_location, "%u_%u", hba->hba_id, rd_dev->rd_dev_id);
+
+	return transport_generic_emulate_inquiry(cmd, TYPE_DISK, prod,
+			(hba->transport->do_se_mem_map) ? RD_DR_VERSION :
+			RD_MCP_VERSION, se_location);
+}
+
+/*	rd_emulate_read_cap():
+ *
+ *
+ */
+static int rd_emulate_read_cap(se_task_t *task)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) task->se_dev->dev_ptr;
+	u32 blocks = ((rd_dev->rd_page_count * PAGE_SIZE) /
+		       DEV_ATTRIB(task->se_dev)->block_size) - 1;
+
+	if ((((rd_dev->rd_page_count * PAGE_SIZE) /
+	       DEV_ATTRIB(task->se_dev)->block_size) - 1) >= 0x00000000ffffffff)
+		blocks = 0xffffffff;
+
+	return transport_generic_emulate_readcapacity(TASK_CMD(task), blocks);
+}
+
+static int rd_emulate_read_cap16(se_task_t *task)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) task->se_dev->dev_ptr;
+	unsigned long long blocks_long = (((unsigned long long)
+			rd_dev->rd_page_count * PAGE_SIZE) /
+			DEV_ATTRIB(task->se_dev)->block_size) - 1;
+
+	return transport_generic_emulate_readcapacity_16(TASK_CMD(task),
+				blocks_long);
+}
+
+/*	rd_emulate_scsi_cdb():
+ *
+ *
+ */
+static int rd_emulate_scsi_cdb(se_task_t *task)
+{
+	int ret;
+	se_cmd_t *cmd = TASK_CMD(task);
+	rd_request_t *rd_req = (rd_request_t *) task->transport_req;
+
+	switch (rd_req->rd_scsi_cdb[0]) {
+	case INQUIRY:
+		if (rd_emulate_inquiry(task) < 0)
+			return PYX_TRANSPORT_INVALID_CDB_FIELD;
+		break;
+	case READ_CAPACITY:
+		ret = rd_emulate_read_cap(task);
+		if (ret < 0)
+			return ret;
+		break;
+	case MODE_SENSE:
+		ret = transport_generic_emulate_modesense(TASK_CMD(task),
+				rd_req->rd_scsi_cdb, rd_req->rd_buf, 0,
+				TYPE_DISK);
+		if (ret < 0)
+			return ret;
+		break;
+	case MODE_SENSE_10:
+		ret = transport_generic_emulate_modesense(TASK_CMD(task),
+				rd_req->rd_scsi_cdb, rd_req->rd_buf, 1,
+				TYPE_DISK);
+		if (ret < 0)
+			return ret;
+		break;
+	case SERVICE_ACTION_IN:
+		if ((T_TASK(cmd)->t_task_cdb[1] & 0x1f) !=
+		     SAI_READ_CAPACITY_16) {
+			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
+					T_TASK(cmd)->t_task_cdb[1] & 0x1f);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = rd_emulate_read_cap16(task);
+		if (ret < 0)
+			return ret;
+		break;
+	case REQUEST_SENSE:
+		ret = transport_generic_emulate_request_sense(cmd,
+				T_TASK(cmd)->t_task_cdb);
+		if (ret < 0)
+			return ret;
+		break;
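+	/*
+	 * The opcodes below move no data on a ramdisk and are simply
+	 * completed with GOOD status.
+	 */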
+	case ALLOW_MEDIUM_REMOVAL:
+	case ERASE:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case SPACE:
+	case START_STOP:
+	case SYNCHRONIZE_CACHE:
+	case TEST_UNIT_READY:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+	case RESERVE:
+	case RESERVE_10:
+	case RELEASE:
+	case RELEASE_10:
+		break;
+	default:
+		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for"
+			" RAMDISKs\n", rd_req->rd_scsi_cdb[0]);
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_get_sg_table():
+ *
+ *
+ */
+static rd_dev_sg_table_t *rd_get_sg_table(rd_dev_t *rd_dev, u32 page)
+{
+	u32 i;
+	rd_dev_sg_table_t *sg_table;
+
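+	/*
+	 * Linear scan of sg_table_array[] for the table whose
+	 * [page_start_offset, page_end_offset] range covers @page.
+	 */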
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg_table = &rd_dev->sg_table_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		    (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	printk(KERN_ERR "Unable to locate rd_dev_sg_table_t for page: %u\n",
+			page);
+
+	return NULL;
+}
+
+/*	rd_MEMCPY_read():
+ *
+ *
+ */
+static int rd_MEMCPY_read(rd_request_t *req)
+{
+	rd_dev_t *dev = req->rd_dev;
+	rd_dev_sg_table_t *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = (struct scatterlist *) req->rd_buf;
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+		" %u\n", dev->rd_dev_id, req->rd_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	src_offset = rd_offset;
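+	/*
+	 * Walk the destination (task) and source (ramdisk) scatterlists in
+	 * lockstep, copying the smaller of the two remaining extents each
+	 * pass; page_end marks when the ramdisk side has crossed into the
+	 * next page so the sg table can be advanced below.
+	 */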
+
+	while (req->rd_size) {
+		if ((sg_d[i].length - dst_offset) <
+		    (sg_s[j].length - src_offset)) {
+			length = (sg_d[i].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
+				sg_s[j].length);
+			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src = sg_virt(&sg_s[j]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst_offset = 0;
+			src_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_s[j].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset,
+				j, sg_s[j].length);
+			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			if (sg_d[i].length == length) {
+				i++;
+				dst_offset = 0;
+			} else
+				dst_offset = length;
+
+			src = sg_virt(&sg_s[j++]) + src_offset;
+			if (!src)
+				BUG();
+
+			src_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_write():
+ *
+ *
+ */
+static int rd_MEMCPY_write(rd_request_t *req)
+{
+	rd_dev_t *dev = req->rd_dev;
+	rd_dev_sg_table_t *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
+	sg_s = (struct scatterlist *) req->rd_buf;
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+		" Offset: %u\n", dev->rd_dev_id, req->rd_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	dst_offset = rd_offset;
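+	/*
+	 * Mirror image of rd_MEMCPY_read(): here the ramdisk scatterlist is
+	 * the destination and the task scatterlist the source.
+	 */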
+
+	while (req->rd_size) {
+		if ((sg_s[i].length - src_offset) <
+		    (sg_d[j].length - dst_offset)) {
+			length = (sg_s[i].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i++]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst = sg_virt(&sg_d[j]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src_offset = 0;
+			dst_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_d[j].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i]) + src_offset;
+			if (!src)
+				BUG();
+
+			if (sg_s[i].length == length) {
+				i++;
+				src_offset = 0;
+			} else
+				src_offset = length;
+
+			dst = sg_virt(&sg_d[j++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			dst_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_d = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_MEMCPY_do_task(se_task_t *task)
+{
+	se_device_t *dev = task->se_dev;
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+	int ret;
+
+	if (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))
+		return rd_emulate_scsi_cdb(task);
+
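+	/*
+	 * Map the task LBA onto a ramdisk page plus byte offset.  Note that
+	 * do_div() divides req->rd_lba in place (leaving the quotient) and
+	 * returns the remainder, so with e.g. PAGE_SIZE=4096 and a 512 byte
+	 * block_size, LBA 9 yields rd_page = 1 and rd_offset = 512.
+	 */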
+	req->rd_lba = task->task_lba;
+	req->rd_page = (req->rd_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+	req->rd_offset = (do_div(req->rd_lba,
+			  (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
+			   DEV_ATTRIB(dev)->block_size;
+	req->rd_size = task->task_size;
+
+	if (req->rd_data_direction == RD_DATA_READ)
+		ret = rd_MEMCPY_read(req);
+	else
+		ret = rd_MEMCPY_write(req);
+
+	if (ret != 0)
+		return ret;
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_DIRECT_with_offset():
+ *
+ *
+ */
+static int rd_DIRECT_with_offset(
+	se_task_t *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	rd_request_t *req = (rd_request_t *)task->transport_req;
+	rd_dev_t *dev = req->rd_dev;
+	rd_dev_sg_table_t *table;
+	se_mem_t *se_mem;
+	struct scatterlist *sg_s;
+	u32 j = 0, set_offset = 1;
+	u32 get_next_table = 0, offset_length, table_sg_end;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+		(req->rd_data_direction != RD_DATA_READ) ? "Write" : "Read",
+		req->rd_lba, req->rd_size, req->rd_page, req->rd_offset);
+#endif
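+	/*
+	 * Build se_mem_t entries referencing the ramdisk pages directly
+	 * (zero-copy).  The first entry starts rd_offset bytes into a page;
+	 * entries then alternate between the remainder of one page and the
+	 * head of the next until rd_size is consumed.
+	 */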
+	while (req->rd_size) {
+		se_mem = kzalloc(sizeof(se_mem_t), GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate se_mem_t\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
+		if (set_offset) {
+			offset_length = sg_s[j].length - req->rd_offset;
+			if (offset_length > req->rd_size)
+				offset_length = req->rd_size;
+
+			se_mem->se_page = sg_page(&sg_s[j++]);
+			se_mem->se_off = req->rd_offset;
+			se_mem->se_len = offset_length;
+
+			set_offset = 0;
+			get_next_table = (j > table_sg_end);
+			goto check_eot;
+		}
+
+		offset_length = (req->rd_size < req->rd_offset) ?
+			req->rd_size : req->rd_offset;
+
+		se_mem->se_page = sg_page(&sg_s[j]);
+		se_mem->se_len = offset_length;
+
+		set_offset = 1;
+
+check_eot:
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
+			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
+			req->rd_page, req->rd_size, offset_length, j, se_mem,
+			se_mem->se_page, se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= offset_length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (!set_offset && !get_next_table)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk(KERN_INFO "page: %u in same page table\n",
+					req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_task_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u se_mem_t segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_without_offset():
+ *
+ *
+ */
+static int rd_DIRECT_without_offset(
+	se_task_t *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	rd_request_t *req = (rd_request_t *)task->transport_req;
+	rd_dev_t *dev = req->rd_dev;
+	rd_dev_sg_table_t *table;
+	se_mem_t *se_mem;
+	struct scatterlist *sg_s;
+	u32 length, j = 0;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
+		(req->rd_data_direction != RD_DATA_READ) ? "Write" : "Read",
+		req->rd_lba, req->rd_size, req->rd_page);
+#endif
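+	/*
+	 * Page aligned case: each se_mem_t maps min(rd_size, sg length)
+	 * bytes from the start of a ramdisk page.
+	 */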
+	while (req->rd_size) {
+		se_mem = kzalloc(sizeof(se_mem_t), GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate se_mem_t\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
+		length = (req->rd_size < sg_s[j].length) ?
+			req->rd_size : sg_s[j].length;
+
+		se_mem->se_page = sg_page(&sg_s[j++]);
+		se_mem->se_len = length;
+
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
+			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
+			req->rd_size, j, se_mem, se_mem->se_page,
+			se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk("page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_task_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u se_mem_t segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_do_se_mem_map():
+ *
+ *
+ */
+int rd_DIRECT_do_se_mem_map(
+	se_task_t *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	se_mem_t *in_se_mem,
+	se_mem_t **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+	int ret;
+
+	req->rd_lba = task->task_lba;
+	req->rd_req_flags = RRF_GOT_LBA;
+	req->rd_page = ((req->rd_lba * DEV_ATTRIB(task->se_dev)->block_size) /
+			PAGE_SIZE);
+	req->rd_offset = (do_div(req->rd_lba,
+			  (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
+			   DEV_ATTRIB(task->se_dev)->block_size;
+	req->rd_size = task->task_size;
+
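+	/*
+	 * A request starting on a page boundary can map whole pages;
+	 * anything else needs an intra-page offset on its first segment.
+	 */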
+	if (req->rd_offset)
+		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
+				task_offset);
+	else
+		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
+				task_offset);
+
+	return ret;
+}
+
+/*	rd_DIRECT_free_DMA():
+ *
+ *
+ */
+void rd_DIRECT_free_DMA(se_cmd_t *cmd)
+{
+	se_mem_t *se_mem, *se_mem_tmp;
+
+	/*
+	 * The scatterlists in the RAMDISK DIRECT case are using the pages
+	 * from the rd_device_t's scatterlist table. They are referencing
+	 * valid memory that is held within the RD transport plugin, so we
+	 * only free the se_mem_t elements.
+	 */
+	list_for_each_entry_safe(se_mem, se_mem_tmp, T_TASK(cmd)->t_mem_list,
+				se_list) {
+		list_del(&se_mem->se_list);
+		kfree(se_mem);
+	}
+	kfree(T_TASK(cmd)->t_mem_list);
+	T_TASK(cmd)->t_mem_list = NULL;
+	T_TASK(cmd)->t_task_se_num = 0;
+}
+
+/*	rd_DIRECT_allocate_DMA():
+ *
+ *	Note that rd_DIRECT_do_se_mem_map() actually does the real work.
+ */
+int rd_DIRECT_allocate_DMA(se_cmd_t *cmd, u32 length, u32 dma_size)
+{
+	T_TASK(cmd)->t_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (!(T_TASK(cmd)->t_mem_list)) {
+		printk(KERN_ERR "Unable to allocate memory for T_TASK(cmd)"
+				"->t_mem_list\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(T_TASK(cmd)->t_mem_list);
+
+	return 0;
+}
+
+/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_DIRECT_do_task(se_task_t *task)
+{
+	if (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))
+		return rd_emulate_scsi_cdb(task);
+
+	/*
+	 * At this point the locally allocated RD tables have been mapped
+	 * to se_mem_t elements in rd_DIRECT_do_se_mem_map().
+	 */
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+void rd_free_task(se_task_t *task)
+{
+	rd_request_t *req;
+	req = (rd_request_t *) task->transport_req;
+
+	kfree(req);
+}
+
+ssize_t rd_set_configfs_dev_params(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) se_dev->se_dev_su_ptr;
+	char *buf, *cur, *ptr, *ptr2;
+	unsigned long rd_pages;
+	int params = 0, ret;
+
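+	/*
+	 * Parse the configfs control string; the only parameter currently
+	 * accepted is "rd_pages=<count>", e.g. "rd_pages=65536".
+	 */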
+	/* count + 1 so the copied string is NUL-terminated for strstr() */
+	buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate memory for temporary buffer\n");
+		return -ENOMEM;
+	}
+	memcpy(buf, page, count);
+	cur = buf;
+
+	while (cur) {
+		ptr = strstr(cur, "=");
+		if (!(ptr))
+			goto out;
+
+		*ptr = '\0';
+		ptr++;
+
+		ptr2 = strstr(cur, "rd_pages");
+		if ((ptr2)) {
+			transport_check_dev_params_delim(ptr, &cur);
+			ret = strict_strtoul(ptr, 0, &rd_pages);
+			if (ret < 0) {
+				printk(KERN_ERR "strict_strtoul() failed for"
+					" rd_pages=\n");
+				break;
+			}
+			rd_dev->rd_page_count = (u32)rd_pages;
+			printk(KERN_INFO "RAMDISK: Referencing Page"
+				" Count: %u\n", rd_dev->rd_page_count);
+			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
+			params++;
+		} else
+			cur = NULL;
+	}
+
+out:
+	kfree(buf);
+	return (params) ? count : -EINVAL;
+}
+
+ssize_t rd_check_configfs_dev_params(se_hba_t *hba, se_subsystem_dev_t *se_dev)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) se_dev->se_dev_su_ptr;
+
+	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+		printk(KERN_INFO "Missing rd_pages= parameter\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+ssize_t rd_show_configfs_dev_params(
+	se_hba_t *hba,
+	se_subsystem_dev_t *se_dev,
+	char *page)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) se_dev->se_dev_su_ptr;
+	int bl = 0;
+
+	__rd_get_dev_info(rd_dev, page, &bl);
+	return (ssize_t)bl;
+}
+
+void rd_dr_get_plugin_info(void *p, char *b, int *bl)
+{
+	*bl += sprintf(b + *bl, "TCM RAMDISK_DR Plugin %s\n", RD_DR_VERSION);
+}
+
+void rd_mcp_get_plugin_info(void *p, char *b, int *bl)
+{
+	*bl += sprintf(b + *bl, "TCM RAMDISK_MCP Plugin %s\n", RD_MCP_VERSION);
+}
+
+void rd_get_hba_info(se_hba_t *hba, char *b, int *bl)
+{
+	rd_host_t *rd_host = (rd_host_t *)hba->hba_ptr;
+
+	*bl += sprintf(b + *bl, "SE Host ID: %u  RD Host ID: %u\n",
+		hba->hba_id, rd_host->rd_host_id);
+	*bl += sprintf(b + *bl, "        TCM RamDisk HBA\n");
+}
+
+void rd_get_dev_info(se_device_t *dev, char *b, int *bl)
+{
+	rd_dev_t *rd_dev = (rd_dev_t *) dev->dev_ptr;
+
+	__rd_get_dev_info(rd_dev, b, bl);
+}
+
+void __rd_get_dev_info(rd_dev_t *rd_dev, char *b, int *bl)
+{
+	*bl += sprintf(b + *bl, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
+			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
+			"rd_direct" : "rd_mcp");
+	*bl += sprintf(b + *bl, "        PAGES/PAGE_SIZE: %u*%lu"
+			"  SG_table_count: %u\n", rd_dev->rd_page_count,
+			PAGE_SIZE, rd_dev->sg_table_count);
+}
+
+/*	rd_map_task_non_SG():
+ *
+ *
+ */
+void rd_map_task_non_SG(se_task_t *task)
+{
+	se_cmd_t *cmd = TASK_CMD(task);
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_bufflen		= task->task_size;
+	req->rd_buf		= (void *) T_TASK(cmd)->t_task_buf;
+	req->rd_sg_count	= 0;
+}
+
+/*	rd_map_task_SG():
+ *
+ *
+ */
+void rd_map_task_SG(se_task_t *task)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_bufflen		= task->task_size;
+	req->rd_buf		= task->task_sg;
+	req->rd_sg_count	= task->task_sg_num;
+}
+
+/*      rd_CDB_inquiry():
+ *
+ *
+ */
+int rd_CDB_inquiry(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction  = RD_DATA_READ;
+
+	rd_map_task_non_SG(task);
+	return 0;
+}
+
+/*      rd_CDB_none():
+ *
+ *
+ */
+int rd_CDB_none(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction	= RD_DATA_NONE;
+	req->rd_bufflen		= 0;
+	req->rd_sg_count	= 0;
+	req->rd_buf		= NULL;
+
+	return 0;
+}
+
+/*	rd_CDB_read_non_SG():
+ *
+ *
+ */
+int rd_CDB_read_non_SG(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction = RD_DATA_READ;
+	rd_map_task_non_SG(task);
+
+	return 0;
+}
+
+/*	rd_CDB_read_SG():
+ *
+ *
+ */
+int rd_CDB_read_SG(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction = RD_DATA_READ;
+	rd_map_task_SG(task);
+
+	return req->rd_sg_count;
+}
+
+/*	rd_CDB_write_non_SG():
+ *
+ *
+ */
+int rd_CDB_write_non_SG(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction = RD_DATA_WRITE;
+	rd_map_task_non_SG(task);
+
+	return 0;
+}
+
+/*	rd_CDB_write_SG():
+ *
+ *
+ */
+int rd_CDB_write_SG(se_task_t *task, u32 size)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	req->rd_data_direction = RD_DATA_WRITE;
+	rd_map_task_SG(task);
+
+	return req->rd_sg_count;
+}
+
+/*	rd_DIRECT_check_lba():
+ *
+ *
+ */
+int rd_DIRECT_check_lba(unsigned long long lba, se_device_t *dev)
+{
+	return ((do_div(lba, PAGE_SIZE / DEV_ATTRIB(dev)->block_size)) *
+		 DEV_ATTRIB(dev)->block_size) ? 1 : 0;
+}
+
+/*	rd_MEMCPY_check_lba():
+ *
+ *
+ */
+int rd_MEMCPY_check_lba(unsigned long long lba, se_device_t *dev)
+{
+	return 0;
+}
+
+/*	rd_check_for_SG(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+int rd_check_for_SG(se_task_t *task)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	return req->rd_sg_count;
+}
+
+/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+unsigned char *rd_get_cdb(se_task_t *task)
+{
+	rd_request_t *req = (rd_request_t *) task->transport_req;
+
+	return req->rd_scsi_cdb;
+}
+
+/*	rd_get_blocksize(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+u32 rd_get_blocksize(se_device_t *dev)
+{
+	return RD_BLOCKSIZE;
+}
+
+u32 rd_get_device_rev(se_device_t *dev)
+{
+	return SCSI_SPC_2; /* reads as SPC-3 in the INQUIRY VERSION field */
+}
+
+u32 rd_get_device_type(se_device_t *dev)
+{
+	return TYPE_DISK;
+}
+
+/*	rd_get_dma_length(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+u32 rd_get_dma_length(u32 task_size, se_device_t *dev)
+{
+	return PAGE_SIZE;
+}
+
+/*	rd_get_max_sectors(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+u32 rd_get_max_sectors(se_device_t *dev)
+{
+	return RD_MAX_SECTORS;
+}
+
+/*	rd_get_queue_depth(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+u32 rd_get_queue_depth(se_device_t *dev)
+{
+	return RD_DEVICE_QUEUE_DEPTH;
+}
+
+u32 rd_get_max_queue_depth(se_device_t *dev)
+{
+	return RD_MAX_DEVICE_QUEUE_DEPTH;
+}
diff --git a/include/target/target_core_rd.h b/include/target/target_core_rd.h
new file mode 100644
index 0000000..67168fd
--- /dev/null
+++ b/include/target/target_core_rd.h
@@ -0,0 +1,257 @@
+/*******************************************************************************
+ * Filename:  target_core_rd.h
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport specific
+ * definitions and prototypes.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2009 Rising Tide Software, Inc.
+ * Copyright (c) 2008-2009 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+
+#ifndef TARGET_CORE_RD_H
+#define TARGET_CORE_RD_H
+
+#define RD_HBA_VERSION		"v3.1"
+#define RD_DR_VERSION		"3.1"
+#define RD_MCP_VERSION		"3.1"
+
+/* Largest piece of memory kmalloc can allocate */
+#define RD_MAX_ALLOCATION_SIZE	65536
+/* Maximum queue depth for the Ramdisk HBA */
+#define RD_HBA_QUEUE_DEPTH	256
+#define RD_DEVICE_QUEUE_DEPTH	32
+#define RD_MAX_DEVICE_QUEUE_DEPTH 128
+#define RD_BLOCKSIZE		512
+#define RD_MAX_SECTORS		1024
+
+#define RD_DATA_READ		1
+#define RD_DATA_WRITE		2
+#define RD_DATA_NONE		3
+
+extern se_global_t *se_global;
+
+#ifndef RD_INCLUDE_STRUCTS
+extern int rd_CDB_inquiry(se_task_t *, u32);
+extern int rd_CDB_none(se_task_t *, u32);
+extern int rd_CDB_read_non_SG(se_task_t *, u32);
+extern int rd_CDB_read_SG(se_task_t *, u32);
+extern int rd_CDB_write_non_SG(se_task_t *, u32);
+extern int rd_CDB_write_SG(se_task_t *, u32);
+
+extern int rd_attach_hba(se_hba_t *, u32);
+extern int rd_detach_hba(se_hba_t *);
+extern void *rd_DIRECT_allocate_virtdevice(se_hba_t *, const char *);
+extern void *rd_MEMCPY_allocate_virtdevice(se_hba_t *, const char *);
+extern se_device_t *rd_DIRECT_create_virtdevice(se_hba_t *,
+				se_subsystem_dev_t *, void *);
+extern se_device_t *rd_MEMCPY_create_virtdevice(se_hba_t *,
+				se_subsystem_dev_t *, void *);
+extern int rd_activate_device(se_device_t *);
+extern void rd_deactivate_device(se_device_t *);
+extern void rd_free_device(void *);
+extern int rd_transport_complete(se_task_t *);
+extern void *rd_allocate_request(se_task_t *, se_device_t *);
+extern int rd_DIRECT_do_task(se_task_t *);
+extern int rd_MEMCPY_do_task(se_task_t *);
+extern int rd_DIRECT_allocate_DMA(se_cmd_t *, u32, u32);
+extern int rd_DIRECT_do_se_mem_map(struct se_task_s *, struct list_head *,
+				void *, struct se_mem_s *, struct se_mem_s **,
+				u32 *, u32 *);
+extern void rd_DIRECT_free_DMA(se_cmd_t *);
+extern void rd_free_task(se_task_t *);
+extern ssize_t rd_set_configfs_dev_params(se_hba_t *, se_subsystem_dev_t *,
+						const char *, ssize_t);
+extern ssize_t rd_check_configfs_dev_params(se_hba_t *, se_subsystem_dev_t *);
+extern ssize_t rd_show_configfs_dev_params(se_hba_t *, se_subsystem_dev_t *,
+						char *);
+extern void rd_dr_get_plugin_info(void *, char *, int *);
+extern void rd_mcp_get_plugin_info(void *, char *, int *);
+extern void rd_get_hba_info(se_hba_t *, char *, int *);
+extern void rd_get_dev_info(se_device_t *, char *, int *);
+extern int rd_DIRECT_check_lba(unsigned long long, se_device_t *);
+extern int rd_MEMCPY_check_lba(unsigned long long, se_device_t *);
+extern int rd_check_for_SG(se_task_t *);
+extern unsigned char *rd_get_cdb(se_task_t *);
+extern u32 rd_get_blocksize(se_device_t *);
+extern u32 rd_get_device_rev(se_device_t *);
+extern u32 rd_get_device_type(se_device_t *);
+extern u32 rd_get_dma_length(u32, se_device_t *);
+extern u32 rd_get_max_sectors(se_device_t *);
+extern u32 rd_get_queue_depth(se_device_t *);
+extern u32 rd_get_max_queue_depth(se_device_t *);
+#endif /* ! RD_INCLUDE_STRUCTS */
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+typedef struct rd_request_s {
+	/* SCSI CDB from iSCSI Command PDU */
+	unsigned char	rd_scsi_cdb[SCSI_CDB_SIZE];
+	/* Data Direction */
+	u8		rd_data_direction;
+	/* Total length of request */
+	u32		rd_bufflen;
+	/* RD request flags */
+	u32		rd_req_flags;
+	/* Offset from start of page */
+	u32		rd_offset;
+	/* Starting page in Ramdisk for request */
+	u32		rd_page;
+	/* Total number of pages needed for request */
+	u32		rd_page_count;
+	/* Scatterlist count */
+	u32		rd_sg_count;
+	u32		rd_size;
+	/* Logical Block Address */
+	unsigned long long	rd_lba;
+	/* Data buffer containing scatterlist(s) or
+	 * contiguous memory segments */
+	void		*rd_buf;
+	/* Ramdisk device */
+	struct rd_dev_s	*rd_dev;
+} ____cacheline_aligned rd_request_t;
+
+typedef struct rd_dev_sg_table_s {
+	u32		page_start_offset;
+	u32		page_end_offset;
+	u32		rd_sg_count;
+	struct scatterlist *sg_table;
+} ____cacheline_aligned rd_dev_sg_table_t;
+
+#define RDF_HAS_PAGE_COUNT	0x01
+
+typedef struct rd_dev_s {
+	int		rd_direct;
+	u32		rd_flags;
+	/* Unique Ramdisk Device ID in Ramdisk HBA */
+	u32		rd_dev_id;
+	/* Total page count for ramdisk device */
+	u32		rd_page_count;
+	/* Number of SG tables in sg_table_array */
+	u32		sg_table_count;
+	u32		rd_queue_depth;
+	/* Array of rd_dev_sg_table_t containing scatterlists */
+	rd_dev_sg_table_t *sg_table_array;
+	/* Ramdisk HBA device is connected to */
+	struct rd_host_s *rd_host;
+	/* Next RD Device entry in list */
+	struct rd_dev_s *next;
+} ____cacheline_aligned rd_dev_t;
+
+extern void __rd_get_dev_info(rd_dev_t *, char *, int *);
+
+typedef struct rd_host_s {
+	u32		rd_host_dev_id_count;
+	u32		rd_host_id;		/* Unique Ramdisk Host ID */
+} ____cacheline_aligned rd_host_t;
+
+#ifndef RD_INCLUDE_STRUCTS
+/*
+ * We use the generic command sequencer, so we must setup
+ * se_subsystem_spc_t.
+ */
+se_subsystem_spc_t rd_template_spc = {
+	.inquiry		= rd_CDB_inquiry,
+	.none			= rd_CDB_none,
+	.read_non_SG		= rd_CDB_read_non_SG,
+	.read_SG		= rd_CDB_read_SG,
+	.write_non_SG		= rd_CDB_write_non_SG,
+	.write_SG		= rd_CDB_write_SG,
+};
+
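+/*
+ * RAMDISK_DR maps the ramdisk's own pages directly into se_mem_t lists
+ * (see allocate_DMA/free_DMA/do_se_mem_map below); RAMDISK_MCP instead
+ * memcpys between the task scatterlists and the ramdisk pages in do_task.
+ */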
+se_subsystem_api_t rd_dr_template = {
+	.name			= "rd_dr",
+	.type			= RAMDISK_DR,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
+	.create_virtdevice	= rd_DIRECT_create_virtdevice,
+	.activate_device	= rd_activate_device,
+	.deactivate_device	= rd_deactivate_device,
+	.free_device		= rd_free_device,
+	.transport_complete	= rd_transport_complete,
+	.allocate_DMA		= rd_DIRECT_allocate_DMA,
+	.free_DMA		= rd_DIRECT_free_DMA,
+	.allocate_request	= rd_allocate_request,
+	.do_task		= rd_DIRECT_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_plugin_info	= rd_dr_get_plugin_info,
+	.get_hba_info		= rd_get_hba_info,
+	.get_dev_info		= rd_get_dev_info,
+	.check_lba		= rd_DIRECT_check_lba,
+	.check_for_SG		= rd_check_for_SG,
+	.get_cdb		= rd_get_cdb,
+	.get_blocksize		= rd_get_blocksize,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_dma_length		= rd_get_dma_length,
+	.get_max_sectors	= rd_get_max_sectors,
+	.get_queue_depth	= rd_get_queue_depth,
+	.get_max_queue_depth	= rd_get_max_queue_depth,
+	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
+	.write_pending		= NULL,
+	.spc			= &rd_template_spc,
+};
+
+se_subsystem_api_t rd_mcp_template = {
+	.name			= "rd_mcp",
+	.type			= RAMDISK_MCP,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
+	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
+	.activate_device	= rd_activate_device,
+	.deactivate_device	= rd_deactivate_device,
+	.free_device		= rd_free_device,
+	.transport_complete	= rd_transport_complete,
+	.allocate_request	= rd_allocate_request,
+	.do_task		= rd_MEMCPY_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_plugin_info	= rd_mcp_get_plugin_info,
+	.get_hba_info		= rd_get_hba_info,
+	.get_dev_info		= rd_get_dev_info,
+	.check_lba		= rd_MEMCPY_check_lba,
+	.check_for_SG		= rd_check_for_SG,
+	.get_cdb		= rd_get_cdb,
+	.get_blocksize		= rd_get_blocksize,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_dma_length		= rd_get_dma_length,
+	.get_max_sectors	= rd_get_max_sectors,
+	.get_queue_depth	= rd_get_queue_depth,
+	.get_max_queue_depth	= rd_get_max_queue_depth,
+	.write_pending		= NULL,
+	.spc			= &rd_template_spc,
+};
+
+#endif /* ! RD_INCLUDE_STRUCTS */
+
+#endif /* TARGET_CORE_RD_H */


