* first attempt of pvscsi backend driver port to pvops
@ 2010-03-11 23:29 James Harper
  2010-03-12  0:09 ` Jeremy Fitzhardinge
  0 siblings, 1 reply; 4+ messages in thread
From: James Harper @ 2010-03-11 23:29 UTC (permalink / raw)
  To: xen-devel

[-- Attachment #1: Type: text/plain, Size: 696 bytes --]

Attached is my first attempt at getting the backend pvscsi driver
(scsiback) working under the latest pvops kernel. I have tested it with
the scsi frontend driver in gplpv, and have restored about 20GB of data
from an HP LTO3 tape drive successfully, so it appears to be working
fine.

There are quite a few changes compared to the version in 2.6.18. The
main change is that I get the vaddr for each sg element and use the
regular dma calls to make it work, instead of all the magic the 2.6.18
version used, which isn't exported anymore.
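
In short, per segment it boils down to something like the sketch below
(simplified, with error handling and the grant mapping itself omitted;
the real code is request_map_sg() in the attached scsiback.c):

	/* Hand the kernel vaddr of each grant-mapped segment to the
	 * block layer, instead of building the scatterlist with the
	 * old, no-longer-exported helpers.
	 */
	static int map_segments_sketch(struct request *rq,
				       pending_req_t *pending_req,
				       unsigned int nr_segments)
	{
		unsigned int i;

		for (i = 0; i < nr_segments; i++) {
			if (blk_rq_map_kern(rq->q, rq,
					pending_req->sg_elem[i].sg_vaddr,
					pending_req->sg_elem[i].sg_length,
					GFP_KERNEL))
				return -ENOMEM;
		}
		return 0;
	}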

I created a new branch in a local git repo; what git magic do I need
to weave to make the patch look a bit neater?

Thanks

James


[-- Attachment #2: scsiback.patch --]
[-- Type: application/octet-stream, Size: 126144 bytes --]

diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 1dd2ab6..ec6680f 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -60,6 +60,14 @@ config XEN_BLKBACK_PAGEMAP
 	depends on XEN_BLKDEV_BACKEND != n && XEN_BLKDEV_TAP != n
 	default XEN_BLKDEV_BACKEND || XEN_BLKDEV_TAP
 
+config XEN_SCSI_BACKEND
+	tristate "SCSI backend driver"
+	depends on SCSI && XEN_BACKEND
+	default m
+	help
+	  The SCSI backend driver allows the kernel to export its SCSI devices
+	  to other guests via a high-performance shared-memory interface.
+
 config XEN_NETDEV_BACKEND
        tristate "Xen backend network device"
        depends on XEN_BACKEND && NET
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 69d5ffd..148756e 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_XEN_DEV_EVTCHN)		+= xen-evtchn.o
 obj-$(CONFIG_XEN_GNTDEV)		+= xen-gntdev.o
 obj-$(CONFIG_XEN_BLKDEV_BACKEND)	+= blkback/
 obj-$(CONFIG_XEN_BLKDEV_TAP)            += blktap/
+obj-$(CONFIG_XEN_SCSI_BACKEND)          += scsiback/
 obj-$(CONFIG_XEN_NETDEV_BACKEND)	+= netback/
 obj-$(CONFIG_XEN_PCIDEV_BACKEND)	+= pciback/
 obj-$(CONFIG_XENFS)			+= xenfs/
diff --git a/drivers/xen/scsiback/Makefile b/drivers/xen/scsiback/Makefile
new file mode 100644
index 0000000..56271df
--- /dev/null
+++ b/drivers/xen/scsiback/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_XEN_SCSI_BACKEND) := xen-scsibk.o
+
+xen-scsibk-y	:= interface.o scsiback.o xenbus.o translate.o emulate.o
+
diff --git a/drivers/xen/scsiback/common.h b/drivers/xen/scsiback/common.h
new file mode 100644
index 0000000..af448af
--- /dev/null
+++ b/drivers/xen/scsiback/common.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * Based on the blkback driver code.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __SCSIIF__BACKEND__COMMON_H__
+#define __SCSIIF__BACKEND__COMMON_H__
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/pgalloc.h>
+#include <asm/delay.h>
+#include <xen/evtchn.h>
+#include <asm/hypervisor.h>
+#include <asm/xen/hypercall.h>
+#include <xen/grant_table.h>
+//#include <xen/driver_util.h>
+#include <xen/xenbus.h>
+#include <xen/interface/io/ring.h>
+//#include <xen/interface/grant_table.h>
+#include <xen/interface/io/vscsiif.h>
+
+
+#define DPRINTK(_f, _a...)			\
+	pr_debug("(file=%s, line=%d) " _f,	\
+		 __FILE__ , __LINE__ , ## _a )
+
+struct ids_tuple {
+	unsigned int hst;		/* host    */
+	unsigned int chn;		/* channel */
+	unsigned int tgt;		/* target  */
+	unsigned int lun;		/* LUN     */
+};
+
+struct v2p_entry {
+	struct ids_tuple v;		/* translate from */
+	struct scsi_device *sdev;	/* translate to   */
+	struct list_head l;
+};
+
+struct vscsibk_info {
+	struct xenbus_device *dev;
+
+	domid_t domid;
+	unsigned int evtchn;
+	unsigned int irq;
+
+	int feature;
+
+	struct vscsiif_back_ring  ring;
+	struct vm_struct *ring_area;
+	grant_handle_t shmem_handle;
+	grant_ref_t shmem_ref;
+
+	spinlock_t ring_lock;
+	atomic_t nr_unreplied_reqs;
+
+	spinlock_t v2p_lock;
+	struct list_head v2p_entry_lists;
+
+	struct task_struct *kthread;
+	wait_queue_head_t waiting_to_free;
+	wait_queue_head_t wq;
+	unsigned int waiting_reqs;
+	struct page **mmap_pages;
+
+};
+
+typedef struct {
+	void *sg_vaddr;
+	int sg_length;
+} mapped_sg_elem_t;
+
+typedef struct {
+	unsigned char act;
+	struct vscsibk_info *info;
+	struct scsi_device *sdev;
+
+	uint16_t rqid;
+	
+	uint16_t v_chn, v_tgt;
+
+	uint8_t nr_segments;
+	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
+	uint8_t cmd_len;
+
+	uint8_t sc_data_direction;
+	uint16_t timeout_per_command;
+	
+	uint32_t request_bufflen;
+	struct scatterlist *sgl;
+	//grant_ref_t gref[VSCSIIF_SG_TABLESIZE];
+	mapped_sg_elem_t sg_elem[VSCSIIF_SG_TABLESIZE];
+
+	int32_t rslt;
+	uint32_t resid;
+	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+
+	struct list_head free_list;
+} pending_req_t;
+
+
+
+#define scsiback_get(_b) (atomic_inc(&(_b)->nr_unreplied_reqs))
+#define scsiback_put(_b)				\
+	do {						\
+		if (atomic_dec_and_test(&(_b)->nr_unreplied_reqs))	\
+			wake_up(&(_b)->waiting_to_free);\
+	} while (0)
+
+#define VSCSIIF_TIMEOUT		(900*HZ)
+
+#define VSCSI_TYPE_HOST		1
+
+irqreturn_t scsiback_intr(int, void *);
+int scsiback_init_sring(struct vscsibk_info *info,
+		unsigned long ring_ref, unsigned int evtchn);
+int scsiback_schedule(void *data);
+
+
+struct vscsibk_info *vscsibk_info_alloc(domid_t domid);
+void scsiback_free(struct vscsibk_info *info);
+void scsiback_disconnect(struct vscsibk_info *info);
+int __init scsiback_interface_init(void);
+void scsiback_interface_exit(void);
+int scsiback_xenbus_init(void);
+void scsiback_xenbus_unregister(void);
+
+void scsiback_init_translation_table(struct vscsibk_info *info);
+
+int scsiback_add_translation_entry(struct vscsibk_info *info,
+			struct scsi_device *sdev, struct ids_tuple *v);
+
+int scsiback_del_translation_entry(struct vscsibk_info *info,
+				struct ids_tuple *v);
+struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
+			struct ids_tuple *v);
+void scsiback_release_translation_entry(struct vscsibk_info *info);
+
+
+void scsiback_cmd_exec(pending_req_t *pending_req);
+void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
+			uint32_t resid, pending_req_t *pending_req);
+void scsiback_fast_flush_area(pending_req_t *req);
+
+void scsiback_rsp_emulation(pending_req_t *pending_req);
+void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req);
+void scsiback_emulation_init(void);
+
+
+#endif /* __SCSIIF__BACKEND__COMMON_H__ */
diff --git a/drivers/xen/scsiback/emulate.c b/drivers/xen/scsiback/emulate.c
new file mode 100644
index 0000000..7b8052b
--- /dev/null
+++ b/drivers/xen/scsiback/emulate.c
@@ -0,0 +1,491 @@
+/*
+ * Xen SCSI backend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include "common.h"
+
+/* The following SCSI commands are not defined in scsi/scsi.h */
+#define EXTENDED_COPY		0x83	/* EXTENDED COPY command        */
+#define REPORT_ALIASES		0xa3	/* REPORT ALIASES command       */
+#define CHANGE_ALIASES		0xa4	/* CHANGE ALIASES command       */
+#define SET_PRIORITY		0xa4	/* SET PRIORITY command         */
+
+
+/*
+  The bitmap used to control emulation.
+  (Bits 4 to 7 are reserved for future use.)
+*/
+#define VSCSIIF_NEED_CMD_EXEC		0x01	/* If this bit is set, cmd exec	*/
+						/* is required.			*/
+#define VSCSIIF_NEED_EMULATE_REQBUF	0x02	/* If this bit is set, need	*/
+						/* emulation request buff before*/
+						/* cmd exec.			*/
+#define VSCSIIF_NEED_EMULATE_RSPBUF	0x04	/* If this bit is set, need	*/
+						/* emulation resp buff after	*/
+						/* cmd exec.			*/
+#define VSCSIIF_NEED_UNSUPPORTED_LOGGED 0x08    /* If this bit is set, we've    */
+                                                /* already logged that this     */
+                                                /* command is unsupported       */
+
+
+/* Additional Sense Code (ASC) used */
+#define NO_ADDITIONAL_SENSE		0x0
+#define LOGICAL_UNIT_NOT_READY		0x4
+#define UNRECOVERED_READ_ERR		0x11
+#define PARAMETER_LIST_LENGTH_ERR	0x1a
+#define INVALID_OPCODE			0x20
+#define ADDR_OUT_OF_RANGE		0x21
+#define INVALID_FIELD_IN_CDB		0x24
+#define INVALID_FIELD_IN_PARAM_LIST	0x26
+#define POWERON_RESET			0x29
+#define SAVING_PARAMS_UNSUP		0x39
+#define THRESHOLD_EXCEEDED		0x5d
+#define LOW_POWER_COND_ON		0x5e
+
+
+
+/* Number of SCSI op_codes	*/
+#define VSCSI_MAX_SCSI_OP_CODE		256
+static unsigned char bitmap[VSCSI_MAX_SCSI_OP_CODE];
+
+#define NO_EMULATE(cmd)						\
+	do { bitmap[cmd] = VSCSIIF_NEED_CMD_EXEC;		\
+	     pre_function[cmd] = NULL;				\
+	     post_function[cmd] = NULL; } while (0)
+
+
+
+/*
+  Emulation routines for each SCSI op_code.
+*/
+static void (*pre_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
+static void (*post_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
+
+
+static const int check_condition_result =
+		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+static void scsiback_mk_sense_buffer(uint8_t *data, uint8_t key,
+			uint8_t asc, uint8_t asq)
+{
+	data[0] = 0x70;  /* fixed, current */
+	data[2] = key;
+	data[7] = 0xa;	  /* implies 18 byte sense buffer */
+	data[12] = asc;
+	data[13] = asq;
+}
+
+static void resp_not_supported_cmd(pending_req_t *pending_req, void *data)
+{
+        if (!(bitmap[pending_req->cmnd[0]] & VSCSIIF_NEED_UNSUPPORTED_LOGGED))
+        {
+                bitmap[pending_req->cmnd[0]] |= VSCSIIF_NEED_UNSUPPORTED_LOGGED;
+                printk(KERN_ERR "%s: SCSI function 0x%02x not supported\n",
+                        __FUNCTION__, pending_req->cmnd[0]);
+        }
+
+	scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
+		INVALID_OPCODE, 0);
+	pending_req->resid = 0;
+	pending_req->rslt  = check_condition_result;
+}
+
+
+static int __copy_to_sg(struct scatterlist *sg, unsigned int nr_sg,
+	       void *buf, unsigned int buflen)
+{
+	void *from = buf;
+	void *to;
+	unsigned int from_rest = buflen;
+	unsigned int to_capa;
+	unsigned int copy_size = 0;
+	unsigned int i;
+	unsigned long pfn;
+	struct page *page;
+
+	for (i = 0; i < nr_sg; i++) {
+		page = sg_page(sg);
+		if (page == NULL) {
+			printk(KERN_WARNING "%s: inconsistent length field in "
+			       "scatterlist\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		to_capa  = sg->length;
+		copy_size = min_t(unsigned int, to_capa, from_rest);
+
+		pfn = page_to_pfn(page);
+		to = pfn_to_kaddr(pfn) + (sg->offset);
+		memcpy(to, from, copy_size);
+
+		from_rest  -= copy_size;
+		if (from_rest == 0) {
+			return 0;
+		}
+		
+		sg++;
+		from += copy_size;
+	}
+
+	printk(KERN_WARNING "%s: no space in scatterlist\n",
+	       __FUNCTION__);
+	return -ENOMEM;
+}
+
+#if 0
+static int __copy_from_sg(struct scatterlist *sg, unsigned int nr_sg,
+		 void *buf, unsigned int buflen)
+{
+	void *from;
+	void *to = buf;
+	unsigned int from_rest;
+	unsigned int to_capa = buflen;
+	unsigned int copy_size;
+	unsigned int i;
+	unsigned long pfn;
+	struct page *page;
+
+	for (i = 0; i < nr_sg; i++) {
+		page = sg_page(sg);
+		if (page == NULL) {
+			printk(KERN_WARNING "%s: inconsistent length field in "
+			       "scatterlist\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		from_rest = sg->length;
+		if ((from_rest > 0) && (to_capa < from_rest)) {
+			printk(KERN_WARNING
+			       "%s: no space in destination buffer\n",
+			       __FUNCTION__);
+			return -ENOMEM;
+		}
+		copy_size = from_rest;
+
+		pfn = page_to_pfn(page);
+		from = pfn_to_kaddr(pfn) + (sg->offset);
+		memcpy(to, from, copy_size);
+
+		to_capa  -= copy_size;
+		
+		sg++;
+		to += copy_size;
+	}
+
+	return 0;
+}
+#endif
+
+static int __nr_luns_under_host(struct vscsibk_info *info)
+{
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+	int lun_cnt = 0;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, head, l) {
+			lun_cnt++;
+	}
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+	return (lun_cnt);
+}
+
+
+/* REPORT LUNS Define*/
+#define VSCSI_REPORT_LUNS_HEADER	8
+#define VSCSI_REPORT_LUNS_RETRY		3
+
+/* based on scsi_debug.c:resp_report_luns() */
+static void __report_luns(pending_req_t *pending_req, void *data)
+{
+	struct vscsibk_info *info   = pending_req->info;
+	unsigned int        channel = pending_req->v_chn;
+	unsigned int        target  = pending_req->v_tgt;
+	unsigned int        nr_seg  = pending_req->nr_segments;
+	unsigned char *cmd = (unsigned char *)pending_req->cmnd;
+	
+	unsigned char *buff = NULL;
+	unsigned int alloc_len;
+	unsigned int alloc_luns = 0;
+	unsigned int req_bufflen = 0;
+	unsigned int actual_len = 0;
+	unsigned int retry_cnt = 0;
+	int select_report = (int)cmd[2];
+	int i, lun_cnt = 0, lun, upper, err = 0;
+	
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+	
+	struct scsi_lun *one_lun;
+
+	req_bufflen = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
+	if ((req_bufflen < 4) || (select_report != 0))
+		goto fail;
+
+	alloc_luns = __nr_luns_under_host(info);
+	alloc_len  = sizeof(struct scsi_lun) * alloc_luns
+				+ VSCSI_REPORT_LUNS_HEADER;
+retry:
+	if ((buff = kmalloc(alloc_len, GFP_KERNEL)) == NULL) {
+		printk(KERN_ERR "scsiback:%s kmalloc err\n", __FUNCTION__);
+		goto fail;
+	}
+
+	memset(buff, 0, alloc_len);
+
+	one_lun = (struct scsi_lun *) &buff[8];
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, head, l) {
+		if ((entry->v.chn == channel) &&
+		    (entry->v.tgt == target)) {
+			
+			/* check overflow */
+			if (lun_cnt >= alloc_luns) {
+				spin_unlock_irqrestore(&info->v2p_lock,
+							flags);
+
+				if (retry_cnt < VSCSI_REPORT_LUNS_RETRY) {
+					retry_cnt++;
+					if (buff)
+						kfree(buff);
+					goto retry;
+				}
+
+				goto fail;
+			}
+
+			lun = entry->v.lun;
+			upper = (lun >> 8) & 0x3f;
+			if (upper)
+				one_lun[lun_cnt].scsi_lun[0] = upper;
+			one_lun[lun_cnt].scsi_lun[1] = lun & 0xff;
+			lun_cnt++;
+		}
+	}
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+	buff[2] = ((sizeof(struct scsi_lun) * lun_cnt) >> 8) & 0xff;
+	buff[3] = (sizeof(struct scsi_lun) * lun_cnt) & 0xff;
+
+	actual_len = lun_cnt * sizeof(struct scsi_lun) 
+				+ VSCSI_REPORT_LUNS_HEADER;
+	req_bufflen = 0;
+	for (i = 0; i < nr_seg; i++)
+		req_bufflen += pending_req->sgl[i].length;
+
+	err = __copy_to_sg(pending_req->sgl, nr_seg, buff, 
+				min(req_bufflen, actual_len));
+	if (err)
+		goto fail;
+
+	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
+	pending_req->rslt = 0x00;
+	pending_req->resid = req_bufflen - min(req_bufflen, actual_len);
+
+	kfree(buff);
+	return;
+
+fail:
+	scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
+		INVALID_FIELD_IN_CDB, 0);
+	pending_req->rslt  = check_condition_result;
+	pending_req->resid = 0;
+	if (buff)
+		kfree(buff);
+	return;
+}
+
+
+
+static int __pre_do_emulation(pending_req_t *pending_req, void *data)
+{
+	uint8_t op_code = pending_req->cmnd[0];
+
+	if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_REQBUF) &&
+	    pre_function[op_code] != NULL) {
+		pre_function[op_code](pending_req, data);
+	}
+
+	/*
+	    0: no native driver call is needed, so return immediately.
+	    1: no emulation is needed, or the native driver should be
+	       called after modifying the request buffer.
+	*/
+	return !!(bitmap[op_code] & VSCSIIF_NEED_CMD_EXEC);
+}
+
+void scsiback_rsp_emulation(pending_req_t *pending_req)
+{
+	uint8_t op_code = pending_req->cmnd[0];
+
+	if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_RSPBUF) &&
+	    post_function[op_code] != NULL) {
+		post_function[op_code](pending_req, NULL);
+	}
+
+	return;
+}
+
+
+void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req)
+{
+	if (__pre_do_emulation(pending_req, NULL)) {
+		scsiback_cmd_exec(pending_req);
+	}
+	else {
+		scsiback_fast_flush_area(pending_req);
+		scsiback_do_resp_with_sense(pending_req->sense_buffer,
+		  pending_req->rslt, pending_req->resid, pending_req);
+	}
+}
+
+
+/*
+  Following are not customizable functions.
+*/
+void scsiback_emulation_init(void)
+{
+	int i;
+
+	/* Initialize to default state */
+	for (i = 0; i < VSCSI_MAX_SCSI_OP_CODE; i++) {
+		bitmap[i]        = (VSCSIIF_NEED_EMULATE_REQBUF | 
+					VSCSIIF_NEED_EMULATE_RSPBUF);
+		pre_function[i]  = resp_not_supported_cmd;
+		post_function[i] = NULL;
+		/* By default a command is only emulated: the
+		   pre-emulation handler reports it as unsupported
+		   (ILLEGAL_REQUEST/INVALID_OPCODE) and the native
+		   driver is not called.  Supported op_codes are
+		   registered individually below. */
+	}
+
+	/*
+	  Register appropriate functions below as you need.
+	  (See scsi/scsi.h for definition of SCSI op_code.)
+	*/
+
+	/*
+	  Following commands do not require emulation.
+	*/
+	NO_EMULATE(TEST_UNIT_READY);       /*0x00*/
+	NO_EMULATE(REZERO_UNIT);           /*0x01*/
+	NO_EMULATE(REQUEST_SENSE);         /*0x03*/
+	NO_EMULATE(FORMAT_UNIT);           /*0x04*/
+	NO_EMULATE(READ_BLOCK_LIMITS);     /*0x05*/
+	/*NO_EMULATE(REASSIGN_BLOCKS);       *//*0x07*/
+	/*NO_EMULATE(INITIALIZE_ELEMENT_STATUS); *//*0x07*/
+	NO_EMULATE(READ_6);                /*0x08*/
+	NO_EMULATE(WRITE_6);               /*0x0a*/
+	NO_EMULATE(SEEK_6);                /*0x0b*/
+	/*NO_EMULATE(READ_REVERSE);          *//*0x0f*/
+	NO_EMULATE(WRITE_FILEMARKS);       /*0x10*/
+	NO_EMULATE(SPACE);                 /*0x11*/
+	NO_EMULATE(INQUIRY);               /*0x12*/
+	/*NO_EMULATE(RECOVER_BUFFERED_DATA); *//*0x14*/
+	NO_EMULATE(MODE_SELECT);           /*0x15*/
+	NO_EMULATE(RESERVE);               /*0x16*/
+	NO_EMULATE(RELEASE);               /*0x17*/
+	/*NO_EMULATE(COPY);                  *//*0x18*/
+	NO_EMULATE(ERASE);                 /*0x19*/
+	NO_EMULATE(MODE_SENSE);            /*0x1a*/
+	NO_EMULATE(START_STOP);            /*0x1b*/
+	NO_EMULATE(RECEIVE_DIAGNOSTIC);    /*0x1c*/
+	NO_EMULATE(SEND_DIAGNOSTIC);       /*0x1d*/
+	NO_EMULATE(ALLOW_MEDIUM_REMOVAL);  /*0x1e*/
+
+	/*NO_EMULATE(SET_WINDOW);            *//*0x24*/
+	NO_EMULATE(READ_CAPACITY);         /*0x25*/
+	NO_EMULATE(READ_10);               /*0x28*/
+	NO_EMULATE(WRITE_10);              /*0x2a*/
+	NO_EMULATE(SEEK_10);               /*0x2b*/
+	/*NO_EMULATE(POSITION_TO_ELEMENT);   *//*0x2b*/
+	/*NO_EMULATE(WRITE_VERIFY);          *//*0x2e*/
+	/*NO_EMULATE(VERIFY);                *//*0x2f*/
+	/*NO_EMULATE(SEARCH_HIGH);           *//*0x30*/
+	/*NO_EMULATE(SEARCH_EQUAL);          *//*0x31*/
+	/*NO_EMULATE(SEARCH_LOW);            *//*0x32*/
+	/*NO_EMULATE(SET_LIMITS);            *//*0x33*/
+	NO_EMULATE(PRE_FETCH);             /*0x34*/
+	/*NO_EMULATE(READ_POSITION);         *//*0x34*/
+	/*NO_EMULATE(SYNCHRONIZE_CACHE);     *//*0x35*/
+	/*NO_EMULATE(LOCK_UNLOCK_CACHE);     *//*0x36*/
+	/*NO_EMULATE(READ_DEFECT_DATA);      *//*0x37*/
+	/*NO_EMULATE(MEDIUM_SCAN);           *//*0x38*/
+	/*NO_EMULATE(COMPARE);               *//*0x39*/
+	/*NO_EMULATE(COPY_VERIFY);           *//*0x3a*/
+	NO_EMULATE(WRITE_BUFFER);          /*0x3b*/
+	NO_EMULATE(READ_BUFFER);           /*0x3c*/
+	/*NO_EMULATE(UPDATE_BLOCK);          *//*0x3d*/
+	/*NO_EMULATE(READ_LONG);             *//*0x3e*/
+	/*NO_EMULATE(WRITE_LONG);            *//*0x3f*/
+	/*NO_EMULATE(CHANGE_DEFINITION);     *//*0x40*/
+	/*NO_EMULATE(WRITE_SAME);            *//*0x41*/
+	/*NO_EMULATE(READ_TOC);              *//*0x43*/
+	NO_EMULATE(LOG_SELECT);            /*0x4c*/
+	NO_EMULATE(LOG_SENSE);             /*0x4d*/
+	NO_EMULATE(MODE_SELECT_10);        /*0x55*/
+	NO_EMULATE(RESERVE_10);            /*0x56*/
+	NO_EMULATE(RELEASE_10);            /*0x57*/
+	NO_EMULATE(MODE_SENSE_10);         /*0x5a*/
+	/*NO_EMULATE(PERSISTENT_RESERVE_IN); *//*0x5e*/
+	/*NO_EMULATE(PERSISTENT_RESERVE_OUT); *//*0x5f*/
+	/*           REPORT_LUNS             *//*0xa0*//*Full emulation*/
+	/*NO_EMULATE(MOVE_MEDIUM);           *//*0xa5*/
+	/*NO_EMULATE(EXCHANGE_MEDIUM);       *//*0xa6*/
+	/*NO_EMULATE(READ_12);               *//*0xa8*/
+	/*NO_EMULATE(WRITE_12);              *//*0xaa*/
+	/*NO_EMULATE(WRITE_VERIFY_12);       *//*0xae*/
+	/*NO_EMULATE(SEARCH_HIGH_12);        *//*0xb0*/
+	/*NO_EMULATE(SEARCH_EQUAL_12);       *//*0xb1*/
+	/*NO_EMULATE(SEARCH_LOW_12);         *//*0xb2*/
+	/*NO_EMULATE(READ_ELEMENT_STATUS);   *//*0xb8*/
+	/*NO_EMULATE(SEND_VOLUME_TAG);       *//*0xb6*/
+	/*NO_EMULATE(WRITE_LONG_2);          *//*0xea*/
+	/*NO_EMULATE(READ_16);               *//*0x88*/
+	/*NO_EMULATE(WRITE_16);              *//*0x8a*/
+	/*NO_EMULATE(VERIFY_16);	      *//*0x8f*/
+	/*NO_EMULATE(SERVICE_ACTION_IN);     *//*0x9e*/
+
+	/*
+	  Following commands require emulation.
+	*/
+	pre_function[REPORT_LUNS] = __report_luns;
+	bitmap[REPORT_LUNS] = (VSCSIIF_NEED_EMULATE_REQBUF | 
+					VSCSIIF_NEED_EMULATE_RSPBUF);
+
+	return;
+}
diff --git a/drivers/xen/scsiback/interface.c b/drivers/xen/scsiback/interface.c
new file mode 100644
index 0000000..f6b1656
--- /dev/null
+++ b/drivers/xen/scsiback/interface.c
@@ -0,0 +1,184 @@
+/*
+ * interface management.
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * Based on the blkback driver code.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include "common.h"
+
+#include <xen/evtchn.h>
+#include <xen/events.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+
+static struct kmem_cache *scsiback_cachep;
+
+struct vscsibk_info *vscsibk_info_alloc(domid_t domid)
+{
+	struct vscsibk_info *info;
+
+	info = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	memset(info, 0, sizeof(*info));
+	info->domid = domid;
+	spin_lock_init(&info->ring_lock);
+	atomic_set(&info->nr_unreplied_reqs, 0);
+	init_waitqueue_head(&info->wq);
+	init_waitqueue_head(&info->waiting_to_free);
+
+	return info;
+}
+
+static int map_frontend_page( struct vscsibk_info *info,
+				unsigned long ring_ref)
+{
+	struct gnttab_map_grant_ref op;
+	int err;
+
+	gnttab_set_map_op(&op, (unsigned long)info->ring_area->addr,
+				GNTMAP_host_map, ring_ref,
+				info->domid);
+
+	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
+	BUG_ON(err);
+
+	if (op.status) {
+		printk(KERN_ERR "scsiback: Grant table operation failure !\n");
+		return op.status;
+	}
+
+	info->shmem_ref    = ring_ref;
+	info->shmem_handle = op.handle;
+
+	return (GNTST_okay);
+}
+
+static void unmap_frontend_page(struct vscsibk_info *info)
+{
+	struct gnttab_unmap_grant_ref op;
+	int err;
+
+	gnttab_set_unmap_op(&op, (unsigned long)info->ring_area->addr,
+				GNTMAP_host_map, info->shmem_handle);
+
+	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
+	BUG_ON(err);
+
+}
+
+int scsiback_init_sring(struct vscsibk_info *info,
+		unsigned long ring_ref, unsigned int evtchn)
+{
+	struct vscsiif_sring *sring;
+	int err;
+
+	if (info->irq) {
+		printk(KERN_ERR "scsiback: Already connected through?\n");
+		return -1;
+	}
+
+	info->ring_area = alloc_vm_area(PAGE_SIZE);
+	if (!info->ring_area)
+		return -ENOMEM;
+
+	err = map_frontend_page(info, ring_ref);
+	if (err)
+		goto free_vm;
+
+	sring = (struct vscsiif_sring *) info->ring_area->addr;
+	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+	err = bind_interdomain_evtchn_to_irqhandler(
+			info->domid, evtchn,
+			scsiback_intr, 0, "vscsiif-backend", info);
+
+	if (err < 0)
+		goto unmap_page;
+		
+	info->irq = err;
+
+	return 0;
+
+unmap_page:
+	unmap_frontend_page(info);
+free_vm:
+	free_vm_area(info->ring_area);
+
+	return err;
+}
+
+void scsiback_disconnect(struct vscsibk_info *info)
+{
+	if (info->kthread) {
+		kthread_stop(info->kthread);
+		info->kthread = NULL;
+	}
+
+	wait_event(info->waiting_to_free, 
+		atomic_read(&info->nr_unreplied_reqs) == 0);
+
+	if (info->irq) {
+		unbind_from_irqhandler(info->irq, info);
+		info->irq = 0;
+	}
+
+	if (info->ring.sring) {
+		unmap_frontend_page(info);
+		free_vm_area(info->ring_area);
+		info->ring.sring = NULL;
+	}
+}
+
+void scsiback_free(struct vscsibk_info *info)
+{
+	kmem_cache_free(scsiback_cachep, info);
+}
+
+int __init scsiback_interface_init(void)
+{
+	scsiback_cachep = kmem_cache_create("vscsiif_cache",
+		sizeof(struct vscsibk_info), 0, 0, NULL);
+	if (!scsiback_cachep) {
+		printk(KERN_ERR "scsiback: can't init scsi cache\n");
+		return -ENOMEM;
+	}
+	
+	return 0;
+}
+
+void scsiback_interface_exit(void)
+{
+	kmem_cache_destroy(scsiback_cachep);
+}
diff --git a/drivers/xen/scsiback/scsiback.c b/drivers/xen/scsiback/scsiback.c
new file mode 100644
index 0000000..832c0b7
--- /dev/null
+++ b/drivers/xen/scsiback/scsiback.c
@@ -0,0 +1,632 @@
+/*
+ * Xen SCSI backend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * Based on the blkback driver code.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <xen/balloon.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <asm/hypervisor.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "common.h"
+
+
+struct list_head pending_free;
+DEFINE_SPINLOCK(pending_free_lock);
+DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
+
+int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
+module_param_named(reqs, vscsiif_reqs, int, 0);
+MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
+
+static int log_print_stat = 0;
+module_param(log_print_stat, int, 0644);
+
+#define SCSIBACK_INVALID_HANDLE (~0)
+
+static pending_req_t *pending_reqs;
+static struct page **pending_pages;
+static grant_handle_t *pending_grant_handles;
+
+static int vaddr_pagenr(pending_req_t *req, int seg)
+{
+	return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
+}
+
+static unsigned long vaddr(pending_req_t *req, int seg)
+{
+	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
+	return (unsigned long)pfn_to_kaddr(pfn);
+}
+
+#define pending_handle(_req, _seg) \
+	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
+
+
+void scsiback_fast_flush_area(pending_req_t *req)
+{
+	struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
+	unsigned int i, invcount = 0;
+	grant_handle_t handle;
+	int err;
+
+	if (req->nr_segments) {
+		for (i = 0; i < req->nr_segments; i++) {
+			handle = pending_handle(req, i);
+			if (handle == SCSIBACK_INVALID_HANDLE)
+				continue;
+			gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
+						GNTMAP_host_map, handle);
+			pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
+			invcount++;
+		}
+
+		err = HYPERVISOR_grant_table_op(
+			GNTTABOP_unmap_grant_ref, unmap, invcount);
+		BUG_ON(err);
+	}
+
+	return;
+}
+
+
+static pending_req_t * alloc_req(struct vscsibk_info *info)
+{
+	pending_req_t *req = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pending_free_lock, flags);
+	if (!list_empty(&pending_free)) {
+		req = list_entry(pending_free.next, pending_req_t, free_list);
+		list_del(&req->free_list);
+	}
+	spin_unlock_irqrestore(&pending_free_lock, flags);
+	return req;
+}
+
+
+static void free_req(pending_req_t *req)
+{
+	unsigned long flags;
+	int was_empty;
+
+	spin_lock_irqsave(&pending_free_lock, flags);
+	was_empty = list_empty(&pending_free);
+	list_add(&req->free_list, &pending_free);
+	spin_unlock_irqrestore(&pending_free_lock, flags);
+	if (was_empty)
+		wake_up(&pending_free_wq);
+}
+
+
+static void scsiback_notify_work(struct vscsibk_info *info)
+{
+	info->waiting_reqs = 1;
+	wake_up(&info->wq);
+}
+
+void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
+			uint32_t resid, pending_req_t *pending_req)
+{
+	vscsiif_response_t *ring_res;
+	struct vscsibk_info *info = pending_req->info;
+	int notify;
+	int more_to_do = 1;
+	struct scsi_sense_hdr sshdr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->ring_lock, flags);
+
+	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
+	info->ring.rsp_prod_pvt++;
+
+	ring_res->rslt   = result;
+	ring_res->rqid   = pending_req->rqid;
+
+	if (sense_buffer != NULL) {
+		if (scsi_normalize_sense(sense_buffer,
+			VSCSIIF_SENSE_BUFFERSIZE, &sshdr)) {
+
+			int len = 8 + sense_buffer[7];
+
+			if (len > VSCSIIF_SENSE_BUFFERSIZE)
+				len = VSCSIIF_SENSE_BUFFERSIZE;
+
+			memcpy(ring_res->sense_buffer, sense_buffer, len);
+			ring_res->sense_len = len;
+		}
+	} else {
+		ring_res->sense_len = 0;
+	}
+
+	ring_res->residual_len = resid;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
+	if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
+		RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
+	} else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
+		more_to_do = 1;
+	}
+	
+	spin_unlock_irqrestore(&info->ring_lock, flags);
+
+	if (more_to_do)
+		scsiback_notify_work(info);
+
+	if (notify)
+		notify_remote_via_irq(info->irq);
+
+	free_req(pending_req);
+}
+
+static void scsiback_print_status(char *sense_buffer, int errors,
+					pending_req_t *pending_req)
+{
+	struct scsi_device *sdev = pending_req->sdev;
+	
+	printk(KERN_ERR "scsiback: %d:%d:%d:%d ",sdev->host->host_no,
+			sdev->channel, sdev->id, sdev->lun);
+	printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
+			status_byte(errors), msg_byte(errors),
+			host_byte(errors), driver_byte(errors));
+
+	printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n",
+			pending_req->cmnd[0]);
+
+	if (CHECK_CONDITION & status_byte(errors))
+		__scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
+}
+
+
+static void scsiback_cmd_done(struct request *req, int uptodate)
+{
+	pending_req_t *pending_req = req->end_io_data;
+	unsigned char *sense_buffer;
+	unsigned int resid;
+	int errors;
+
+	sense_buffer = req->sense;
+	resid        = req->resid_len;
+	errors       = req->errors;
+
+	/*
+	 * Print status details only when error logging is enabled via
+	 * the log_print_stat module parameter; the error itself is
+	 * still reported back to the frontend below.
+	 */
+	if (errors != 0 && log_print_stat)
+		scsiback_print_status(sense_buffer, errors, pending_req);
+
+	/* In host mode the response bypasses emulation. */
+	if (pending_req->info->feature != VSCSI_TYPE_HOST)
+		scsiback_rsp_emulation(pending_req);
+
+	scsiback_fast_flush_area(pending_req);
+	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
+	scsiback_put(pending_req->info);
+
+	__blk_put_request(req->q, req);
+}
+
+
+static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
+					pending_req_t *pending_req)
+{
+	u32 flags;
+	int write;
+	int i, err = 0;
+	unsigned int data_len = 0;
+	struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
+	struct vscsibk_info *info   = pending_req->info;
+
+	int data_dir = (int)pending_req->sc_data_direction;
+	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
+
+	write = (data_dir == DMA_TO_DEVICE);
+
+	if (nr_segments) {
+		for (i = 0; i < nr_segments; i++) {
+			flags = GNTMAP_host_map;
+			if (write)
+				flags |= GNTMAP_readonly;
+			gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
+						ring_req->seg[i].gref,
+						info->domid);
+		}
+
+		err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
+		BUG_ON(err);
+
+		for (i = 0; i < nr_segments; i++) {
+			if (unlikely(map[i].status != 0)) {
+				printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
+				map[i].handle = SCSIBACK_INVALID_HANDLE;
+				err |= 1;
+			}
+
+			pending_handle(pending_req, i) = map[i].handle;
+
+			if (err)
+				continue;
+
+			set_phys_to_machine(__pa(vaddr(
+				pending_req, i)) >> PAGE_SHIFT,
+				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
+
+			pending_req->sg_elem[i].sg_vaddr = (char *)vaddr(pending_req, i) + ring_req->seg[i].offset;
+			pending_req->sg_elem[i].sg_length = (int)ring_req->seg[i].length;
+			barrier();
+		}
+
+		if (err)
+			goto fail_flush;
+	}
+	
+	pending_req->request_bufflen = data_len;
+	
+	return 0;
+	
+fail_flush:
+	scsiback_fast_flush_area(pending_req);
+	return -ENOMEM;
+}
+
+static int request_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
+{
+	struct request_queue *q = rq->q;
+	unsigned int nsegs = count;
+	int i;
+
+	for (i = 0; i < nsegs; i++) {
+		if (blk_rq_map_kern(q, rq,
+			pending_req->sg_elem[i].sg_vaddr,
+			pending_req->sg_elem[i].sg_length,
+			GFP_KERNEL))
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+
+void scsiback_cmd_exec(pending_req_t *pending_req)
+{
+	int cmd_len  = (int)pending_req->cmd_len;
+	int data_dir = (int)pending_req->sc_data_direction;
+	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
+	unsigned int timeout;
+	struct request *rq;
+	int write;
+
+	/* Do not let the backend time out earlier than the frontend. */
+	if (pending_req->timeout_per_command)
+		timeout = pending_req->timeout_per_command * HZ;
+	else
+		timeout = VSCSIIF_TIMEOUT;
+
+	write = (data_dir == DMA_TO_DEVICE);
+	rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_len = cmd_len;
+	memcpy(rq->cmd, pending_req->cmnd, cmd_len);
+
+	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
+	rq->sense       = pending_req->sense_buffer;
+	rq->sense_len = 0;
+
+	/* not allowed to retry in backend. */
+	rq->retries   = 0;
+	rq->timeout   = timeout;
+	rq->end_io_data = pending_req;
+
+	if (nr_segments) {
+
+		if (request_map_sg(rq, pending_req, nr_segments)) {
+			printk(KERN_ERR "scsiback: SG Request Map Error\n");
+			return;
+		}
+	}
+
+	scsiback_get(pending_req->info);
+	blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);
+
+	return ;
+}
+
+
+static void scsiback_device_reset_exec(pending_req_t *pending_req)
+{
+	struct vscsibk_info *info = pending_req->info;
+	int err;
+	struct scsi_device *sdev = pending_req->sdev;
+
+	scsiback_get(info);
+	err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);
+
+	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
+	scsiback_put(info);
+
+	return;
+}
+
+
+irqreturn_t scsiback_intr(int irq, void *dev_id)
+{
+	scsiback_notify_work((struct vscsibk_info *)dev_id);
+	return IRQ_HANDLED;
+}
+
+static int prepare_pending_reqs(struct vscsibk_info *info,
+		vscsiif_request_t *ring_req, pending_req_t *pending_req)
+{
+	struct scsi_device *sdev;
+	struct ids_tuple vir;
+	int err = -EINVAL;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	pending_req->rqid       = ring_req->rqid;
+	pending_req->act        = ring_req->act;
+
+	pending_req->info       = info;
+
+	pending_req->v_chn = vir.chn = ring_req->channel;
+	pending_req->v_tgt = vir.tgt = ring_req->id;
+	vir.lun = ring_req->lun;
+
+	rmb();
+	sdev = scsiback_do_translation(info, &vir);
+	if (!sdev) {
+		pending_req->sdev = NULL;
+		DPRINTK("scsiback: doesn't exist.\n");
+		err = -ENODEV;
+		goto invalid_value;
+	}
+	pending_req->sdev = sdev;
+
+	/* request range check from frontend */
+	pending_req->sc_data_direction = ring_req->sc_data_direction;
+	barrier();
+	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
+		(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
+		(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
+		(pending_req->sc_data_direction != DMA_NONE)) {
+		DPRINTK("scsiback: invalid parameter data_dir = %d\n",
+			pending_req->sc_data_direction);
+		err = -EINVAL;
+		goto invalid_value;
+	}
+
+	pending_req->nr_segments = ring_req->nr_segments;
+	barrier();
+	if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
+		DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
+			pending_req->nr_segments);
+		err = -EINVAL;
+		goto invalid_value;
+	}
+
+	pending_req->cmd_len = ring_req->cmd_len;
+	barrier();
+	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
+		DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
+			pending_req->cmd_len);
+		err = -EINVAL;
+		goto invalid_value;
+	}
+	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
+	
+	pending_req->timeout_per_command = ring_req->timeout_per_command;
+
+	if (scsiback_gnttab_data_map(ring_req, pending_req)) {
+		DPRINTK("scsiback: invalid buffer\n");
+		err = -EINVAL;
+		goto invalid_value;
+	}
+
+	return 0;
+
+invalid_value:
+	return err;
+}
+
+
+static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+{
+	struct vscsiif_back_ring *ring = &info->ring;
+	vscsiif_request_t  *ring_req;
+
+	pending_req_t *pending_req;
+	RING_IDX rc, rp;
+	int err, more_to_do = 0;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	rc = ring->req_cons;
+	rp = ring->sring->req_prod;
+	rmb();
+
+	while ((rc != rp)) {
+		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
+			break;
+		pending_req = alloc_req(info);
+		if (NULL == pending_req) {
+			more_to_do = 1;
+			break;
+		}
+
+		ring_req = RING_GET_REQUEST(ring, rc);
+		ring->req_cons = ++rc;
+
+		err = prepare_pending_reqs(info, ring_req,
+						pending_req);
+		if (err == -EINVAL) {
+			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
+				0, pending_req);
+			continue;
+		} else if (err == -ENODEV) {
+			scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
+				0, pending_req);
+			continue;
+		}
+
+		if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
+
+			/* In host mode the request bypasses emulation. */
+			if (info->feature == VSCSI_TYPE_HOST)
+				scsiback_cmd_exec(pending_req);
+			else
+				scsiback_req_emulation_or_cmdexec(pending_req);
+
+		} else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
+			scsiback_device_reset_exec(pending_req);
+		} else {
+			printk(KERN_ERR "scsiback: invalid parameter for request\n");
+			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
+				0, pending_req);
+			continue;
+		}
+	}
+
+	if (RING_HAS_UNCONSUMED_REQUESTS(ring))
+		more_to_do = 1;
+
+	/* Yield point for this unbounded loop. */
+	cond_resched();
+
+	return more_to_do;
+}
+
+
+int scsiback_schedule(void *data)
+{
+	struct vscsibk_info *info = (struct vscsibk_info *)data;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(
+			info->wq,
+			info->waiting_reqs || kthread_should_stop());
+		wait_event_interruptible(
+			pending_free_wq,
+			!list_empty(&pending_free) || kthread_should_stop());
+
+		info->waiting_reqs = 0;
+		smp_mb();
+
+		if (scsiback_do_cmd_fn(info))
+			info->waiting_reqs = 1;
+	}
+
+	return 0;
+}
+
+
+static int __init scsiback_init(void)
+{
+	int i, mmap_pages;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;
+
+	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
+					vscsiif_reqs, GFP_KERNEL);
+	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
+					mmap_pages, GFP_KERNEL);
+	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);
+
+	if (!pending_reqs || !pending_grant_handles || !pending_pages)
+		goto out_of_memory;
+
+	for (i = 0; i < mmap_pages; i++)
+		pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;
+
+	if (scsiback_interface_init() < 0)
+		goto out_of_kmem;
+
+	memset(pending_reqs, 0, vscsiif_reqs * sizeof(pending_reqs[0]));
+	INIT_LIST_HEAD(&pending_free);
+
+	for (i = 0; i < vscsiif_reqs; i++)
+		list_add_tail(&pending_reqs[i].free_list, &pending_free);
+
+	if (scsiback_xenbus_init())
+		goto out_of_xenbus;
+
+	scsiback_emulation_init();
+
+	return 0;
+
+out_of_xenbus:
+	scsiback_xenbus_unregister();
+out_of_kmem:
+	scsiback_interface_exit();
+out_of_memory:
+	kfree(pending_reqs);
+	kfree(pending_grant_handles);
+	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
+	printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
+	return -ENOMEM;
+}
+
+#if 0
+static void __exit scsiback_exit(void)
+{
+	scsiback_xenbus_unregister();
+	scsiback_interface_exit();
+	kfree(pending_reqs);
+	kfree(pending_grant_handles);
+	free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));
+
+}
+#endif
+
+module_init(scsiback_init);
+
+#if 0
+module_exit(scsiback_exit);
+#endif
+
+MODULE_DESCRIPTION("Xen SCSI backend driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/xen/scsiback/translate.c b/drivers/xen/scsiback/translate.c
new file mode 100644
index 0000000..25510c5
--- /dev/null
+++ b/drivers/xen/scsiback/translate.c
@@ -0,0 +1,168 @@
+/*
+ * Xen SCSI backend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/gfp.h>
+
+#include "common.h"
+
+/*
+  Initialize the translation entry list
+*/
+void scsiback_init_translation_table(struct vscsibk_info *info)
+{
+	INIT_LIST_HEAD(&info->v2p_entry_lists);
+	spin_lock_init(&info->v2p_lock);
+}
+
+
+/*
+  Add a new translation entry
+*/
+int scsiback_add_translation_entry(struct vscsibk_info *info,
+			struct scsi_device *sdev, struct ids_tuple *v)
+{
+	int err = 0;
+	struct v2p_entry *entry;
+	struct v2p_entry *new;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+	
+	spin_lock_irqsave(&info->v2p_lock, flags);
+
+	/* Check double assignment to identical virtual ID */
+	list_for_each_entry(entry, head, l) {
+		if ((entry->v.chn == v->chn) &&
+		    (entry->v.tgt == v->tgt) &&
+		    (entry->v.lun == v->lun)) {
+			printk(KERN_WARNING "scsiback: Virtual ID is already used. "
+			       "Assignment was not performed.\n");
+			err = -EEXIST;
+			goto out;
+		}
+
+	}
+
+	/* Create a new translation entry and add to the list */
+	if ((new = kmalloc(sizeof(struct v2p_entry), GFP_ATOMIC)) == NULL) {
+		printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
+		err = -ENOMEM;
+		goto out;
+	}
+	new->v = *v;
+	new->sdev = sdev;
+	list_add_tail(&new->l, head);
+
+out:	
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return err;
+}
+
+
+/*
+  Delete the specified translation entry
+*/
+int scsiback_del_translation_entry(struct vscsibk_info *info,
+				struct ids_tuple *v)
+{
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	/* Find out the translation entry specified */
+	list_for_each_entry(entry, head, l) {
+		if ((entry->v.chn == v->chn) &&
+		    (entry->v.tgt == v->tgt) &&
+		    (entry->v.lun == v->lun)) {
+			goto found;
+		}
+	}
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return 1;
+
+found:
+	/* Delete the specified translation entry */
+	scsi_device_put(entry->sdev);
+	list_del(&entry->l);
+	kfree(entry);
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return 0;
+}
+
+
+/*
+  Perform virtual to physical translation
+*/
+struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
+			struct ids_tuple *v)
+{
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	struct scsi_device *sdev = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, head, l) {
+		if ((entry->v.chn == v->chn) &&
+		    (entry->v.tgt == v->tgt) &&
+		    (entry->v.lun == v->lun)) {
+			sdev = entry->sdev;
+			goto out;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return sdev;
+}
+
+
+/*
+  Release all translation entries
+*/
+void scsiback_release_translation_entry(struct vscsibk_info *info)
+{
+	struct v2p_entry *entry, *tmp;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry_safe(entry, tmp, head, l) {
+		scsi_device_put(entry->sdev);
+		list_del(&entry->l);
+		kfree(entry);
+	}
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return;
+
+}
diff --git a/drivers/xen/scsiback/xenbus.c b/drivers/xen/scsiback/xenbus.c
new file mode 100644
index 0000000..5f8e345
--- /dev/null
+++ b/drivers/xen/scsiback/xenbus.c
@@ -0,0 +1,378 @@
+/*
+ * Xen SCSI backend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * Based on the blkback driver code.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+
+#include "common.h"
+
+struct backend_info
+{
+	struct xenbus_device *dev;
+	struct vscsibk_info *info;
+};
+
+
+static int __vscsiif_name(struct backend_info *be, char *buf)
+{
+	struct xenbus_device *dev = be->dev;
+	unsigned int domid, id;
+
+	sscanf(dev->nodename, "backend/vscsi/%u/%u", &domid, &id);
+	snprintf(buf, TASK_COMM_LEN, "vscsi.%u.%u", be->info->domid, id);
+
+	return 0;
+}
+
+static int scsiback_map(struct backend_info *be)
+{
+	struct xenbus_device *dev = be->dev;
+	unsigned long ring_ref;
+	unsigned int evtchn;
+	int err;
+	char name[TASK_COMM_LEN];
+
+	err = xenbus_gather(XBT_NIL, dev->otherend,
+			"ring-ref", "%lu", &ring_ref,
+			"event-channel", "%u", &evtchn, NULL);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
+		return err;
+	}
+
+	err = scsiback_init_sring(be->info, ring_ref, evtchn);
+	if (err)
+		return err;
+
+	err = __vscsiif_name(be, name);
+	if (err) {
+		xenbus_dev_error(dev, err, "get scsiback dev name");
+		return err;
+	}
+
+	be->info->kthread = kthread_run(scsiback_schedule, be->info, "%s", name);
+	if (IS_ERR(be->info->kthread)) {
+		err = PTR_ERR(be->info->kthread);
+		be->info->kthread = NULL;
+		xenbus_dev_error(be->dev, err, "start vscsiif");
+		return err;
+	}
+
+	return 0;
+}
+
+
+struct scsi_device *scsiback_get_scsi_device(struct ids_tuple *phy)
+{
+	struct Scsi_Host *shost;
+	struct scsi_device *sdev = NULL;
+
+	shost = scsi_host_lookup(phy->hst);
+	if (IS_ERR(shost)) {
+		printk(KERN_ERR "scsiback: host%d doesn't exist.\n",
+			phy->hst);
+		return NULL;
+	}
+	sdev   = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun);
+	if (!sdev) {
+		printk(KERN_ERR "scsiback: %d:%d:%d:%d doesn't exist.\n",
+			phy->hst, phy->chn, phy->tgt, phy->lun);
+		scsi_host_put(shost);
+		return NULL;
+	}
+
+	scsi_host_put(shost);
+	return (sdev);
+}
+
+#define VSCSIBACK_OP_ADD_OR_DEL_LUN	1
+#define VSCSIBACK_OP_UPDATEDEV_STATE	2
+
+
+static void scsiback_do_lun_hotplug(struct backend_info *be, int op)
+{
+	int i, err = 0;
+	struct ids_tuple phy, vir;
+	int device_state;
+	char str[64], state_str[64];
+	char **dir;
+	unsigned int dir_n = 0;
+	struct xenbus_device *dev = be->dev;
+	struct scsi_device *sdev;
+
+	dir = xenbus_directory(XBT_NIL, dev->nodename, "vscsi-devs", &dir_n);
+	if (IS_ERR(dir))
+		return;
+
+	for (i = 0; i < dir_n; i++) {
+		
+		/* read status */
+		snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]);
+		err = xenbus_scanf(XBT_NIL, dev->nodename, state_str, "%u",
+			&device_state);
+		if (XENBUS_EXIST_ERR(err))
+			continue;
+
+		/* physical SCSI device */
+		snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", dir[i]);
+		err = xenbus_scanf(XBT_NIL, dev->nodename, str,
+			"%u:%u:%u:%u", &phy.hst, &phy.chn, &phy.tgt, &phy.lun);
+		if (XENBUS_EXIST_ERR(err)) {
+			xenbus_printf(XBT_NIL, dev->nodename, state_str,
+					"%d", XenbusStateClosed);
+			continue;
+		}
+
+		/* virtual SCSI device */
+		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
+		err = xenbus_scanf(XBT_NIL, dev->nodename, str,
+			"%u:%u:%u:%u", &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
+		if (XENBUS_EXIST_ERR(err)) {
+			xenbus_printf(XBT_NIL, dev->nodename, state_str,
+					"%d", XenbusStateClosed);
+			continue;
+		}
+
+		switch (op) {
+		case VSCSIBACK_OP_ADD_OR_DEL_LUN:
+			if (device_state == XenbusStateInitialising) {
+				sdev = scsiback_get_scsi_device(&phy);
+				if (!sdev)
+					xenbus_printf(XBT_NIL, dev->nodename, state_str, 
+							    "%d", XenbusStateClosed);
+				else {
+					err = scsiback_add_translation_entry(be->info, sdev, &vir);
+					if (!err) {
+						if (xenbus_printf(XBT_NIL, dev->nodename, state_str, 
+								    "%d", XenbusStateInitialised)) {
+							printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
+							scsiback_del_translation_entry(be->info, &vir);
+						}
+					} else {
+						scsi_device_put(sdev);
+						xenbus_printf(XBT_NIL, dev->nodename, state_str, 
+								    "%d", XenbusStateClosed);
+					}
+				}
+			}
+
+			if (device_state == XenbusStateClosing) {
+				if (!scsiback_del_translation_entry(be->info, &vir)) {
+					if (xenbus_printf(XBT_NIL, dev->nodename, state_str, 
+							    "%d", XenbusStateClosed))
+						printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
+				}
+			}
+			break;
+
+		case VSCSIBACK_OP_UPDATEDEV_STATE:
+			if (device_state == XenbusStateInitialised) {
+				/* modify vscsi-devs/dev-x/state */
+				if (xenbus_printf(XBT_NIL, dev->nodename, state_str, 
+						    "%d", XenbusStateConnected)) {
+					printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
+					scsiback_del_translation_entry(be->info, &vir);
+					xenbus_printf(XBT_NIL, dev->nodename, state_str, 
+							    "%d", XenbusStateClosed);
+				}
+			}
+			break;
+		/* Additional processing can be added here when necessary. */
+		default:
+			break;
+		}
+	}
+
+	kfree(dir);
+	return;
+}
+
+
+static void scsiback_frontend_changed(struct xenbus_device *dev,
+					enum xenbus_state frontend_state)
+{
+	struct backend_info *be = dev->dev.driver_data;
+	int err;
+
+	switch (frontend_state) {
+	case XenbusStateInitialising:
+		break;
+	case XenbusStateInitialised:
+		err = scsiback_map(be);
+		if (err)
+			break;
+
+		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
+		xenbus_switch_state(dev, XenbusStateConnected);
+
+		break;
+	case XenbusStateConnected:
+
+		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_UPDATEDEV_STATE);
+
+		if (dev->state == XenbusStateConnected)
+			break;
+
+		xenbus_switch_state(dev, XenbusStateConnected);
+
+		break;
+
+	case XenbusStateClosing:
+		scsiback_disconnect(be->info);
+		xenbus_switch_state(dev, XenbusStateClosing);
+		break;
+
+	case XenbusStateClosed:
+		xenbus_switch_state(dev, XenbusStateClosed);
+		if (xenbus_dev_is_online(dev))
+			break;
+		/* fall through if not online */
+	case XenbusStateUnknown:
+		device_unregister(&dev->dev);
+		break;
+
+	case XenbusStateReconfiguring:
+		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
+
+		xenbus_switch_state(dev, XenbusStateReconfigured);
+
+		break;
+
+	default:
+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+					frontend_state);
+		break;
+	}
+}
+
+
+static int scsiback_remove(struct xenbus_device *dev)
+{
+	struct backend_info *be = dev->dev.driver_data;
+
+	if (be->info) {
+		scsiback_disconnect(be->info);
+		scsiback_release_translation_entry(be->info);
+		scsiback_free(be->info);
+		be->info = NULL;
+	}
+
+	kfree(be);
+	dev->dev.driver_data = NULL;
+
+	return 0;
+}
+
+
+static int scsiback_probe(struct xenbus_device *dev,
+			   const struct xenbus_device_id *id)
+{
+	int err;
+	int val = 0;
+
+	struct backend_info *be = kzalloc(sizeof(struct backend_info),
+					  GFP_KERNEL);
+
+	DPRINTK("%p %d\n", dev, dev->otherend_id);
+
+	if (!be) {
+		xenbus_dev_fatal(dev, -ENOMEM,
+				 "allocating backend structure");
+		return -ENOMEM;
+	}
+	be->dev = dev;
+	dev->dev.driver_data = be;
+
+	be->info = vscsibk_info_alloc(dev->otherend_id);
+	if (IS_ERR(be->info)) {
+		err = PTR_ERR(be->info);
+		be->info = NULL;
+		xenbus_dev_fatal(dev, err, "creating scsihost interface");
+		goto fail;
+	}
+
+	be->info->dev = dev;
+	be->info->irq = 0;
+	be->info->feature = 0;	/*default not HOSTMODE.*/
+
+	scsiback_init_translation_table(be->info);
+
+	err = xenbus_scanf(XBT_NIL, dev->nodename,
+				"feature-host", "%d", &val);
+	if (XENBUS_EXIST_ERR(err))
+		val = 0;
+
+	if (val)
+		be->info->feature = VSCSI_TYPE_HOST;
+
+	err = xenbus_switch_state(dev, XenbusStateInitWait);
+	if (err)
+		goto fail;
+
+	return 0;
+
+
+fail:
+	printk(KERN_WARNING "scsiback: %s failed\n", __FUNCTION__);
+	scsiback_remove(dev);
+
+	return err;
+}
+
+
+static struct xenbus_device_id scsiback_ids[] = {
+	{ "vscsi" },
+	{ "" }
+};
+
+static struct xenbus_driver scsiback = {
+	.name			= "vscsi",
+	.owner			= THIS_MODULE,
+	.ids			= scsiback_ids,
+	.probe			= scsiback_probe,
+	.remove			= scsiback_remove,
+	.otherend_changed	= scsiback_frontend_changed
+};
+
+int scsiback_xenbus_init(void)
+{
+	return xenbus_register_backend(&scsiback);
+}
+
+void scsiback_xenbus_unregister(void)
+{
+	xenbus_unregister_driver(&scsiback);
+}
diff --git a/drivers/xen/usbback/Makefile b/drivers/xen/usbback/Makefile
new file mode 100644
index 0000000..a7548cb
--- /dev/null
+++ b/drivers/xen/usbback/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_XEN_USB_BACKEND) := usbbk.o
+
+usbbk-y   := usbstub.o xenbus.o interface.o usbback.o
+
diff --git a/drivers/xen/usbback/interface.c b/drivers/xen/usbback/interface.c
new file mode 100644
index 0000000..905bc18
--- /dev/null
+++ b/drivers/xen/usbback/interface.c
@@ -0,0 +1,250 @@
+/*
+ * interface.c
+ *
+ * Xen USB backend interface management.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * or, by your choice,
+ *
+ * When distributed separately from the Linux kernel or incorporated into
+ * other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "usbback.h"
+
+static LIST_HEAD(usbif_list);
+static DEFINE_SPINLOCK(usbif_list_lock);
+
+usbif_t *find_usbif(domid_t domid, unsigned int handle)
+{
+	usbif_t *usbif;
+	int found = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&usbif_list_lock, flags);
+	list_for_each_entry(usbif, &usbif_list, usbif_list) {
+		if (usbif->domid == domid
+			&& usbif->handle == handle) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&usbif_list_lock, flags);
+
+	if (found)
+		return usbif;
+
+	return NULL;
+}
+
+usbif_t *usbif_alloc(domid_t domid, unsigned int handle)
+{
+	usbif_t *usbif;
+	unsigned long flags;
+	int i;
+
+	usbif = kzalloc(sizeof(usbif_t), GFP_KERNEL);
+	if (!usbif)
+		return NULL;
+
+	usbif->domid = domid;
+	usbif->handle = handle;
+	spin_lock_init(&usbif->urb_ring_lock);
+	spin_lock_init(&usbif->conn_ring_lock);
+	atomic_set(&usbif->refcnt, 0);
+	init_waitqueue_head(&usbif->wq);
+	init_waitqueue_head(&usbif->waiting_to_free);
+	spin_lock_init(&usbif->stub_list_lock);
+	INIT_LIST_HEAD(&usbif->stub_list);
+	spin_lock_init(&usbif->addr_lock);
+	/* TODO: check -- addr_table[] is already zeroed by kzalloc(), so this loop may be redundant. */
+	for (i = 0; i < USB_DEV_ADDR_SIZE; i++) {
+		usbif->addr_table[i] = NULL;
+	}
+
+	spin_lock_irqsave(&usbif_list_lock, flags);
+	list_add(&usbif->usbif_list, &usbif_list);
+	spin_unlock_irqrestore(&usbif_list_lock, flags);
+
+	return usbif;
+}
+
+static int map_frontend_pages(usbif_t *usbif,
+				grant_ref_t urb_ring_ref,
+				grant_ref_t conn_ring_ref)
+{
+	struct gnttab_map_grant_ref op;
+
+	gnttab_set_map_op(&op, (unsigned long)usbif->urb_ring_area->addr,
+			  GNTMAP_host_map, urb_ring_ref, usbif->domid);
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+		BUG();
+
+	if (op.status) {
+		printk(KERN_ERR "grant table failure mapping urb_ring_ref\n");
+		return op.status;
+	}
+
+	usbif->urb_shmem_ref = urb_ring_ref;
+	usbif->urb_shmem_handle = op.handle;
+
+	gnttab_set_map_op(&op, (unsigned long)usbif->conn_ring_area->addr,
+			  GNTMAP_host_map, conn_ring_ref, usbif->domid);
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+		BUG();
+
+	if (op.status) {
+		struct gnttab_unmap_grant_ref unop;
+		
+		gnttab_set_unmap_op(&unop,
+					(unsigned long)usbif->urb_ring_area->addr,
+					GNTMAP_host_map, usbif->urb_shmem_handle);
+		HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unop, 1);
+		printk(KERN_ERR "grant table failure mapping conn_ring_ref\n");
+		return op.status;
+	}
+
+	usbif->conn_shmem_ref = conn_ring_ref;
+	usbif->conn_shmem_handle = op.handle;
+
+	return 0;
+}
+
+static void unmap_frontend_pages(usbif_t *usbif)
+{
+	struct gnttab_unmap_grant_ref op;
+
+	gnttab_set_unmap_op(&op, (unsigned long)usbif->urb_ring_area->addr,
+			    GNTMAP_host_map, usbif->urb_shmem_handle);
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+		BUG();
+
+	gnttab_set_unmap_op(&op, (unsigned long)usbif->conn_ring_area->addr,
+			    GNTMAP_host_map, usbif->conn_shmem_handle);
+
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+		BUG();
+}
+
+int usbif_map(usbif_t *usbif, unsigned long urb_ring_ref,
+		unsigned long conn_ring_ref, unsigned int evtchn)
+{
+	int err = -ENOMEM;
+	
+	usbif_urb_sring_t *urb_sring;
+	usbif_conn_sring_t *conn_sring;
+
+	if (usbif->irq)
+		return 0;
+
+	if ((usbif->urb_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
+		return err;
+	if ((usbif->conn_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
+		goto fail_alloc;
+
+	err = map_frontend_pages(usbif, urb_ring_ref, conn_ring_ref);
+	if (err)
+		goto fail_map;
+
+	err = bind_interdomain_evtchn_to_irqhandler(
+			usbif->domid, evtchn, usbbk_be_int, 0, "usbif-backend", usbif);
+	if (err < 0)
+		goto fail_evtchn;
+	usbif->irq = err;
+
+	urb_sring = (usbif_urb_sring_t *) usbif->urb_ring_area->addr;
+	BACK_RING_INIT(&usbif->urb_ring, urb_sring, PAGE_SIZE);	
+	
+	conn_sring = (usbif_conn_sring_t *) usbif->conn_ring_area->addr;
+	BACK_RING_INIT(&usbif->conn_ring, conn_sring, PAGE_SIZE);	
+	
+	return 0;
+
+fail_evtchn:
+	unmap_frontend_pages(usbif);
+fail_map:
+	free_vm_area(usbif->conn_ring_area);
+fail_alloc:
+	free_vm_area(usbif->urb_ring_area);
+
+	return err;
+}
+
+void usbif_disconnect(usbif_t *usbif)
+{
+	struct usbstub *stub, *tmp;
+	unsigned long flags;
+
+	if (usbif->xenusbd) {
+		kthread_stop(usbif->xenusbd);
+		usbif->xenusbd = NULL;
+	}
+
+	spin_lock_irqsave(&usbif->stub_list_lock, flags);
+	list_for_each_entry_safe(stub, tmp, &usbif->stub_list, dev_list) {
+		usbbk_unlink_urbs(stub);
+		detach_device_without_lock(usbif, stub);
+	}
+	spin_unlock_irqrestore(&usbif->stub_list_lock, flags);
+
+	wait_event(usbif->waiting_to_free, atomic_read(&usbif->refcnt) == 0);
+
+	if (usbif->irq) {
+		unbind_from_irqhandler(usbif->irq, usbif);
+		usbif->irq = 0;
+	}
+
+	if (usbif->urb_ring.sring) {
+		unmap_frontend_pages(usbif);
+		free_vm_area(usbif->urb_ring_area);
+		free_vm_area(usbif->conn_ring_area);
+		usbif->urb_ring.sring = NULL;
+		usbif->conn_ring.sring = NULL;
+	}
+}
+
+void usbif_free(usbif_t *usbif)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&usbif_list_lock, flags);
+	list_del(&usbif->usbif_list);
+	spin_unlock_irqrestore(&usbif_list_lock, flags);
+	kfree(usbif);
+}
diff --git a/drivers/xen/usbback/usbback.c b/drivers/xen/usbback/usbback.c
new file mode 100644
index 0000000..fb8d3c9
--- /dev/null
+++ b/drivers/xen/usbback/usbback.c
@@ -0,0 +1,1171 @@
+/*
+ * usbback.c
+ *
+ * Xen USB backend driver
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * or, by your choice,
+ *
+ * When distributed separately from the Linux kernel or incorporated into
+ * other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/mm.h>
+#include <xen/balloon.h>
+#include "usbback.h"
+
+#if 0
+#include "../../usb/core/hub.h"
+#endif
+
+int usbif_reqs = USBIF_BACK_MAX_PENDING_REQS;
+module_param_named(reqs, usbif_reqs, int, 0);
+MODULE_PARM_DESC(reqs, "Number of usbback requests to allocate");
+
+struct pending_req_segment {
+	uint16_t offset;
+	uint16_t length;
+};
+
+typedef struct {
+	usbif_t *usbif;
+
+	uint16_t id; /* request id */
+
+	struct usbstub *stub;
+	struct list_head urb_list;
+
+	/* urb */
+	struct urb *urb;
+	void *buffer;
+	dma_addr_t transfer_dma;
+	struct usb_ctrlrequest *setup;
+	dma_addr_t setup_dma;
+
+	/* request segments */
+	uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
+	uint16_t nr_extra_segs; /* number of iso_frame_desc segments (ISO) */
+	struct pending_req_segment *seg;
+
+	struct list_head free_list;
+} pending_req_t;
+
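+/*
+ * Fixed pool of pending requests (sized by the 'reqs' module parameter);
+ * USBIF_MAX_SEGMENTS_PER_REQUEST grant-mappable pages and grant handles
+ * are reserved per request.
+ */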
+static pending_req_t *pending_reqs;
+static struct list_head pending_free;
+static DEFINE_SPINLOCK(pending_free_lock);
+static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
+
+#define USBBACK_INVALID_HANDLE (~0)
+
+static struct page **pending_pages;
+static grant_handle_t *pending_grant_handles;
+
+static inline int vaddr_pagenr(pending_req_t *req, int seg)
+{
+	return (req - pending_reqs) * USBIF_MAX_SEGMENTS_PER_REQUEST + seg;
+}
+
+static inline unsigned long vaddr(pending_req_t *req, int seg)
+{
+	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
+	return (unsigned long)pfn_to_kaddr(pfn);
+}
+
+#define pending_handle(_req, _seg) \
+	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
+
+static pending_req_t* alloc_req(void)
+{
+	pending_req_t *req = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pending_free_lock, flags);
+	if (!list_empty(&pending_free)) {
+		req = list_entry(pending_free.next, pending_req_t, free_list);
+		list_del(&req->free_list);
+	}
+	spin_unlock_irqrestore(&pending_free_lock, flags);
+	return req;
+}
+
+static void free_req(pending_req_t *req)
+{
+	unsigned long flags;
+	int was_empty;
+
+	spin_lock_irqsave(&pending_free_lock, flags);
+	was_empty = list_empty(&pending_free);
+	list_add(&req->free_list, &pending_free);
+	spin_unlock_irqrestore(&pending_free_lock, flags);
+	if (was_empty)
+		wake_up(&pending_free_wq);
+}
+
+static inline void add_req_to_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&stub->submitting_lock, flags);
+	list_add_tail(&pending_req->urb_list, &stub->submitting_list);
+	spin_unlock_irqrestore(&stub->submitting_lock, flags);
+}
+
+static inline void remove_req_from_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&stub->submitting_lock, flags);
+	list_del_init(&pending_req->urb_list);
+	spin_unlock_irqrestore(&stub->submitting_lock, flags);
+}
+
+void usbbk_unlink_urbs(struct usbstub *stub)
+{
+	pending_req_t *req, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&stub->submitting_lock, flags);
+	list_for_each_entry_safe(req, tmp, &stub->submitting_list, urb_list) {
+		usb_unlink_urb(req->urb);
+	}
+	spin_unlock_irqrestore(&stub->submitting_lock, flags);
+}
+
+static void fast_flush_area(pending_req_t *pending_req)
+{
+	struct gnttab_unmap_grant_ref unmap[USBIF_MAX_SEGMENTS_PER_REQUEST];
+	unsigned int i, nr_segs, invcount = 0;
+	grant_handle_t handle;
+	int ret;
+
+	nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
+
+	if (nr_segs) {
+		for (i = 0; i < nr_segs; i++) {
+			handle = pending_handle(pending_req, i);
+			if (handle == USBBACK_INVALID_HANDLE)
+				continue;
+			gnttab_set_unmap_op(&unmap[invcount], vaddr(pending_req, i),
+					    GNTMAP_host_map, handle);
+			pending_handle(pending_req, i) = USBBACK_INVALID_HANDLE;
+			invcount++;
+		}
+
+		ret = HYPERVISOR_grant_table_op(
+			GNTTABOP_unmap_grant_ref, unmap, invcount);
+		BUG_ON(ret);
+
+		kfree(pending_req->seg);
+	}
+
+	return;
+}
+
+static void copy_buff_to_pages(void *buff, pending_req_t *pending_req,
+		int start, int nr_pages)
+{
+	unsigned long copied = 0;
+	int i;
+
+	for (i = start; i < start + nr_pages; i++) {
+		memcpy((void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
+			buff + copied,
+			pending_req->seg[i].length);
+		copied += pending_req->seg[i].length;
+	}
+}
+
+static void copy_pages_to_buff(void *buff, pending_req_t *pending_req,
+		int start, int nr_pages)
+{
+	unsigned long copied = 0;
+	int i;
+
+	for (i = start; i < start + nr_pages; i++) {
+		memcpy(buff + copied,
+			(void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
+			pending_req->seg[i].length);
+		copied += pending_req->seg[i].length;
+	}
+}
+
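+/*
+ * Per-stub DMA buffer cache: buffers obtained from usb_buffer_alloc() are
+ * kept on small per-order freelists so they can be reused by later
+ * requests instead of being allocated and freed for every URB.
+ */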
+static void *usbbk_get_buffer(dma_freelist_t *dma_freelist, dma_addr_t *dma_addr)
+{
+	if (!dma_freelist->free_entry)
+		return NULL;
+
+	dma_freelist->free_entry--;
+
+	*dma_addr = dma_freelist->dma_addr[dma_freelist->free_entry];
+	return dma_freelist->v_addr[dma_freelist->free_entry];
+}
+
+static void usbbk_put_buffer(dma_freelist_t *dma_freelist, void *v_addr, dma_addr_t dma_addr)
+{
+	/* TODO: bounds check -- free_entry must stay below DMA_FREELIST_SIZE or the arrays below overflow */
+
+	dma_freelist->dma_addr[dma_freelist->free_entry] = dma_addr;
+	dma_freelist->v_addr[dma_freelist->free_entry] = v_addr;
+
+	dma_freelist->free_entry++;
+}
+
+static void *usbbk_buffer_alloc(pending_req_t *pending_req, int length, dma_addr_t *dma_addr)
+{
+	void *v_addr;
+	int order;
+
+	if (length > (1 << DMA_FREELIST_MAX_ORDER))
+		return NULL;
+
+	for (order = DMA_FREELIST_MIN_ORDER; order < DMA_FREELIST_MAX_ORDER; order++)
+		if ((1 << order) > length)
+			break;
+
+	v_addr = usbbk_get_buffer(&pending_req->stub->dma_freelist[order - DMA_FREELIST_MIN_ORDER], dma_addr);
+	if (v_addr)
+		return v_addr;
+
+	return usb_buffer_alloc(pending_req->stub->udev, length, GFP_KERNEL, dma_addr);
+}
+
+static void usbbk_buffer_free(pending_req_t *pending_req, int length, void *v_addr, dma_addr_t dma_addr)
+{
+	int order;
+
+	if (length > (1 << DMA_FREELIST_MAX_ORDER))
+		return;
+
+	for (order = DMA_FREELIST_MIN_ORDER; order < DMA_FREELIST_MAX_ORDER; order++)
+		if ((1 << order) > length)
+			break;
+
+	usbbk_put_buffer(&pending_req->stub->dma_freelist[order - DMA_FREELIST_MIN_ORDER], v_addr, dma_addr);
+
+	return;
+}
+
+static int usbbk_alloc_urb(usbif_urb_request_t *req, pending_req_t *pending_req)
+{
+	int ret;
+
+	if (usb_pipeisoc(req->pipe))
+		pending_req->urb = usb_alloc_urb(req->u.isoc.number_of_packets, GFP_KERNEL);
+	else
+		pending_req->urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!pending_req->urb) {
+		printk(KERN_ERR "usbback: can't alloc urb\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (req->buffer_length) {
+		pending_req->buffer = usbbk_buffer_alloc(pending_req,
+				req->buffer_length, &pending_req->transfer_dma);
+		if (!pending_req->buffer) {
+			printk(KERN_ERR "usbback: can't alloc urb buffer\n");
+			ret = -ENOMEM;
+			goto fail_free_urb;
+		}
+	}
+
+	if (usb_pipecontrol(req->pipe)) {
+		pending_req->setup = usbbk_buffer_alloc(pending_req,
+				sizeof(struct usb_ctrlrequest), &pending_req->setup_dma);
+		if (!pending_req->setup) {
+			printk(KERN_ERR "usbback: can't alloc usb_ctrlrequest\n");
+			ret = -ENOMEM;
+			goto fail_free_buffer;
+		}
+	}
+
+	return 0;
+
+fail_free_buffer:
+	if (req->buffer_length)
+		usbbk_buffer_free(pending_req, req->buffer_length,
+				pending_req->buffer, pending_req->transfer_dma);
+fail_free_urb:
+	usb_free_urb(pending_req->urb);
+fail:
+	return ret;
+}
+
+static void usbbk_free_urb(pending_req_t *req)
+{
+	if (usb_pipecontrol(req->urb->pipe))
+		usbbk_buffer_free(req, sizeof(struct usb_ctrlrequest),
+				req->urb->setup_packet, req->urb->setup_dma);
+	if (req->urb->transfer_buffer_length)
+		usbbk_buffer_free(req, req->urb->transfer_buffer_length,
+				req->urb->transfer_buffer, req->urb->transfer_dma);
+	barrier();
+	usb_free_urb(req->urb);
+}
+
+static void usbbk_notify_work(usbif_t *usbif)
+{
+	usbif->waiting_reqs = 1;
+	wake_up(&usbif->wq);
+}
+
+irqreturn_t usbbk_be_int(int irq, void *dev_id)
+{
+	usbbk_notify_work(dev_id);
+	return IRQ_HANDLED;
+}
+
+static void usbbk_do_response(pending_req_t *pending_req, int32_t status,
+					int32_t actual_length, int32_t error_count, uint16_t start_frame)
+{
+	usbif_t *usbif = pending_req->usbif;
+	usbif_urb_response_t *res;
+	unsigned long flags;
+	int notify;
+
+	spin_lock_irqsave(&usbif->urb_ring_lock, flags);
+	res = RING_GET_RESPONSE(&usbif->urb_ring, usbif->urb_ring.rsp_prod_pvt);
+	res->id = pending_req->id;
+	res->status = status;
+	res->actual_length = actual_length;
+	res->error_count = error_count;
+	res->start_frame = start_frame;
+	usbif->urb_ring.rsp_prod_pvt++;
+	barrier();
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->urb_ring, notify);
+	spin_unlock_irqrestore(&usbif->urb_ring_lock, flags);
+
+	if (notify)
+		notify_remote_via_irq(usbif->irq);
+}
+
+static void usbbk_urb_complete(struct urb *urb)
+{
+	pending_req_t *pending_req = (pending_req_t *)urb->context;
+
+	if (usb_pipein(urb->pipe) && urb->status == 0 && urb->actual_length > 0)
+		copy_buff_to_pages(pending_req->buffer, pending_req,
+					0, pending_req->nr_buffer_segs);
+
+	if (usb_pipeisoc(urb->pipe))
+		copy_buff_to_pages(&urb->iso_frame_desc[0], pending_req,
+					pending_req->nr_buffer_segs, pending_req->nr_extra_segs);
+
+	barrier();
+
+	fast_flush_area(pending_req);
+
+	usbbk_do_response(pending_req, urb->status, urb->actual_length,
+					urb->error_count, urb->start_frame);
+
+	remove_req_from_submitting_list(pending_req->stub, pending_req);
+
+	barrier();
+	usbbk_free_urb(pending_req);
+	usbif_put(pending_req->usbif);
+	free_req(pending_req);
+}
+
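+/*
+ * Map the frontend's grant references for this request into our
+ * pending_pages so that the transfer buffer (and, for ISO transfers, the
+ * frame descriptors) can be copied between guest memory and the locally
+ * allocated URB buffer.
+ */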
+static int usbbk_gnttab_map(usbif_t *usbif,
+			usbif_urb_request_t *req, pending_req_t *pending_req)
+{
+	int i, ret;
+	unsigned int nr_segs;
+	uint32_t flags;
+	struct gnttab_map_grant_ref map[USBIF_MAX_SEGMENTS_PER_REQUEST];
+
+	nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
+
+	if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) {
+		printk(KERN_ERR "Bad number of segments in request\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (nr_segs) {
+		pending_req->seg = kmalloc(sizeof(struct pending_req_segment)
+				* nr_segs, GFP_KERNEL);
+		if (!pending_req->seg) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		if (pending_req->nr_buffer_segs) {
+			flags = GNTMAP_host_map;
+			if (usb_pipeout(req->pipe))
+				flags |= GNTMAP_readonly;
+			for (i = 0; i < pending_req->nr_buffer_segs; i++)
+				gnttab_set_map_op(&map[i], vaddr(
+						pending_req, i), flags,
+						req->seg[i].gref,
+						usbif->domid);
+		}
+
+		if (pending_req->nr_extra_segs) {
+			flags = GNTMAP_host_map;
+			for (i = req->nr_buffer_segs; i < nr_segs; i++)
+				gnttab_set_map_op(&map[i], vaddr(
+						pending_req, i), flags,
+						req->seg[i].gref,
+						usbif->domid);
+		}
+
+		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
+					map, nr_segs);
+		BUG_ON(ret);
+
+		for (i = 0; i < nr_segs; i++) {
+			if (unlikely(map[i].status != 0)) {
+				printk(KERN_ERR "usbback: invalid buffer -- could not remap it\n");
+				map[i].handle = USBBACK_INVALID_HANDLE;
+				ret |= 1;
+			}
+
+			pending_handle(pending_req, i) = map[i].handle;
+
+			if (ret)
+				continue;
+
+			set_phys_to_machine(__pa(vaddr(
+				pending_req, i)) >> PAGE_SHIFT,
+				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
+
+			pending_req->seg[i].offset = req->seg[i].offset;
+			pending_req->seg[i].length = req->seg[i].length;
+
+			barrier();
+
+			if (pending_req->seg[i].offset >= PAGE_SIZE ||
+					pending_req->seg[i].length > PAGE_SIZE ||
+					pending_req->seg[i].offset + pending_req->seg[i].length > PAGE_SIZE)
+					ret |= 1;
+		}
+
+		if (ret)
+			goto fail_flush;
+	}
+
+	return 0;
+
+fail_flush:
+	fast_flush_area(pending_req);
+	ret = -ENOMEM;
+
+fail:
+	return ret;
+}
+
+static void usbbk_init_urb(usbif_urb_request_t *req, pending_req_t *pending_req)
+{
+	unsigned int pipe;
+	struct usb_device *udev = pending_req->stub->udev;
+	struct urb *urb = pending_req->urb;
+
+	switch (usb_pipetype(req->pipe)) {
+	case PIPE_ISOCHRONOUS:
+		if (usb_pipein(req->pipe))
+			pipe = usb_rcvisocpipe(udev, usb_pipeendpoint(req->pipe));
+		else
+			pipe = usb_sndisocpipe(udev, usb_pipeendpoint(req->pipe));
+
+		urb->dev = udev;
+		urb->pipe = pipe;
+		urb->transfer_flags = req->transfer_flags;
+		urb->transfer_flags |= URB_ISO_ASAP;
+		urb->transfer_buffer = pending_req->buffer;
+		urb->transfer_buffer_length = req->buffer_length;
+		urb->complete = usbbk_urb_complete;
+		urb->context = pending_req;
+		urb->interval = req->u.isoc.interval;
+		urb->start_frame = req->u.isoc.start_frame;
+		urb->number_of_packets = req->u.isoc.number_of_packets;
+
+		break;
+	case PIPE_INTERRUPT:
+		if (usb_pipein(req->pipe))
+			pipe = usb_rcvintpipe(udev, usb_pipeendpoint(req->pipe));
+		else
+			pipe = usb_sndintpipe(udev, usb_pipeendpoint(req->pipe));
+
+		usb_fill_int_urb(urb, udev, pipe,
+				pending_req->buffer, req->buffer_length,
+				usbbk_urb_complete,
+				pending_req, req->u.intr.interval);
+		/*
+		 * High speed interrupt endpoints use a logarithmic encoding
+		 * of the endpoint interval, and usb_fill_int_urb() stores the
+		 * encoded value in the urb.
+		 *
+		 * req->u.intr.interval was already encoded by the frontend,
+		 * so the usb_fill_int_urb() call above would double-encode it.
+		 *
+		 * Simply overwrite urb->interval with the original value.
+		 */
+		urb->interval = req->u.intr.interval;
+		urb->transfer_flags = req->transfer_flags;
+
+		break;
+	case PIPE_CONTROL:
+		if (usb_pipein(req->pipe))
+			pipe = usb_rcvctrlpipe(udev, 0);
+		else
+			pipe = usb_sndctrlpipe(udev, 0);
+
+		usb_fill_control_urb(urb, udev, pipe,
+				(unsigned char *) pending_req->setup,
+				pending_req->buffer, req->buffer_length,
+				usbbk_urb_complete, pending_req);
+		memcpy(pending_req->setup, req->u.ctrl, sizeof(struct usb_ctrlrequest));
+		urb->setup_dma = pending_req->setup_dma;
+		urb->transfer_flags = req->transfer_flags;
+		urb->transfer_flags |= URB_NO_SETUP_DMA_MAP;
+
+		break;
+	case PIPE_BULK:
+		if (usb_pipein(req->pipe))
+			pipe = usb_rcvbulkpipe(udev, usb_pipeendpoint(req->pipe));
+		else
+			pipe = usb_sndbulkpipe(udev, usb_pipeendpoint(req->pipe));
+
+		usb_fill_bulk_urb(urb, udev, pipe,
+				pending_req->buffer, req->buffer_length,
+				usbbk_urb_complete, pending_req);
+		urb->transfer_flags = req->transfer_flags;
+
+		break;
+	default:
+		break;
+	}
+
+	if (req->buffer_length) {
+		urb->transfer_dma = pending_req->transfer_dma;
+		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+	}
+}
+
+struct set_interface_request {
+	pending_req_t *pending_req;
+	int interface;
+	int alternate;
+	struct work_struct work;
+};
+
+static void usbbk_set_interface_work(struct work_struct *work)
+{
+	struct set_interface_request *req = container_of(work, struct set_interface_request, work);
+	pending_req_t *pending_req = req->pending_req;
+	struct usb_device *udev = req->pending_req->stub->udev;
+
+	int ret;
+
+	usb_lock_device(udev);
+	ret = usb_set_interface(udev, req->interface, req->alternate);
+	usb_unlock_device(udev);
+	usb_put_dev(udev);
+
+	usbbk_do_response(pending_req, ret, 0, 0, 0);
+	usbif_put(pending_req->usbif);
+	free_req(pending_req);
+	kfree(req);
+}
+
+static int usbbk_set_interface(pending_req_t *pending_req, int interface, int alternate)
+{
+	struct set_interface_request *req;
+	struct usb_device *udev = pending_req->stub->udev;
+
+	req = kmalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	req->pending_req = pending_req;
+	req->interface = interface;
+	req->alternate = alternate;
+	INIT_WORK(&req->work, usbbk_set_interface_work);
+	usb_get_dev(udev);
+	schedule_work(&req->work);
+	return 0;
+}
+
+struct clear_halt_request {
+	pending_req_t *pending_req;
+	int pipe;
+	struct work_struct work;
+};
+
+static void usbbk_clear_halt_work(struct work_struct *work)
+{
+	struct clear_halt_request *req = container_of(work, struct clear_halt_request, work);
+	pending_req_t *pending_req = req->pending_req;
+	struct usb_device *udev = req->pending_req->stub->udev;
+	int ret;
+
+	usb_lock_device(udev);
+	ret = usb_clear_halt(req->pending_req->stub->udev, req->pipe);
+	usb_unlock_device(udev);
+	usb_put_dev(udev);
+
+	usbbk_do_response(pending_req, ret, 0, 0, 0);
+	usbif_put(pending_req->usbif);
+	free_req(pending_req);
+	kfree(req);
+}
+
+static int usbbk_clear_halt(pending_req_t *pending_req, int pipe)
+{
+	struct clear_halt_request *req;
+	struct usb_device *udev = pending_req->stub->udev;
+
+	req = kmalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	req->pending_req = pending_req;
+	req->pipe = pipe;
+	INIT_WORK(&req->work, usbbk_clear_halt_work);
+
+	usb_get_dev(udev);
+	schedule_work(&req->work);
+	return 0;
+}
+
+#if 0
+struct port_reset_request {
+	pending_req_t *pending_req;
+	struct work_struct work;
+};
+
+static void usbbk_port_reset_work(struct work_struct *work)
+{
+	struct port_reset_request *req = container_of(work, struct port_reset_request, work);
+	pending_req_t *pending_req = req->pending_req;
+	struct usb_device *udev = pending_req->stub->udev;
+	int ret, ret_lock;
+
+	ret = ret_lock = usb_lock_device_for_reset(udev, NULL);
+	if (ret_lock >= 0) {
+		ret = usb_reset_device(udev);
+		if (ret_lock)
+			usb_unlock_device(udev);
+	}
+	usb_put_dev(udev);
+
+	usbbk_do_response(pending_req, ret, 0, 0, 0);
+	usbif_put(pending_req->usbif);
+	free_req(pending_req);
+	kfree(req);
+}
+
+static int usbbk_port_reset(pending_req_t *pending_req)
+{
+	struct port_reset_request *req;
+	struct usb_device *udev = pending_req->stub->udev;
+
+	req = kmalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->pending_req = pending_req;
+	INIT_WORK(&req->work, usbbk_port_reset_work);
+
+	usb_get_dev(udev);
+	schedule_work(&req->work);
+	return 0;
+}
+#endif
+
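+/*
+ * Track the guest-visible device address: intercepted SET_ADDRESS
+ * requests (re)index the stub in addr_table so that later requests can
+ * be routed by usb_pipedevice(pipe).
+ */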
+static void usbbk_set_address(usbif_t *usbif, struct usbstub *stub, int cur_addr, int new_addr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&usbif->addr_lock, flags);
+	if (cur_addr)
+		usbif->addr_table[cur_addr] = NULL;
+	if (new_addr)
+		usbif->addr_table[new_addr] = stub;
+	stub->addr = new_addr;
+	spin_unlock_irqrestore(&usbif->addr_lock, flags);
+}
+
+struct usbstub *find_attached_device(usbif_t *usbif, int portnum)
+{
+	struct usbstub *stub;
+	int found = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&usbif->stub_list_lock, flags);
+	list_for_each_entry(stub, &usbif->stub_list, dev_list) {
+		if (stub->portid->portnum == portnum) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&usbif->stub_list_lock, flags);
+
+	if (found)
+		return stub;
+
+	return NULL;
+}
+
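+/*
+ * Intercept control requests that need special handling in the backend
+ * (SET_ADDRESS, SET_INTERFACE, CLEAR_FEATURE(ENDPOINT_HALT), ...).
+ * Returns nonzero when the request has been consumed here (a response
+ * was sent or deferred to a workqueue); returns 0 to fall through to
+ * normal URB submission.
+ */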
+static int check_and_submit_special_ctrlreq(usbif_t *usbif, usbif_urb_request_t *req, pending_req_t *pending_req)
+{
+	int devnum;
+	struct usbstub *stub = NULL;
+	struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *) req->u.ctrl;
+	int ret;
+	int done = 0;
+
+	devnum = usb_pipedevice(req->pipe);
+
+	/*
+	 * When a device is first connected or has been reset, it has no
+	 * address yet.  In this initial state, the following requests are
+	 * sent to device address 0:
+	 *
+	 *  1. GET_DESCRIPTOR (with descriptor type DEVICE), so the OS
+	 *     learns what kind of device is connected.
+	 *
+	 *  2. SET_ADDRESS, after which the device has its address.
+	 *
+	 * Next, SET_CONFIGURATION is sent to the addressed device, and the
+	 * device is finally ready for use.
+	 */
+	if (unlikely(devnum == 0)) {
+		stub = find_attached_device(usbif, usbif_pipeportnum(req->pipe));
+		if (unlikely(!stub)) {
+			ret = -ENODEV;
+			goto fail_response;
+		}
+
+		switch (ctrl->bRequest) {
+		case USB_REQ_GET_DESCRIPTOR:
+			/*
+			 * GET_DESCRIPTOR request to device #0.
+			 * Fall through to normal urb transfer.
+			 */
+			pending_req->stub = stub;
+			return 0;
+			break;
+		case USB_REQ_SET_ADDRESS:
+			/*
+			 * SET_ADDRESS request to device #0.
+			 * add attached device to addr_table.
+			 */
+			{
+				__u16 addr = le16_to_cpu(ctrl->wValue);
+				usbbk_set_address(usbif, stub, 0, addr);
+			}
+			ret = 0;
+			goto fail_response;
+			break;
+		default:
+			ret = -EINVAL;
+			goto fail_response;
+		}
+	} else {
+		if (unlikely(!usbif->addr_table[devnum])) {
+			ret = -ENODEV;
+			goto fail_response;
+		}
+		pending_req->stub = usbif->addr_table[devnum];
+	}
+
+	/*
+	 * Check special request
+	 */
+	switch (ctrl->bRequest) {
+	case USB_REQ_SET_ADDRESS:
+		/*
+		 * SET_ADDRESS request to addressed device.
+		 * change addr or remove from addr_table.
+		 */
+		{
+			__u16 addr = le16_to_cpu(ctrl->wValue);
+			usbbk_set_address(usbif, pending_req->stub, devnum, addr);
+		}
+		ret = 0;
+		goto fail_response;
+		break;
+#if 0
+	case USB_REQ_SET_CONFIGURATION:
+		/*
+		 * linux 2.6.27 or later version only!
+		 */
+		if (ctrl->bRequestType == USB_RECIP_DEVICE) {
+			__u16 config = le16_to_cpu(ctrl->wValue);
+			usb_driver_set_configuration(pending_req->stub->udev, config);
+			done = 1;
+		}
+		break;
+#endif
+	case USB_REQ_SET_INTERFACE:
+		if (ctrl->bRequestType == USB_RECIP_INTERFACE) {
+			__u16 alt = le16_to_cpu(ctrl->wValue);
+			__u16 intf = le16_to_cpu(ctrl->wIndex);
+			usbbk_set_interface(pending_req, intf, alt);
+			done = 1;
+		}
+		break;
+	case USB_REQ_CLEAR_FEATURE:
+		if (ctrl->bRequestType == USB_RECIP_ENDPOINT
+			&& ctrl->wValue == USB_ENDPOINT_HALT) {
+			int pipe;
+			int ep = le16_to_cpu(ctrl->wIndex) & 0x0f;
+			int dir = le16_to_cpu(ctrl->wIndex)
+					& USB_DIR_IN;
+			if (dir)
+				pipe = usb_rcvctrlpipe(pending_req->stub->udev, ep);
+			else
+				pipe = usb_sndctrlpipe(pending_req->stub->udev, ep);
+			usbbk_clear_halt(pending_req, pipe);
+			done = 1;
+		}
+		break;
+#if 0 /* not tested yet */
+	case USB_REQ_SET_FEATURE:
+		if (ctrl->bRequestType == USB_RT_PORT) {
+			__u16 feat = le16_to_cpu(ctrl->wValue);
+			if (feat == USB_PORT_FEAT_RESET) {
+				usbbk_port_reset(pending_req);
+				done = 1;
+			}
+		}
+		break;
+#endif
+	default:
+		break;
+	}
+
+	return done;
+
+fail_response:
+	usbbk_do_response(pending_req, ret, 0, 0, 0);
+	usbif_put(usbif);
+	free_req(pending_req);
+	return 1;
+}
+
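+/*
+ * Turn one ring request into a submitted URB: resolve the target stub,
+ * allocate and fill the URB, map the frontend's data segments, copy any
+ * OUT data into the local buffer and submit.  Failures are reported back
+ * to the frontend via usbbk_do_response().
+ */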
+static void dispatch_request_to_pending_reqs(usbif_t *usbif,
+		usbif_urb_request_t *req,
+		pending_req_t *pending_req)
+{
+	int ret;
+
+	pending_req->id = req->id;
+	pending_req->usbif = usbif;
+
+	barrier();
+
+	/*
+	 * TODO:
+	 * receive unlink request and cancel the urb in backend
+	 */
+#if 0
+	if (unlikely(usb_pipeunlink(req->pipe))) {
+
+	}
+#endif
+
+	usbif_get(usbif);
+
+	if (usb_pipecontrol(req->pipe)) {
+		if (check_and_submit_special_ctrlreq(usbif, req, pending_req))
+			return;
+	} else {
+		int devnum = usb_pipedevice(req->pipe);
+		if (unlikely(!usbif->addr_table[devnum])) {
+			ret = -ENODEV;
+			goto fail_response;
+		}
+		pending_req->stub = usbif->addr_table[devnum];
+	}
+
+	barrier();
+
+	ret = usbbk_alloc_urb(req, pending_req);
+	if (ret) {
+		ret = -ESHUTDOWN;
+		goto fail_response;
+	}
+
+	add_req_to_submitting_list(pending_req->stub, pending_req);
+
+	barrier();
+
+	usbbk_init_urb(req, pending_req);
+
+	barrier();
+
+	pending_req->nr_buffer_segs = req->nr_buffer_segs;
+	if (usb_pipeisoc(req->pipe))
+		pending_req->nr_extra_segs = req->u.isoc.nr_frame_desc_segs;
+	else
+		pending_req->nr_extra_segs = 0;
+
+	barrier();
+
+	ret = usbbk_gnttab_map(usbif, req, pending_req);
+	if (ret) {
+		printk(KERN_ERR "usbback: invalid buffer\n");
+		ret = -ESHUTDOWN;
+		goto fail_free_urb;
+	}
+
+	barrier();
+
+	if (usb_pipeout(req->pipe) && req->buffer_length)
+		copy_pages_to_buff(pending_req->buffer,
+					pending_req,
+					0,
+					pending_req->nr_buffer_segs);
+	if (usb_pipeisoc(req->pipe)) {
+		copy_pages_to_buff(&pending_req->urb->iso_frame_desc[0],
+			pending_req,
+			pending_req->nr_buffer_segs,
+			pending_req->nr_extra_segs);
+	}
+
+	barrier();
+
+	ret = usb_submit_urb(pending_req->urb, GFP_KERNEL);
+	if (ret) {
+		printk(KERN_ERR "usbback: failed submitting urb, error %d\n", ret);
+		ret = -ESHUTDOWN;
+		goto fail_flush_area;
+	}
+	return;
+
+fail_flush_area:
+	fast_flush_area(pending_req);
+fail_free_urb:
+	remove_req_from_submitting_list(pending_req->stub, pending_req);
+	barrier();
+	usbbk_free_urb(pending_req);
+fail_response:
+	usbbk_do_response(pending_req, ret, 0, 0, 0);
+	usbif_put(usbif);
+	free_req(pending_req);
+}
+
+static int usbbk_start_submit_urb(usbif_t *usbif)
+{
+	struct usbif_urb_back_ring *urb_ring = &usbif->urb_ring;
+	usbif_urb_request_t *req;
+	pending_req_t *pending_req;
+	RING_IDX rc, rp;
+	int more_to_do = 0;
+
+	rc = urb_ring->req_cons;
+	rp = urb_ring->sring->req_prod;
+	rmb();
+
+	while (rc != rp) {
+		if (RING_REQUEST_CONS_OVERFLOW(urb_ring, rc)) {
+			printk(KERN_WARNING "RING_REQUEST_CONS_OVERFLOW\n");
+			break;
+		}
+
+		pending_req = alloc_req();
+		if (NULL == pending_req) {
+			more_to_do = 1;
+			break;
+		}
+
+		req = RING_GET_REQUEST(urb_ring, rc);
+		urb_ring->req_cons = ++rc;
+
+		dispatch_request_to_pending_reqs(usbif, req,
+							pending_req);
+	}
+
+	RING_FINAL_CHECK_FOR_REQUESTS(&usbif->urb_ring, more_to_do);
+
+	cond_resched();
+
+	return more_to_do;
+}
+
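+/*
+ * Report a port event to the frontend on the conn ring: consume one
+ * pre-posted request and answer it with the port number and device
+ * speed (a speed of 0 signals a disconnect).
+ */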
+void usbbk_hotplug_notify(usbif_t *usbif, int portnum, int speed)
+{
+	struct usbif_conn_back_ring *ring = &usbif->conn_ring;
+	usbif_conn_request_t *req;
+	usbif_conn_response_t *res;
+	unsigned long flags;
+	u16 id;
+	int notify;
+
+	spin_lock_irqsave(&usbif->conn_ring_lock, flags);
+
+	req = RING_GET_REQUEST(ring, ring->req_cons);
+	id = req->id;
+	ring->req_cons++;
+	ring->sring->req_event = ring->req_cons + 1;
+
+	res = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
+	res->id = id;
+	res->portnum = portnum;
+	res->speed = speed;
+	ring->rsp_prod_pvt++;
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
+
+	spin_unlock_irqrestore(&usbif->conn_ring_lock, flags);
+
+	if (notify)
+		notify_remote_via_irq(usbif->irq);
+}
+
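+/*
+ * Per-usbif kernel thread: sleep until the frontend notifies us and a
+ * free pending_req slot is available, then drain the urb ring.
+ */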
+int usbbk_schedule(void *arg)
+{
+	usbif_t *usbif = (usbif_t *) arg;
+
+	usbif_get(usbif);
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(usbif->wq,
+				usbif->waiting_reqs || kthread_should_stop());
+		wait_event_interruptible(pending_free_wq,
+				!list_empty(&pending_free) || kthread_should_stop());
+		usbif->waiting_reqs = 0;
+		smp_mb();
+
+		if (usbbk_start_submit_urb(usbif))
+			usbif->waiting_reqs = 1;
+	}
+
+	usbif->xenusbd = NULL;
+	usbif_put(usbif);
+
+	return 0;
+}
+
+/*
+ * attach usbstub device to usbif.
+ */
+void usbbk_attach_device(usbif_t *usbif, struct usbstub *stub)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&usbif->stub_list_lock, flags);
+	list_add(&stub->dev_list, &usbif->stub_list);
+	spin_unlock_irqrestore(&usbif->stub_list_lock, flags);
+	stub->usbif = usbif;
+}
+
+/*
+ * detach usbstub device from usbif.
+ */
+void usbbk_detach_device(usbif_t *usbif, struct usbstub *stub)
+{
+	unsigned long flags;
+
+	if (stub->addr)
+		usbbk_set_address(usbif, stub, stub->addr, 0);
+	spin_lock_irqsave(&usbif->stub_list_lock, flags);
+	list_del(&stub->dev_list);
+	spin_unlock_irqrestore(&usbif->stub_list_lock, flags);
+	stub->usbif = NULL;
+}
+
+void detach_device_without_lock(usbif_t *usbif, struct usbstub *stub)
+{
+	if (stub->addr)
+		usbbk_set_address(usbif, stub, stub->addr, 0);
+	list_del(&stub->dev_list);
+	stub->usbif = NULL;
+}
+
+static int __init usbback_init(void)
+{
+	int i, mmap_pages;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	if (usbstub_init())
+		return -ENODEV;
+
+	mmap_pages = usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST;
+	pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
+			usbif_reqs, GFP_KERNEL);
+	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
+			mmap_pages, GFP_KERNEL);
+	pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
+
+	if (!pending_reqs || !pending_grant_handles || !pending_pages)
+		goto out_of_memory;
+
+	if (usbback_xenbus_init()) {
+		printk(KERN_ERR "%s: xenbus init failed\n", __FUNCTION__);
+		goto error_path;
+	}
+
+	for (i = 0; i < mmap_pages; i++)
+		pending_grant_handles[i] = USBBACK_INVALID_HANDLE;
+
+	memset(pending_reqs, 0, sizeof(pending_reqs[0]) * usbif_reqs);
+	INIT_LIST_HEAD(&pending_free);
+
+	for (i = 0; i < usbif_reqs; i++) {
+		list_add_tail(&pending_reqs[i].free_list, &pending_free);
+	}
+
+	return 0;
+
+ out_of_memory:
+	printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
+ error_path:
+	usbstub_exit();
+	kfree(pending_reqs);
+	kfree(pending_grant_handles);
+	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
+	return -ENOMEM;
+}
+
+static void __exit usbback_exit(void)
+{
+	usbback_xenbus_exit();
+	usbstub_exit();
+	kfree(pending_reqs);
+	kfree(pending_grant_handles);
+	free_empty_pages_and_pagevec(pending_pages, usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST);
+}
+
+module_init(usbback_init);
+module_exit(usbback_exit);
+
+MODULE_AUTHOR("Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>");
+MODULE_DESCRIPTION("Xen USB backend driver (usbback)");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/xen/usbback/usbback.h b/drivers/xen/usbback/usbback.h
new file mode 100644
index 0000000..fc668ea
--- /dev/null
+++ b/drivers/xen/usbback/usbback.h
@@ -0,0 +1,195 @@
+/*
+ * usbback.h
+ *
+ * This file is part of Xen USB backend driver.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * or, by your choice,
+ *
+ * When distributed separately from the Linux kernel or incorporated into
+ * other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_USBBACK_H__
+#define __XEN_USBBACK_H__
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/vmalloc.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <xen/events.h>
+#include <xen/evtchn.h>
+#include <xen/grant_table.h>
+//#include <xen/driver_util.h>
+#include <xen/interface/xen.h>
+#include <xen/xenbus.h>
+#include <xen/page.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+#include <xen/interface/io/usbif.h>
+
+typedef struct usbif_urb_sring usbif_urb_sring_t;
+typedef struct usbif_conn_sring usbif_conn_sring_t;
+
+struct usbstub;
+
+#define USB_DEV_ADDR_SIZE 128
+
+typedef struct usbif_st {
+	domid_t domid;
+	unsigned int handle;
+	int num_ports;
+
+	struct xenbus_device *xbdev;
+	struct list_head usbif_list;
+
+	unsigned int      irq;
+
+	struct usbif_urb_back_ring urb_ring;
+	struct usbif_conn_back_ring conn_ring;
+	struct vm_struct *urb_ring_area;
+	struct vm_struct *conn_ring_area;
+
+	spinlock_t urb_ring_lock;
+	spinlock_t conn_ring_lock;
+	atomic_t refcnt;
+	
+	grant_handle_t urb_shmem_handle;
+	grant_ref_t urb_shmem_ref;
+	grant_handle_t conn_shmem_handle;
+	grant_ref_t conn_shmem_ref;	
+
+	struct xenbus_watch backend_watch;
+	
+	/* device address lookup table */
+	spinlock_t addr_lock;
+	struct usbstub *addr_table[USB_DEV_ADDR_SIZE];
+
+	/* connected device list */
+	spinlock_t stub_list_lock;
+	struct list_head stub_list;
+
+	/* request schedule */
+	struct task_struct *xenusbd;
+	unsigned int waiting_reqs;
+	wait_queue_head_t waiting_to_free;
+	wait_queue_head_t wq;
+
+} usbif_t;
+
+struct vusb_port_id
+{
+	struct list_head id_list;
+
+	char *phys_bus;
+	domid_t domid;
+	unsigned int handle;
+	int portnum;
+	unsigned is_connected:1;
+};
+
+/* MAX is influenced by USBIF_MAX_SEGMENTS_PER_REQUEST */
+#define DMA_FREELIST_MIN_ORDER 9  /* 512 */
+#define DMA_FREELIST_MAX_ORDER 16 /* 65536 */
+
+#define DMA_FREELIST_SIZE 16
+
+typedef struct
+{
+	short int free_entry;
+	dma_addr_t dma_addr[DMA_FREELIST_SIZE];
+	void *v_addr[DMA_FREELIST_SIZE];
+} dma_freelist_t;
+
+struct usbstub
+{
+	struct kref kref;
+	struct list_head dev_list;
+
+	struct vusb_port_id *portid;
+	struct usb_device *udev;
+	usbif_t *usbif;
+	int addr;
+	spinlock_t submitting_lock;
+	struct list_head submitting_list;
+	dma_freelist_t dma_freelist[DMA_FREELIST_MAX_ORDER - DMA_FREELIST_MIN_ORDER + 1];
+};
+
+usbif_t *usbif_alloc(domid_t domid, unsigned int handle);
+void usbif_disconnect(usbif_t *usbif);
+void usbif_free(usbif_t *usbif);
+int usbif_map(usbif_t *usbif, unsigned long urb_ring_ref,
+		unsigned long conn_ring_ref, unsigned int evtchn);
+
+#define usbif_get(_b) (atomic_inc(&(_b)->refcnt))
+#define usbif_put(_b) \
+	do { \
+		if (atomic_dec_and_test(&(_b)->refcnt)) \
+			wake_up(&(_b)->waiting_to_free); \
+	} while (0)
+
+usbif_t *find_usbif(domid_t domid, unsigned int handle);
+int usbback_xenbus_init(void);
+void usbback_xenbus_exit(void);
+struct vusb_port_id *find_portid_by_busid(const char *busid);
+struct vusb_port_id *find_portid(const domid_t domid,
+						const unsigned int handle,
+						const int portnum);
+int portid_add(const char *busid,
+					const domid_t domid,
+					const unsigned int handle,
+					const int portnum);
+int portid_remove(const domid_t domid,
+					const unsigned int handle,
+					const int portnum);
+irqreturn_t usbbk_be_int(int irq, void *dev_id);
+int usbbk_schedule(void *arg);
+struct usbstub *find_attached_device(usbif_t *usbif, int port);
+void usbbk_attach_device(usbif_t *usbif, struct usbstub *stub);
+void usbbk_detach_device(usbif_t *usbif, struct usbstub *stub);
+void usbbk_hotplug_notify(usbif_t *usbif, int portnum, int speed);
+void detach_device_without_lock(usbif_t *usbif, struct usbstub *stub);
+void usbbk_unlink_urbs(struct usbstub *stub);
+
+int usbstub_init(void);
+void usbstub_exit(void);
+
+#endif /* __XEN_USBBACK_H__ */
diff --git a/drivers/xen/usbback/usbstub.c b/drivers/xen/usbback/usbstub.c
new file mode 100644
index 0000000..aae93de
--- /dev/null
+++ b/drivers/xen/usbback/usbstub.c
@@ -0,0 +1,319 @@
+/*
+ * usbstub.c
+ *
+ * USB stub driver - grabbing and managing USB devices.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * or, by your choice,
+ *
+ * When distributed separately from the Linux kernel or incorporated into
+ * other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "usbback.h"
+
+static LIST_HEAD(port_list);
+static DEFINE_SPINLOCK(port_list_lock);
+
+struct vusb_port_id *find_portid_by_busid(const char *busid)
+{
+	struct vusb_port_id *portid;
+	int found = 0;
+	unsigned long flags;
+	
+	spin_lock_irqsave(&port_list_lock, flags);
+	list_for_each_entry(portid, &port_list, id_list) {
+		if (!(strcmp(portid->phys_bus, busid))) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&port_list_lock, flags);
+
+	if (found)
+		return portid;
+
+	return NULL;	
+}
+
+struct vusb_port_id *find_portid(const domid_t domid,
+						const unsigned int handle,
+						const int portnum)
+{
+	struct vusb_port_id *portid;
+	int found = 0;
+	unsigned long flags;
+	
+	spin_lock_irqsave(&port_list_lock, flags);
+	list_for_each_entry(portid, &port_list, id_list) {
+		if ((portid->domid == domid)
+				&& (portid->handle == handle)
+				&& (portid->portnum == portnum)) {
+				found = 1;
+				break;
+		}
+	}
+	spin_unlock_irqrestore(&port_list_lock, flags);
+
+	if (found)
+		return portid;
+
+	return NULL;	
+}
+
+int portid_add(const char *busid,
+					const domid_t domid,
+					const unsigned int handle,
+					const int portnum)
+{
+	struct vusb_port_id *portid;
+	unsigned long flags;
+
+	portid = kzalloc(sizeof(*portid), GFP_KERNEL);
+	if (!portid)
+		return -ENOMEM;
+
+	portid->domid = domid;
+	portid->handle = handle;
+	portid->portnum = portnum;
+
+	portid->phys_bus = kstrdup(busid, GFP_KERNEL);
+	if (!portid->phys_bus) {
+		kfree(portid);
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&port_list_lock, flags);
+	list_add(&portid->id_list, &port_list);
+	spin_unlock_irqrestore(&port_list_lock, flags);
+
+	return 0;
+}
+
+int portid_remove(const domid_t domid,
+					const unsigned int handle,
+					const int portnum)
+{
+	struct vusb_port_id *portid, *tmp;
+	int err = -ENOENT;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port_list_lock, flags);
+	list_for_each_entry_safe(portid, tmp, &port_list, id_list) {
+		if (portid->domid == domid
+				&& portid->handle == handle
+				&& portid->portnum == portnum) {
+			list_del(&portid->id_list);
+			kfree(portid->phys_bus);
+			kfree(portid);
+
+			err = 0;
+		}
+	}
+	spin_unlock_irqrestore(&port_list_lock, flags);
+
+	return err;
+}
+
+static struct usbstub *usbstub_alloc(struct usb_device *udev,
+						struct vusb_port_id *portid)
+{
+	struct usbstub *stub;
+	int i;
+
+	stub = kzalloc(sizeof(*stub), GFP_KERNEL);
+	if (!stub) {
+		printk(KERN_ERR "usbback: no memory for usbstub allocation\n");
+		return NULL;
+	}
+	kref_init(&stub->kref); 
+	stub->udev = usb_get_dev(udev);
+	stub->portid = portid;
+	spin_lock_init(&stub->submitting_lock);
+	INIT_LIST_HEAD(&stub->submitting_list);
+	for (i = 0; i < DMA_FREELIST_MAX_ORDER - DMA_FREELIST_MIN_ORDER + 1; i++)
+		stub->dma_freelist[i].free_entry = 0;
+
+	return stub;
+}
+
+static void usbstub_release(struct kref *kref)
+{
+	struct usbstub *stub;
+	
+	stub = container_of(kref, struct usbstub, kref);
+	
+	usb_put_dev(stub->udev);
+	stub->udev = NULL;
+	stub->portid = NULL;
+	kfree(stub);
+}
+
+static inline void usbstub_get(struct usbstub *stub)
+{
+	kref_get(&stub->kref);
+}
+
+static inline void usbstub_put(struct usbstub *stub)
+{
+	kref_put(&stub->kref, usbstub_release);
+}
+
+static int usbstub_probe(struct usb_interface *intf,
+		const struct usb_device_id *id)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	char *busid = (char *)dev_name(intf->dev.parent);
+	struct vusb_port_id *portid = NULL;
+	struct usbstub *stub = NULL;
+	usbif_t *usbif = NULL;
+	int retval = -ENODEV;
+	
+	/* hub currently not supported, so skip. */
+	if (udev->descriptor.bDeviceClass ==  USB_CLASS_HUB)
+		goto out;
+
+	if ((portid = find_portid_by_busid(busid))) {
+		usbif = find_usbif(portid->domid, portid->handle);		
+		if (!usbif)
+			goto out;
+		
+		stub = find_attached_device(usbif, portid->portnum);
+		if (!stub) {
+			stub = usbstub_alloc(udev, portid);
+			if (!stub)
+				return -ENOMEM;
+			usbbk_attach_device(usbif, stub);
+			usbbk_hotplug_notify(usbif, portid->portnum, udev->speed);
+		} else {
+			/* 
+			 * If usbstub of the same busid is already connected,
+			 * probe functions might be called from other intf.
+			 */
+			if (strcmp(stub->portid->phys_bus, busid))
+				goto out; /* invalid call */
+		}
+		
+		usbstub_get(stub);
+		usb_set_intfdata(intf, stub);
+		retval = 0;
+	}
+
+out:
+	return retval;
+}
+
+static void usbstub_disconnect(struct usb_interface *intf)
+{
+	struct usbstub *stub
+		= (struct usbstub *) usb_get_intfdata(intf);
+
+	usb_set_intfdata(intf, NULL);
+
+	if (!stub)
+		return;
+
+	if (stub->usbif) {
+		usbbk_hotplug_notify(stub->usbif, stub->portid->portnum, 0);
+		usbbk_detach_device(stub->usbif, stub);
+	}
+	usbbk_unlink_urbs(stub);
+	usbstub_put(stub);
+}
+
+static ssize_t usbstub_show_portids(struct device_driver *driver,
+		char *buf)
+{
+	struct vusb_port_id *portid;
+	size_t count = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port_list_lock, flags);
+	list_for_each_entry(portid, &port_list, id_list) {
+		if (count >= PAGE_SIZE)
+			break;
+		count += scnprintf((char *)buf + count, PAGE_SIZE - count,
+				"%s:%d:%d:%d\n",
+				&portid->phys_bus[0],
+				portid->domid,
+				portid->handle,
+				portid->portnum);
+	}
+	spin_unlock_irqrestore(&port_list_lock, flags);
+
+	return count;
+}
+
+DRIVER_ATTR(port_ids, S_IRUSR, usbstub_show_portids, NULL);
+
+/* table of devices that matches any usbdevice */
+static struct usb_device_id usbstub_table[] = {
+		{ .driver_info = 1 }, /* wildcard, see usb_match_id() */
+		{ } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, usbstub_table);
+
+static struct usb_driver usbback_usb_driver = {
+		.name = "usbback",
+		.probe = usbstub_probe,
+		.disconnect = usbstub_disconnect,
+		.id_table = usbstub_table,
+		.no_dynamic_id = 1,
+};
+
+int __init usbstub_init(void)
+{
+ 	int err;
+
+	err = usb_register(&usbback_usb_driver);
+	if (err < 0) {
+		printk(KERN_ERR "usbback: usb_register failed (error %d)\n", err);
+		goto out;
+	}
+
+	err = driver_create_file(&usbback_usb_driver.drvwrap.driver,
+				&driver_attr_port_ids);
+	if (err)
+		usb_deregister(&usbback_usb_driver);
+
+out:
+	return err;
+}
+
+void __exit usbstub_exit(void)
+{
+	driver_remove_file(&usbback_usb_driver.drvwrap.driver,
+				&driver_attr_port_ids);
+	usb_deregister(&usbback_usb_driver);
+}
diff --git a/drivers/xen/usbback/xenbus.c b/drivers/xen/usbback/xenbus.c
new file mode 100644
index 0000000..536a4f5
--- /dev/null
+++ b/drivers/xen/usbback/xenbus.c
@@ -0,0 +1,318 @@
+/*
+ * xenbus.c
+ *
+ * Xenbus interface for USB backend driver.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * or, by your choice,
+ *
+ * When distributed separately from the Linux kernel or incorporated into
+ * other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+//#include <xen/xenbus.h>
+#include "usbback.h"
+
+static int start_xenusbd(usbif_t *usbif)
+{
+	int err = 0;
+	char name[TASK_COMM_LEN];
+
+	snprintf(name, TASK_COMM_LEN, "usbback.%d.%d",
+		 usbif->domid, usbif->handle);
+	usbif->xenusbd = kthread_run(usbbk_schedule, usbif, name);
+	if (IS_ERR(usbif->xenusbd)) {
+		err = PTR_ERR(usbif->xenusbd);
+		usbif->xenusbd = NULL;
+		xenbus_dev_error(usbif->xbdev, err, "start xenusbd");
+	}
+	return err;
+}
+
+static void backend_changed(struct xenbus_watch *watch,
+			const char **vec, unsigned int len)
+{
+	struct xenbus_transaction xbt;
+	int err;
+	int i;
+	char node[8];
+	char *busid;
+	struct vusb_port_id *portid = NULL;
+	
+	usbif_t *usbif = container_of(watch, usbif_t, backend_watch);
+	struct xenbus_device *dev = usbif->xbdev;
+
+	printk("usbback: backend_changed()\n");
+again:
+	err = xenbus_transaction_start(&xbt);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "starting transaction");
+		return;
+	}
+
+	for (i = 1; i <= usbif->num_ports; i++) {
+		sprintf(node, "port/%d", i);
+		busid = xenbus_read(xbt, dev->nodename, node, NULL);
+		if (IS_ERR(busid)) {
+			err = PTR_ERR(busid);
+			xenbus_dev_fatal(dev, err, "reading port/%d", i);
+			goto abort;
+		}
+		
+		/*
+		 * remove portid, if the port is not connected.
+		 */
+		if (strlen(busid) == 0) {
+			portid = find_portid(usbif->domid, usbif->handle, i);	
+			if (portid) {
+				if (portid->is_connected)
+					xenbus_dev_fatal(dev, err, "can't remove port/%d, unbind first", i);
+				else
+					portid_remove(usbif->domid, usbif->handle, i);
+			}
+			continue; /* never configured, ignore */
+		}
+
+		/*
+		 * add portid, if the port is not yet configured and
+		 * not used by another backend.
+		 */
+		portid = find_portid(usbif->domid, usbif->handle, i);
+		if (portid) {
+			if ((strcmp(portid->phys_bus, busid)))
+				xenbus_dev_fatal(dev, err, "can't add port/%d, remove first", i);
+			else
+				continue; /* already configured, ignore */
+		} else {
+			if (find_portid_by_busid(busid))
+				xenbus_dev_fatal(dev, err, "can't add port/%d, busid already used", i);
+			else
+				portid_add(busid, usbif->domid, usbif->handle, i);
+		}
+
+		printk("port/%d: %s\n", i, busid); /* debug */
+
+	}
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err == -EAGAIN)
+		goto again;
+	if (err)
+		xenbus_dev_fatal(dev, err, "completing transaction");
+
+	return;
+
+abort:
+	xenbus_transaction_end(xbt, 1);
+	
+	return;
+}
+
+static int usbback_remove(struct xenbus_device *dev)
+{
+	usbif_t *usbif = dev->dev.driver_data;
+	int i;
+
+	if (!usbif)
+		return 0;
+
+	if (usbif->backend_watch.node) {
+		unregister_xenbus_watch(&usbif->backend_watch);
+		kfree(usbif->backend_watch.node);
+		usbif->backend_watch.node = NULL;
+	}
+
+	/* remove all ports */
+	for (i = 1; i <= usbif->num_ports; i++)
+		portid_remove(usbif->domid, usbif->handle, i);
+
+	usbif_disconnect(usbif);
+	usbif_free(usbif);
+
+	dev->dev.driver_data = NULL;
+
+	return 0;
+}
+
+static int usbback_probe(struct xenbus_device *dev,
+			  const struct xenbus_device_id *id)
+{
+	usbif_t *usbif;
+	unsigned int handle;
+	int num_ports;
+	int err;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	handle = simple_strtoul(strrchr(dev->otherend, '/') + 1, NULL, 0);
+	usbif = usbif_alloc(dev->otherend_id, handle);
+	if (!usbif) {
+		xenbus_dev_fatal(dev, -ENOMEM, "allocating backend interface");
+		return -ENOMEM;
+	}
+	usbif->xbdev = dev;
+	dev->dev.driver_data = usbif;
+
+	err = xenbus_scanf(XBT_NIL, dev->nodename,
+					"num-ports", "%d", &num_ports);
+	if (err != 1) {
+		xenbus_dev_fatal(dev, err, "reading num-ports");
+		goto fail;
+	}
+	if (num_ports < 1 || num_ports > USB_MAXCHILDREN) {
+		xenbus_dev_fatal(dev, err, "invalid num-ports");
+		goto fail;
+	}
+	usbif->num_ports = num_ports;
+	
+	err = xenbus_switch_state(dev, XenbusStateInitWait);
+	if (err)
+		goto fail;
+
+	return 0;
+
+fail:
+	usbback_remove(dev);
+	return err;
+}
+
+static int connect_rings(usbif_t *usbif)
+{
+	struct xenbus_device *dev = usbif->xbdev;
+	unsigned long urb_ring_ref;
+	unsigned long conn_ring_ref;
+	unsigned int evtchn;
+	int err;
+
+	err = xenbus_gather(XBT_NIL, dev->otherend,
+			    "urb-ring-ref", "%lu", &urb_ring_ref,
+			    "conn-ring-ref", "%lu", &conn_ring_ref,
+			    "event-channel", "%u", &evtchn, NULL);
+	if (err) {
+		xenbus_dev_fatal(dev, err,
+				 "reading %s/ring-ref and event-channel",
+				 dev->otherend);
+		return err;
+	}
+
+	printk("usbback: urb-ring-ref %lu, conn-ring-ref %lu, event-channel %u\n",
+	       urb_ring_ref, conn_ring_ref, evtchn);
+
+	err = usbif_map(usbif, urb_ring_ref, conn_ring_ref, evtchn);
+	if (err) {
+		xenbus_dev_fatal(dev, err,
+				"mapping urb-ring-ref %lu conn-ring-ref %lu evtchn %u",
+				urb_ring_ref, conn_ring_ref, evtchn);
+		return err;
+	}
+
+	return 0;
+}
+
+static void frontend_changed(struct xenbus_device *dev,
+				     enum xenbus_state frontend_state)
+{
+	usbif_t *usbif = dev->dev.driver_data;
+	int err;
+
+	switch (frontend_state) {
+	case XenbusStateInitialised:
+		break;
+
+	case XenbusStateInitialising:
+		if (dev->state == XenbusStateClosed) {
+			printk("%s: %s: prepare for reconnect\n",
+			       __func__, dev->nodename);
+			xenbus_switch_state(dev, XenbusStateInitWait);
+		}
+		break;
+
+	case XenbusStateConnected:
+		if (dev->state == XenbusStateConnected)
+			break;
+		err = connect_rings(usbif);
+		if (err)
+			break;
+		err = start_xenusbd(usbif);
+		if (err)
+			break;
+		err = xenbus_watch_pathfmt(dev, &usbif->backend_watch, backend_changed,
+					"%s/%s", dev->nodename, "port");
+		if (err)
+			break;
+		xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateClosing:
+		usbif_disconnect(usbif);
+		xenbus_switch_state(dev, XenbusStateClosing);
+		break;
+
+	case XenbusStateClosed:
+		xenbus_switch_state(dev, XenbusStateClosed);
+		break;
+
+	case XenbusStateUnknown:
+		device_unregister(&dev->dev);
+		break;
+
+	default:
+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+				 frontend_state);
+		break;
+	}
+}
+
+static const struct xenbus_device_id usbback_ids[] = {
+	{ "vusb" },
+	{ "" },
+};
+
+static struct xenbus_driver usbback_driver = {
+	.name = "vusb",
+	.owner = THIS_MODULE,
+	.ids = usbback_ids,
+	.probe = usbback_probe,
+	.otherend_changed = frontend_changed,
+	.remove = usbback_remove,
+};
+
+int usbback_xenbus_init(void)
+{
+	return xenbus_register_backend(&usbback_driver);
+}
+
+void usbback_xenbus_exit(void)
+{
+	xenbus_unregister_driver(&usbback_driver);
+}
diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
new file mode 100644
index 0000000..3ce2914
--- /dev/null
+++ b/include/xen/interface/io/vscsiif.h
@@ -0,0 +1,105 @@
+/******************************************************************************
+ * vscsiif.h
+ * 
+ * Based on the blkif.h code.
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright(c) FUJITSU Limited 2008.
+ */
+
+#ifndef __XEN__PUBLIC_IO_SCSI_H__
+#define __XEN__PUBLIC_IO_SCSI_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/* command between backend and frontend */
+#define VSCSIIF_ACT_SCSI_CDB         1    /* SCSI CDB command */
+#define VSCSIIF_ACT_SCSI_ABORT       2    /* SCSI Device(Lun) Abort */
+#define VSCSIIF_ACT_SCSI_RESET       3    /* SCSI Device(Lun) Reset */
+
+
+#define VSCSIIF_BACK_MAX_PENDING_REQS    128
+
+/*
+ * Maximum scatter/gather segments per request.
+ *
+ * Considering the balance between fitting at least 16 "vscsiif_request"
+ * structures on one page (4096 bytes) and the number of scatter-gather
+ * segments needed, we decided to use 26 as a magic number.
+ */
+#define VSCSIIF_SG_TABLESIZE             26
+
+/*
+ * based on Linux kernel 2.6.18
+ */
+#define VSCSIIF_MAX_COMMAND_SIZE         16
+#define VSCSIIF_SENSE_BUFFERSIZE         96
+
+
+struct vscsiif_request {
+    uint16_t rqid;          /* private guest value, echoed in resp  */
+    uint8_t act;            /* command between backend and frontend */
+    uint8_t cmd_len;
+
+    uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
+    uint16_t timeout_per_command;     /* The backend issues the command
+                                         with twice this timeout value. */
+    uint16_t channel, id, lun;
+    uint16_t padding;
+    uint8_t sc_data_direction;        /* for DMA_TO_DEVICE(1)
+                                         DMA_FROM_DEVICE(2)
+                                         DMA_NONE(3) requests  */
+    uint8_t nr_segments;              /* Number of pieces of scatter-gather */
+
+    struct scsiif_request_segment {
+        grant_ref_t gref;
+        uint16_t offset;
+        uint16_t length;
+    } seg[VSCSIIF_SG_TABLESIZE];
+    uint32_t reserved[3];
+};
+typedef struct vscsiif_request vscsiif_request_t;
+
+struct vscsiif_response {
+    uint16_t rqid;
+    uint8_t padding;
+    uint8_t sense_len;
+    uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+    int32_t rslt;
+    uint32_t residual_len;     /* request bufflen minus the bytes actually
+                                  transferred, as reported by the device */
+    uint32_t reserved[36];
+};
+typedef struct vscsiif_response vscsiif_response_t;
+
+DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
+
+
+#endif  /*__XEN__PUBLIC_IO_SCSI_H__*/
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel


* Re: first attempt of pvscai backend driver port to pvops
  2010-03-11 23:29 first attempt of pvscai backend driver port to pvops James Harper
@ 2010-03-12  0:09 ` Jeremy Fitzhardinge
  2010-03-12  0:37   ` James Harper
  0 siblings, 1 reply; 4+ messages in thread
From: Jeremy Fitzhardinge @ 2010-03-12  0:09 UTC (permalink / raw)
  To: James Harper; +Cc: xen-devel

On 03/11/2010 03:29 PM, James Harper wrote:
> Attached is my first attempt at getting the backend pvscsi driver
> (scsiback) working under the latest pvops kernel. I have tested it with
> the scsi frontend driver in gplpv, and have restored about 20GB of data
> from a HP LTO3 tape drive successfully, so it appears to be working
> fine.
>
> There are quite a few changes compared to the version in 2.6.18. The
> main change is that I get the vaddr for each sg element and use the
> regular dma calls to make it work instead of all the magic that the
> 2.6.18 version used to use that isn't exported anymore.
>
> I created a new branch in a local git repo, what git magic do I need to
> weave to make the patch look a bit neater?
>    

It does need some cleaning up.  Run it through 
linux/scripts/checkpatch.pl and iterate until it's fairly clean (as clean
as possible without making it barbarous).  Most obvious is that kernel 
code uses 8-width hard tabs.
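
For example, something along these lines, run from wherever the tree is
checked out (the patch file name is just a placeholder for however you saved
the diff):

    perl linux/scripts/checkpatch.pl scsiback-port.patch

It prints a WARNING or ERROR for each style problem with the offending line,
so it's easy to fix them one at a time and re-run it.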

The patch also seems to contain usbback.  Did you mean to include it?  
Either way it shouldn't be part of the scsi patches.
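
If it crept in by accident and you have patchutils installed, something like
this should strip those files back out of the combined diff (untested, adjust
the pattern and file names to taste):

    filterdiff -p1 -x 'drivers/xen/usbback/*' combined.patch > scsiback-only.patch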

If possible and sensible, split the patch into logically separate parts 
(like introduce infrastructure, use infrastructure for a, for b, for 
c).  However for these kinds of "new subsystem" patches, it often makes 
most sense to just have one large patch adding the new code.
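
One possible way to do that, assuming the work lives on a local topic branch
(the branch and base names below are only examples):

    git rebase -i xen/master    # reorder/squash/mark commits for editing
    git reset HEAD^             # to split a commit: pop it, keep the changes
    git add -p                  # stage one logical piece at a time
    git commit -s               # repeat add -p / commit until the tree is clean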

Please base the patches on (ideally) 2.6.31 or .32.  If you need some 
Xen infrastructure (which is likely), then base the patch on the 
xen/dom0/backend/common branch.
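
Roughly, assuming the remote for that tree is called "xen" and the work is on
a local branch called "scsiback" (both names are placeholders):

    git fetch xen
    git rebase --onto xen/dom0/backend/common xen/master scsiback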

Mail your patches to the list for review (but, please, inlined plain 
text).  But the easiest way for me to include the patches is if I can 
pull them from your git server.  If you can't make your own machine 
accessible, you can set up an account on github.com, or ask for an 
account on kernel.org.
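
A rough sketch of both options (the output directory, branch names and repo
URL below are placeholders):

    git format-patch --cover-letter -o outgoing xen/dom0/backend/common..scsiback
    git send-email --to xen-devel@lists.xensource.com outgoing/*.patch

    # or, once the branch is on a public server:
    git request-pull xen/dom0/backend/common git://example.org/linux-2.6.git scsiback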

I haven't actually looked at the substance of the patch yet.

Thanks,
     J


* RE: first attempt of pvscai backend driver port to pvops
  2010-03-12  0:09 ` Jeremy Fitzhardinge
@ 2010-03-12  0:37   ` James Harper
  2010-04-14 17:20     ` first attempt of pvscsi " Pasi Kärkkäinen
  0 siblings, 1 reply; 4+ messages in thread
From: James Harper @ 2010-03-12  0:37 UTC (permalink / raw)
  To: Jeremy Fitzhardinge; +Cc: xen-devel

> 
> It does need some cleaning up.  Run it through
> linux/scripts/checkpatch.pl and iterate until it's fairly clean (as clean
> as possible without making it barbarous).  Most obvious is that kernel
> code uses 8-width hard tabs.
> 
> The patch also seems to contain usbback.  Did you mean to include it?

I sure didn't. Not sure how that got there...
 
> If possible and sensible, split the patch into logically separate parts
> (like introduce infrastructure, use infrastructure for a, for b, for
> c).  However for these kinds of "new subsystem" patches, it often makes
> most sense to just have one large patch adding the new code.
> 
> Please base the patches on (ideally) 2.6.31 or .32.  If you need some
> Xen infrastructure (which is likely), then base the patch on the
> xen/dom0/backend/common branch.

They are based on xen/master at the moment.

> Mail your patches to the list for review (but, please, inlined plain
> text).  But the easiest way for me to include the patches is if I can
> pull them from your git server.  If you can't make your own machine
> accessible, you can set up an account on github.com, or ask for an
> account on kernel.org.

I'll look at doing that.

> 
> I haven't actually looked at the substance of the patch yet.
> 

Probably best not to bother until I've cleaned up some of the above.

Thanks

James


* Re: first attempt of pvscsi backend driver port to pvops
  2010-03-12  0:37   ` James Harper
@ 2010-04-14 17:20     ` Pasi Kärkkäinen
  0 siblings, 0 replies; 4+ messages in thread
From: Pasi Kärkkäinen @ 2010-04-14 17:20 UTC (permalink / raw)
  To: James Harper; +Cc: Jeremy Fitzhardinge, xen-devel

On Fri, Mar 12, 2010 at 11:37:05AM +1100, James Harper wrote:
> > 
> > It does need some cleaning up.  Run it through
> > linux/scripts/checkpatch.pl and iterate until it's fairly clean (as clean
> > as possible without making it barbarous).  Most obvious is that kernel
> > code uses 8-width hard tabs.
> > 
> > The patch also seems to contain usbback.  Did you mean to include it?
> 
> I sure didn't. Not sure how that got there...
>  
> > If possible and sensible, split the patch into logically separate parts
> > (like introduce infrastructure, use infrastructure for a, for b, for
> > c).  However for these kinds of "new subsystem" patches, it often makes
> > most sense to just have one large patch adding the new code.
> > 
> > Please base the patches on (ideally) 2.6.31 or .32.  If you need some
> > Xen infrastructure (which is likely), then base the patch on the
> > xen/dom0/backend/common branch.
> 
> They are based on xen/master at the moment.
> 
> > Mail your patches to the list for review (but, please, inlined plain
> > text).  But the easiest way for me to include the patches is if I can
> > pull them from your git server.  If you can't make your own machine
> > accessible, you can set up an account on github.com, or ask for an
> > account on kernel.org.
> 
> I'll look at doing that.
> 
> > 
> > I haven't actually looked at the substance of the patch yet.
> > 
> 
> Probably best not to bother until I've cleaned up some of the above.
> 

Did you get it working? :)

-- Pasi

