All of lore.kernel.org
 help / color / mirror / Atom feed
* Add XEN pvSCSI support
@ 2014-06-27 14:34 jgross
  2014-06-27 14:34 ` [PATCH 1/4] Add XEN pvSCSI protocol description jgross
                   ` (8 more replies)
  0 siblings, 9 replies; 34+ messages in thread
From: jgross @ 2014-06-27 14:34 UTC (permalink / raw)
  To: JBottomley, linux-scsi, xen-devel

This series adds XEN pvSCSI support. With pvSCSI it is possible to use physical
SCSI devices from a XEN domain.

The support consists of a backend in the privileged Domain-0 doing the real
I/O and a frontend in the unprivileged domU passing I/O-requests to the backend.

The code is taken (and adapted) from the original pvSCSI implementation done
for Linux 2.6 in 2008 by Fujitsu.

[PATCH 1/4] Add XEN pvSCSI protocol description
[PATCH 2/4] Introduce xen-scsifront module
[PATCH 3/4] Introduce XEN scsiback module
[PATCH 4/4] add xen pvscsi maintainer

^ permalink raw reply	[flat|nested] 34+ messages in thread

* [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-27 14:34 Add XEN pvSCSI support jgross
@ 2014-06-27 14:34 ` jgross
  2014-06-27 17:11   ` Konrad Rzeszutek Wilk
  2014-06-27 17:11   ` [Xen-devel] " Konrad Rzeszutek Wilk
  2014-06-27 14:34 ` jgross
                   ` (7 subsequent siblings)
  8 siblings, 2 replies; 34+ messages in thread
From: jgross @ 2014-06-27 14:34 UTC (permalink / raw)
  To: JBottomley, linux-scsi, xen-devel; +Cc: Juergen Gross

From: Juergen Gross <jgross@suse.com>

Add the definition of pvSCSI protocol used between the pvSCSI frontend in a
XEN domU and the pvSCSI backend in a XEN driver domain (usually Dom0).

This header was originally provided by Fujitsu for XEN based on Linux 2.6.18.
Changes are:
- added comment
- adapted to Linux style guide

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 include/xen/interface/io/vscsiif.h | 110 +++++++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)
 create mode 100644 include/xen/interface/io/vscsiif.h

diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
new file mode 100644
index 0000000..f8420f3
--- /dev/null
+++ b/include/xen/interface/io/vscsiif.h
@@ -0,0 +1,110 @@
+/******************************************************************************
+ * vscsiif.h
+ *
+ * Based on the blkif.h code.
+ *
+ * This interface is to be regarded as a stable API between XEN domains
+ * running potentially different Linux kernel versions.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright(c) FUJITSU Limited 2008.
+ */
+
+#ifndef __XEN__PUBLIC_IO_SCSI_H__
+#define __XEN__PUBLIC_IO_SCSI_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/* commands between backend and frontend */
+#define VSCSIIF_ACT_SCSI_CDB		1	/* SCSI CDB command */
+#define VSCSIIF_ACT_SCSI_ABORT		2	/* SCSI Device(Lun) Abort */
+#define VSCSIIF_ACT_SCSI_RESET		3	/* SCSI Device(Lun) Reset */
+#define VSCSIIF_ACT_SCSI_SG_PRESET	4	/* Preset SG elements */
+
+/*
+ * Maximum scatter/gather segments per request.
+ *
+ * Considering balance between allocating at least 16 "vscsiif_request"
+ * structures on one page (4096 bytes) and the number of scatter/gather
+ * elements needed, we decided to use 26 as a magic number.
+ */
+#define VSCSIIF_SG_TABLESIZE		26
+
+/*
+ * based on Linux kernel 2.6.18
+ */
+#define VSCSIIF_MAX_COMMAND_SIZE	16
+#define VSCSIIF_SENSE_BUFFERSIZE	96
+
+struct scsiif_request_segment {
+	grant_ref_t gref;
+	uint16_t offset;
+	uint16_t length;
+};
+typedef struct scsiif_request_segment vscsiif_segment_t;
+
+struct vscsiif_request {
+	uint16_t rqid;		/* private guest value, echoed in resp  */
+	uint8_t act;		/* command between backend and frontend */
+	uint8_t cmd_len;
+
+	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
+	uint16_t timeout_per_command;	/* The command is issued by twice
+					   the value in Backend. */
+	uint16_t channel, id, lun;
+	uint16_t padding;
+	uint8_t sc_data_direction;	/* for DMA_TO_DEVICE(1)
+					   DMA_FROM_DEVICE(2)
+					   DMA_NONE(3) requests */
+	uint8_t nr_segments;		/* Number of pieces of scatter-gather */
+
+	vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE];
+	uint32_t reserved[3];
+};
+typedef struct vscsiif_request vscsiif_request_t;
+
+#define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) /		\
+				sizeof(vscsiif_segment_t))
+
+struct vscsiif_sg_list {
+	/* First two fields must match struct vscsiif_request! */
+	uint16_t rqid;		/* private guest value, must match main req */
+	uint8_t act;		/* VSCSIIF_ACT_SCSI_SG_PRESET */
+	uint8_t nr_segments;	/* Number of pieces of scatter-gather */
+	vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE];
+};
+typedef struct vscsiif_sg_list vscsiif_sg_list_t;
+
+struct vscsiif_response {
+	uint16_t rqid;
+	uint8_t act;		/* valid only when backend supports SG_PRESET */
+	uint8_t sense_len;
+	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+	int32_t rslt;
+	uint32_t residual_len;	/* request bufflen -
+				   return the value from physical device */
+	uint32_t reserved[36];
+};
+typedef struct vscsiif_response vscsiif_response_t;
+
+DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
+
+#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
-- 
1.8.4.5


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-27 14:34 Add XEN pvSCSI support jgross
  2014-06-27 14:34 ` [PATCH 1/4] Add XEN pvSCSI protocol description jgross
@ 2014-06-27 14:34 ` jgross
  2014-06-27 14:34 ` [PATCH 2/4] Introduce xen-scsifront module jgross
                   ` (6 subsequent siblings)
  8 siblings, 0 replies; 34+ messages in thread
From: jgross @ 2014-06-27 14:34 UTC (permalink / raw)
  To: JBottomley, linux-scsi, xen-devel; +Cc: Juergen Gross

From: Juergen Gross <jgross@suse.com>

Add the definition of pvSCSI protocol used between the pvSCSI frontend in a
XEN domU and the pvSCSI backend in a XEN driver domain (usually Dom0).

This header was originally provided by Fujitsu for XEN based on Linux 2.6.18.
Changes are:
- added comment
- adapted to Linux style guide

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 include/xen/interface/io/vscsiif.h | 110 +++++++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)
 create mode 100644 include/xen/interface/io/vscsiif.h

diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
new file mode 100644
index 0000000..f8420f3
--- /dev/null
+++ b/include/xen/interface/io/vscsiif.h
@@ -0,0 +1,110 @@
+/******************************************************************************
+ * vscsiif.h
+ *
+ * Based on the blkif.h code.
+ *
+ * This interface is to be regarded as a stable API between XEN domains
+ * running potentially different Linux kernel versions.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright(c) FUJITSU Limited 2008.
+ */
+
+#ifndef __XEN__PUBLIC_IO_SCSI_H__
+#define __XEN__PUBLIC_IO_SCSI_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/* commands between backend and frontend */
+#define VSCSIIF_ACT_SCSI_CDB		1	/* SCSI CDB command */
+#define VSCSIIF_ACT_SCSI_ABORT		2	/* SCSI Device(Lun) Abort */
+#define VSCSIIF_ACT_SCSI_RESET		3	/* SCSI Device(Lun) Reset */
+#define VSCSIIF_ACT_SCSI_SG_PRESET	4	/* Preset SG elements */
+
+/*
+ * Maximum scatter/gather segments per request.
+ *
+ * Considering balance between allocating at least 16 "vscsiif_request"
+ * structures on one page (4096 bytes) and the number of scatter/gather
+ * elements needed, we decided to use 26 as a magic number.
+ */
+#define VSCSIIF_SG_TABLESIZE		26
+
+/*
+ * based on Linux kernel 2.6.18
+ */
+#define VSCSIIF_MAX_COMMAND_SIZE	16
+#define VSCSIIF_SENSE_BUFFERSIZE	96
+
+struct scsiif_request_segment {
+	grant_ref_t gref;
+	uint16_t offset;
+	uint16_t length;
+};
+typedef struct scsiif_request_segment vscsiif_segment_t;
+
+struct vscsiif_request {
+	uint16_t rqid;		/* private guest value, echoed in resp  */
+	uint8_t act;		/* command between backend and frontend */
+	uint8_t cmd_len;
+
+	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
+	uint16_t timeout_per_command;	/* The command is issued by twice
+					   the value in Backend. */
+	uint16_t channel, id, lun;
+	uint16_t padding;
+	uint8_t sc_data_direction;	/* for DMA_TO_DEVICE(1)
+					   DMA_FROM_DEVICE(2)
+					   DMA_NONE(3) requests */
+	uint8_t nr_segments;		/* Number of pieces of scatter-gather */
+
+	vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE];
+	uint32_t reserved[3];
+};
+typedef struct vscsiif_request vscsiif_request_t;
+
+#define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) /		\
+				sizeof(vscsiif_segment_t))
+
+struct vscsiif_sg_list {
+	/* First two fields must match struct vscsiif_request! */
+	uint16_t rqid;		/* private guest value, must match main req */
+	uint8_t act;		/* VSCSIIF_ACT_SCSI_SG_PRESET */
+	uint8_t nr_segments;	/* Number of pieces of scatter-gather */
+	vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE];
+};
+typedef struct vscsiif_sg_list vscsiif_sg_list_t;
+
+struct vscsiif_response {
+	uint16_t rqid;
+	uint8_t act;		/* valid only when backend supports SG_PRESET */
+	uint8_t sense_len;
+	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+	int32_t rslt;
+	uint32_t residual_len;	/* request bufflen -
+				   return the value from physical device */
+	uint32_t reserved[36];
+};
+typedef struct vscsiif_response vscsiif_response_t;
+
+DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
+
+#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
-- 
1.8.4.5

^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [PATCH 2/4] Introduce xen-scsifront module
  2014-06-27 14:34 Add XEN pvSCSI support jgross
                   ` (2 preceding siblings ...)
  2014-06-27 14:34 ` [PATCH 2/4] Introduce xen-scsifront module jgross
@ 2014-06-27 14:34 ` jgross
  2014-06-30 13:35   ` David Vrabel
  2014-06-30 13:35   ` [Xen-devel] " David Vrabel
  2014-06-27 14:34 ` [PATCH 3/4] Introduce XEN scsiback module jgross
                   ` (4 subsequent siblings)
  8 siblings, 2 replies; 34+ messages in thread
From: jgross @ 2014-06-27 14:34 UTC (permalink / raw)
  To: JBottomley, linux-scsi, xen-devel; +Cc: Juergen Gross

From: Juergen Gross <jgross@suse.com>

Introduces the XEN pvSCSI frontend. With pvSCSI it is possible for a XEN domU
to issue SCSI commands to a SCSI LUN assigned to that domU. The SCSI commands
are passed to the pvSCSI backend in a driver domain (usually Dom0) which is
owner of the physical device. This allows e.g. to use SCSI tape drives in a
XEN domU.

The code is taken from the pvSCSI implementation in XEN done by Fujitsu based
on Linux kernel 2.6.18.

Changes from the original version are:
- port to upstream kernel
- put all code in just one source file
- move module to appropriate location in kernel tree
- adapt to Linux style guide
- some minor code simplifications
- replace constants with defines
- remove not used defines

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 drivers/scsi/Kconfig         |   7 +
 drivers/scsi/Makefile        |   1 +
 drivers/scsi/xen-scsifront.c | 936 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 944 insertions(+)
 create mode 100644 drivers/scsi/xen-scsifront.c

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index baca589..734d691 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -611,6 +611,13 @@ config VMWARE_PVSCSI
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmw_pvscsi.
 
+config XEN_SCSI_FRONTEND
+	tristate "XEN SCSI frontend driver"
+	depends on SCSI && XEN
+	help
+	  The XEN SCSI frontend driver allows the kernel to access SCSI devices
+	  within another guest OS.
+
 config HYPERV_STORAGE
 	tristate "Microsoft Hyper-V virtual storage driver"
 	depends on SCSI && HYPERV
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e172d4f..a4ee9c5 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -144,6 +144,7 @@ obj-$(CONFIG_SCSI_ESAS2R)	+= esas2r/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
 obj-$(CONFIG_SCSI_VIRTIO)	+= virtio_scsi.o
 obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
+obj-$(CONFIG_XEN_SCSI_FRONTEND)	+= xen-scsifront.o
 obj-$(CONFIG_HYPERV_STORAGE)	+= hv_storvsc.o
 
 obj-$(CONFIG_ARM)		+= arm/
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
new file mode 100644
index 0000000..b60cc72
--- /dev/null
+++ b/drivers/scsi/xen-scsifront.c
@@ -0,0 +1,936 @@
+/*
+ * Xen SCSI frontend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+ * Patched to support >2TB drives
+ * 2010, Samuel Kvasnica, IMS Nanofabrication AG
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/vscsiif.h>
+#include <xen/interface/io/protocols.h>
+
+#include <asm/xen/hypervisor.h>
+
+
+#define GRANT_INVALID_REF	0
+
+#define VSCSIFRONT_OP_ADD_LUN	1
+#define VSCSIFRONT_OP_DEL_LUN	2
+
+#define DEFAULT_TASK_COMM_LEN	TASK_COMM_LEN
+
+/* tuning point */
+#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
+#define VSCSIIF_MAX_TARGET          64
+#define VSCSIIF_MAX_LUN             255
+
+#define VSCSIIF_RING_SIZE	__CONST_RING_SIZE(vscsiif, PAGE_SIZE)
+#define VSCSIIF_MAX_REQS	VSCSIIF_RING_SIZE
+
+struct vscsifrnt_shadow {
+	uint16_t next_free;
+
+	/* command between backend and frontend
+	 * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */
+	unsigned char act;
+
+	/* Number of pieces of scatter-gather */
+	unsigned int nr_segments;
+
+	/* do reset function */
+	wait_queue_head_t wq_reset;	/* reset work queue           */
+	int wait_reset;			/* reset work queue condition */
+	int32_t rslt_reset;		/* reset response status      */
+					/* (SUCCESS or FAILED)        */
+
+	/* requested struct scsi_cmnd is stored from kernel */
+	struct scsi_cmnd *sc;
+	int gref[VSCSIIF_SG_TABLESIZE];
+};
+
+struct vscsifrnt_info {
+	struct xenbus_device *dev;
+
+	struct Scsi_Host *host;
+
+	spinlock_t shadow_lock;
+	unsigned int evtchn;
+	unsigned int irq;
+
+	grant_ref_t ring_ref;
+	struct vscsiif_front_ring ring;
+	struct vscsiif_response	ring_res;
+
+	struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS];
+	uint32_t shadow_free;
+
+	struct task_struct *kthread;
+	wait_queue_head_t wq;
+	wait_queue_head_t wq_sync;
+	unsigned int waiting_resp:1;
+	unsigned int waiting_sync:1;
+};
+
+#define DPRINTK(_f, _a...)				\
+	pr_debug("(file=%s, line=%d) " _f, __FILE__ , __LINE__ , ## _a )
+
+#define PREFIX(lvl) KERN_##lvl "scsifront: "
+
+static int get_id_from_freelist(struct vscsifrnt_info *info)
+{
+	unsigned long flags;
+	uint32_t free;
+
+	spin_lock_irqsave(&info->shadow_lock, flags);
+
+	free = info->shadow_free;
+	BUG_ON(free >= VSCSIIF_MAX_REQS);
+	info->shadow_free = info->shadow[free].next_free;
+	info->shadow[free].next_free = VSCSIIF_MAX_REQS;
+	info->shadow[free].wait_reset = 0;
+
+	spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+	return free;
+}
+
+static void _add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
+{
+	info->shadow[id].next_free = info->shadow_free;
+	info->shadow[id].sc = NULL;
+	info->shadow_free = id;
+}
+
+static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->shadow_lock, flags);
+	_add_id_to_freelist(info, id);
+	spin_unlock_irqrestore(&info->shadow_lock, flags);
+}
+
+static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+{
+	struct vscsiif_front_ring *ring = &(info->ring);
+	vscsiif_request_t *ring_req;
+	uint32_t id;
+
+	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+
+	ring->req_prod_pvt++;
+
+	id = get_id_from_freelist(info);	/* use id by response */
+	ring_req->rqid = (uint16_t)id;
+
+	return ring_req;
+}
+
+static void scsifront_notify_work(struct vscsifrnt_info *info)
+{
+	info->waiting_resp = 1;
+	wake_up(&info->wq);
+}
+
+static void scsifront_do_request(struct vscsifrnt_info *info)
+{
+	struct vscsiif_front_ring *ring = &(info->ring);
+	int notify;
+
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
+	if (notify)
+		notify_remote_via_irq(info->irq);
+}
+
+static irqreturn_t scsifront_intr(int irq, void *dev_id)
+{
+	scsifront_notify_work((struct vscsifrnt_info *)dev_id);
+	return IRQ_HANDLED;
+}
+
+static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+{
+	struct vscsifrnt_shadow *s = &info->shadow[id];
+	int i;
+
+	if (s->sc->sc_data_direction == DMA_NONE)
+		return;
+
+	for (i = 0; i < s->nr_segments; i++) {
+		if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+			shost_printk(PREFIX(ALERT), info->host,
+				     "grant still in use by backend\n");
+			BUG();
+		}
+		gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+	}
+}
+
+static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
+				   vscsiif_response_t *ring_res)
+{
+	struct scsi_cmnd *sc;
+	uint32_t id;
+	uint8_t sense_len;
+
+	id = ring_res->rqid;
+	sc = info->shadow[id].sc;
+
+	BUG_ON(sc == NULL);
+
+	scsifront_gnttab_done(info, id);
+	add_id_to_freelist(info, id);
+
+	sc->result = ring_res->rslt;
+	scsi_set_resid(sc, ring_res->residual_len);
+
+	sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE, ring_res->sense_len);
+
+	if (sense_len)
+		memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len);
+
+	sc->scsi_done(sc);
+
+	return;
+}
+
+static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
+				    vscsiif_response_t *ring_res)
+{
+	uint16_t id = ring_res->rqid;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->shadow_lock, flags);
+	info->shadow[id].wait_reset = 1;
+	switch (info->shadow[id].rslt_reset) {
+	case 0:
+		info->shadow[id].rslt_reset = ring_res->rslt;
+		break;
+	case -1:
+		_add_id_to_freelist(info, id);
+		break;
+	default:
+		shost_printk(PREFIX(ERR), info->host,
+			     "bad reset state %d, possibly leaking %u\n",
+			     info->shadow[id].rslt_reset, id);
+		break;
+	}
+	spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+	wake_up(&(info->shadow[id].wq_reset));
+}
+
+static int scsifront_cmd_done(struct vscsifrnt_info *info)
+{
+	vscsiif_response_t *ring_res;
+	RING_IDX i, rp;
+	int more_to_do = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(info->host->host_lock, flags);
+
+	rp = info->ring.sring->rsp_prod;
+	rmb();
+	for (i = info->ring.rsp_cons; i != rp; i++) {
+
+		ring_res = RING_GET_RESPONSE(&info->ring, i);
+
+		if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB)
+			scsifront_cdb_cmd_done(info, ring_res);
+		else
+			scsifront_sync_cmd_done(info, ring_res);
+	}
+
+	info->ring.rsp_cons = i;
+
+	if (i != info->ring.req_prod_pvt) {
+		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+	} else {
+		info->ring.sring->rsp_event = i + 1;
+	}
+
+	info->waiting_sync = 0;
+
+	spin_unlock_irqrestore(info->host->host_lock, flags);
+
+	wake_up(&info->wq_sync);
+
+	/* Yield point for this unbounded loop. */
+	cond_resched();
+
+	return more_to_do;
+}
+
+static int scsifront_schedule(void *data)
+{
+	struct vscsifrnt_info *info = (struct vscsifrnt_info *)data;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(info->wq,
+			info->waiting_resp || kthread_should_stop());
+
+		info->waiting_resp = 0;
+		smp_mb();
+
+		if (scsifront_cmd_done(info))
+			info->waiting_resp = 1;
+	}
+
+	return 0;
+}
+
+static int map_data_for_request(struct vscsifrnt_info *info,
+				struct scsi_cmnd *sc,
+				vscsiif_request_t *ring_req,
+				uint32_t id)
+{
+	grant_ref_t gref_head;
+	struct page *page;
+	int err, ref, ref_cnt = 0;
+	int write = (sc->sc_data_direction == DMA_TO_DEVICE);
+	unsigned int i, nr_pages, off, len, bytes;
+	unsigned int data_len = scsi_bufflen(sc);
+	struct scatterlist *sg;
+	unsigned long mfn;
+
+	if (sc->sc_data_direction == DMA_NONE || !data_len)
+		return 0;
+
+	err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head);
+	if (err) {
+		shost_printk(PREFIX(ERR), info->host,
+			     "gnttab_alloc_grant_references() error\n");
+		return -ENOMEM;
+	}
+
+	/* quoted scsi_lib.c/scsi_req_map_sg . */
+	nr_pages = PFN_UP(data_len + scsi_sglist(sc)->offset);
+	if (nr_pages > VSCSIIF_SG_TABLESIZE) {
+		shost_printk(PREFIX(ERR), info->host,
+			     "Unable to map request_buffer for command!\n");
+		ref_cnt = -E2BIG;
+		goto out;
+	}
+
+	scsi_for_each_sg (sc, sg, scsi_sg_count(sc), i) {
+		page = sg_page(sg);
+		off = sg->offset;
+		len = sg->length;
+
+		while (len > 0 && data_len > 0) {
+			/*
+			 * sg sends a scatterlist that is larger than
+			 * the data_len it wants transferred for certain
+			 * IO sizes
+			 */
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+			bytes = min(bytes, data_len);
+
+			ref = gnttab_claim_grant_reference(&gref_head);
+			BUG_ON(ref == -ENOSPC);
+
+			mfn = pfn_to_mfn(page_to_pfn(page));
+			gnttab_grant_foreign_access_ref(ref,
+				info->dev->otherend_id, mfn, write);
+
+			info->shadow[id].gref[ref_cnt] = ref;
+			ring_req->seg[ref_cnt].gref    = ref;
+			ring_req->seg[ref_cnt].offset  = (uint16_t)off;
+			ring_req->seg[ref_cnt].length  = (uint16_t)bytes;
+
+			page++;
+			len -= bytes;
+			data_len -= bytes;
+			off = 0;
+			ref_cnt++;
+		}
+	}
+
+out:
+	gnttab_free_grant_references(gref_head);
+
+	return ref_cnt;
+}
+
+static vscsiif_request_t *scsifront_command2ring(struct vscsifrnt_info *info,
+						 struct scsi_cmnd *sc)
+{
+	vscsiif_request_t *ring_req;
+
+	ring_req = scsifront_pre_req(info);
+
+	ring_req->id      = sc->device->id;
+	ring_req->lun     = sc->device->lun;
+	ring_req->channel = sc->device->channel;
+	ring_req->cmd_len = sc->cmd_len;
+
+	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+	if (sc->cmd_len)
+		memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+	else
+		memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
+
+	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
+	ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+	return ring_req;
+}
+
+static int scsifront_queuecommand(struct Scsi_Host *shost,
+				  struct scsi_cmnd *sc)
+{
+	struct vscsifrnt_info *info = shost_priv(shost);
+	vscsiif_request_t *ring_req;
+	unsigned long flags;
+	int ref_cnt;
+	uint16_t rqid;
+
+/* debug printk to identify more missing scsi commands
+	shost_printk(KERN_INFO "scsicmd: ", sc->device->host,
+		     "len=%u %#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x\n",
+		     sc->cmd_len, sc->cmnd[0], sc->cmnd[1],
+		     sc->cmnd[2], sc->cmnd[3], sc->cmnd[4], sc->cmnd[5],
+		     sc->cmnd[6], sc->cmnd[7], sc->cmnd[8], sc->cmnd[9]);
+*/
+	spin_lock_irqsave(shost->host_lock, flags);
+	scsi_cmd_get_serial(shost, sc);
+	if (RING_FULL(&info->ring)) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	sc->result    = 0;
+
+	ring_req          = scsifront_command2ring(info, sc);
+	rqid              = ring_req->rqid;
+	ring_req->act     = VSCSIIF_ACT_SCSI_CDB;
+
+	info->shadow[rqid].sc  = sc;
+	info->shadow[rqid].act = VSCSIIF_ACT_SCSI_CDB;
+
+	ref_cnt = map_data_for_request(info, sc, ring_req, rqid);
+	if (ref_cnt < 0) {
+		add_id_to_freelist(info, rqid);
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		if (ref_cnt == -ENOMEM)
+			return SCSI_MLQUEUE_HOST_BUSY;
+		sc->result = DID_ERROR << 16;
+		sc->scsi_done(sc);
+		return 0;
+	}
+
+	ring_req->nr_segments          = (uint8_t)ref_cnt;
+	info->shadow[rqid].nr_segments = ref_cnt;
+
+	scsifront_do_request(info);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
+	return 0;
+}
+
+static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
+{
+	return FAILED;
+}
+
+/* vscsi supports only device_reset, because it is each of LUNs */
+static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
+{
+	struct Scsi_Host *host = sc->device->host;
+	struct vscsifrnt_info *info = shost_priv(host);
+	vscsiif_request_t *ring_req;
+	uint16_t rqid;
+	int err = 0;
+
+	for (;;) {
+		spin_lock_irq(host->host_lock);
+		if (!RING_FULL(&info->ring))
+			break;
+		if (err) {
+			spin_unlock_irq(host->host_lock);
+			return FAILED;
+		}
+		info->waiting_sync = 1;
+		spin_unlock_irq(host->host_lock);
+		err = wait_event_interruptible(info->wq_sync,
+					       !info->waiting_sync);
+		spin_lock_irq(host->host_lock);
+	}
+
+	ring_req      = scsifront_command2ring(info, sc);
+	rqid          = ring_req->rqid;
+	ring_req->act = VSCSIIF_ACT_SCSI_RESET;
+
+	info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET;
+	info->shadow[rqid].rslt_reset = 0;
+
+	ring_req->nr_segments         = 0;
+
+	scsifront_do_request(info);
+
+	spin_unlock_irq(host->host_lock);
+	err = wait_event_interruptible(info->shadow[rqid].wq_reset,
+				       info->shadow[rqid].wait_reset);
+	spin_lock_irq(host->host_lock);
+
+	if (!err) {
+		err = info->shadow[rqid].rslt_reset;
+		add_id_to_freelist(info, rqid);
+	} else {
+		spin_lock(&info->shadow_lock);
+		info->shadow[rqid].rslt_reset = -1;
+		spin_unlock(&info->shadow_lock);
+		err = FAILED;
+	}
+
+	spin_unlock_irq(host->host_lock);
+	return err;
+}
+
+static struct scsi_host_template scsifront_sht = {
+	.module			= THIS_MODULE,
+	.name			= "Xen SCSI frontend driver",
+	.queuecommand		= scsifront_queuecommand,
+	.eh_abort_handler	= scsifront_eh_abort_handler,
+	.eh_device_reset_handler= scsifront_dev_reset_handler,
+	.cmd_per_lun		= VSCSIIF_DEFAULT_CMD_PER_LUN,
+	.can_queue		= VSCSIIF_MAX_REQS,
+	.this_id		= -1,
+	.sg_tablesize		= VSCSIIF_SG_TABLESIZE,
+	.use_clustering		= DISABLE_CLUSTERING,
+	.proc_name		= "scsifront",
+};
+
+static void scsifront_free(struct vscsifrnt_info *info)
+{
+	struct Scsi_Host *host = info->host;
+
+	if (host->shost_state != SHOST_DEL)
+		scsi_remove_host(info->host);
+
+	if (info->ring_ref != GRANT_INVALID_REF) {
+		gnttab_end_foreign_access(info->ring_ref, 0,
+					  (unsigned long)info->ring.sring);
+		info->ring_ref = GRANT_INVALID_REF;
+		info->ring.sring = NULL;
+	}
+
+	if (info->irq)
+		unbind_from_irqhandler(info->irq, info);
+	info->irq = 0;
+	info->evtchn = 0;
+
+	scsi_host_put(info->host);
+}
+
+static int scsifront_alloc_ring(struct vscsifrnt_info *info)
+{
+	struct xenbus_device *dev = info->dev;
+	struct vscsiif_sring *sring;
+	int err = -ENOMEM;
+
+	info->ring_ref = GRANT_INVALID_REF;
+
+	/***** Frontend to Backend ring start *****/
+	sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL);
+	if (!sring) {
+		xenbus_dev_fatal(dev, err,
+			"fail to allocate shared ring (Front to Back)");
+		return err;
+	}
+	SHARED_RING_INIT(sring);
+	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
+	if (err < 0) {
+		free_page((unsigned long) sring);
+		info->ring.sring = NULL;
+		xenbus_dev_fatal(dev, err,
+			"fail to grant shared ring (Front to Back)");
+		goto free_sring;
+	}
+	info->ring_ref = err;
+
+	err = xenbus_alloc_evtchn(dev, &info->evtchn);
+	if (err)
+		goto free_sring;
+
+	err = bind_evtchn_to_irqhandler(info->evtchn, scsifront_intr,
+					0, "scsifront", info);
+
+	if (err <= 0) {
+		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irqhandler");
+		goto free_sring;
+	}
+	info->irq = err;
+
+	return 0;
+
+/* free resource */
+free_sring:
+	scsifront_free(info);
+
+	return err;
+}
+
+/*
+ * Set up the shared ring and event channel via scsifront_alloc_ring()
+ * and publish "ring-ref" and "event-channel" to XenStore in a single
+ * transaction (restarted on -EAGAIN).  Returns 0 or a negative errno.
+ * NOTE(review): most error paths call scsifront_free(), which also
+ * drops the scsi host reference -- confirm the caller does not put it
+ * again (see scsifront_probe()).
+ */
+static int scsifront_init_ring(struct vscsifrnt_info *info)
+{
+	struct xenbus_device *dev = info->dev;
+	struct xenbus_transaction xbt;
+	int err;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	err = scsifront_alloc_ring(info);
+	if (err)
+		return err;
+	DPRINTK("%u %u\n", info->ring_ref, info->evtchn);
+
+again:
+	err = xenbus_transaction_start(&xbt);
+	if (err)
+		xenbus_dev_fatal(dev, err, "starting transaction");
+
+	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
+			    info->ring_ref);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
+		goto fail;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+			    info->evtchn);
+
+	if (err) {
+		xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
+		goto fail;
+	}
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err) {
+		if (err == -EAGAIN)
+			goto again;
+		xenbus_dev_fatal(dev, err, "completing transaction");
+		goto free_sring;
+	}
+
+	return 0;
+
+fail:
+	xenbus_transaction_end(xbt, 1);
+free_sring:
+	/* free resource */
+	scsifront_free(info);
+
+	return err;
+}
+
+
+/*
+ * Xenbus probe: allocate the scsi host, build the shadow free list,
+ * set up the shared ring, start the response kthread and register the
+ * host with the SCSI midlayer.
+ */
+static int scsifront_probe(struct xenbus_device *dev,
+			   const struct xenbus_device_id *id)
+{
+	struct vscsifrnt_info *info;
+	struct Scsi_Host *host;
+	int i, err = -ENOMEM;
+	char name[DEFAULT_TASK_COMM_LEN];
+
+	host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
+	if (!host) {
+		xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
+		return err;
+	}
+	info = (struct vscsifrnt_info *) host->hostdata;
+	info->host = host;
+
+	dev_set_drvdata(&dev->dev, info);
+	info->dev  = dev;
+
+	/* chain all shadow entries into the initial free list */
+	for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
+		info->shadow[i].next_free = i + 1;
+		init_waitqueue_head(&(info->shadow[i].wq_reset));
+		info->shadow[i].wait_reset = 0;
+	}
+	info->shadow_free = 0;
+
+	err = scsifront_init_ring(info);
+	if (err) {
+		/*
+		 * NOTE(review): most failure paths inside
+		 * scsifront_init_ring() already call scsifront_free(),
+		 * which does scsi_host_put(); putting the host again
+		 * here looks like a double put -- confirm and make the
+		 * cleanup ownership consistent.
+		 */
+		scsi_host_put(host);
+		return err;
+	}
+
+	init_waitqueue_head(&info->wq);
+	init_waitqueue_head(&info->wq_sync);
+	spin_lock_init(&info->shadow_lock);
+
+	snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d",
+		 info->host->host_no);
+
+	info->kthread = kthread_run(scsifront_schedule, info, name);
+	if (IS_ERR(info->kthread)) {
+		err = PTR_ERR(info->kthread);
+		info->kthread = NULL;
+		dev_err(&dev->dev, "kthread start err %d\n", err);
+		goto free_sring;
+	}
+
+	host->max_id      = VSCSIIF_MAX_TARGET;
+	host->max_channel = 0;
+	host->max_lun     = VSCSIIF_MAX_LUN;
+	host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512;
+	host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;
+
+	err = scsi_add_host(host, &dev->dev);
+	if (err) {
+		dev_err(&dev->dev, "fail to add scsi host %d\n", err);
+		goto free_sring;
+	}
+
+	xenbus_switch_state(dev, XenbusStateInitialised);
+
+	return 0;
+
+free_sring:
+	/* free resource */
+	/*
+	 * NOTE(review): the kthread started above is not stopped on this
+	 * path -- after a scsi_add_host() failure it would keep running.
+	 */
+	scsifront_free(info);
+	return err;
+}
+
+/* Xenbus remove: stop the response kthread and release all resources. */
+static int scsifront_remove(struct xenbus_device *dev)
+{
+	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+	DPRINTK("%s: %s removed\n", __FUNCTION__, dev->nodename);
+
+	if (info->kthread) {
+		kthread_stop(info->kthread);
+		info->kthread = NULL;
+	}
+
+	scsifront_free(info);
+
+	return 0;
+}
+
+/*
+ * Detach from the backend: unregister the scsi host and complete the
+ * xenbus close handshake.
+ */
+static int scsifront_disconnect(struct vscsifrnt_info *info)
+{
+	struct xenbus_device *dev = info->dev;
+	struct Scsi_Host *host = info->host;
+
+	DPRINTK("%s: %s disconnect\n", __FUNCTION__, dev->nodename);
+
+	/*
+	 * By the time this function runs, all LUNs of this frontend
+	 * have already been removed, so I/O does not need to be blocked
+	 * before calling scsi_remove_host().
+	 */
+
+	scsi_remove_host(host);
+	xenbus_frontend_closed(dev);
+
+	return 0;
+}
+
+/*
+ * Walk the backend's "vscsi-devs" XenStore directory and add or remove
+ * LUNs according to @op (VSCSIFRONT_OP_ADD_LUN / VSCSIFRONT_OP_DEL_LUN),
+ * acknowledging each transition by writing our own per-device state node.
+ */
+static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
+{
+	struct xenbus_device *dev = info->dev;
+	int i, err = 0;
+	char str[64], state_str[64];
+	char **dir;
+	unsigned int dir_n = 0;
+	unsigned int device_state;
+	unsigned int hst, chn, tgt, lun;
+	struct scsi_device *sdev;
+
+	dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
+	if (IS_ERR(dir))
+		return;
+
+	for (i = 0; i < dir_n; i++) {
+		/* read status */
+		snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
+		err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
+				   &device_state);
+		if (XENBUS_EXIST_ERR(err))
+			continue;
+
+		/* virtual SCSI device; hst is parsed but otherwise unused */
+		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
+		err = xenbus_scanf(XBT_NIL, dev->otherend, str,
+				   "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
+		if (XENBUS_EXIST_ERR(err))
+			continue;
+
+		/* front device state path */
+		snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state",
+			 dir[i]);
+
+		switch (op) {
+		case VSCSIFRONT_OP_ADD_LUN:
+			if (device_state == XenbusStateInitialised) {
+				sdev = scsi_device_lookup(info->host, chn, tgt,
+							  lun);
+				if (sdev) {
+					dev_err(&dev->dev,
+						"Device already in use.\n");
+					scsi_device_put(sdev);
+					xenbus_printf(XBT_NIL, dev->nodename,
+						      state_str, "%d",
+						      XenbusStateClosed);
+				} else {
+					scsi_add_device(info->host, chn, tgt,
+							lun);
+					xenbus_printf(XBT_NIL, dev->nodename,
+						      state_str, "%d",
+						      XenbusStateConnected);
+				}
+			}
+			break;
+		case VSCSIFRONT_OP_DEL_LUN:
+			if (device_state == XenbusStateClosing) {
+				sdev = scsi_device_lookup(info->host, chn, tgt,
+							  lun);
+				if (sdev) {
+					scsi_remove_device(sdev);
+					scsi_device_put(sdev);
+					xenbus_printf(XBT_NIL, dev->nodename,
+						      state_str, "%d",
+						      XenbusStateClosed);
+				}
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	kfree(dir);
+	return;
+}
+
+/*
+ * Xenbus otherend_changed callback: react to backend state changes by
+ * running LUN hotplug scans and mirroring the matching frontend state.
+ */
+static void scsifront_backend_changed(struct xenbus_device *dev,
+				      enum xenbus_state backend_state)
+{
+	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+	DPRINTK("%p %u %u\n", dev, dev->state, backend_state);
+
+	switch (backend_state) {
+	case XenbusStateUnknown:
+	case XenbusStateInitialising:
+	case XenbusStateInitWait:
+	case XenbusStateInitialised:
+		break;
+
+	case XenbusStateConnected:
+		if (xenbus_read_driver_state(dev->nodename) ==
+			XenbusStateInitialised) {
+			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+		}
+
+		if (dev->state != XenbusStateConnected)
+			xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateClosed:
+		if (dev->state == XenbusStateClosed)
+			break;
+		/* Missed the backend's Closing state -- fallthrough */
+	case XenbusStateClosing:
+		scsifront_disconnect(info);
+		break;
+
+	case XenbusStateReconfiguring:
+		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
+		xenbus_switch_state(dev, XenbusStateReconfiguring);
+		break;
+
+	case XenbusStateReconfigured:
+		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+		xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+	}
+}
+
+static const struct xenbus_device_id scsifront_ids[] = {
+	{ "vscsi" },
+	{ "" }
+};
+
+static DEFINE_XENBUS_DRIVER(scsifront, ,
+	.probe			= scsifront_probe,
+	.remove			= scsifront_remove,
+/*	.resume			= scsifront_resume, */
+	.otherend_changed	= scsifront_backend_changed,
+);
+
+static int __init scsifront_init(void)
+{
+	if (!xen_domain())
+		return -ENODEV;
+
+	return xenbus_register_frontend(&scsifront_driver);
+}
+module_init(scsifront_init);
+
+static void __exit scsifront_exit(void)
+{
+	xenbus_unregister_driver(&scsifront_driver);
+}
+module_exit(scsifront_exit);
+
+MODULE_DESCRIPTION("Xen SCSI frontend driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vscsi");
-- 
1.8.4.5


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [PATCH 2/4] Introduce xen-scsifront module
  2014-06-27 14:34 Add XEN pvSCSI support jgross
  2014-06-27 14:34 ` [PATCH 1/4] Add XEN pvSCSI protocol description jgross
  2014-06-27 14:34 ` jgross
@ 2014-06-27 14:34 ` jgross
  2014-06-27 14:34 ` jgross
                   ` (5 subsequent siblings)
  8 siblings, 0 replies; 34+ messages in thread
From: jgross @ 2014-06-27 14:34 UTC (permalink / raw)
  To: JBottomley, linux-scsi, xen-devel; +Cc: Juergen Gross

From: Juergen Gross <jgross@suse.com>

Introduces the XEN pvSCSI frontend. With pvSCSI it is possible for a XEN domU
to issue SCSI commands to a SCSI LUN assigned to that domU. The SCSI commands
are passed to the pvSCSI backend in a driver domain (usually Dom0) which is
owner of the physical device. This allows e.g. to use SCSI tape drives in a
XEN domU.

The code is taken from the pvSCSI implementation in XEN done by Fujitsu based
on Linux kernel 2.6.18.

Changes from the original version are:
- port to upstream kernel
- put all code in just one source file
- move module to appropriate location in kernel tree
- adapt to Linux style guide
- some minor code simplifications
- replace constants with defines
- remove not used defines

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 drivers/scsi/Kconfig         |   7 +
 drivers/scsi/Makefile        |   1 +
 drivers/scsi/xen-scsifront.c | 936 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 944 insertions(+)
 create mode 100644 drivers/scsi/xen-scsifront.c

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index baca589..734d691 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -611,6 +611,13 @@ config VMWARE_PVSCSI
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmw_pvscsi.
 
+config XEN_SCSI_FRONTEND
+	tristate "XEN SCSI frontend driver"
+	depends on SCSI && XEN
+	help
	  The XEN SCSI frontend driver allows the kernel to access SCSI devices
	  exported by another guest OS (usually the driver domain, Dom0).
+
 config HYPERV_STORAGE
 	tristate "Microsoft Hyper-V virtual storage driver"
 	depends on SCSI && HYPERV
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e172d4f..a4ee9c5 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -144,6 +144,7 @@ obj-$(CONFIG_SCSI_ESAS2R)	+= esas2r/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
 obj-$(CONFIG_SCSI_VIRTIO)	+= virtio_scsi.o
 obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
+obj-$(CONFIG_XEN_SCSI_FRONTEND)	+= xen-scsifront.o
 obj-$(CONFIG_HYPERV_STORAGE)	+= hv_storvsc.o
 
 obj-$(CONFIG_ARM)		+= arm/
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
new file mode 100644
index 0000000..b60cc72
--- /dev/null
+++ b/drivers/scsi/xen-scsifront.c
@@ -0,0 +1,936 @@
+/*
+ * Xen SCSI frontend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+ * Patched to support >2TB drives
+ * 2010, Samuel Kvasnica, IMS Nanofabrication AG
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/vscsiif.h>
+#include <xen/interface/io/protocols.h>
+
+#include <asm/xen/hypervisor.h>
+
+
+#define GRANT_INVALID_REF	0
+
+#define VSCSIFRONT_OP_ADD_LUN	1
+#define VSCSIFRONT_OP_DEL_LUN	2
+
+#define DEFAULT_TASK_COMM_LEN	TASK_COMM_LEN
+
+/* tuning point*/
+#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
+#define VSCSIIF_MAX_TARGET          64
+#define VSCSIIF_MAX_LUN             255
+
+#define VSCSIIF_RING_SIZE	__CONST_RING_SIZE(vscsiif, PAGE_SIZE)
+#define VSCSIIF_MAX_REQS	VSCSIIF_RING_SIZE
+
+/* Per-request bookkeeping; one entry per in-flight ring request. */
+struct vscsifrnt_shadow {
+	/* free-list link: index of the next free shadow entry */
+	uint16_t next_free;
+
+	/* command between backend and frontend
+	 * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */
+	unsigned char act;
+
+	/* Number of pieces of scatter-gather */
+	unsigned int nr_segments;
+
+	/* do reset function */
+	wait_queue_head_t wq_reset;	/* reset work queue           */
+	int wait_reset;			/* reset work queue condition */
+	int32_t rslt_reset;		/* reset response status      */
+					/* (SUCCESS or FAILED)        */
+
+	/* requested struct scsi_cmnd is stored from kernel */
+	struct scsi_cmnd *sc;
+	/* grant references covering the data pages of this request */
+	int gref[VSCSIIF_SG_TABLESIZE];
+};
+
+/* Per-adapter frontend state, stored in the Scsi_Host's hostdata. */
+struct vscsifrnt_info {
+	struct xenbus_device *dev;
+
+	struct Scsi_Host *host;
+
+	spinlock_t shadow_lock;		/* protects the shadow free list */
+	unsigned int evtchn;
+	unsigned int irq;
+
+	grant_ref_t ring_ref;
+	struct vscsiif_front_ring ring;
+	struct vscsiif_response	ring_res;
+
+	struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS];
+	uint32_t shadow_free;		/* head of the shadow free list */
+
+	struct task_struct *kthread;	/* response-handling thread */
+	wait_queue_head_t wq;		/* kthread sleeps here */
+	wait_queue_head_t wq_sync;	/* full-ring waiters sleep here */
+	unsigned int waiting_resp:1;
+	unsigned int waiting_sync:1;
+};
+
+#define DPRINTK(_f, _a...)				\
+	pr_debug("(file=%s, line=%d) " _f, __FILE__ , __LINE__ , ## _a )
+
+#define PREFIX(lvl) KERN_##lvl "scsifront: "
+
+/*
+ * Pop one shadow id off the free list.  The BUG_ON documents the
+ * invariant that the caller only allocates an id while the ring is not
+ * full, so the free list (same capacity as the ring) cannot be empty.
+ */
+static int get_id_from_freelist(struct vscsifrnt_info *info)
+{
+	unsigned long flags;
+	uint32_t free;
+
+	spin_lock_irqsave(&info->shadow_lock, flags);
+
+	free = info->shadow_free;
+	BUG_ON(free >= VSCSIIF_MAX_REQS);
+	info->shadow_free = info->shadow[free].next_free;
+	info->shadow[free].next_free = VSCSIIF_MAX_REQS;
+	info->shadow[free].wait_reset = 0;
+
+	spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+	return free;
+}
+
+/* Push @id back on the free list; caller must hold shadow_lock. */
+static void _add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
+{
+	info->shadow[id].next_free = info->shadow_free;
+	info->shadow[id].sc = NULL;
+	info->shadow_free = id;
+}
+
+/* Locked wrapper around _add_id_to_freelist(). */
+static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->shadow_lock, flags);
+	_add_id_to_freelist(info, id);
+	spin_unlock_irqrestore(&info->shadow_lock, flags);
+}
+
+/*
+ * Reserve the next ring slot and tag it with a fresh shadow id.
+ * Advances req_prod_pvt; the request only becomes visible to the
+ * backend on a later RING_PUSH_REQUESTS_AND_CHECK_NOTIFY.  Callers
+ * hold the host lock and have checked that the ring is not full.
+ */
+static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+{
+	struct vscsiif_front_ring *ring = &(info->ring);
+	vscsiif_request_t *ring_req;
+	uint32_t id;
+
+	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+
+	ring->req_prod_pvt++;
+
+	id = get_id_from_freelist(info);	/* use id by response */
+	ring_req->rqid = (uint16_t)id;
+
+	return ring_req;
+}
+
+/* Flag that responses are pending and wake the handler kthread. */
+static void scsifront_notify_work(struct vscsifrnt_info *info)
+{
+	info->waiting_resp = 1;
+	wake_up(&info->wq);
+}
+
+/* Push queued requests to the backend, notifying it only if needed. */
+static void scsifront_do_request(struct vscsifrnt_info *info)
+{
+	struct vscsiif_front_ring *ring = &(info->ring);
+	int notify;
+
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
+	if (notify)
+		notify_remote_via_irq(info->irq);
+}
+
+/* Event-channel interrupt: defer response processing to the kthread. */
+static irqreturn_t scsifront_intr(int irq, void *dev_id)
+{
+	scsifront_notify_work((struct vscsifrnt_info *)dev_id);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Revoke the data-page grants of a completed request.  A grant still
+ * in use by the backend at completion time is a protocol violation
+ * that would let the backend write into reused pages, hence the BUG().
+ */
+static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+{
+	struct vscsifrnt_shadow *s = &info->shadow[id];
+	int i;
+
+	if (s->sc->sc_data_direction == DMA_NONE)
+		return;
+
+	for (i = 0; i < s->nr_segments; i++) {
+		if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+			shost_printk(PREFIX(ALERT), info->host,
+				     "grant still in use by backend\n");
+			BUG();
+		}
+		gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+	}
+}
+
+/*
+ * Complete a normal CDB request: release the data grants, recycle the
+ * shadow id, and hand result, residual length and sense data back to
+ * the SCSI midlayer.  Called with the host lock held.
+ */
+static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
+				   vscsiif_response_t *ring_res)
+{
+	struct scsi_cmnd *sc;
+	uint32_t id;
+	uint8_t sense_len;
+
+	id = ring_res->rqid;
+	sc = info->shadow[id].sc;
+
+	BUG_ON(sc == NULL);
+
+	scsifront_gnttab_done(info, id);
+	add_id_to_freelist(info, id);
+
+	sc->result = ring_res->rslt;
+	scsi_set_resid(sc, ring_res->residual_len);
+
+	/* never copy more sense data than the midlayer buffer can hold */
+	sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE, ring_res->sense_len);
+
+	if (sense_len)
+		memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len);
+
+	sc->scsi_done(sc);
+
+	return;
+}
+
+/*
+ * Complete a synchronous (reset) request.  rslt_reset == -1 means the
+ * waiter in scsifront_dev_reset_handler() was interrupted and has
+ * abandoned the request, so the shadow id is recycled here instead of
+ * by the waiter.
+ */
+static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
+				    vscsiif_response_t *ring_res)
+{
+	uint16_t id = ring_res->rqid;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->shadow_lock, flags);
+	info->shadow[id].wait_reset = 1;
+	switch (info->shadow[id].rslt_reset) {
+	case 0:
+		info->shadow[id].rslt_reset = ring_res->rslt;
+		break;
+	case -1:
+		_add_id_to_freelist(info, id);
+		break;
+	default:
+		shost_printk(PREFIX(ERR), info->host,
+			     "bad reset state %d, possibly leaking %u\n",
+			     info->shadow[id].rslt_reset, id);
+		break;
+	}
+	spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+	wake_up(&(info->shadow[id].wq_reset));
+}
+
+/*
+ * Drain the response ring under the host lock.  Dispatches CDB
+ * completions and reset completions, clears waiting_sync and wakes any
+ * waiter throttled on a full ring.  Returns nonzero when more
+ * responses arrived while processing, so the kthread loops again.
+ */
+static int scsifront_cmd_done(struct vscsifrnt_info *info)
+{
+	vscsiif_response_t *ring_res;
+	RING_IDX i, rp;
+	int more_to_do = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(info->host->host_lock, flags);
+
+	rp = info->ring.sring->rsp_prod;
+	/* make sure we see the producer index before reading responses */
+	rmb();
+	for (i = info->ring.rsp_cons; i != rp; i++) {
+
+		ring_res = RING_GET_RESPONSE(&info->ring, i);
+
+		if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB)
+			scsifront_cdb_cmd_done(info, ring_res);
+		else
+			scsifront_sync_cmd_done(info, ring_res);
+	}
+
+	info->ring.rsp_cons = i;
+
+	if (i != info->ring.req_prod_pvt) {
+		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+	} else {
+		info->ring.sring->rsp_event = i + 1;
+	}
+
+	info->waiting_sync = 0;
+
+	spin_unlock_irqrestore(info->host->host_lock, flags);
+
+	wake_up(&info->wq_sync);
+
+	/* Yield point for this unbounded loop. */
+	cond_resched();
+
+	return more_to_do;
+}
+
+/*
+ * Response-handling kthread: sleeps until scsifront_intr() signals
+ * pending responses, then drains the ring until no more arrive.
+ */
+static int scsifront_schedule(void *data)
+{
+	struct vscsifrnt_info *info = (struct vscsifrnt_info *)data;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(info->wq,
+			info->waiting_resp || kthread_should_stop());
+
+		info->waiting_resp = 0;
+		smp_mb();
+
+		if (scsifront_cmd_done(info))
+			info->waiting_resp = 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Grant the backend access to the data pages of @sc and fill the
+ * request's segment array.  Returns the number of segments used, 0 for
+ * commands without data, or a negative errno (-ENOMEM when no grant
+ * references are available, -E2BIG when the data does not fit in one
+ * request).  Claimed references stay granted until completion
+ * (released in scsifront_gnttab_done()); the unclaimed remainder of
+ * the pool is returned via gnttab_free_grant_references().
+ */
+static int map_data_for_request(struct vscsifrnt_info *info,
+				struct scsi_cmnd *sc,
+				vscsiif_request_t *ring_req,
+				uint32_t id)
+{
+	grant_ref_t gref_head;
+	struct page *page;
+	int err, ref, ref_cnt = 0;
+	int write = (sc->sc_data_direction == DMA_TO_DEVICE);
+	unsigned int i, nr_pages, off, len, bytes;
+	unsigned int data_len = scsi_bufflen(sc);
+	struct scatterlist *sg;
+	unsigned long mfn;
+
+	if (sc->sc_data_direction == DMA_NONE || !data_len)
+		return 0;
+
+	err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head);
+	if (err) {
+		shost_printk(PREFIX(ERR), info->host,
+			     "gnttab_alloc_grant_references() error\n");
+		return -ENOMEM;
+	}
+
+	/* quoted scsi_lib.c/scsi_req_map_sg . */
+	nr_pages = PFN_UP(data_len + scsi_sglist(sc)->offset);
+	if (nr_pages > VSCSIIF_SG_TABLESIZE) {
+		shost_printk(PREFIX(ERR), info->host,
+			     "Unable to map request_buffer for command!\n");
+		ref_cnt = -E2BIG;
+		goto out;
+	}
+
+	scsi_for_each_sg (sc, sg, scsi_sg_count(sc), i) {
+		page = sg_page(sg);
+		off = sg->offset;
+		len = sg->length;
+
+		while (len > 0 && data_len > 0) {
+			/*
+			 * sg sends a scatterlist that is larger than
+			 * the data_len it wants transferred for certain
+			 * IO sizes
+			 */
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+			bytes = min(bytes, data_len);
+
+			/* cannot fail: pool was sized to VSCSIIF_SG_TABLESIZE */
+			ref = gnttab_claim_grant_reference(&gref_head);
+			BUG_ON(ref == -ENOSPC);
+
+			mfn = pfn_to_mfn(page_to_pfn(page));
+			gnttab_grant_foreign_access_ref(ref,
+				info->dev->otherend_id, mfn, write);
+
+			info->shadow[id].gref[ref_cnt] = ref;
+			ring_req->seg[ref_cnt].gref    = ref;
+			ring_req->seg[ref_cnt].offset  = (uint16_t)off;
+			ring_req->seg[ref_cnt].length  = (uint16_t)bytes;
+
+			page++;
+			len -= bytes;
+			data_len -= bytes;
+			off = 0;
+			ref_cnt++;
+		}
+	}
+
+out:
+	gnttab_free_grant_references(gref_head);
+
+	return ref_cnt;
+}
+
+/*
+ * Allocate a ring slot (consuming a shadow id) and fill in the fixed
+ * part of the request: target/LUN/channel, CDB, data direction and
+ * timeout.  act and nr_segments are set by the caller.
+ */
+static vscsiif_request_t *scsifront_command2ring(struct vscsifrnt_info *info,
+						 struct scsi_cmnd *sc)
+{
+	vscsiif_request_t *ring_req;
+
+	ring_req = scsifront_pre_req(info);
+
+	ring_req->id      = sc->device->id;
+	ring_req->lun     = sc->device->lun;
+	ring_req->channel = sc->device->channel;
+	ring_req->cmd_len = sc->cmd_len;
+
+	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+	if (sc->cmd_len)
+		memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+	else
+		memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
+
+	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
+	ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+	return ring_req;
+}
+
+/*
+ * Queue a SCSI command to the backend.  Called by the SCSI midlayer;
+ * the host lock is taken here.  Returns 0, or SCSI_MLQUEUE_HOST_BUSY
+ * when the ring is full or no grant references are available.
+ */
+static int scsifront_queuecommand(struct Scsi_Host *shost,
+				  struct scsi_cmnd *sc)
+{
+	struct vscsifrnt_info *info = shost_priv(shost);
+	vscsiif_request_t *ring_req;
+	unsigned long flags;
+	int ref_cnt;
+	uint16_t rqid;
+
+/* debug printk to identify more missing scsi commands
+	shost_printk(KERN_INFO "scsicmd: ", sc->device->host,
+		     "len=%u %#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x\n",
+		     sc->cmd_len, sc->cmnd[0], sc->cmnd[1],
+		     sc->cmnd[2], sc->cmnd[3], sc->cmnd[4], sc->cmnd[5],
+		     sc->cmnd[6], sc->cmnd[7], sc->cmnd[8], sc->cmnd[9]);
+*/
+	spin_lock_irqsave(shost->host_lock, flags);
+	scsi_cmd_get_serial(shost, sc);
+	if (RING_FULL(&info->ring)) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	sc->result    = 0;
+
+	ring_req          = scsifront_command2ring(info, sc);
+	rqid              = ring_req->rqid;
+	ring_req->act     = VSCSIIF_ACT_SCSI_CDB;
+
+	info->shadow[rqid].sc  = sc;
+	info->shadow[rqid].act = VSCSIIF_ACT_SCSI_CDB;
+
+	ref_cnt = map_data_for_request(info, sc, ring_req, rqid);
+	if (ref_cnt < 0) {
+		/*
+		 * scsifront_command2ring() already advanced req_prod_pvt.
+		 * Roll it back so this half-built request (whose rqid is
+		 * returned to the free list below) is never pushed to the
+		 * backend by a later scsifront_do_request().  We still
+		 * hold the host lock, so no other request can have been
+		 * placed in the ring after this one.
+		 */
+		info->ring.req_prod_pvt--;
+		add_id_to_freelist(info, rqid);
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		if (ref_cnt == -ENOMEM)
+			return SCSI_MLQUEUE_HOST_BUSY;
+		sc->result = DID_ERROR << 16;
+		sc->scsi_done(sc);
+		return 0;
+	}
+
+	ring_req->nr_segments          = (uint8_t)ref_cnt;
+	info->shadow[rqid].nr_segments = ref_cnt;
+
+	scsifront_do_request(info);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
+	return 0;
+}
+
+/* Command abort is not supported by the vscsi protocol; always fail. */
+static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
+{
+	return FAILED;
+}
+
+/*
+ * Error handler: issue a device reset (the vscsi protocol supports
+ * only per-LUN resets, no aborts).  Waits for a free ring slot
+ * (throttling on wq_sync when the ring is full), sends
+ * VSCSIIF_ACT_SCSI_RESET and sleeps until the backend answers.  If
+ * the wait is interrupted, rslt_reset is set to -1 so that
+ * scsifront_sync_cmd_done() recycles the abandoned shadow id.
+ */
+static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
+{
+	struct Scsi_Host *host = sc->device->host;
+	struct vscsifrnt_info *info = shost_priv(host);
+	vscsiif_request_t *ring_req;
+	uint16_t rqid;
+	int err = 0;
+
+	for (;;) {
+		spin_lock_irq(host->host_lock);
+		if (!RING_FULL(&info->ring))
+			break;
+		/* second interrupted wait with the ring still full: give up */
+		if (err) {
+			spin_unlock_irq(host->host_lock);
+			return FAILED;
+		}
+		info->waiting_sync = 1;
+		spin_unlock_irq(host->host_lock);
+		err = wait_event_interruptible(info->wq_sync,
+					       !info->waiting_sync);
+		spin_lock_irq(host->host_lock);
+	}
+
+	ring_req      = scsifront_command2ring(info, sc);
+	rqid          = ring_req->rqid;
+	ring_req->act = VSCSIIF_ACT_SCSI_RESET;
+
+	info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET;
+	info->shadow[rqid].rslt_reset = 0;
+
+	ring_req->nr_segments         = 0;
+
+	scsifront_do_request(info);
+
+	spin_unlock_irq(host->host_lock);
+	err = wait_event_interruptible(info->shadow[rqid].wq_reset,
+				       info->shadow[rqid].wait_reset);
+	spin_lock_irq(host->host_lock);
+
+	if (!err) {
+		/* backend's reply is the SUCCESS/FAILED value we return */
+		err = info->shadow[rqid].rslt_reset;
+		add_id_to_freelist(info, rqid);
+	} else {
+		/* interrupted: hand the id over to scsifront_sync_cmd_done() */
+		spin_lock(&info->shadow_lock);
+		info->shadow[rqid].rslt_reset = -1;
+		spin_unlock(&info->shadow_lock);
+		err = FAILED;
+	}
+
+	spin_unlock_irq(host->host_lock);
+	return err;
+}
+
+/*
+ * Host template: per-LUN device reset only (no abort), queue depth
+ * bounded by the ring size, SG entries limited to one ring request.
+ */
+static struct scsi_host_template scsifront_sht = {
+	.module			= THIS_MODULE,
+	.name			= "Xen SCSI frontend driver",
+	.queuecommand		= scsifront_queuecommand,
+	.eh_abort_handler	= scsifront_eh_abort_handler,
+	.eh_device_reset_handler= scsifront_dev_reset_handler,
+	.cmd_per_lun		= VSCSIIF_DEFAULT_CMD_PER_LUN,
+	.can_queue		= VSCSIIF_MAX_REQS,
+	.this_id		= -1,
+	.sg_tablesize		= VSCSIIF_SG_TABLESIZE,
+	.use_clustering		= DISABLE_CLUSTERING,
+	.proc_name		= "scsifront",
+};
+
+/*
+ * Full teardown: unregister the host (if not already deleted), revoke
+ * and free the shared ring page, unbind the interrupt and drop the
+ * host reference.  *info lives in the host's hostdata, so it is gone
+ * after the final scsi_host_put().
+ */
+static void scsifront_free(struct vscsifrnt_info *info)
+{
+	struct Scsi_Host *host = info->host;
+
+	if (host->shost_state != SHOST_DEL)
+		scsi_remove_host(info->host);
+
+	if (info->ring_ref != GRANT_INVALID_REF) {
+		/* passing the page address makes gnttab free it as well */
+		gnttab_end_foreign_access(info->ring_ref, 0,
+					  (unsigned long)info->ring.sring);
+		info->ring_ref = GRANT_INVALID_REF;
+		info->ring.sring = NULL;
+	}
+
+	if (info->irq)
+		unbind_from_irqhandler(info->irq, info);
+	info->irq = 0;
+	info->evtchn = 0;
+
+	scsi_host_put(info->host);
+}
+
+/*
+ * Undo what scsifront_alloc_ring() set up: unbind the interrupt and
+ * revoke/free the shared ring page.  Unlike scsifront_free() this does
+ * NOT touch the scsi host reference, so it is safe to use on error
+ * paths before the host has been registered.
+ */
+static void scsifront_free_ring(struct vscsifrnt_info *info)
+{
+	if (info->irq)
+		unbind_from_irqhandler(info->irq, info);
+	info->irq = 0;
+	info->evtchn = 0;
+
+	if (info->ring_ref != GRANT_INVALID_REF) {
+		/* passing the page address makes gnttab free it as well */
+		gnttab_end_foreign_access(info->ring_ref, 0,
+					  (unsigned long)info->ring.sring);
+		info->ring_ref = GRANT_INVALID_REF;
+		info->ring.sring = NULL;
+	}
+}
+
+/*
+ * Allocate the shared ring, grant it to the backend and bind the event
+ * channel.  Returns 0 on success or a negative errno.  On failure
+ * everything allocated here is released again; the scsi host reference
+ * is left for the caller to drop (previously the error paths called
+ * scsifront_free(), whose scsi_host_put() combined with the caller's
+ * own put into a double release of the host).
+ */
+static int scsifront_alloc_ring(struct vscsifrnt_info *info)
+{
+	struct xenbus_device *dev = info->dev;
+	struct vscsiif_sring *sring;
+	int err = -ENOMEM;
+
+	info->ring_ref = GRANT_INVALID_REF;
+
+	/***** Frontend to Backend ring start *****/
+	sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL);
+	if (!sring) {
+		xenbus_dev_fatal(dev, err,
+			"fail to allocate shared ring (Front to Back)");
+		return err;
+	}
+	SHARED_RING_INIT(sring);
+	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
+	if (err < 0) {
+		free_page((unsigned long) sring);
+		info->ring.sring = NULL;
+		xenbus_dev_fatal(dev, err,
+			"fail to grant shared ring (Front to Back)");
+		return err;
+	}
+	info->ring_ref = err;
+
+	err = xenbus_alloc_evtchn(dev, &info->evtchn);
+	if (err)
+		goto free_ring;
+
+	err = bind_evtchn_to_irqhandler(info->evtchn, scsifront_intr,
+					0, "scsifront", info);
+	if (err <= 0) {
+		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irqhandler");
+		if (!err)
+			err = -EINVAL;	/* 0 would be mistaken for success */
+		goto free_ring;
+	}
+	info->irq = err;
+
+	return 0;
+
+free_ring:
+	scsifront_free_ring(info);
+
+	return err;
+}
+
+/*
+ * Set up the shared ring via scsifront_alloc_ring() and publish
+ * "ring-ref" and "event-channel" to XenStore in a single transaction
+ * (restarted on -EAGAIN).  Returns 0 or a negative errno; on failure
+ * the ring resources are released, the host reference is not.
+ */
+static int scsifront_init_ring(struct vscsifrnt_info *info)
+{
+	struct xenbus_device *dev = info->dev;
+	struct xenbus_transaction xbt;
+	int err;
+
+	DPRINTK("%s\n", __func__);
+
+	err = scsifront_alloc_ring(info);
+	if (err)
+		return err;
+	DPRINTK("%u %u\n", info->ring_ref, info->evtchn);
+
+again:
+	err = xenbus_transaction_start(&xbt);
+	if (err)
+		xenbus_dev_fatal(dev, err, "starting transaction");
+
+	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
+			    info->ring_ref);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
+		goto fail;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+			    info->evtchn);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
+		goto fail;
+	}
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err) {
+		if (err == -EAGAIN)
+			goto again;
+		xenbus_dev_fatal(dev, err, "completing transaction");
+		goto free_ring;
+	}
+
+	return 0;
+
+fail:
+	xenbus_transaction_end(xbt, 1);
+free_ring:
+	scsifront_free_ring(info);
+
+	return err;
+}
+
+
+/*
+ * Xenbus probe: allocate the scsi host, build the shadow free list,
+ * set up the shared ring, start the response kthread and register the
+ * host with the SCSI midlayer.
+ *
+ * Cleanup ownership: until scsi_add_host() succeeds, this function
+ * owns the single host reference and drops it exactly once on every
+ * error path (the previous code both put the host here and inside
+ * scsifront_free() called from the ring setup, a double put; it also
+ * left the kthread running and called scsi_remove_host() on a
+ * never-added host when scsi_add_host() failed).
+ */
+static int scsifront_probe(struct xenbus_device *dev,
+			   const struct xenbus_device_id *id)
+{
+	struct vscsifrnt_info *info;
+	struct Scsi_Host *host;
+	int i, err = -ENOMEM;
+	char name[DEFAULT_TASK_COMM_LEN];
+
+	host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
+	if (!host) {
+		xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
+		return err;
+	}
+	info = (struct vscsifrnt_info *) host->hostdata;
+	info->host = host;
+
+	dev_set_drvdata(&dev->dev, info);
+	info->dev  = dev;
+
+	/* chain all shadow entries into the initial free list */
+	for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
+		info->shadow[i].next_free = i + 1;
+		init_waitqueue_head(&(info->shadow[i].wq_reset));
+		info->shadow[i].wait_reset = 0;
+	}
+	info->shadow_free = 0;
+
+	err = scsifront_init_ring(info);
+	if (err) {
+		scsi_host_put(host);
+		return err;
+	}
+
+	init_waitqueue_head(&info->wq);
+	init_waitqueue_head(&info->wq_sync);
+	spin_lock_init(&info->shadow_lock);
+
+	snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d",
+		 info->host->host_no);
+
+	info->kthread = kthread_run(scsifront_schedule, info, name);
+	if (IS_ERR(info->kthread)) {
+		err = PTR_ERR(info->kthread);
+		info->kthread = NULL;
+		dev_err(&dev->dev, "kthread start err %d\n", err);
+		goto free_ring;
+	}
+
+	host->max_id      = VSCSIIF_MAX_TARGET;
+	host->max_channel = 0;
+	host->max_lun     = VSCSIIF_MAX_LUN;
+	host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512;
+	host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;
+
+	err = scsi_add_host(host, &dev->dev);
+	if (err) {
+		dev_err(&dev->dev, "fail to add scsi host %d\n", err);
+		goto free_ring;
+	}
+
+	xenbus_switch_state(dev, XenbusStateInitialised);
+
+	return 0;
+
+free_ring:
+	if (info->kthread) {
+		kthread_stop(info->kthread);
+		info->kthread = NULL;
+	}
+	scsifront_free_ring(info);
+	scsi_host_put(host);
+	return err;
+}
+
+/* Xenbus remove: stop the response kthread and release all resources. */
+static int scsifront_remove(struct xenbus_device *dev)
+{
+	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+	/* __func__ is the standard C99 spelling; __FUNCTION__ is a
+	 * deprecated GNU extension flagged by checkpatch. */
+	DPRINTK("%s: %s removed\n", __func__, dev->nodename);
+
+	if (info->kthread) {
+		kthread_stop(info->kthread);
+		info->kthread = NULL;
+	}
+
+	scsifront_free(info);
+
+	return 0;
+}
+
+/*
+ * Detach from the backend: unregister the scsi host and complete the
+ * xenbus close handshake.
+ */
+static int scsifront_disconnect(struct vscsifrnt_info *info)
+{
+	struct xenbus_device *dev = info->dev;
+	struct Scsi_Host *host = info->host;
+
+	DPRINTK("%s: %s disconnect\n", __FUNCTION__, dev->nodename);
+
+	/*
+	 * By the time this function runs, all LUNs of this frontend
+	 * have already been removed, so I/O does not need to be blocked
+	 * before calling scsi_remove_host().
+	 */
+
+	scsi_remove_host(host);
+	xenbus_frontend_closed(dev);
+
+	return 0;
+}
+
+/*
+ * Walk the backend's "vscsi-devs" XenStore directory and add or remove
+ * LUNs according to @op (VSCSIFRONT_OP_ADD_LUN / VSCSIFRONT_OP_DEL_LUN),
+ * acknowledging each transition by writing our own per-device state node.
+ */
+static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
+{
+	struct xenbus_device *dev = info->dev;
+	int i, err = 0;
+	char str[64], state_str[64];
+	char **dir;
+	unsigned int dir_n = 0;
+	unsigned int device_state;
+	unsigned int hst, chn, tgt, lun;
+	struct scsi_device *sdev;
+
+	dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
+	if (IS_ERR(dir))
+		return;
+
+	for (i = 0; i < dir_n; i++) {
+		/* read status */
+		snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
+		err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
+				   &device_state);
+		if (XENBUS_EXIST_ERR(err))
+			continue;
+
+		/* virtual SCSI device; hst is parsed but otherwise unused */
+		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
+		err = xenbus_scanf(XBT_NIL, dev->otherend, str,
+				   "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
+		if (XENBUS_EXIST_ERR(err))
+			continue;
+
+		/* front device state path */
+		snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state",
+			 dir[i]);
+
+		switch (op) {
+		case VSCSIFRONT_OP_ADD_LUN:
+			if (device_state == XenbusStateInitialised) {
+				sdev = scsi_device_lookup(info->host, chn, tgt,
+							  lun);
+				if (sdev) {
+					dev_err(&dev->dev,
+						"Device already in use.\n");
+					scsi_device_put(sdev);
+					xenbus_printf(XBT_NIL, dev->nodename,
+						      state_str, "%d",
+						      XenbusStateClosed);
+				} else {
+					scsi_add_device(info->host, chn, tgt,
+							lun);
+					xenbus_printf(XBT_NIL, dev->nodename,
+						      state_str, "%d",
+						      XenbusStateConnected);
+				}
+			}
+			break;
+		case VSCSIFRONT_OP_DEL_LUN:
+			if (device_state == XenbusStateClosing) {
+				sdev = scsi_device_lookup(info->host, chn, tgt,
+							  lun);
+				if (sdev) {
+					scsi_remove_device(sdev);
+					scsi_device_put(sdev);
+					xenbus_printf(XBT_NIL, dev->nodename,
+						      state_str, "%d",
+						      XenbusStateClosed);
+				}
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	kfree(dir);
+	return;
+}
+
+/*
+ * Xenbus otherend_changed callback: react to backend state changes by
+ * running LUN hotplug scans and mirroring the matching frontend state.
+ */
+static void scsifront_backend_changed(struct xenbus_device *dev,
+				      enum xenbus_state backend_state)
+{
+	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+	DPRINTK("%p %u %u\n", dev, dev->state, backend_state);
+
+	switch (backend_state) {
+	case XenbusStateUnknown:
+	case XenbusStateInitialising:
+	case XenbusStateInitWait:
+	case XenbusStateInitialised:
+		break;
+
+	case XenbusStateConnected:
+		if (xenbus_read_driver_state(dev->nodename) ==
+			XenbusStateInitialised) {
+			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+		}
+
+		if (dev->state != XenbusStateConnected)
+			xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateClosed:
+		if (dev->state == XenbusStateClosed)
+			break;
+		/* Missed the backend's Closing state -- fallthrough */
+	case XenbusStateClosing:
+		scsifront_disconnect(info);
+		break;
+
+	case XenbusStateReconfiguring:
+		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
+		xenbus_switch_state(dev, XenbusStateReconfiguring);
+		break;
+
+	case XenbusStateReconfigured:
+		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+		xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+	}
+}
+
+/* XenStore device type this driver binds to. */
+static const struct xenbus_device_id scsifront_ids[] = {
+	{ "vscsi" },
+	{ "" }
+};
+
+/* Xenbus driver glue; suspend/resume is not implemented yet. */
+static DEFINE_XENBUS_DRIVER(scsifront, ,
+	.probe			= scsifront_probe,
+	.remove			= scsifront_remove,
+/*	.resume			= scsifront_resume, */
+	.otherend_changed	= scsifront_backend_changed,
+);
+
+/* Module init: register the frontend only when running under Xen. */
+static int __init scsifront_init(void)
+{
+	if (!xen_domain())
+		return -ENODEV;
+
+	return xenbus_register_frontend(&scsifront_driver);
+}
+module_init(scsifront_init);
+
+/* Module exit: unregister from xenbus (triggers remove for all devices). */
+static void __exit scsifront_exit(void)
+{
+	xenbus_unregister_driver(&scsifront_driver);
+}
+module_exit(scsifront_exit);
+MODULE_DESCRIPTION("Xen SCSI frontend driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vscsi");
-- 
1.8.4.5

^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [PATCH 3/4] Introduce XEN scsiback module
  2014-06-27 14:34 Add XEN pvSCSI support jgross
                   ` (4 preceding siblings ...)
  2014-06-27 14:34 ` [PATCH 3/4] Introduce XEN scsiback module jgross
@ 2014-06-27 14:34 ` jgross
  2014-06-28 18:09   ` Christoph Hellwig
  2014-06-28 18:09   ` Christoph Hellwig
  2014-06-27 14:34 ` [PATCH 4/4] add xen pvscsi maintainer jgross
                   ` (2 subsequent siblings)
  8 siblings, 2 replies; 34+ messages in thread
From: jgross @ 2014-06-27 14:34 UTC (permalink / raw)
  To: JBottomley, linux-scsi, xen-devel; +Cc: Juergen Gross

From: Juergen Gross <jgross@suse.com>

Introduces the XEN pvSCSI backend. With pvSCSI it is possible for a XEN domU
to issue SCSI commands to a SCSI LUN assigned to that domU. The SCSI commands
are passed to the pvSCSI backend in a driver domain (usually Dom0) which is
owner of the physical device. This allows e.g. to use SCSI tape drives in a
XEN domU.

The code is taken from the pvSCSI implementation in XEN done by Fujitsu based
on Linux kernel 2.6.18.

Changes from the original version are:
- port to upstream kernel
- put all code in just one source file
- move module to appropriate location in kernel tree
- adapt to Linux style guide
- correct minor error in scsiback_fast_flush_area
- some minor code simplifications

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 drivers/scsi/Kconfig        |    7 +
 drivers/scsi/Makefile       |    1 +
 drivers/scsi/xen-scsiback.c | 1797 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 1805 insertions(+)
 create mode 100644 drivers/scsi/xen-scsiback.c

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 734d691..ccd8ba0 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -611,6 +611,13 @@ config VMWARE_PVSCSI
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmw_pvscsi.
 
+config XEN_SCSI_BACKEND
+	tristate "XEN SCSI backend driver"
+	depends on SCSI && XEN && XEN_BACKEND
+	help
+	  The SCSI backend driver allows the kernel to export its SCSI devices
+	  to other guests via a high-performance shared-memory interface.
+
 config XEN_SCSI_FRONTEND
 	tristate "XEN SCSI frontend driver"
 	depends on SCSI && XEN
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a4ee9c5..4cfcc3a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -156,6 +156,7 @@ obj-$(CONFIG_BLK_DEV_SR)	+= sr_mod.o
 obj-$(CONFIG_CHR_DEV_SG)	+= sg.o
 obj-$(CONFIG_CHR_DEV_SCH)	+= ch.o
 obj-$(CONFIG_SCSI_ENCLOSURE)	+= ses.o
+obj-$(CONFIG_XEN_SCSI_BACKEND)	+= xen-scsiback.o
 
 obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
 
diff --git a/drivers/scsi/xen-scsiback.c b/drivers/scsi/xen-scsiback.c
new file mode 100644
index 0000000..5b85e6f
--- /dev/null
+++ b/drivers/scsi/xen-scsiback.c
@@ -0,0 +1,1797 @@
+/*
+ * Xen SCSI backend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * Based on the blkback driver code.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+* Patched to support >2TB drives + allow tape & autoloader operations
+* 2010, Samuel Kvasnica, IMS Nanofabrication AG
+*/
+
+#include <stdarg.h>
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/gfp.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include <asm/hypervisor.h>
+
+#include <xen/xen.h>
+#include <xen/balloon.h>
+#include <xen/events.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/vscsiif.h>
+
+#define DPRINTK(_f, _a...)			\
+	pr_debug("(file=%s, line=%d) " _f,	\
+		 __FILE__ , __LINE__ , ## _a )
+
+/* Virtual SCSI address quadruple as seen by the guest. */
+struct ids_tuple {
+	unsigned int hst;		/* host    */
+	unsigned int chn;		/* channel */
+	unsigned int tgt;		/* target  */
+	unsigned int lun;		/* LUN     */
+};
+
+/* One virtual-to-physical translation: guest address -> scsi_device. */
+struct v2p_entry {
+	struct ids_tuple v;		/* translate from */
+	struct scsi_device *sdev;	/* translate to   */
+	struct list_head l;		/* entry in info->v2p_entry_lists */
+};
+
+/*
+ * Per-frontend backend state: xenbus device, shared response ring,
+ * event channel/irq, the v2p translation list and the service kthread.
+ */
+struct vscsibk_info {
+	struct xenbus_device *dev;
+
+	domid_t domid;
+	unsigned int evtchn;
+	unsigned int irq;
+
+	int feature;			/* VSCSI_TYPE_HOST skips rsp emulation */
+
+	struct vscsiif_back_ring  ring;
+
+	spinlock_t ring_lock;		/* protects the response ring */
+	atomic_t nr_unreplied_reqs;	/* in flight; see scsiback_get/put */
+
+	spinlock_t v2p_lock;		/* protects v2p_entry_lists */
+	struct list_head v2p_entry_lists;
+
+	struct task_struct *kthread;
+	wait_queue_head_t waiting_to_free;	/* woken when last req replied */
+	wait_queue_head_t wq;		/* service kthread sleeps here */
+	wait_queue_head_t shutdown_wq;
+	unsigned int waiting_reqs;	/* set by scsiback_notify_work() */
+	struct page **mmap_pages;	/* NOTE(review): unused in this chunk -- confirm */
+
+};
+
+/*
+ * One in-flight request from the frontend: the translated target
+ * device, the copied CDB and the grant references of the data
+ * segments.  Instances come from the global pool managed by
+ * alloc_req()/free_req().
+ */
+typedef struct {
+	unsigned char act;
+	struct vscsibk_info *info;
+	struct scsi_device *sdev;	/* physical device after v2p translation */
+
+	uint16_t rqid;			/* request id echoed back in the response */
+
+	uint16_t v_chn, v_tgt;		/* virtual channel/target from the ring */
+
+	uint8_t nr_segments;
+	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
+	uint8_t cmd_len;
+
+	uint8_t sc_data_direction;
+	uint16_t timeout_per_command;	/* seconds; 0 selects VSCSIIF_TIMEOUT */
+
+	uint32_t request_bufflen;
+	struct scatterlist *sgl;	/* freed in scsiback_fast_flush_area() */
+	grant_ref_t gref[VSCSIIF_SG_TABLESIZE];
+
+	int32_t rslt;
+	uint32_t resid;
+	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+
+	struct list_head free_list;	/* links into pending_free pool */
+} pending_req_t;
+
+/* Xenbus glue: ties the xenbus device to its vscsibk_info. */
+struct backend_info
+{
+	struct xenbus_device *dev;
+	struct vscsibk_info *info;
+};
+
+#define scsiback_get(_b) (atomic_inc(&(_b)->nr_unreplied_reqs))
+#define scsiback_put(_b)				\
+	do {						\
+		if (atomic_dec_and_test(&(_b)->nr_unreplied_reqs))	\
+			wake_up(&(_b)->waiting_to_free);\
+	} while (0)
+
+#define VSCSIIF_TIMEOUT		(900*HZ)
+
+#define VSCSI_TYPE_HOST		1
+
+/* Following SCSI commands are not defined in scsi/scsi.h */
+#define EXTENDED_COPY		0x83	/* EXTENDED COPY command        */
+#define REPORT_ALIASES		0xa3	/* REPORT ALIASES command       */
+#define CHANGE_ALIASES		0xa4	/* CHANGE ALIASES command       */
+#define SET_PRIORITY		0xa4	/* SET PRIORITY command         */
+
+/*
+  The bitmap in order to control emulation.
+  (Bit 3 to 7 are reserved for future use.)
+*/
+#define VSCSIIF_NEED_CMD_EXEC		0x01	/* cmd exec required */
+#define VSCSIIF_NEED_EMULATE_REQBUF	0x02	/* emul request buff before */
+						/* cmd exec. */
+#define VSCSIIF_NEED_EMULATE_RSPBUF	0x04	/* emul resp buff after	*/
+						/* cmd exec. */
+
+/* Additional Sense Code (ASC) used */
+#define NO_ADDITIONAL_SENSE		0x0
+#define LOGICAL_UNIT_NOT_READY		0x4
+#define UNRECOVERED_READ_ERR		0x11
+#define PARAMETER_LIST_LENGTH_ERR	0x1a
+#define INVALID_OPCODE			0x20
+#define ADDR_OUT_OF_RANGE		0x21
+#define INVALID_FIELD_IN_CDB		0x24
+#define INVALID_FIELD_IN_PARAM_LIST	0x26
+#define POWERON_RESET			0x29
+#define SAVING_PARAMS_UNSUP		0x39
+#define THRESHOLD_EXCEEDED		0x5d
+#define LOW_POWER_COND_ON		0x5e
+
+/* Number of SCSI op_codes	*/
+#define VSCSI_MAX_SCSI_OP_CODE		256
+static unsigned char bitmap[VSCSI_MAX_SCSI_OP_CODE];
+
+#define NO_EMULATE(cmd) \
+	bitmap[cmd] = VSCSIIF_NEED_CMD_EXEC; \
+	pre_function[cmd] = NULL; \
+	post_function[cmd] = NULL
+
+#define SCSIBACK_INVALID_HANDLE (~0)
+
+static bool log_print_stat;
+module_param(log_print_stat, bool, 0644);
+
+/*
+  Emulation routines for each SCSI op_code.
+*/
+static void (*pre_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
+static void (*post_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
+
+static pending_req_t *pending_reqs;
+static struct page **pending_pages;
+static grant_handle_t *pending_grant_handles;
+
+static const int check_condition_result =
+		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+struct list_head pending_free;
+DEFINE_SPINLOCK(pending_free_lock);
+DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
+
+static struct kmem_cache *scsiback_cachep;
+
+/* Index into pending_pages of the page backing segment @seg of @req. */
+static int vaddr_pagenr(pending_req_t *req, int seg)
+{
+	return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
+}
+
+/* Kernel virtual address of the mapped page for segment @seg of @req. */
+static unsigned long vaddr(pending_req_t *req, int seg)
+{
+	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
+	return (unsigned long)pfn_to_kaddr(pfn);
+}
+
+/* Fill @data with fixed-format (0x70) sense data: key / ASC / ASCQ. */
+static void scsiback_mk_sense_buffer(uint8_t *data, uint8_t key,
+			uint8_t asc, uint8_t asq)
+{
+	data[0] = 0x70;  /* fixed, current */
+	data[2] = key;
+	data[7] = 0xa;	  /* implies 18 byte sense buffer */
+	data[12] = asc;
+	data[13] = asq;
+}
+
+/* Default pre-emulation hook: reject the command with a CHECK
+   CONDITION, ILLEGAL_REQUEST / INVALID_OPCODE. */
+static void resp_not_supported_cmd(pending_req_t *pending_req, void *data)
+{
+	scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
+		INVALID_OPCODE, 0);
+	pending_req->resid = 0;
+	pending_req->rslt  = check_condition_result;
+}
+
+/*
+ * Copy @buflen bytes from the linear buffer @buf into the first
+ * @nr_sg entries of scatterlist @sgl.
+ * Returns 0 on success, -ENOMEM when a segment has no page or the
+ * scatterlist runs out of space before @buflen bytes are written.
+ */
+static int __copy_to_sg(struct scatterlist *sgl, unsigned int nr_sg,
+	       void *buf, unsigned int buflen)
+{
+	struct scatterlist *sg;
+	void *from = buf;
+	void *to;
+	unsigned int from_rest = buflen;
+	unsigned int to_capa;
+	unsigned int copy_size = 0;
+	unsigned int i;
+	unsigned long pfn;
+
+	for_each_sg (sgl, sg, nr_sg, i) {
+		if (sg_page(sg) == NULL) {
+			pr_warning("%s: inconsistent length field in "
+				   "scatterlist\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		to_capa  = sg->length;
+		copy_size = min_t(unsigned int, to_capa, from_rest);
+
+		pfn = page_to_pfn(sg_page(sg));
+		to = pfn_to_kaddr(pfn) + (sg->offset);
+		memcpy(to, from, copy_size);
+
+		from_rest  -= copy_size;
+		if (from_rest == 0)
+			return 0;
+
+		from += copy_size;
+	}
+
+	pr_warning("%s: no space in scatterlist\n", __FUNCTION__);
+	return -ENOMEM;
+}
+
+/*
+ * Copy the data of the first @nr_sg scatterlist entries into the
+ * linear buffer @buf (capacity @buflen).
+ * Returns 0 on success, -ENOMEM when a segment has no page or the
+ * destination buffer is too small for the next segment.
+ */
+static int __maybe_unused __copy_from_sg(struct scatterlist *sgl,
+					 unsigned int nr_sg, void *buf,
+					 unsigned int buflen)
+{
+	struct scatterlist *sg;
+	void *from;
+	void *to = buf;
+	unsigned int from_rest;
+	unsigned int to_capa = buflen;
+	unsigned int copy_size;
+	unsigned int i;
+	unsigned long pfn;
+
+	for_each_sg (sgl, sg, nr_sg, i) {
+		if (sg_page(sg) == NULL) {
+			pr_warning("%s: inconsistent length field in "
+				   "scatterlist\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		from_rest = sg->length;
+		if ((from_rest > 0) && (to_capa < from_rest)) {
+			pr_warning("%s: no space in destination buffer\n",
+				   __FUNCTION__);
+			return -ENOMEM;
+		}
+		copy_size = from_rest;
+
+		pfn = page_to_pfn(sg_page(sg));
+		from = pfn_to_kaddr(pfn) + (sg->offset);
+		memcpy(to, from, copy_size);
+
+		to_capa  -= copy_size;
+		to += copy_size;
+	}
+
+	return 0;
+}
+
+/* Count the v2p translation entries (= LUNs) registered for @info.
+   Takes info->v2p_lock. */
+static int __nr_luns_under_host(struct vscsibk_info *info)
+{
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+	int lun_cnt = 0;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, head, l) {
+		lun_cnt++;
+	}
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+	return (lun_cnt);
+}
+
+/* REPORT LUNS Define*/
+#define VSCSI_REPORT_LUNS_HEADER	8
+#define VSCSI_REPORT_LUNS_RETRY		3
+
+/* quoted scsi_debug.c/resp_report_luns() */
+/*
+ * Emulate the REPORT LUNS command (quoted from
+ * scsi_debug.c/resp_report_luns()): build the LUN list from the v2p
+ * translation entries matching the request's channel and target and
+ * copy it into the request's scatter-gather buffer.  Any failure is
+ * reported as CHECK CONDITION, ILLEGAL_REQUEST / INVALID_FIELD_IN_CDB.
+ */
+static void __report_luns(pending_req_t *pending_req, void *data)
+{
+	struct vscsibk_info *info = pending_req->info;
+	unsigned int nr_seg  = pending_req->nr_segments;
+	unsigned char *cmd = (unsigned char *)pending_req->cmnd;
+	unsigned char *buff = NULL;
+	/*
+	 * Bug fix: alloc_len was "unsigned char" and overflowed as soon
+	 * as 31 LUNs were present (8 * 31 + 8 == 256).
+	 */
+	unsigned int alloc_len;
+	unsigned int alloc_luns = 0;
+	unsigned int req_bufflen = 0;
+	unsigned int actual_len = 0;
+	unsigned int retry_cnt = 0;
+	int select_report = (int)cmd[2];
+	int i, lun_cnt = 0, lun, upper, err = 0;
+
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+
+	struct scsi_lun *one_lun;
+
+	/* ALLOCATION LENGTH is CDB bytes 6-9, big endian */
+	req_bufflen = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
+	if ((req_bufflen < 4) || (select_report != 0))
+		goto fail;
+
+retry:
+	/*
+	 * Bug fix: re-count the LUNs on every retry.  The retry label
+	 * used to sit below this computation, so a retry re-allocated
+	 * the same (too small) buffer and could never succeed.
+	 */
+	alloc_luns = __nr_luns_under_host(info);
+	alloc_len  = sizeof(struct scsi_lun) * alloc_luns
+				+ VSCSI_REPORT_LUNS_HEADER;
+	buff = kzalloc(alloc_len, GFP_KERNEL);
+	if (buff == NULL) {
+		pr_err("scsiback:%s kmalloc err\n", __FUNCTION__);
+		goto fail;
+	}
+
+	one_lun = (struct scsi_lun *) &buff[8];
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, head, l) {
+		if ((entry->v.chn == pending_req->v_chn) &&
+		    (entry->v.tgt == pending_req->v_tgt)) {
+
+			/* LUNs were added after counting: start over */
+			if (lun_cnt >= alloc_luns) {
+				spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+				if (retry_cnt < VSCSI_REPORT_LUNS_RETRY) {
+					retry_cnt++;
+					kfree(buff);
+					/* bug fix: reset for the fresh pass */
+					lun_cnt = 0;
+					goto retry;
+				}
+
+				goto fail;
+			}
+
+			lun = entry->v.lun;
+			upper = (lun >> 8) & 0x3f;
+			if (upper)
+				one_lun[lun_cnt].scsi_lun[0] = upper;
+			one_lun[lun_cnt].scsi_lun[1] = lun & 0xff;
+			lun_cnt++;
+		}
+	}
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+	/* LUN LIST LENGTH header field, big endian */
+	buff[2] = ((sizeof(struct scsi_lun) * lun_cnt) >> 8) & 0xff;
+	buff[3] = (sizeof(struct scsi_lun) * lun_cnt) & 0xff;
+
+	actual_len = lun_cnt * sizeof(struct scsi_lun)
+				+ VSCSI_REPORT_LUNS_HEADER;
+	req_bufflen = 0;
+	for (i = 0; i < nr_seg; i++)
+		req_bufflen += pending_req->sgl[i].length;
+
+	err = __copy_to_sg(pending_req->sgl, nr_seg, buff,
+				min(req_bufflen, actual_len));
+	if (err)
+		goto fail;
+
+	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
+	pending_req->rslt = 0x00;
+	pending_req->resid = req_bufflen - min(req_bufflen, actual_len);
+
+	kfree(buff);
+	return;
+
+fail:
+	scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
+		INVALID_FIELD_IN_CDB, 0);
+	pending_req->rslt  = check_condition_result;
+	pending_req->resid = 0;
+	kfree(buff);	/* kfree(NULL) is a no-op */
+	return;
+}
+
+/*
+ * Run the registered pre-emulation hook (if any) for the request's
+ * SCSI opcode.
+ *
+ * Returns 0 when no native driver call is needed (the emulation has
+ * already produced the response) and non-zero when the command must
+ * still be executed by the native driver, possibly after the hook
+ * modified the request buffer.
+ *
+ * Bug fix: declared static -- this file-local helper (only caller is
+ * scsiback_req_emulation_or_cmdexec) must not leak into the kernel's
+ * global namespace, matching every other helper in this file.
+ */
+static int __pre_do_emulation(pending_req_t *pending_req, void *data)
+{
+	uint8_t op_code = pending_req->cmnd[0];
+
+	if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_REQBUF) &&
+	    pre_function[op_code] != NULL)
+		pre_function[op_code](pending_req, data);
+
+	return !!(bitmap[op_code] & VSCSIIF_NEED_CMD_EXEC);
+}
+
+/* Run the registered post-emulation hook (if any) for the opcode. */
+static void scsiback_rsp_emulation(pending_req_t *pending_req)
+{
+	uint8_t op_code = pending_req->cmnd[0];
+
+	if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_RSPBUF) &&
+	    post_function[op_code] != NULL)
+		post_function[op_code](pending_req, NULL);
+
+	return;
+}
+
+/* quoted scsi_lib.c/scsi_bi_endio */
+/* bio completion: drop the reference taken at allocation.  The error
+   argument is deliberately ignored; the request's own completion
+   carries the status. */
+static void scsiback_bi_endio(struct bio *bio, int error)
+{
+	bio_put(bio);
+}
+
+/* quoted scsi_lib.c/scsi_req_map_sg . */
+/*
+ * Build a bio chain covering all scatter-gather segments of
+ * @pending_req against the target device's request queue (quoted from
+ * scsi_lib.c/scsi_req_map_sg).
+ * Returns the first bio of the chain, or ERR_PTR on failure, in which
+ * case all bios allocated so far are released again.
+ */
+static struct bio *request_map_sg(pending_req_t *pending_req)
+{
+	struct request_queue *q = pending_req->sdev->request_queue;
+	unsigned int nsegs = (unsigned int)pending_req->nr_segments;
+	unsigned int i, len, bytes, off, nr_pages, nr_vecs = 0;
+	struct scatterlist *sg;
+	struct page *page;
+	struct bio *bio = NULL, *bio_first = NULL, *bio_last = NULL;
+	int err;
+
+	for_each_sg (pending_req->sgl, sg, nsegs, i) {
+		page = sg_page(sg);
+		off = sg->offset;
+		len = sg->length;
+
+		nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		while (len > 0) {
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+			/* start a new bio when none is open */
+			if (!bio) {
+				nr_vecs = min_t(unsigned int, BIO_MAX_PAGES,
+						nr_pages);
+				nr_pages -= nr_vecs;
+				bio = bio_alloc(GFP_KERNEL, nr_vecs);
+				if (!bio) {
+					err = -ENOMEM;
+					goto free_bios;
+				}
+				bio->bi_end_io = scsiback_bi_endio;
+				if (bio_last)
+					bio_last->bi_next = bio;
+				else
+					bio_first = bio;
+				bio_last = bio;
+			}
+
+			if (bio_add_pc_page(q, bio, page, bytes, off) !=
+						bytes) {
+				err = -EINVAL;
+				goto free_bios;
+			}
+
+			/* bio full: close it and mark direction */
+			if (bio->bi_vcnt >= nr_vecs) {
+				bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+				if (pending_req->sc_data_direction == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+				bio = NULL;
+			}
+
+			page++;
+			len -= bytes;
+			off = 0;
+		}
+	}
+
+	return bio_first;
+
+free_bios:
+	/* unwind: release every bio chained so far */
+	while ((bio = bio_first) != NULL) {
+		bio_first = bio->bi_next;
+		bio_put(bio);
+	}
+
+	return ERR_PTR(err);
+}
+
+/* Log status/message/host/driver bytes of a failed command and dump
+   the sense data on CHECK CONDITION.
+   NOTE(review): sense_buffer points at a VSCSIIF_SENSE_BUFFERSIZE
+   byte buffer but SCSI_SENSE_BUFFERSIZE is passed -- confirm the
+   latter is not larger (potential over-read). */
+static void scsiback_print_status(char *sense_buffer, int errors,
+					pending_req_t *pending_req)
+{
+	struct scsi_device *sdev = pending_req->sdev;
+
+	pr_err("scsiback[%d:%d:%d:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
+	       sdev->host->host_no, sdev->channel, sdev->id, sdev->lun,
+	       pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
+	       host_byte(errors), driver_byte(errors));
+
+	if (CHECK_CONDITION & status_byte(errors))
+		__scsi_print_sense("scsiback", sense_buffer,
+				   SCSI_SENSE_BUFFERSIZE);
+}
+
+#define pending_handle(_req, _seg) \
+	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
+
+/*
+ * Unmap all grant-mapped data pages of @req (skipping entries that
+ * were never mapped) and free its scatterlist.  No-op when the
+ * request has no segments.
+ */
+static void scsiback_fast_flush_area(pending_req_t *req)
+{
+	struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
+	struct page *pages[VSCSIIF_SG_TABLESIZE];
+	unsigned int i, invcount = 0;
+	grant_handle_t handle;
+	int err;
+
+	if (req->nr_segments) {
+		for (i = 0; i < req->nr_segments; i++) {
+			handle = pending_handle(req, i);
+			if (handle == SCSIBACK_INVALID_HANDLE)
+				continue;
+			gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
+					    GNTMAP_host_map, handle);
+			pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
+			pages[invcount] = pending_pages[vaddr_pagenr(req, i)];
+			invcount++;
+		}
+
+		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
+		BUG_ON(err);
+		kfree(req->sgl);
+	}
+
+	return;
+}
+
+/* Return @req to the free pool; wake a waiter if the pool was empty. */
+static void free_req(pending_req_t *req)
+{
+	unsigned long flags;
+	int was_empty;
+
+	spin_lock_irqsave(&pending_free_lock, flags);
+	was_empty = list_empty(&pending_free);
+	list_add(&req->free_list, &pending_free);
+	spin_unlock_irqrestore(&pending_free_lock, flags);
+	if (was_empty)
+		wake_up(&pending_free_wq);
+}
+
+/*
+ * Queue a response on the shared ring and complete @pending_req.
+ *
+ * @sense_buffer: sense data to return to the frontend, or NULL.
+ * @result:       SCSI result code placed in the response.
+ * @resid:        residual (untransferred) byte count.
+ *
+ * Takes info->ring_lock for the ring manipulation and notifies the
+ * frontend via the event channel when the ring macros request it.
+ * The pending request is returned to the free pool afterwards.
+ */
+static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
+			uint32_t resid, pending_req_t *pending_req)
+{
+	vscsiif_response_t *ring_res;
+	struct vscsibk_info *info = pending_req->info;
+	int notify;
+	struct scsi_sense_hdr sshdr;
+	unsigned long flags;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	spin_lock_irqsave(&info->ring_lock, flags);
+
+	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
+	info->ring.rsp_prod_pvt++;
+
+	ring_res->rslt   = result;
+	ring_res->rqid   = pending_req->rqid;
+
+	if (sense_buffer != NULL) {
+		/*
+		 * Bug fix: sense_buffer is a pointer, so the former
+		 * sizeof(sense_buffer) evaluated to the pointer size
+		 * (4 or 8 bytes), not the sense data length.  Pass the
+		 * real buffer size instead.
+		 */
+		if (scsi_normalize_sense(sense_buffer,
+			VSCSIIF_SENSE_BUFFERSIZE, &sshdr)) {
+
+			/* 8 byte fixed header + additional sense length */
+			int len = 8 + sense_buffer[7];
+
+			if (len > VSCSIIF_SENSE_BUFFERSIZE)
+				len = VSCSIIF_SENSE_BUFFERSIZE;
+
+			memcpy(ring_res->sense_buffer, sense_buffer, len);
+			ring_res->sense_len = len;
+		}
+	} else {
+		ring_res->sense_len = 0;
+	}
+
+	ring_res->residual_len = resid;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
+	spin_unlock_irqrestore(&info->ring_lock, flags);
+
+	if (notify)
+		notify_remote_via_irq(info->irq);
+
+	free_req(pending_req);
+}
+
+/*
+ * Completion callback for blk_execute_rq_nowait(): run response
+ * emulation (unless in host mode), tear down the grant mappings and
+ * send the response to the frontend.
+ */
+static void scsiback_cmd_done(struct request *req, int uptodate)
+{
+	pending_req_t *pending_req = req->end_io_data;
+	unsigned char *sense_buffer;
+	unsigned int resid;
+	int errors;
+
+	sense_buffer = req->sense;
+	/* NOTE(review): blk_rq_bytes() is the request's byte count, not
+	   a residual -- confirm req->resid_len isn't meant here. */
+	resid        = blk_rq_bytes(req);
+	errors       = req->errors;
+
+	if (errors && log_print_stat)
+		scsiback_print_status(sense_buffer, errors, pending_req);
+
+	/* The Host mode is through as for Emulation. */
+	if (pending_req->info->feature != VSCSI_TYPE_HOST)
+		scsiback_rsp_emulation(pending_req);
+
+	scsiback_fast_flush_area(pending_req);
+	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
+	scsiback_put(pending_req->info);
+
+	__blk_put_request(req->q, req);
+}
+
+/*
+ * Map the request's segments into a bio chain (if any), build a
+ * REQ_TYPE_BLOCK_PC request from it and hand it to the block layer;
+ * the response is generated from scsiback_cmd_done().
+ *
+ * NOTE(review): on the error returns below no response is sent to the
+ * frontend and the grant mappings / pending_req are not released --
+ * confirm whether these paths need scsiback_fast_flush_area() +
+ * scsiback_do_resp_with_sense().
+ */
+static void scsiback_cmd_exec(pending_req_t *pending_req)
+{
+	int cmd_len  = (int)pending_req->cmd_len;
+	int data_dir = (int)pending_req->sc_data_direction;
+	unsigned int timeout;
+	struct bio *bio;
+	struct request *rq;
+	int write;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	/* because it doesn't timeout backend earlier than frontend.*/
+	if (pending_req->timeout_per_command)
+		timeout = pending_req->timeout_per_command * HZ;
+	else
+		timeout = VSCSIIF_TIMEOUT;
+
+	write = (data_dir == DMA_TO_DEVICE);
+	if (pending_req->nr_segments) {
+		bio = request_map_sg(pending_req);
+		if (IS_ERR(bio)) {
+			pr_err("scsiback: SG Request Map Error %ld\n",
+			       PTR_ERR(bio));
+			return;
+		}
+	} else {
+		bio = NULL;
+	}
+
+	if (bio) {
+		rq = blk_make_request(pending_req->sdev->request_queue, bio,
+				      GFP_KERNEL);
+		if (IS_ERR(rq)) {
+			pr_err("scsiback: Make Request Error %ld\n",
+			       PTR_ERR(rq));
+			return;
+		}
+	} else {
+		rq = blk_get_request(pending_req->sdev->request_queue, write,
+				     GFP_KERNEL);
+		if (unlikely(!rq)) {
+			pr_err("scsiback: Get Request Error\n");
+			return;
+		}
+	}
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_len = cmd_len;
+	memcpy(rq->cmd, pending_req->cmnd, cmd_len);
+
+	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
+	rq->sense       = pending_req->sense_buffer;
+	rq->sense_len = 0;
+
+	/* not allowed to retry in backend.                   */
+	rq->retries   = 0;
+	rq->timeout   = timeout;
+	rq->end_io_data = pending_req;
+
+	scsiback_get(pending_req->info);
+	blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);
+
+	return ;
+}
+
+/* Either pass the command to the native driver or complete it from
+   the emulation result, depending on the opcode's bitmap entry. */
+static void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req)
+{
+	if (__pre_do_emulation(pending_req, NULL)) {
+		scsiback_cmd_exec(pending_req);
+	}
+	else {
+		scsiback_fast_flush_area(pending_req);
+		scsiback_do_resp_with_sense(pending_req->sense_buffer,
+		  pending_req->rslt, pending_req->resid, pending_req);
+	}
+}
+
+
+/*
+  Following are not customizable functions.
+*/
+/*
+ * Build the per-opcode emulation tables: by default every opcode is
+ * rejected via resp_not_supported_cmd(); the NO_EMULATE() entries
+ * below pass the command straight to the native driver, and
+ * REPORT_LUNS is fully emulated by __report_luns().
+ */
+static void scsiback_emulation_init(void)
+{
+	int i;
+
+	/* Initialize to default state */
+	for (i = 0; i < VSCSI_MAX_SCSI_OP_CODE; i++) {
+		bitmap[i] = VSCSIIF_NEED_EMULATE_REQBUF |
+			    VSCSIIF_NEED_EMULATE_RSPBUF;
+		pre_function[i] = resp_not_supported_cmd;
+		post_function[i] = NULL;
+		/* means,
+		   - no need for pre-emulation
+		   - no need for post-emulation
+		   - call native driver
+		*/
+		/* NOTE(review): the comment above describes the
+		   NO_EMULATE() state, not the default just assigned
+		   (which rejects the command) -- confirm. */
+	}
+
+	/*
+	  Register appropriate functions below as you need.
+	  (See scsi/scsi.h for definition of SCSI op_code.)
+	*/
+
+	/*
+	  Following commands do not require emulation.
+	*/
+	NO_EMULATE(TEST_UNIT_READY);       /*0x00*/ /* sd,st */
+	NO_EMULATE(REZERO_UNIT);           /*0x01*/ /* st */
+	NO_EMULATE(REQUEST_SENSE);         /*0x03*/
+	NO_EMULATE(FORMAT_UNIT);           /*0x04*/
+	NO_EMULATE(READ_BLOCK_LIMITS);     /*0x05*/ /* st */
+	/*NO_EMULATE(REASSIGN_BLOCKS);       *//*0x07*/
+	NO_EMULATE(INITIALIZE_ELEMENT_STATUS); /*0x07*/ /* ch */
+	NO_EMULATE(READ_6);                /*0x08*/ /* sd,st */
+	NO_EMULATE(WRITE_6);               /*0x0a*/ /* sd,st */
+	NO_EMULATE(SEEK_6);                /*0x0b*/
+	/*NO_EMULATE(READ_REVERSE);          *//*0x0f*/
+	NO_EMULATE(WRITE_FILEMARKS);       /*0x10*/ /* st */
+	NO_EMULATE(SPACE);                 /*0x11*/ /* st */
+	NO_EMULATE(INQUIRY);               /*0x12*/
+	/*NO_EMULATE(RECOVER_BUFFERED_DATA); *//*0x14*/
+	NO_EMULATE(MODE_SELECT);           /*0x15*/ /* st */
+	NO_EMULATE(RESERVE);               /*0x16*/
+	NO_EMULATE(RELEASE);               /*0x17*/
+	/*NO_EMULATE(COPY);                  *//*0x18*/
+	NO_EMULATE(ERASE);                 /*0x19*/ /* st */
+	NO_EMULATE(MODE_SENSE);            /*0x1a*/ /* st */
+	NO_EMULATE(START_STOP);            /*0x1b*/ /* sd,st */
+	NO_EMULATE(RECEIVE_DIAGNOSTIC);    /*0x1c*/
+	NO_EMULATE(SEND_DIAGNOSTIC);       /*0x1d*/
+	NO_EMULATE(ALLOW_MEDIUM_REMOVAL);  /*0x1e*/
+
+	/*NO_EMULATE(SET_WINDOW);            *//*0x24*/
+	NO_EMULATE(READ_CAPACITY);         /*0x25*/ /* sd */
+	NO_EMULATE(READ_10);               /*0x28*/ /* sd */
+	NO_EMULATE(WRITE_10);              /*0x2a*/ /* sd */
+	NO_EMULATE(SEEK_10);               /*0x2b*/ /* st */
+	NO_EMULATE(POSITION_TO_ELEMENT);   /*0x2b*/ /* ch */
+	/*NO_EMULATE(WRITE_VERIFY);          *//*0x2e*/
+	/*NO_EMULATE(VERIFY);                *//*0x2f*/
+	/*NO_EMULATE(SEARCH_HIGH);           *//*0x30*/
+	/*NO_EMULATE(SEARCH_EQUAL);          *//*0x31*/
+	/*NO_EMULATE(SEARCH_LOW);            *//*0x32*/
+	NO_EMULATE(SET_LIMITS);            /*0x33*/
+	NO_EMULATE(PRE_FETCH);             /*0x34*/ /* st! */
+	NO_EMULATE(READ_POSITION);          /*0x34*/ /* st */
+	NO_EMULATE(SYNCHRONIZE_CACHE);      /*0x35*/ /* sd */
+	NO_EMULATE(LOCK_UNLOCK_CACHE);     /*0x36*/
+	NO_EMULATE(READ_DEFECT_DATA);      /*0x37*/
+	NO_EMULATE(MEDIUM_SCAN);           /*0x38*/
+	/*NO_EMULATE(COMPARE);               *//*0x39*/
+	/*NO_EMULATE(COPY_VERIFY);           *//*0x3a*/
+	NO_EMULATE(WRITE_BUFFER);          /*0x3b*/
+	NO_EMULATE(READ_BUFFER);           /*0x3c*/ /* osst */
+	/*NO_EMULATE(UPDATE_BLOCK);          *//*0x3d*/
+	/*NO_EMULATE(READ_LONG);             *//*0x3e*/
+	/*NO_EMULATE(WRITE_LONG);            *//*0x3f*/
+	/*NO_EMULATE(CHANGE_DEFINITION);     *//*0x40*/
+	/*NO_EMULATE(WRITE_SAME);            *//*0x41*/
+	NO_EMULATE(READ_TOC);              /*0x43*/ /* sr */
+	NO_EMULATE(LOG_SELECT);            /*0x4c*/
+	NO_EMULATE(LOG_SENSE);             /*0x4d*/ /* st! */
+	/*NO_EMULATE(MODE_SELECT_10);        *//*0x55*/
+	/*NO_EMULATE(RESERVE_10);            *//*0x56*/
+	/*NO_EMULATE(RELEASE_10);            *//*0x57*/
+	NO_EMULATE(MODE_SENSE_10);         /*0x5a*/ /* scsi_lib */
+	/*NO_EMULATE(PERSISTENT_RESERVE_IN); *//*0x5e*/
+	/*NO_EMULATE(PERSISTENT_RESERVE_OUT); *//*0x5f*/
+	/*           REPORT_LUNS             *//*0xa0*//*Full emulation*/
+#ifdef MAINTENANCE_IN
+	NO_EMULATE(MAINTENANCE_IN);           /*0xa3*/ /* IFT alua */
+	NO_EMULATE(MAINTENANCE_OUT);       /*0xa4*/ /* IFT alua */
+#endif
+	NO_EMULATE(MOVE_MEDIUM);           /*0xa5*/ /* ch */
+	NO_EMULATE(EXCHANGE_MEDIUM);       /*0xa6*/ /* ch */
+	/*NO_EMULATE(READ_12);               *//*0xa8*/
+	/*NO_EMULATE(WRITE_12);              *//*0xaa*/
+	/*NO_EMULATE(WRITE_VERIFY_12);       *//*0xae*/
+	/*NO_EMULATE(SEARCH_HIGH_12);        *//*0xb0*/
+	/*NO_EMULATE(SEARCH_EQUAL_12);       *//*0xb1*/
+	/*NO_EMULATE(SEARCH_LOW_12);         *//*0xb2*/
+	NO_EMULATE(READ_ELEMENT_STATUS);   /*0xb8*/ /* ch */
+	NO_EMULATE(SEND_VOLUME_TAG);       /*0xb6*/ /* ch */
+	/*NO_EMULATE(WRITE_LONG_2);          *//*0xea*/
+	NO_EMULATE(READ_16);               /*0x88*/ /* sd >2TB */
+	NO_EMULATE(WRITE_16);              /*0x8a*/ /* sd >2TB */
+	NO_EMULATE(VERIFY_16);	           /*0x8f*/
+	NO_EMULATE(SERVICE_ACTION_IN);     /*0x9e*/ /* sd >2TB */
+
+/* st: QFA_REQUEST_BLOCK, QFA_SEEK_BLOCK might be needed ? */
+	/*
+	  Following commands require emulation.
+	*/
+	pre_function[REPORT_LUNS] = __report_luns;
+	bitmap[REPORT_LUNS] = VSCSIIF_NEED_EMULATE_REQBUF |
+			      VSCSIIF_NEED_EMULATE_RSPBUF;
+
+	return;
+}
+
+/*
+ * Allocate and initialize a vscsibk_info for frontend domain @domid.
+ * Returns ERR_PTR(-ENOMEM) on allocation failure.
+ * NOTE(review): v2p_lock and v2p_entry_lists are not initialized
+ * here -- confirm they are set up elsewhere (not visible in this
+ * chunk).
+ */
+static struct vscsibk_info *vscsibk_info_alloc(domid_t domid)
+{
+	struct vscsibk_info *info;
+
+	info = kmem_cache_zalloc(scsiback_cachep, GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	info->domid = domid;
+	spin_lock_init(&info->ring_lock);
+	atomic_set(&info->nr_unreplied_reqs, 0);
+	init_waitqueue_head(&info->wq);
+	init_waitqueue_head(&info->shutdown_wq);
+	init_waitqueue_head(&info->waiting_to_free);
+
+	return info;
+}
+
+/* Flag pending ring work and wake the service kthread. */
+static void scsiback_notify_work(struct vscsibk_info *info)
+{
+	info->waiting_reqs = 1;
+	wake_up(&info->wq);
+}
+
+/* Event-channel interrupt handler: kick the service kthread. */
+static irqreturn_t scsiback_intr(int irq, void *dev_id)
+{
+	scsiback_notify_work((struct vscsibk_info *)dev_id);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Map the frontend's shared ring page (@ring_ref) and bind the
+ * interdomain event channel @evtchn to scsiback_intr().
+ * Returns 0 on success, a negative error from the mapping/binding
+ * calls, or -1 when a connection already exists (info->irq set).
+ */
+static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
+			evtchn_port_t evtchn)
+{
+	void *area;
+	struct vscsiif_sring *sring;
+	int err;
+
+	if (info->irq) {
+		pr_err("scsiback: Already connected through?\n");
+		return -1;
+	}
+
+	err = xenbus_map_ring_valloc(info->dev, ring_ref, &area);
+	if (err)
+		return err;
+
+	sring = (struct vscsiif_sring *)area;
+	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+	err = bind_interdomain_evtchn_to_irqhandler(
+			info->domid, evtchn,
+			scsiback_intr, 0, "vscsiif-backend", info);
+
+	if (err < 0)
+		goto unmap_page;
+
+	info->irq = err;
+
+	return 0;
+
+unmap_page:
+	xenbus_unmap_ring_vfree(info->dev, area);
+
+	return err;
+}
+
+/*
+ * Tear down the connection: stop the service kthread, wait until all
+ * in-flight requests have been replied, then release the irq and the
+ * shared ring mapping.
+ */
+static void scsiback_disconnect(struct vscsibk_info *info)
+{
+	if (info->kthread) {
+		kthread_stop(info->kthread);
+		info->kthread = NULL;
+		wake_up(&info->shutdown_wq);
+	}
+
+	wait_event(info->waiting_to_free,
+		atomic_read(&info->nr_unreplied_reqs) == 0);
+
+	if (info->irq) {
+		unbind_from_irqhandler(info->irq, info);
+		info->irq = 0;
+	}
+
+	if (info->ring.sring) {
+		xenbus_unmap_ring_vfree(info->dev, info->ring.sring);
+		info->ring.sring = NULL;
+	}
+}
+
+/* Release a vscsibk_info back to its kmem cache. */
+static void scsiback_free(struct vscsibk_info *info)
+{
+	kmem_cache_free(scsiback_cachep, info);
+}
+
+/* Create the kmem cache used for struct vscsibk_info allocations. */
+static int __init scsiback_interface_init(void)
+{
+	scsiback_cachep = kmem_cache_create("vscsiif_cache",
+		sizeof(struct vscsibk_info), 0, 0, NULL);
+	if (!scsiback_cachep) {
+		pr_err("scsiback: can't init scsi cache\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Destroy the vscsibk_info kmem cache. */
+static void scsiback_interface_exit(void)
+{
+	kmem_cache_destroy(scsiback_cachep);
+}
+
+static unsigned int vscsiif_reqs = 128;
+module_param_named(reqs, vscsiif_reqs, uint, 0);
+MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
+
+/* Take a request from the free pool; returns NULL when exhausted.
+   The @info argument is currently unused. */
+static pending_req_t * alloc_req(struct vscsibk_info *info)
+{
+	pending_req_t *req = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pending_free_lock, flags);
+	if (!list_empty(&pending_free)) {
+		req = list_entry(pending_free.next, pending_req_t, free_list);
+		list_del(&req->free_list);
+	}
+	spin_unlock_irqrestore(&pending_free_lock, flags);
+	return req;
+}
+
+/*
+ * Grant-map the frontend's data pages for @pending_req and build the
+ * backend-side scatterlist over them; sets request_bufflen to the
+ * total mapped length.  Returns 0 on success or -ENOMEM; when any
+ * segment fails validation, all mappings made so far are torn down
+ * via scsiback_fast_flush_area().
+ */
+static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
+					pending_req_t *pending_req)
+{
+	u32 flags;
+	int write;
+	int i, err = 0;
+	unsigned int data_len = 0;
+	struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
+	struct vscsibk_info *info   = pending_req->info;
+	struct page **pg;
+	int data_dir = (int)pending_req->sc_data_direction;
+	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
+
+	write = (data_dir == DMA_TO_DEVICE);
+
+	if (nr_segments) {
+		struct scatterlist *sg;
+
+		/* free of (sgl) in fast_flush_area()*/
+		pending_req->sgl = kmalloc(sizeof(struct scatterlist) *
+					   nr_segments, GFP_KERNEL);
+		if (!pending_req->sgl) {
+			pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		sg_init_table(pending_req->sgl, nr_segments);
+
+		/* frontend pages are mapped read-only for writes to disk */
+		flags = GNTMAP_host_map;
+		if (write)
+			flags |= GNTMAP_readonly;
+
+		for (i = 0; i < nr_segments; i++)
+			gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
+					  ring_req->seg[i].gref, info->domid);
+
+		pg = pending_pages + vaddr_pagenr(pending_req, 0);
+		err = gnttab_map_refs(map, NULL, pg, nr_segments);
+		BUG_ON(err);
+
+		for_each_sg (pending_req->sgl, sg, nr_segments, i) {
+
+			if (unlikely(map[i].status != GNTST_okay)) {
+				pr_err("scsiback: invalid buffer -- could not remap it\n");
+				map[i].handle = SCSIBACK_INVALID_HANDLE;
+				err |= 1;
+			}
+
+			pending_handle(pending_req, i) = map[i].handle;
+
+			if (err)
+				continue;
+
+			sg_set_page(sg, pg[i], ring_req->seg[i].length,
+				    ring_req->seg[i].offset);
+			data_len += sg->length;
+
+			/* reject segments that are not page-contained */
+			barrier();
+			if (sg->offset >= PAGE_SIZE ||
+			    sg->length > PAGE_SIZE ||
+			    sg->offset + sg->length > PAGE_SIZE)
+				err |= 1;
+
+		}
+
+		if (err)
+			goto fail_flush;
+	}
+
+	pending_req->request_bufflen = data_len;
+
+	return 0;
+
+fail_flush:
+	scsiback_fast_flush_area(pending_req);
+	return -ENOMEM;
+}
+
+/*
+ * Execute a device reset (VSCSIIF_ACT_SCSI_RESET) on behalf of the
+ * frontend and send the result back on the ring.  The info reference
+ * is held across the (potentially long-running) reset.
+ */
+static void scsiback_device_reset_exec(pending_req_t *pending_req)
+{
+	struct vscsibk_info *info = pending_req->info;
+	int err;
+	struct scsi_device *sdev = pending_req->sdev;
+
+	scsiback_get(info);
+	err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);
+
+	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
+	scsiback_put(info);
+
+	return;
+}
+
+/*
+  Perform virtual to physical translation: look up the scsi_device
+  registered for the frontend-visible (channel, target, lun) triple.
+  Returns NULL when no translation entry exists.  The returned pointer
+  is not separately ref-counted here; the reference is held by the
+  translation entry itself (taken in scsiback_get_scsi_device()).
+*/
+static struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
+			struct ids_tuple *v)
+{
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	struct scsi_device *sdev = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, head, l) {
+		if ((entry->v.chn == v->chn) &&
+		    (entry->v.tgt == v->tgt) &&
+		    (entry->v.lun == v->lun)) {
+			sdev = entry->sdev;
+			goto out;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return sdev;
+}
+
+/*
+ * Copy a request off the shared ring into pending_req and validate it.
+ *
+ * The ring page is writable by the frontend, so every value is first
+ * copied into backend-private storage and only then checked; the
+ * barrier() calls keep the compiler from re-reading the shared copy
+ * after validation (TOCTOU defense).
+ *
+ * Returns 0 on success, -ENODEV if no translation entry exists for the
+ * virtual device, -EINVAL for out-of-range request fields.
+ */
+static int prepare_pending_reqs(struct vscsibk_info *info,
+		vscsiif_request_t *ring_req, pending_req_t *pending_req)
+{
+	struct scsi_device *sdev;
+	struct ids_tuple vir;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	pending_req->rqid       = ring_req->rqid;
+	pending_req->act        = ring_req->act;
+
+	pending_req->info       = info;
+
+	pending_req->v_chn = vir.chn = ring_req->channel;
+	pending_req->v_tgt = vir.tgt = ring_req->id;
+	vir.lun = ring_req->lun;
+
+	rmb();
+	sdev = scsiback_do_translation(info, &vir);
+	if (!sdev) {
+		pending_req->sdev = NULL;
+		DPRINTK("scsiback: doesn't exist.\n");
+		return -ENODEV;
+	}
+	pending_req->sdev = sdev;
+
+	/* request range check from frontend */
+	pending_req->sc_data_direction = ring_req->sc_data_direction;
+	barrier();
+	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
+		(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
+		(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
+		(pending_req->sc_data_direction != DMA_NONE)) {
+		DPRINTK("scsiback: invalid parameter data_dir = %d\n",
+			pending_req->sc_data_direction);
+		return -EINVAL;
+	}
+
+	pending_req->nr_segments = ring_req->nr_segments;
+	barrier();
+	if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
+		DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
+			pending_req->nr_segments);
+		return -EINVAL;
+	}
+
+	pending_req->cmd_len = ring_req->cmd_len;
+	barrier();
+	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
+		DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
+			pending_req->cmd_len);
+		return -EINVAL;
+	}
+	/* cmd_len was validated above, so this copy is bounded. */
+	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
+
+	pending_req->timeout_per_command = ring_req->timeout_per_command;
+
+	if(scsiback_gnttab_data_map(ring_req, pending_req)) {
+		DPRINTK("scsiback: invalid buffer\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Consume and dispatch requests from the shared ring.
+ *
+ * Returns 1 if more work remains (ring not empty or no free pending
+ * request slot), 0 when the ring was drained, or -EACCES when the
+ * frontend published a bogus producer index, in which case ring
+ * processing is halted permanently (see scsiback_schedule()).
+ */
+static int _scsiback_do_cmd_fn(struct vscsibk_info *info)
+{
+	struct vscsiif_back_ring *ring = &info->ring;
+	vscsiif_request_t  *ring_req;
+
+	pending_req_t *pending_req;
+	RING_IDX rc, rp;
+	int err, more_to_do = 0;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	rc = ring->req_cons;
+	rp = ring->sring->req_prod;
+	/* Ensure we see requests up to rp before reading their contents. */
+	rmb();
+
+	if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
+		rc = ring->rsp_prod_pvt;
+		pr_warning("scsiback:"
+			" Dom%d provided bogus ring requests (%#x - %#x = %u)."
+			" Halting ring processing\n",
+			   info->domid, rp, rc, rp - rc);
+		return -EACCES;
+	}
+
+	while ((rc != rp)) {
+		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
+			break;
+		pending_req = alloc_req(info);
+		if (NULL == pending_req) {
+			/* No free slot: come back once a request completes. */
+			more_to_do = 1;
+			break;
+		}
+
+		ring_req = RING_GET_REQUEST(ring, rc);
+		ring->req_cons = ++rc;
+
+		err = prepare_pending_reqs(info, ring_req,
+						pending_req);
+		/*
+		 * Dispatch on the action unless validation failed; case
+		 * order in a switch is irrelevant, so -ENODEV below
+		 * default is valid C even if unusual to read.
+		 */
+		switch (err ?: pending_req->act) {
+		case VSCSIIF_ACT_SCSI_CDB:
+			/* The Host mode is through as for Emulation. */
+			if (info->feature == VSCSI_TYPE_HOST)
+				scsiback_cmd_exec(pending_req);
+			else
+				scsiback_req_emulation_or_cmdexec(pending_req);
+			break;
+		case VSCSIIF_ACT_SCSI_RESET:
+			scsiback_device_reset_exec(pending_req);
+			break;
+		default:
+			if(!err && printk_ratelimit())
+				pr_err("scsiback: invalid request\n");
+			scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
+						    0, pending_req);
+			break;
+		case -ENODEV:
+			scsiback_do_resp_with_sense(NULL, DID_NO_CONNECT << 16,
+						    0, pending_req);
+			break;
+		}
+
+		/* Yield point for this unbounded loop. */
+		cond_resched();
+	}
+
+	if (RING_HAS_UNCONSUMED_REQUESTS(ring))
+		more_to_do = 1;
+
+	return more_to_do;
+}
+
+/*
+ * Drain the ring, using RING_FINAL_CHECK_FOR_REQUESTS to close the
+ * race between seeing an empty ring and the frontend posting another
+ * request.  Return value semantics match _scsiback_do_cmd_fn().
+ */
+static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+{
+	int more_to_do;
+
+	do {
+		more_to_do = _scsiback_do_cmd_fn(info);
+		if (more_to_do)
+			break;
+
+		RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
+	} while (more_to_do);
+
+	return more_to_do;
+}
+
+/*
+ * Per-backend kernel thread main loop: sleep until the frontend kicks
+ * us (and a free pending request slot exists), then process the ring.
+ * On -EACCES (bogus ring) the thread parks until the device is torn
+ * down.
+ */
+static int scsiback_schedule(void *data)
+{
+	struct vscsibk_info *info = (struct vscsibk_info *)data;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(info->wq,
+			info->waiting_reqs || kthread_should_stop());
+		wait_event_interruptible(pending_free_wq,
+			!list_empty(&pending_free) || kthread_should_stop());
+
+		/* Clear the flag before processing so new kicks are seen. */
+		info->waiting_reqs = 0;
+		smp_mb();
+
+		switch (scsiback_do_cmd_fn(info)) {
+		case 1:
+			/* More work pending: re-arm and fall through. */
+			info->waiting_reqs = 1;
+		case 0:
+			break;
+		case -EACCES:
+			wait_event_interruptible(info->shutdown_wq,
+						 kthread_should_stop());
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Build the kthread name "vscsi.<domid>.<id>" into buf (at least
+ * TASK_COMM_LEN bytes).  NOTE(review): the sscanf result is unchecked,
+ * so 'id' would be used uninitialized if the nodename ever deviates
+ * from "backend/vscsi/%u/%u" -- confirm xenbus guarantees the format.
+ */
+static void __vscsiif_name(struct backend_info *be, char *buf)
+{
+	struct xenbus_device *dev = be->dev;
+	unsigned int domid, id;
+
+	sscanf(dev->nodename, "backend/vscsi/%u/%u", &domid, &id);
+	snprintf(buf, TASK_COMM_LEN, "vscsi.%u.%u", be->info->domid, id);
+}
+
+/*
+ * Connect to the frontend: read ring-ref and event-channel from the
+ * frontend's xenstore area, map the shared ring, and start the
+ * per-backend processing kthread.  Returns 0 or a negative errno.
+ */
+static int scsiback_map(struct backend_info *be)
+{
+	struct xenbus_device *dev = be->dev;
+	unsigned int ring_ref, evtchn;
+	int err;
+	char name[TASK_COMM_LEN];
+
+	err = xenbus_gather(XBT_NIL, dev->otherend,
+			"ring-ref", "%u", &ring_ref,
+			"event-channel", "%u", &evtchn, NULL);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
+		return err;
+	}
+
+	err = scsiback_init_sring(be->info, ring_ref, evtchn);
+	if (err)
+		return err;
+
+	__vscsiif_name(be, name);
+
+	be->info->kthread = kthread_run(scsiback_schedule, be->info, name);
+	if (IS_ERR(be->info->kthread)) {
+		err = PTR_ERR(be->info->kthread);
+		be->info->kthread = NULL;
+		xenbus_dev_error(be->dev, err, "start vscsiif");
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Look up the physical scsi_device for a host:channel:target:lun
+ * tuple.  The host reference is dropped before returning; the sdev
+ * reference obtained by scsi_device_lookup() is kept and later
+ * released via scsi_device_put() when the translation entry is
+ * removed.  Returns NULL if host or device does not exist.
+ */
+struct scsi_device *scsiback_get_scsi_device(struct ids_tuple *phy)
+{
+	struct Scsi_Host *shost;
+	struct scsi_device *sdev = NULL;
+
+	shost = scsi_host_lookup(phy->hst);
+	if (!shost) {
+		pr_err("scsiback: host%d doesn't exist\n", phy->hst);
+		return NULL;
+	}
+	sdev   = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun);
+	if (!sdev) {
+		pr_err("scsiback: %d:%d:%d:%d doesn't exist\n",
+		       phy->hst, phy->chn, phy->tgt, phy->lun);
+		scsi_host_put(shost);
+		return NULL;
+	}
+
+	scsi_host_put(shost);
+	return (sdev);
+}
+
+/*
+  Add a new translation entry mapping the virtual tuple v to sdev.
+  Returns 0 on success, -EEXIST if the virtual ID is already in use,
+  -ENOMEM on allocation failure.
+*/
+static int scsiback_add_translation_entry(struct vscsibk_info *info,
+			struct scsi_device *sdev, struct ids_tuple *v)
+{
+	int err = 0;
+	struct v2p_entry *entry;
+	struct v2p_entry *new;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+
+	/* Check double assignment to identical virtual ID */
+	list_for_each_entry(entry, head, l) {
+		if ((entry->v.chn == v->chn) &&
+		    (entry->v.tgt == v->tgt) &&
+		    (entry->v.lun == v->lun)) {
+			pr_warning("scsiback: Virtual ID is already used. "
+				   "Assignment was not performed.\n");
+			err = -EEXIST;
+			goto out;
+		}
+
+	}
+
+	/* Create a new translation entry and add to the list */
+	/* GFP_ATOMIC: allocation happens under the v2p spinlock. */
+	if ((new = kmalloc(sizeof(struct v2p_entry), GFP_ATOMIC)) == NULL) {
+		pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__);
+		err = -ENOMEM;
+		goto out;
+	}
+	new->v = *v;
+	new->sdev = sdev;
+	list_add_tail(&new->l, head);
+
+out:
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return err;
+}
+
+/*
+  Delete the translation entry specified by the virtual tuple v,
+  dropping the sdev reference the entry held.  Returns 0 on success,
+  1 if no matching entry was found.
+*/
+static int scsiback_del_translation_entry(struct vscsibk_info *info,
+				struct ids_tuple *v)
+{
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	/* Find out the translation entry specified */
+	list_for_each_entry(entry, head, l) {
+		if ((entry->v.chn == v->chn) &&
+		    (entry->v.tgt == v->tgt) &&
+		    (entry->v.lun == v->lun)) {
+			goto found;
+		}
+	}
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return 1;
+
+found:
+	/* Delete the translation entry specified */
+	scsi_device_put(entry->sdev);
+	list_del(&entry->l);
+	kfree(entry);
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return 0;
+}
+
+/*
+ * Hotplug-add one LUN: resolve the physical device, register the
+ * virtual->physical translation and publish the new device state in
+ * xenstore.  On any failure the state node is set to Closed and the
+ * sdev reference (if taken) is released.
+ */
+static void scsiback_do_add_lun(struct backend_info *be, const char *state,
+				struct ids_tuple *phy, struct ids_tuple *vir)
+{
+	struct scsi_device *sdev;
+
+	sdev = scsiback_get_scsi_device(phy);
+	if (!sdev) {
+		xenbus_printf(XBT_NIL, be->dev->nodename, state,
+			      "%d", XenbusStateClosed);
+		return;
+	}
+	if (!scsiback_add_translation_entry(be->info, sdev, vir)) {
+		if (xenbus_printf(XBT_NIL, be->dev->nodename, state,
+				  "%d", XenbusStateInitialised)) {
+			pr_err("scsiback: xenbus_printf error %s\n", state);
+			/* Roll back: the entry drop also puts the sdev. */
+			scsiback_del_translation_entry(be->info, vir);
+		}
+	} else {
+		scsi_device_put(sdev);
+		xenbus_printf(XBT_NIL, be->dev->nodename, state,
+			      "%d", XenbusStateClosed);
+	}
+}
+
+/*
+ * Hotplug-remove one LUN: drop its translation entry and mark the
+ * device state Closed in xenstore.
+ */
+static void scsiback_do_del_lun(struct backend_info *be, const char *state,
+				struct ids_tuple *vir)
+{
+	if (!scsiback_del_translation_entry(be->info, vir)) {
+		if (xenbus_printf(XBT_NIL, be->dev->nodename, state,
+				  "%d", XenbusStateClosed))
+			pr_err("scsiback: xenbus_printf error %s\n", state);
+	}
+}
+
+/* Operations passed to scsiback_do_{1,}lun_hotplug(). */
+#define VSCSIBACK_OP_ADD_OR_DEL_LUN	1
+#define VSCSIBACK_OP_UPDATEDEV_STATE	2
+
+/*
+ * Process one vscsi-devs/<ent> xenstore subtree: read its state and
+ * the physical (p-dev) and virtual (v-dev) device tuples, then either
+ * add/remove the LUN or move an Initialised device to Connected,
+ * depending on 'op'.  Malformed or missing nodes mark the device
+ * Closed.
+ */
+static void scsiback_do_1lun_hotplug(struct backend_info *be, int op, char *ent)
+{
+	int err;
+	struct ids_tuple phy, vir;
+	int device_state;
+	char str[64];
+	char state[64];
+	struct xenbus_device *dev = be->dev;
+
+	/* read status */
+	snprintf(state, sizeof(state), "vscsi-devs/%s/state", ent);
+	err = xenbus_scanf(XBT_NIL, dev->nodename, state, "%u", &device_state);
+	if (XENBUS_EXIST_ERR(err))
+		return;
+
+	/* physical SCSI device */
+	snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
+	err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
+			   &phy.hst, &phy.chn, &phy.tgt, &phy.lun);
+	if (XENBUS_EXIST_ERR(err)) {
+		xenbus_printf(XBT_NIL, dev->nodename, state,
+			      "%d", XenbusStateClosed);
+		return;
+	}
+
+	/* virtual SCSI device */
+	snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", ent);
+	err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
+			   &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
+	if (XENBUS_EXIST_ERR(err)) {
+		xenbus_printf(XBT_NIL, dev->nodename, state,
+			      "%d", XenbusStateClosed);
+		return;
+	}
+
+	switch (op) {
+	case VSCSIBACK_OP_ADD_OR_DEL_LUN:
+		if (device_state == XenbusStateInitialising)
+			scsiback_do_add_lun(be, state, &phy, &vir);
+		if (device_state == XenbusStateClosing)
+			scsiback_do_del_lun(be, state, &vir);
+		break;
+
+	case VSCSIBACK_OP_UPDATEDEV_STATE:
+		if (device_state == XenbusStateInitialised) {
+			/* modify vscsi-devs/dev-x/state */
+			if (xenbus_printf(XBT_NIL, dev->nodename, state,
+					  "%d", XenbusStateConnected)) {
+				pr_err("scsiback: xenbus_printf error %s\n",
+				       str);
+				scsiback_del_translation_entry(be->info, &vir);
+				xenbus_printf(XBT_NIL, dev->nodename, state,
+					      "%d", XenbusStateClosed);
+			}
+		}
+		break;
+	/*When it is necessary, processing is added here.*/
+	default:
+		break;
+	}
+}
+
+/*
+ * Apply 'op' to every entry under this backend's vscsi-devs xenstore
+ * directory.  A missing directory is silently ignored.
+ */
+static void scsiback_do_lun_hotplug(struct backend_info *be, int op)
+{
+	int i;
+	char **dir;
+	unsigned int ndir = 0;
+
+	dir = xenbus_directory(XBT_NIL, be->dev->nodename, "vscsi-devs", &ndir);
+	if (IS_ERR(dir))
+		return;
+
+	for (i = 0; i < ndir; i++)
+		scsiback_do_1lun_hotplug(be, op, dir[i]);
+
+	kfree(dir);
+	return ;
+}
+
+/*
+ * XenBus otherend_changed callback: drive the backend's state machine
+ * in response to frontend state transitions (connect, hotplug, close,
+ * reconfigure).
+ */
+static void scsiback_frontend_changed(struct xenbus_device *dev,
+					enum xenbus_state frontend_state)
+{
+	struct backend_info *be = dev_get_drvdata(&dev->dev);
+	int err;
+
+	switch (frontend_state) {
+	case XenbusStateInitialising:
+		break;
+	case XenbusStateInitialised:
+		/* Frontend is ready: map the ring and bring up LUNs. */
+		err = scsiback_map(be);
+		if (err)
+			break;
+
+		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
+		xenbus_switch_state(dev, XenbusStateConnected);
+
+		break;
+	case XenbusStateConnected:
+
+		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_UPDATEDEV_STATE);
+
+		if (dev->state == XenbusStateConnected)
+			break;
+
+		xenbus_switch_state(dev, XenbusStateConnected);
+
+		break;
+
+	case XenbusStateClosing:
+		scsiback_disconnect(be->info);
+		xenbus_switch_state(dev, XenbusStateClosing);
+		break;
+
+	case XenbusStateClosed:
+		xenbus_switch_state(dev, XenbusStateClosed);
+		if (xenbus_dev_is_online(dev))
+			break;
+		/* fall through if not online */
+	case XenbusStateUnknown:
+		device_unregister(&dev->dev);
+		break;
+
+	case XenbusStateReconfiguring:
+		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
+
+		xenbus_switch_state(dev, XenbusStateReconfigured);
+
+		break;
+
+	default:
+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+					frontend_state);
+		break;
+	}
+}
+
+/*
+  Release every translation entry of this backend, dropping the sdev
+  reference each entry holds.  Called on device removal.
+*/
+static void scsiback_release_translation_entry(struct vscsibk_info *info)
+{
+	struct v2p_entry *entry, *tmp;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry_safe(entry, tmp, head, l) {
+		scsi_device_put(entry->sdev);
+		list_del(&entry->l);
+		kfree(entry);
+	}
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return;
+
+}
+
+/*
+ * XenBus remove callback: disconnect from the frontend, free all
+ * translation entries and the backend bookkeeping.  Also used as the
+ * error-unwind path of scsiback_probe(), hence the be->info NULL check.
+ */
+static int scsiback_remove(struct xenbus_device *dev)
+{
+	struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+	if (be->info) {
+		scsiback_disconnect(be->info);
+		scsiback_release_translation_entry(be->info);
+		scsiback_free(be->info);
+		be->info = NULL;
+	}
+
+	kfree(be);
+	dev_set_drvdata(&dev->dev, NULL);
+
+	return 0;
+}
+
+/*
+  Initialize the translation entry list and its protecting lock.
+*/
+static void scsiback_init_translation_table(struct vscsibk_info *info)
+{
+	INIT_LIST_HEAD(&info->v2p_entry_lists);
+	spin_lock_init(&info->v2p_lock);
+}
+
+/*
+ * XenBus probe callback: allocate backend_info and vscsibk_info,
+ * read the optional "feature-host" node (pass-through mode) and move
+ * the device to InitWait.  Failure unwinds via scsiback_remove().
+ */
+static int scsiback_probe(struct xenbus_device *dev,
+			   const struct xenbus_device_id *id)
+{
+	int err;
+	unsigned val = 0;
+
+	struct backend_info *be = kzalloc(sizeof(struct backend_info),
+					  GFP_KERNEL);
+
+	DPRINTK("%p %d\n", dev, dev->otherend_id);
+
+	if (!be) {
+		xenbus_dev_fatal(dev, -ENOMEM,
+				 "allocating backend structure");
+		return -ENOMEM;
+	}
+	be->dev = dev;
+	dev_set_drvdata(&dev->dev, be);
+
+	be->info = vscsibk_info_alloc(dev->otherend_id);
+	if (IS_ERR(be->info)) {
+		err = PTR_ERR(be->info);
+		be->info = NULL;
+		xenbus_dev_fatal(dev, err, "creating scsihost interface");
+		goto fail;
+	}
+
+	be->info->dev = dev;
+	be->info->irq = 0;
+	be->info->feature = 0;	/*default not HOSTMODE.*/
+
+	scsiback_init_translation_table(be->info);
+
+	/* "feature-host" selects SCSI host pass-through (no emulation). */
+	err = xenbus_scanf(XBT_NIL, dev->nodename,
+			   "feature-host", "%d", &val);
+	if (XENBUS_EXIST_ERR(err))
+		val = 0;
+
+	if (val)
+		be->info->feature = VSCSI_TYPE_HOST;
+
+	err = xenbus_switch_state(dev, XenbusStateInitWait);
+	if (err)
+		goto fail;
+
+	return 0;
+
+fail:
+	pr_warning("scsiback: %s failed\n",__FUNCTION__);
+	scsiback_remove(dev);
+
+	return err;
+}
+
+/* XenBus device type this backend binds to ("vscsi"). */
+static const struct xenbus_device_id scsiback_ids[] = {
+	{ "vscsi" },
+	{ "" }
+};
+
+static DEFINE_XENBUS_DRIVER(scsiback, ,
+	.probe			= scsiback_probe,
+	.remove			= scsiback_remove,
+	.otherend_changed	= scsiback_frontend_changed
+);
+
+/*
+ * Module init: pre-allocate the pending request pool, grant handle
+ * array and ballooned pages, then register the xenbus backend.
+ *
+ * NOTE(review): the out_of_memory path calls free_xenballooned_pages()
+ * even when alloc_xenballooned_pages() itself failed or was never
+ * reached -- confirm that is safe for partially-initialized
+ * pending_pages.
+ */
+static int __init scsiback_init(void)
+{
+	int i, mmap_pages;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	/* One page per possible scatter-gather segment per request. */
+	mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;
+
+	pending_reqs          = kzalloc(sizeof(pending_reqs[0]) *
+					vscsiif_reqs, GFP_KERNEL);
+	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
+					mmap_pages, GFP_KERNEL);
+	pending_pages         = kcalloc(mmap_pages, sizeof(pending_pages[0]),
+					GFP_KERNEL);
+
+	if (!pending_reqs || !pending_grant_handles || !pending_pages)
+		goto out_of_memory;
+
+	if (alloc_xenballooned_pages(mmap_pages, pending_pages, 0))
+		goto out_of_memory;
+
+	for (i = 0; i < mmap_pages; i++)
+		pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;
+
+	if (scsiback_interface_init() < 0)
+		goto out_of_memory;
+
+	INIT_LIST_HEAD(&pending_free);
+
+	for (i = 0; i < vscsiif_reqs; i++)
+		list_add_tail(&pending_reqs[i].free_list, &pending_free);
+
+	if (xenbus_register_backend(&scsiback_driver))
+		goto out_interface;
+
+	scsiback_emulation_init();
+
+	return 0;
+
+out_interface:
+	scsiback_interface_exit();
+out_of_memory:
+	/* kfree(NULL) is a no-op, so partial allocation is handled. */
+	kfree(pending_reqs);
+	kfree(pending_grant_handles);
+	free_xenballooned_pages(mmap_pages, pending_pages);
+	kfree(pending_pages);
+	pr_err("scsiback: %s: out of memory\n", __FUNCTION__);
+	return -ENOMEM;
+}
+
+/*
+ * Module unload support is disabled (#if 0): the backend cannot be
+ * safely unloaded, so no module_exit handler is registered.
+ */
+#if 0
+static void __exit scsiback_exit(void)
+{
+	xenbus_unregister_driver(&scsiback_driver);
+	scsiback_interface_exit();
+	kfree(pending_reqs);
+	kfree(pending_grant_handles);
+	free_xenballooned_pages(vscsiif_reqs * VSCSIIF_SG_TABLESIZE,
+				pending_pages);
+	kfree(pending_pages);
+}
+#endif
+
+module_init(scsiback_init);
+
+#if 0
+module_exit(scsiback_exit);
+#endif
+
+MODULE_DESCRIPTION("Xen SCSI backend driver")
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:vscsi");
-- 
1.8.4.5


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [PATCH 3/4] Introduce XEN scsiback module
  2014-06-27 14:34 Add XEN pvSCSI support jgross
                   ` (3 preceding siblings ...)
  2014-06-27 14:34 ` jgross
@ 2014-06-27 14:34 ` jgross
  2014-06-27 14:34 ` jgross
                   ` (3 subsequent siblings)
  8 siblings, 0 replies; 34+ messages in thread
From: jgross @ 2014-06-27 14:34 UTC (permalink / raw)
  To: JBottomley, linux-scsi, xen-devel; +Cc: Juergen Gross

From: Juergen Gross <jgross@suse.com>

Introduces the XEN pvSCSI backend. With pvSCSI it is possible for a XEN domU
to issue SCSI commands to a SCSI LUN assigned to that domU. The SCSI commands
are passed to the pvSCSI backend in a driver domain (usually Dom0) which is
owner of the physical device. This allows e.g. to use SCSI tape drives in a
XEN domU.

The code is taken from the pvSCSI implementation in XEN done by Fujitsu based
on Linux kernel 2.6.18.

Changes from the original version are:
- port to upstream kernel
- put all code in just one source file
- move module to appropriate location in kernel tree
- adapt to Linux style guide
- correct minor error in scsiback_fast_flush_area
- some minor code simplifications

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 drivers/scsi/Kconfig        |    7 +
 drivers/scsi/Makefile       |    1 +
 drivers/scsi/xen-scsiback.c | 1797 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 1805 insertions(+)
 create mode 100644 drivers/scsi/xen-scsiback.c

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 734d691..ccd8ba0 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -611,6 +611,13 @@ config VMWARE_PVSCSI
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmw_pvscsi.
 
+config XEN_SCSI_BACKEND
+	tristate "XEN SCSI backend driver"
+	depends on SCSI && XEN && XEN_BACKEND
+	help
+	  The SCSI backend driver allows the kernel to export its SCSI Devices
+	  to other guests via a high-performance shared-memory interface.
+
 config XEN_SCSI_FRONTEND
 	tristate "XEN SCSI frontend driver"
 	depends on SCSI && XEN
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a4ee9c5..4cfcc3a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -156,6 +156,7 @@ obj-$(CONFIG_BLK_DEV_SR)	+= sr_mod.o
 obj-$(CONFIG_CHR_DEV_SG)	+= sg.o
 obj-$(CONFIG_CHR_DEV_SCH)	+= ch.o
 obj-$(CONFIG_SCSI_ENCLOSURE)	+= ses.o
+obj-$(CONFIG_XEN_SCSI_BACKEND)	+= xen-scsiback.o
 
 obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
 
diff --git a/drivers/scsi/xen-scsiback.c b/drivers/scsi/xen-scsiback.c
new file mode 100644
index 0000000..5b85e6f
--- /dev/null
+++ b/drivers/scsi/xen-scsiback.c
@@ -0,0 +1,1797 @@
+/*
+ * Xen SCSI backend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * Based on the blkback driver code.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+* Patched to support >2TB drives + allow tape & autoloader operations
+* 2010, Samuel Kvasnica, IMS Nanofabrication AG
+*/
+
+#include <stdarg.h>
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/gfp.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include <asm/hypervisor.h>
+
+#include <xen/xen.h>
+#include <xen/balloon.h>
+#include <xen/events.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/vscsiif.h>
+
+/* Debug logging helper: prefixes each message with file and line. */
+#define DPRINTK(_f, _a...)			\
+	pr_debug("(file=%s, line=%d) " _f,	\
+		 __FILE__ , __LINE__ , ## _a )
+
+/* SCSI device address as a host:channel:target:lun tuple. */
+struct ids_tuple {
+	unsigned int hst;		/* host    */
+	unsigned int chn;		/* channel */
+	unsigned int tgt;		/* target  */
+	unsigned int lun;		/* LUN     */
+};
+
+/* One virtual-to-physical translation; linked into v2p_entry_lists. */
+struct v2p_entry {
+	struct ids_tuple v;		/* translate from */
+	struct scsi_device *sdev;	/* translate to   */
+	struct list_head l;
+};
+
+/* Per-backend-device state: ring, locks, translation table, kthread. */
+struct vscsibk_info {
+	struct xenbus_device *dev;
+
+	domid_t domid;			/* frontend domain id */
+	unsigned int evtchn;
+	unsigned int irq;
+
+	int feature;			/* 0 or VSCSI_TYPE_HOST */
+
+	struct vscsiif_back_ring  ring;
+
+	spinlock_t ring_lock;
+	atomic_t nr_unreplied_reqs;	/* in-flight requests (scsiback_get/put) */
+
+	spinlock_t v2p_lock;		/* protects v2p_entry_lists */
+	struct list_head v2p_entry_lists;
+
+	struct task_struct *kthread;
+	wait_queue_head_t waiting_to_free;
+	wait_queue_head_t wq;		/* kthread wakeup on new requests */
+	wait_queue_head_t shutdown_wq;	/* parked here after bogus ring */
+	unsigned int waiting_reqs;
+	struct page **mmap_pages;
+
+};
+
+/* Backend-private copy of one in-flight frontend request. */
+typedef struct {
+	unsigned char act;		/* VSCSIIF_ACT_* action code */
+	struct vscsibk_info *info;
+	struct scsi_device *sdev;	/* physical device after translation */
+
+	uint16_t rqid;			/* frontend request id, echoed back */
+
+	uint16_t v_chn, v_tgt;		/* virtual channel/target */
+
+	uint8_t nr_segments;
+	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
+	uint8_t cmd_len;
+
+	uint8_t sc_data_direction;
+	uint16_t timeout_per_command;
+
+	uint32_t request_bufflen;	/* total mapped data length */
+	struct scatterlist *sgl;	/* built in scsiback_gnttab_data_map() */
+	grant_ref_t gref[VSCSIIF_SG_TABLESIZE];
+
+	int32_t rslt;			/* SCSI result for the response */
+	uint32_t resid;			/* residual byte count */
+	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+
+	struct list_head free_list;	/* link in global pending_free */
+} pending_req_t;
+
+/* Glue between the xenbus device and its vscsibk_info. */
+struct backend_info
+{
+	struct xenbus_device *dev;
+	struct vscsibk_info *info;
+};
+
+/* Track in-flight requests; the last put wakes waiting_to_free. */
+#define scsiback_get(_b) (atomic_inc(&(_b)->nr_unreplied_reqs))
+#define scsiback_put(_b)				\
+	do {						\
+		if (atomic_dec_and_test(&(_b)->nr_unreplied_reqs))	\
+			wake_up(&(_b)->waiting_to_free);\
+	} while (0)
+
+#define VSCSIIF_TIMEOUT		(900*HZ)
+
+#define VSCSI_TYPE_HOST		1
+
+/* Following SCSI commands are not defined in scsi/scsi.h */
+#define EXTENDED_COPY		0x83	/* EXTENDED COPY command        */
+#define REPORT_ALIASES		0xa3	/* REPORT ALIASES command       */
+#define CHANGE_ALIASES		0xa4	/* CHANGE ALIASES command       */
+#define SET_PRIORITY		0xa4	/* SET PRIORITY command         */
+
+/*
+  The bitmap in order to control emulation.
+  (Bit 3 to 7 are reserved for future use.)
+*/
+#define VSCSIIF_NEED_CMD_EXEC		0x01	/* cmd exec required */
+#define VSCSIIF_NEED_EMULATE_REQBUF	0x02	/* emul request buff before */
+						/* cmd exec. */
+#define VSCSIIF_NEED_EMULATE_RSPBUF	0x04	/* emul resp buff after	*/
+						/* cmd exec. */
+
+/* Additional Sense Code (ASC) used */
+#define NO_ADDITIONAL_SENSE		0x0
+#define LOGICAL_UNIT_NOT_READY		0x4
+#define UNRECOVERED_READ_ERR		0x11
+#define PARAMETER_LIST_LENGTH_ERR	0x1a
+#define INVALID_OPCODE			0x20
+#define ADDR_OUT_OF_RANGE		0x21
+#define INVALID_FIELD_IN_CDB		0x24
+#define INVALID_FIELD_IN_PARAM_LIST	0x26
+#define POWERON_RESET			0x29
+#define SAVING_PARAMS_UNSUP		0x39
+#define THRESHOLD_EXCEEDED		0x5d
+#define LOW_POWER_COND_ON		0x5e
+
+/* Number of SCSI op_code	*/
+#define VSCSI_MAX_SCSI_OP_CODE		256
+/* Per-opcode emulation control flags (VSCSIIF_NEED_*). */
+static unsigned char bitmap[VSCSI_MAX_SCSI_OP_CODE];
+
+/* Mark an opcode as pass-through: no pre/post emulation hooks. */
+#define NO_EMULATE(cmd) \
+	bitmap[cmd] = VSCSIIF_NEED_CMD_EXEC; \
+	pre_function[cmd] = NULL; \
+	post_function[cmd] = NULL
+
+#define SCSIBACK_INVALID_HANDLE (~0)
+
+static bool log_print_stat;
+module_param(log_print_stat, bool, 0644);
+
+/*
+  Emulation routines for each SCSI op_code.
+*/
+static void (*pre_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
+static void (*post_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
+
+/* Global pools shared by all backends, sized in scsiback_init(). */
+static pending_req_t *pending_reqs;
+static struct page **pending_pages;
+static grant_handle_t *pending_grant_handles;
+
+static const int check_condition_result =
+		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+/* NOTE(review): these three look file-local; consider 'static'. */
+struct list_head pending_free;
+DEFINE_SPINLOCK(pending_free_lock);
+DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
+
+static struct kmem_cache *scsiback_cachep;
+
+/* Index into the global pending_pages array for request 'req', segment 'seg'. */
+static int vaddr_pagenr(pending_req_t *req, int seg)
+{
+	return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
+}
+
+/* Kernel virtual address of the page backing segment 'seg' of 'req'. */
+static unsigned long vaddr(pending_req_t *req, int seg)
+{
+	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
+	return (unsigned long)pfn_to_kaddr(pfn);
+}
+
+/*
+ * Fill a fixed-format (0x70) SCSI sense buffer with the given sense
+ * key, additional sense code and qualifier.  'data' must be at least
+ * 18 bytes and pre-zeroed by the caller.
+ */
+static void scsiback_mk_sense_buffer(uint8_t *data, uint8_t key,
+			uint8_t asc, uint8_t asq)
+{
+	data[0] = 0x70;  /* fixed, current */
+	data[2] = key;
+	data[7] = 0xa;	  /* implies 18 byte sense buffer */
+	data[12] = asc;
+	data[13] = asq;
+}
+
+/* Build a CHECK CONDITION / ILLEGAL REQUEST (invalid opcode) response. */
+static void resp_not_supported_cmd(pending_req_t *pending_req, void *data)
+{
+	scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
+		INVALID_OPCODE, 0);
+	pending_req->resid = 0;
+	pending_req->rslt  = check_condition_result;
+}
+
+/*
+ * Copy 'buflen' bytes from the linear buffer 'buf' into the
+ * scatterlist, segment by segment.  Returns 0 once all bytes are
+ * copied, -ENOMEM if the scatterlist is too small or malformed.
+ */
+static int __copy_to_sg(struct scatterlist *sgl, unsigned int nr_sg,
+	       void *buf, unsigned int buflen)
+{
+	struct scatterlist *sg;
+	void *from = buf;
+	void *to;
+	unsigned int from_rest = buflen;
+	unsigned int to_capa;
+	unsigned int copy_size = 0;
+	unsigned int i;
+	unsigned long pfn;
+
+	for_each_sg (sgl, sg, nr_sg, i) {
+		if (sg_page(sg) == NULL) {
+			pr_warning("%s: inconsistent length field in "
+				   "scatterlist\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		to_capa  = sg->length;
+		copy_size = min_t(unsigned int, to_capa, from_rest);
+
+		pfn = page_to_pfn(sg_page(sg));
+		to = pfn_to_kaddr(pfn) + (sg->offset);
+		memcpy(to, from, copy_size);
+
+		from_rest  -= copy_size;
+		if (from_rest == 0)
+			return 0;
+
+		from += copy_size;
+	}
+
+	pr_warning("%s: no space in scatterlist\n", __FUNCTION__);
+	return -ENOMEM;
+}
+
+/*
+ * Copy the contents of the scatterlist into the linear buffer 'buf'
+ * (capacity 'buflen').  Returns 0 on success, -ENOMEM if the buffer
+ * is too small or the scatterlist is malformed.
+ */
+static int __maybe_unused __copy_from_sg(struct scatterlist *sgl,
+					 unsigned int nr_sg, void *buf,
+					 unsigned int buflen)
+{
+	struct scatterlist *sg;
+	void *from;
+	void *to = buf;
+	unsigned int from_rest;
+	unsigned int to_capa = buflen;
+	unsigned int copy_size;
+	unsigned int i;
+	unsigned long pfn;
+
+	for_each_sg (sgl, sg, nr_sg, i) {
+		if (sg_page(sg) == NULL) {
+			pr_warning("%s: inconsistent length field in "
+				   "scatterlist\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		from_rest = sg->length;
+		if ((from_rest > 0) && (to_capa < from_rest)) {
+			pr_warning("%s: no space in destination buffer\n",
+				   __FUNCTION__);
+			return -ENOMEM;
+		}
+		copy_size = from_rest;
+
+		pfn = page_to_pfn(sg_page(sg));
+		from = pfn_to_kaddr(pfn) + (sg->offset);
+		memcpy(to, from, copy_size);
+
+		to_capa  -= copy_size;
+		to += copy_size;
+	}
+
+	return 0;
+}
+
+/* Count the translation entries (LUNs) registered for this backend. */
+static int __nr_luns_under_host(struct vscsibk_info *info)
+{
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+	int lun_cnt = 0;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, head, l) {
+		lun_cnt++;
+	}
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+	return (lun_cnt);
+}
+
+/* REPORT LUNS Define*/
+#define VSCSI_REPORT_LUNS_HEADER	8
+#define VSCSI_REPORT_LUNS_RETRY		3
+
+/* quoted scsi_debug.c/resp_report_luns() */
+/*
+ * Emulate the REPORT LUNS command: build the LUN list from this
+ * backend's translation table and copy it into the request's
+ * scatterlist.  If new LUNs appear between sizing and filling the
+ * buffer, the allocation is retried up to VSCSI_REPORT_LUNS_RETRY
+ * times.  Any failure responds with ILLEGAL REQUEST / INVALID FIELD
+ * IN CDB.
+ *
+ * NOTE(review): alloc_len is 'unsigned char', so the computed length
+ * is truncated above 255 bytes (~30 LUNs) -- confirm intended limit.
+ */
+static void __report_luns(pending_req_t *pending_req, void *data)
+{
+	struct vscsibk_info *info = pending_req->info;
+	unsigned int nr_seg  = pending_req->nr_segments;
+	unsigned char *cmd = (unsigned char *)pending_req->cmnd;
+	unsigned char *buff = NULL;
+	unsigned char alloc_len;
+	unsigned int alloc_luns = 0;
+	unsigned int req_bufflen = 0;
+	unsigned int actual_len = 0;
+	unsigned int retry_cnt = 0;
+	int select_report = (int)cmd[2];
+	int i, lun_cnt = 0, lun, upper, err = 0;
+
+	struct v2p_entry *entry;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+
+	struct scsi_lun *one_lun;
+
+	/* Allocation length is big-endian in CDB bytes 6..9. */
+	req_bufflen = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
+	if ((req_bufflen < 4) || (select_report != 0))
+		goto fail;
+
+	alloc_luns = __nr_luns_under_host(info);
+	alloc_len  = sizeof(struct scsi_lun) * alloc_luns
+				+ VSCSI_REPORT_LUNS_HEADER;
+retry:
+	if ((buff = kzalloc(alloc_len, GFP_KERNEL)) == NULL) {
+		pr_err("scsiback:%s kmalloc err\n", __FUNCTION__);
+		goto fail;
+	}
+
+	one_lun = (struct scsi_lun *) &buff[8];
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, head, l) {
+		if ((entry->v.chn == pending_req->v_chn) &&
+		    (entry->v.tgt == pending_req->v_tgt)) {
+
+			/* check overflow */
+			if (lun_cnt >= alloc_luns) {
+				spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+				if (retry_cnt < VSCSI_REPORT_LUNS_RETRY) {
+					retry_cnt++;
+					if (buff)
+						kfree(buff);
+					goto retry;
+				}
+
+				goto fail;
+			}
+
+			lun = entry->v.lun;
+			upper = (lun >> 8) & 0x3f;
+			if (upper)
+				one_lun[lun_cnt].scsi_lun[0] = upper;
+			one_lun[lun_cnt].scsi_lun[1] = lun & 0xff;
+			lun_cnt++;
+		}
+	}
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+	/* LUN list length in header bytes 2..3 (big-endian). */
+	buff[2] = ((sizeof(struct scsi_lun) * lun_cnt) >> 8) & 0xff;
+	buff[3] = (sizeof(struct scsi_lun) * lun_cnt) & 0xff;
+
+	actual_len = lun_cnt * sizeof(struct scsi_lun)
+				+ VSCSI_REPORT_LUNS_HEADER;
+	req_bufflen = 0;
+	for (i = 0; i < nr_seg; i++)
+		req_bufflen += pending_req->sgl[i].length;
+
+	err = __copy_to_sg(pending_req->sgl, nr_seg, buff,
+				min(req_bufflen, actual_len));
+	if (err)
+		goto fail;
+
+	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
+	pending_req->rslt = 0x00;
+	pending_req->resid = req_bufflen - min(req_bufflen, actual_len);
+
+	kfree(buff);
+	return;
+
+fail:
+	scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
+		INVALID_FIELD_IN_CDB, 0);
+	pending_req->rslt  = check_condition_result;
+	pending_req->resid = 0;
+	if (buff)
+		kfree(buff);
+	return;
+}
+
+/*
+ * Run the registered pre-emulation hook for the request's opcode, if any.
+ *
+ * Return 0 when the command was handled entirely by emulation (the caller
+ * must respond immediately), 1 when the native driver must still be
+ * invoked (possibly on a request buffer modified by the hook).
+ */
+int __pre_do_emulation(pending_req_t *pending_req, void *data)
+{
+	uint8_t op = pending_req->cmnd[0];
+
+	if (pre_function[op] != NULL &&
+	    (bitmap[op] & VSCSIIF_NEED_EMULATE_REQBUF))
+		pre_function[op](pending_req, data);
+
+	return (bitmap[op] & VSCSIIF_NEED_CMD_EXEC) ? 1 : 0;
+}
+
+/*
+ * Run the registered response-buffer emulation hook for the request's
+ * opcode, if the opcode needs one and a hook is installed.
+ */
+static void scsiback_rsp_emulation(pending_req_t *pending_req)
+{
+	uint8_t op = pending_req->cmnd[0];
+
+	if (!(bitmap[op] & VSCSIIF_NEED_EMULATE_RSPBUF))
+		return;
+
+	if (post_function[op])
+		post_function[op](pending_req, NULL);
+}
+
+/*
+ * bio completion callback (quoted from scsi_lib.c/scsi_bi_endio): just
+ * drop the reference taken at allocation.  The @error argument is
+ * intentionally ignored; command completion is reported through
+ * scsiback_cmd_done() instead.
+ */
+static void scsiback_bi_endio(struct bio *bio, int error)
+{
+	bio_put(bio);
+}
+
+/*
+ * Build a chain of bios covering the request's scatterlist (quoted from
+ * scsi_lib.c/scsi_req_map_sg).
+ *
+ * Returns the head of the bio chain (linked via bi_next) on success, or
+ * an ERR_PTR() on failure; on failure every bio allocated so far is put.
+ */
+static struct bio *request_map_sg(pending_req_t *pending_req)
+{
+	struct request_queue *q = pending_req->sdev->request_queue;
+	unsigned int nsegs = (unsigned int)pending_req->nr_segments;
+	unsigned int i, len, bytes, off, nr_pages, nr_vecs = 0;
+	struct scatterlist *sg;
+	struct page *page;
+	struct bio *bio = NULL, *bio_first = NULL, *bio_last = NULL;
+	int err;
+
+	for_each_sg (pending_req->sgl, sg, nsegs, i) {
+		page = sg_page(sg);
+		off = sg->offset;
+		len = sg->length;
+
+		nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		while (len > 0) {
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+			/* open a new bio when none is in progress */
+			if (!bio) {
+				nr_vecs = min_t(unsigned int, BIO_MAX_PAGES,
+						nr_pages);
+				nr_pages -= nr_vecs;
+				bio = bio_alloc(GFP_KERNEL, nr_vecs);
+				if (!bio) {
+					err = -ENOMEM;
+					goto free_bios;
+				}
+				bio->bi_end_io = scsiback_bi_endio;
+				if (bio_last)
+					bio_last->bi_next = bio;
+				else
+					bio_first = bio;
+				bio_last = bio;
+			}
+
+			if (bio_add_pc_page(q, bio, page, bytes, off) !=
+						bytes) {
+				err = -EINVAL;
+				goto free_bios;
+			}
+
+			/* bio is full: finalize it and start fresh next time */
+			if (bio->bi_vcnt >= nr_vecs) {
+				bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+				if (pending_req->sc_data_direction == WRITE)
+					bio->bi_rw |= REQ_WRITE;
+				bio = NULL;
+			}
+
+			page++;
+			len -= bytes;
+			off = 0;
+		}
+	}
+
+	return bio_first;
+
+free_bios:
+	while ((bio = bio_first) != NULL) {
+		bio_first = bio->bi_next;
+		bio_put(bio);
+	}
+
+	return ERR_PTR(err);
+}
+
+/*
+ * Log the SCSI status of a failed command and, on CHECK CONDITION,
+ * decode and print the sense data.  Called only when log_print_stat
+ * is set.
+ */
+static void scsiback_print_status(char *sense_buffer, int errors,
+					pending_req_t *pending_req)
+{
+	struct scsi_device *sdev = pending_req->sdev;
+
+	pr_err("scsiback[%d:%d:%d:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
+	       sdev->host->host_no, sdev->channel, sdev->id, sdev->lun,
+	       pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
+	       host_byte(errors), driver_byte(errors));
+
+	/* NOTE(review): the sense buffer is sized VSCSIIF_SENSE_BUFFERSIZE
+	 * elsewhere in this file but decoded with SCSI_SENSE_BUFFERSIZE
+	 * here -- confirm the two constants agree. */
+	if (CHECK_CONDITION & status_byte(errors))
+		__scsi_print_sense("scsiback", sense_buffer,
+				   SCSI_SENSE_BUFFERSIZE);
+}
+
+#define pending_handle(_req, _seg) \
+	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
+
+/*
+ * Undo scsiback_gnttab_data_map(): unmap every still-mapped grant page
+ * of @req and free its scatterlist.  Safe to call for requests without
+ * data segments (nr_segments == 0) -- it is then a no-op.
+ */
+static void scsiback_fast_flush_area(pending_req_t *req)
+{
+	struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
+	struct page *pages[VSCSIIF_SG_TABLESIZE];
+	unsigned int i, invcount = 0;
+	grant_handle_t handle;
+	int err;
+
+	if (req->nr_segments) {
+		for (i = 0; i < req->nr_segments; i++) {
+			handle = pending_handle(req, i);
+			/* never mapped, or already unmapped */
+			if (handle == SCSIBACK_INVALID_HANDLE)
+				continue;
+			gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
+					    GNTMAP_host_map, handle);
+			pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
+			pages[invcount] = pending_pages[vaddr_pagenr(req, i)];
+			invcount++;
+		}
+
+		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
+		BUG_ON(err);
+		/* allocated in scsiback_gnttab_data_map() */
+		kfree(req->sgl);
+	}
+
+	return;
+}
+
+/*
+ * Return a finished request to the global free list.  If the list was
+ * empty, wake the worker thread(s) blocked in the pending_free_wq wait
+ * queue waiting for a free slot.
+ */
+static void free_req(pending_req_t *req)
+{
+	unsigned long flags;
+	int empty;
+
+	spin_lock_irqsave(&pending_free_lock, flags);
+	empty = list_empty(&pending_free);
+	list_add(&req->free_list, &pending_free);
+	spin_unlock_irqrestore(&pending_free_lock, flags);
+
+	if (empty)
+		wake_up(&pending_free_wq);
+}
+
+/*
+ * Queue a response for @pending_req on the shared ring (copying up to
+ * VSCSIIF_SENSE_BUFFERSIZE bytes of sense data when present), notify
+ * the frontend if needed and recycle the request slot.
+ */
+static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
+			uint32_t resid, pending_req_t *pending_req)
+{
+	vscsiif_response_t *ring_res;
+	struct vscsibk_info *info = pending_req->info;
+	int notify;
+	struct scsi_sense_hdr sshdr;
+	unsigned long flags;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	spin_lock_irqsave(&info->ring_lock, flags);
+
+	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
+	info->ring.rsp_prod_pvt++;
+
+	ring_res->rslt   = result;
+	ring_res->rqid   = pending_req->rqid;
+
+	if (sense_buffer != NULL) {
+		/*
+		 * sense_buffer is a pointer parameter, so the previous
+		 * sizeof(sense_buffer) only yielded the pointer size and
+		 * truncated the sense scan; pass the real buffer length.
+		 */
+		if (scsi_normalize_sense(sense_buffer,
+			VSCSIIF_SENSE_BUFFERSIZE, &sshdr)) {
+
+			/* fixed-format sense: 8 header bytes + additional
+			 * sense length in byte 7 */
+			int len = 8 + sense_buffer[7];
+
+			if (len > VSCSIIF_SENSE_BUFFERSIZE)
+				len = VSCSIIF_SENSE_BUFFERSIZE;
+
+			memcpy(ring_res->sense_buffer, sense_buffer, len);
+			ring_res->sense_len = len;
+		}
+	} else {
+		ring_res->sense_len = 0;
+	}
+
+	ring_res->residual_len = resid;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
+	spin_unlock_irqrestore(&info->ring_lock, flags);
+
+	if (notify)
+		notify_remote_via_irq(info->irq);
+
+	free_req(pending_req);
+}
+
+/*
+ * Completion callback for blk_execute_rq_nowait(): run response-buffer
+ * emulation when applicable, unmap the granted pages, push the response
+ * to the frontend and release the block request.
+ */
+static void scsiback_cmd_done(struct request *req, int uptodate)
+{
+	pending_req_t *pending_req = req->end_io_data;
+	unsigned char *sense_buffer;
+	unsigned int resid;
+	int errors;
+
+	sense_buffer = req->sense;
+	/* NOTE(review): blk_rq_bytes() is used as the residual count --
+	 * confirm it reflects untransferred bytes at completion time. */
+	resid        = blk_rq_bytes(req);
+	errors       = req->errors;
+
+	if (errors && log_print_stat)
+		scsiback_print_status(sense_buffer, errors, pending_req);
+
+	/* The Host mode is through as for Emulation. */
+	if (pending_req->info->feature != VSCSI_TYPE_HOST)
+		scsiback_rsp_emulation(pending_req);
+
+	scsiback_fast_flush_area(pending_req);
+	/* this also frees pending_req via free_req() */
+	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
+	scsiback_put(pending_req->info);
+
+	__blk_put_request(req->q, req);
+}
+
+/*
+ * Hand a prepared request to the native SCSI stack as a BLOCK_PC request.
+ *
+ * On failure to map the scatterlist or to allocate the block request the
+ * frontend now receives a DRIVER_ERROR response; previously these paths
+ * returned silently, leaking the pending_req (and its grant mappings) and
+ * leaving the frontend waiting forever for a reply.
+ */
+static void scsiback_cmd_exec(pending_req_t *pending_req)
+{
+	int cmd_len  = (int)pending_req->cmd_len;
+	int data_dir = (int)pending_req->sc_data_direction;
+	unsigned int timeout;
+	struct bio *bio;
+	struct request *rq;
+	int write;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	/* because it doesn't timeout backend earlier than frontend.*/
+	if (pending_req->timeout_per_command)
+		timeout = pending_req->timeout_per_command * HZ;
+	else
+		timeout = VSCSIIF_TIMEOUT;
+
+	write = (data_dir == DMA_TO_DEVICE);
+	if (pending_req->nr_segments) {
+		bio = request_map_sg(pending_req);
+		if (IS_ERR(bio)) {
+			pr_err("scsiback: SG Request Map Error %ld\n",
+			       PTR_ERR(bio));
+			goto fail;
+		}
+	} else {
+		bio = NULL;
+	}
+
+	if (bio) {
+		rq = blk_make_request(pending_req->sdev->request_queue, bio,
+				      GFP_KERNEL);
+		if (IS_ERR(rq)) {
+			pr_err("scsiback: Make Request Error %ld\n",
+			       PTR_ERR(rq));
+			goto fail;
+		}
+	} else {
+		rq = blk_get_request(pending_req->sdev->request_queue, write,
+				     GFP_KERNEL);
+		if (unlikely(!rq)) {
+			pr_err("scsiback: Get Request Error\n");
+			goto fail;
+		}
+	}
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_len = cmd_len;
+	memcpy(rq->cmd, pending_req->cmnd, cmd_len);
+
+	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
+	rq->sense       = pending_req->sense_buffer;
+	rq->sense_len = 0;
+
+	/* not allowed to retry in backend.                   */
+	rq->retries   = 0;
+	rq->timeout   = timeout;
+	rq->end_io_data = pending_req;
+
+	scsiback_get(pending_req->info);
+	blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);
+
+	return;
+
+fail:
+	/* unmap granted pages and report the failure to the frontend;
+	   scsiback_do_resp_with_sense() also frees pending_req */
+	scsiback_fast_flush_area(pending_req);
+	scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0, pending_req);
+}
+
+/*
+ * Either answer the request purely from the emulation layer, or pass it
+ * on to the native driver (after possible request-buffer emulation).
+ */
+static void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req)
+{
+	if (!__pre_do_emulation(pending_req, NULL)) {
+		/* fully emulated: the response is already prepared */
+		scsiback_fast_flush_area(pending_req);
+		scsiback_do_resp_with_sense(pending_req->sense_buffer,
+		  pending_req->rslt, pending_req->resid, pending_req);
+		return;
+	}
+
+	scsiback_cmd_exec(pending_req);
+}
+
+
+/*
+  Following are not customizable functions.
+*/
+/*
+ * Build the per-opcode emulation tables (bitmap[], pre_function[],
+ * post_function[]).  Every opcode defaults to "reject via emulation"
+ * (resp_not_supported_cmd); the NO_EMULATE() entries below are passed
+ * straight to the native driver; REPORT LUNS is fully emulated by
+ * __report_luns().
+ */
+static void scsiback_emulation_init(void)
+{
+	int i;
+
+	/* Initialize to default state */
+	for (i = 0; i < VSCSI_MAX_SCSI_OP_CODE; i++) {
+		bitmap[i] = VSCSIIF_NEED_EMULATE_REQBUF |
+			    VSCSIIF_NEED_EMULATE_RSPBUF;
+		pre_function[i] = resp_not_supported_cmd;
+		post_function[i] = NULL;
+		/* means,
+		   - no need for pre-emulation
+		   - no need for post-emulation
+		   - call native driver
+		*/
+	}
+
+	/*
+	  Register appropriate functions below as you need.
+	  (See scsi/scsi.h for definition of SCSI op_code.)
+	*/
+
+	/*
+	  Following commands do not require emulation.
+	*/
+	NO_EMULATE(TEST_UNIT_READY);       /*0x00*/ /* sd,st */
+	NO_EMULATE(REZERO_UNIT);           /*0x01*/ /* st */
+	NO_EMULATE(REQUEST_SENSE);         /*0x03*/
+	NO_EMULATE(FORMAT_UNIT);           /*0x04*/
+	NO_EMULATE(READ_BLOCK_LIMITS);     /*0x05*/ /* st */
+	/*NO_EMULATE(REASSIGN_BLOCKS);       *//*0x07*/
+	NO_EMULATE(INITIALIZE_ELEMENT_STATUS); /*0x07*/ /* ch */
+	NO_EMULATE(READ_6);                /*0x08*/ /* sd,st */
+	NO_EMULATE(WRITE_6);               /*0x0a*/ /* sd,st */
+	NO_EMULATE(SEEK_6);                /*0x0b*/
+	/*NO_EMULATE(READ_REVERSE);          *//*0x0f*/
+	NO_EMULATE(WRITE_FILEMARKS);       /*0x10*/ /* st */
+	NO_EMULATE(SPACE);                 /*0x11*/ /* st */
+	NO_EMULATE(INQUIRY);               /*0x12*/
+	/*NO_EMULATE(RECOVER_BUFFERED_DATA); *//*0x14*/
+	NO_EMULATE(MODE_SELECT);           /*0x15*/ /* st */
+	NO_EMULATE(RESERVE);               /*0x16*/
+	NO_EMULATE(RELEASE);               /*0x17*/
+	/*NO_EMULATE(COPY);                  *//*0x18*/
+	NO_EMULATE(ERASE);                 /*0x19*/ /* st */
+	NO_EMULATE(MODE_SENSE);            /*0x1a*/ /* st */
+	NO_EMULATE(START_STOP);            /*0x1b*/ /* sd,st */
+	NO_EMULATE(RECEIVE_DIAGNOSTIC);    /*0x1c*/
+	NO_EMULATE(SEND_DIAGNOSTIC);       /*0x1d*/
+	NO_EMULATE(ALLOW_MEDIUM_REMOVAL);  /*0x1e*/
+
+	/*NO_EMULATE(SET_WINDOW);            *//*0x24*/
+	NO_EMULATE(READ_CAPACITY);         /*0x25*/ /* sd */
+	NO_EMULATE(READ_10);               /*0x28*/ /* sd */
+	NO_EMULATE(WRITE_10);              /*0x2a*/ /* sd */
+	NO_EMULATE(SEEK_10);               /*0x2b*/ /* st */
+	NO_EMULATE(POSITION_TO_ELEMENT);   /*0x2b*/ /* ch */
+	/*NO_EMULATE(WRITE_VERIFY);          *//*0x2e*/
+	/*NO_EMULATE(VERIFY);                *//*0x2f*/
+	/*NO_EMULATE(SEARCH_HIGH);           *//*0x30*/
+	/*NO_EMULATE(SEARCH_EQUAL);          *//*0x31*/
+	/*NO_EMULATE(SEARCH_LOW);            *//*0x32*/
+	NO_EMULATE(SET_LIMITS);            /*0x33*/
+	/* NOTE(review): PRE_FETCH and READ_POSITION are both listed as
+	 * opcode 0x34 -- the second NO_EMULATE is redundant if the two
+	 * macros expand to the same value; confirm against scsi.h. */
+	NO_EMULATE(PRE_FETCH);             /*0x34*/ /* st! */
+	NO_EMULATE(READ_POSITION);          /*0x34*/ /* st */
+	NO_EMULATE(SYNCHRONIZE_CACHE);      /*0x35*/ /* sd */
+	NO_EMULATE(LOCK_UNLOCK_CACHE);     /*0x36*/
+	NO_EMULATE(READ_DEFECT_DATA);      /*0x37*/
+	NO_EMULATE(MEDIUM_SCAN);           /*0x38*/
+	/*NO_EMULATE(COMPARE);               *//*0x39*/
+	/*NO_EMULATE(COPY_VERIFY);           *//*0x3a*/
+	NO_EMULATE(WRITE_BUFFER);          /*0x3b*/
+	NO_EMULATE(READ_BUFFER);           /*0x3c*/ /* osst */
+	/*NO_EMULATE(UPDATE_BLOCK);          *//*0x3d*/
+	/*NO_EMULATE(READ_LONG);             *//*0x3e*/
+	/*NO_EMULATE(WRITE_LONG);            *//*0x3f*/
+	/*NO_EMULATE(CHANGE_DEFINITION);     *//*0x40*/
+	/*NO_EMULATE(WRITE_SAME);            *//*0x41*/
+	NO_EMULATE(READ_TOC);              /*0x43*/ /* sr */
+	NO_EMULATE(LOG_SELECT);            /*0x4c*/
+	NO_EMULATE(LOG_SENSE);             /*0x4d*/ /* st! */
+	/*NO_EMULATE(MODE_SELECT_10);        *//*0x55*/
+	/*NO_EMULATE(RESERVE_10);            *//*0x56*/
+	/*NO_EMULATE(RELEASE_10);            *//*0x57*/
+	NO_EMULATE(MODE_SENSE_10);         /*0x5a*/ /* scsi_lib */
+	/*NO_EMULATE(PERSISTENT_RESERVE_IN); *//*0x5e*/
+	/*NO_EMULATE(PERSISTENT_RESERVE_OUT); *//*0x5f*/
+	/*           REPORT_LUNS             *//*0xa0*//*Full emulaiton*/
+#ifdef MAINTENANCE_IN
+	NO_EMULATE(MAINTENANCE_IN);           /*0xa3*/ /* IFT alua */
+	NO_EMULATE(MAINTENANCE_OUT);       /*0xa4*/ /* IFT alua */
+#endif
+	NO_EMULATE(MOVE_MEDIUM);           /*0xa5*/ /* ch */
+	NO_EMULATE(EXCHANGE_MEDIUM);       /*0xa6*/ /* ch */
+	/*NO_EMULATE(READ_12);               *//*0xa8*/
+	/*NO_EMULATE(WRITE_12);              *//*0xaa*/
+	/*NO_EMULATE(WRITE_VERIFY_12);       *//*0xae*/
+	/*NO_EMULATE(SEARCH_HIGH_12);        *//*0xb0*/
+	/*NO_EMULATE(SEARCH_EQUAL_12);       *//*0xb1*/
+	/*NO_EMULATE(SEARCH_LOW_12);         *//*0xb2*/
+	NO_EMULATE(READ_ELEMENT_STATUS);   /*0xb8*/ /* ch */
+	NO_EMULATE(SEND_VOLUME_TAG);       /*0xb6*/ /* ch */
+	/*NO_EMULATE(WRITE_LONG_2);          *//*0xea*/
+	NO_EMULATE(READ_16);               /*0x88*/ /* sd >2TB */
+	NO_EMULATE(WRITE_16);              /*0x8a*/ /* sd >2TB */
+	NO_EMULATE(VERIFY_16);	           /*0x8f*/
+	NO_EMULATE(SERVICE_ACTION_IN);     /*0x9e*/ /* sd >2TB */
+
+/* st: QFA_REQUEST_BLOCK, QFA_SEEK_BLOCK might be needed ? */
+	/*
+	  Following commands require emulation.
+	*/
+	pre_function[REPORT_LUNS] = __report_luns;
+	bitmap[REPORT_LUNS] = VSCSIIF_NEED_EMULATE_REQBUF |
+			      VSCSIIF_NEED_EMULATE_RSPBUF;
+
+	return;
+}
+
+/*
+ * Allocate and minimally initialise a backend instance for frontend
+ * domain @domid.  Returns ERR_PTR(-ENOMEM) on allocation failure.
+ */
+static struct vscsibk_info *vscsibk_info_alloc(domid_t domid)
+{
+	struct vscsibk_info *info = kmem_cache_zalloc(scsiback_cachep,
+						      GFP_KERNEL);
+
+	if (info == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	info->domid = domid;
+	atomic_set(&info->nr_unreplied_reqs, 0);
+	spin_lock_init(&info->ring_lock);
+	init_waitqueue_head(&info->wq);
+	init_waitqueue_head(&info->shutdown_wq);
+	init_waitqueue_head(&info->waiting_to_free);
+
+	return info;
+}
+
+/* Flag pending ring work and wake the per-frontend worker thread. */
+static void scsiback_notify_work(struct vscsibk_info *info)
+{
+	info->waiting_reqs = 1;
+	wake_up(&info->wq);
+}
+
+/*
+ * Event-channel interrupt handler: all real work is deferred to the
+ * scsiback_schedule() kernel thread.
+ */
+static irqreturn_t scsiback_intr(int irq, void *dev_id)
+{
+	scsiback_notify_work((struct vscsibk_info *)dev_id);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Map the frontend's shared ring page (grant @ring_ref) and bind the
+ * interdomain event channel @evtchn to scsiback_intr().
+ * Returns 0 on success, a negative error otherwise.
+ */
+static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
+			evtchn_port_t evtchn)
+{
+	void *area;
+	struct vscsiif_sring *sring;
+	int err;
+
+	/* NOTE(review): returns a bare -1 instead of an errno value here */
+	if (info->irq) {
+		pr_err("scsiback: Already connected through?\n");
+		return -1;
+	}
+
+	err = xenbus_map_ring_valloc(info->dev, ring_ref, &area);
+	if (err)
+		return err;
+
+	sring = (struct vscsiif_sring *)area;
+	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+	err = bind_interdomain_evtchn_to_irqhandler(
+			info->domid, evtchn,
+			scsiback_intr, 0, "vscsiif-backend", info);
+
+	if (err < 0)
+		goto unmap_page;
+
+	/* on success the returned value is the bound irq number */
+	info->irq = err;
+
+	return 0;
+
+unmap_page:
+	xenbus_unmap_ring_vfree(info->dev, area);
+
+	return err;
+}
+
+/*
+ * Tear down the connection to the frontend: stop the worker thread,
+ * wait until every in-flight request has been answered, then release
+ * the event channel and unmap the shared ring.
+ */
+static void scsiback_disconnect(struct vscsibk_info *info)
+{
+	if (info->kthread) {
+		kthread_stop(info->kthread);
+		info->kthread = NULL;
+		/* unpark a thread sleeping after an -EACCES ring error */
+		wake_up(&info->shutdown_wq);
+	}
+
+	/* nr_unreplied_reqs is dropped by scsiback_put() on completion */
+	wait_event(info->waiting_to_free,
+		atomic_read(&info->nr_unreplied_reqs) == 0);
+
+	if (info->irq) {
+		unbind_from_irqhandler(info->irq, info);
+		info->irq = 0;
+	}
+
+	if (info->ring.sring) {
+		xenbus_unmap_ring_vfree(info->dev, info->ring.sring);
+		info->ring.sring = NULL;
+	}
+}
+
+/* Release a vscsibk_info allocated by vscsibk_info_alloc(). */
+static void scsiback_free(struct vscsibk_info *info)
+{
+	kmem_cache_free(scsiback_cachep, info);
+}
+
+/*
+ * Create the slab cache used for per-frontend vscsibk_info structures.
+ * Returns 0 on success, -ENOMEM when the cache cannot be created.
+ */
+static int __init scsiback_interface_init(void)
+{
+	scsiback_cachep = kmem_cache_create("vscsiif_cache",
+			sizeof(struct vscsibk_info), 0, 0, NULL);
+	if (scsiback_cachep)
+		return 0;
+
+	pr_err("scsiback: can't init scsi cache\n");
+	return -ENOMEM;
+}
+
+/* Destroy the vscsibk_info slab cache created by scsiback_interface_init(). */
+static void scsiback_interface_exit(void)
+{
+	kmem_cache_destroy(scsiback_cachep);
+}
+
+static unsigned int vscsiif_reqs = 128;
+module_param_named(reqs, vscsiif_reqs, uint, 0);
+MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
+
+/*
+ * Take one pending_req_t off the global free list, or return NULL when
+ * the pool is exhausted (the caller then retries later).
+ */
+static pending_req_t *alloc_req(struct vscsibk_info *info)
+{
+	unsigned long flags;
+	pending_req_t *req;
+
+	spin_lock_irqsave(&pending_free_lock, flags);
+	if (list_empty(&pending_free)) {
+		req = NULL;
+	} else {
+		req = list_entry(pending_free.next, pending_req_t, free_list);
+		list_del(&req->free_list);
+	}
+	spin_unlock_irqrestore(&pending_free_lock, flags);
+
+	return req;
+}
+
+/*
+ * Map the frontend's granted data pages for this request and build the
+ * local scatterlist (released again by scsiback_fast_flush_area()).
+ * Returns 0 on success, -ENOMEM on allocation or mapping failure.
+ */
+static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
+					pending_req_t *pending_req)
+{
+	u32 flags;
+	int write;
+	int i, err = 0;
+	unsigned int data_len = 0;
+	struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
+	struct vscsibk_info *info   = pending_req->info;
+	struct page **pg;
+	int data_dir = (int)pending_req->sc_data_direction;
+	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
+
+	write = (data_dir == DMA_TO_DEVICE);
+
+	if (nr_segments) {
+		struct scatterlist *sg;
+
+		/* free of (sgl) in fast_flush_area()*/
+		pending_req->sgl = kmalloc(sizeof(struct scatterlist) *
+					   nr_segments, GFP_KERNEL);
+		if (!pending_req->sgl) {
+			pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__);
+			return -ENOMEM;
+		}
+
+		sg_init_table(pending_req->sgl, nr_segments);
+
+		/* data flows to the device: map frontend pages read-only */
+		flags = GNTMAP_host_map;
+		if (write)
+			flags |= GNTMAP_readonly;
+
+		for (i = 0; i < nr_segments; i++)
+			gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
+					  ring_req->seg[i].gref, info->domid);
+
+		pg = pending_pages + vaddr_pagenr(pending_req, 0);
+		err = gnttab_map_refs(map, NULL, pg, nr_segments);
+		BUG_ON(err);
+
+		/* record every handle first, then validate; invalid handles
+		 * are skipped by fast_flush_area on the error path */
+		for_each_sg (pending_req->sgl, sg, nr_segments, i) {
+
+			if (unlikely(map[i].status != GNTST_okay)) {
+				pr_err("scsiback: invalid buffer -- could not remap it\n");
+				map[i].handle = SCSIBACK_INVALID_HANDLE;
+				err |= 1;
+			}
+
+			pending_handle(pending_req, i) = map[i].handle;
+
+			if (err)
+				continue;
+
+			sg_set_page(sg, pg[i], ring_req->seg[i].length,
+				    ring_req->seg[i].offset);
+			data_len += sg->length;
+
+			/* re-read the frontend-supplied values once before
+			   checking them: a segment must fit in one page */
+			barrier();
+			if (sg->offset >= PAGE_SIZE ||
+			    sg->length > PAGE_SIZE ||
+			    sg->offset + sg->length > PAGE_SIZE)
+				err |= 1;
+
+		}
+
+		if (err)
+			goto fail_flush;
+	}
+
+	pending_req->request_bufflen = data_len;
+
+	return 0;
+
+fail_flush:
+	/* NOTE(review): mapping/validation errors also surface as -ENOMEM */
+	scsiback_fast_flush_area(pending_req);
+	return -ENOMEM;
+}
+
+/*
+ * Execute a device reset (VSCSIIF_ACT_SCSI_RESET) synchronously and
+ * report the outcome to the frontend.
+ */
+static void scsiback_device_reset_exec(pending_req_t *pending_req)
+{
+	struct vscsibk_info *info = pending_req->info;
+	struct scsi_device *sdev = pending_req->sdev;
+	int rslt;
+
+	scsiback_get(info);
+	rslt = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);
+	scsiback_do_resp_with_sense(NULL, rslt, 0, pending_req);
+	scsiback_put(info);
+}
+
+/*
+ * Translate a virtual (channel, target, lun) triple to the physical
+ * scsi_device it is mapped to, or NULL when no mapping exists.
+ */
+static struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
+			struct ids_tuple *v)
+{
+	struct scsi_device *sdev = NULL;
+	struct v2p_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, &info->v2p_entry_lists, l) {
+		if (entry->v.chn == v->chn && entry->v.tgt == v->tgt &&
+		    entry->v.lun == v->lun) {
+			sdev = entry->sdev;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+	return sdev;
+}
+
+/*
+ * Fill a pending_req_t from a shared-ring request and validate every
+ * frontend-supplied field.  Returns 0 on success, -ENODEV when the
+ * virtual device has no translation, -EINVAL for out-of-range values
+ * or a failed buffer mapping.
+ *
+ * The barrier()s force each field to be read from the shared ring once
+ * before it is range-checked, so the frontend cannot change a value
+ * between check and use.
+ */
+static int prepare_pending_reqs(struct vscsibk_info *info,
+		vscsiif_request_t *ring_req, pending_req_t *pending_req)
+{
+	struct scsi_device *sdev;
+	struct ids_tuple vir;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	pending_req->rqid       = ring_req->rqid;
+	pending_req->act        = ring_req->act;
+
+	pending_req->info       = info;
+
+	pending_req->v_chn = vir.chn = ring_req->channel;
+	pending_req->v_tgt = vir.tgt = ring_req->id;
+	vir.lun = ring_req->lun;
+
+	rmb();
+	sdev = scsiback_do_translation(info, &vir);
+	if (!sdev) {
+		pending_req->sdev = NULL;
+		DPRINTK("scsiback: doesn't exist.\n");
+		return -ENODEV;
+	}
+	pending_req->sdev = sdev;
+
+	/* request range check from frontend */
+	pending_req->sc_data_direction = ring_req->sc_data_direction;
+	barrier();
+	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
+		(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
+		(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
+		(pending_req->sc_data_direction != DMA_NONE)) {
+		DPRINTK("scsiback: invalid parameter data_dir = %d\n",
+			pending_req->sc_data_direction);
+		return -EINVAL;
+	}
+
+	pending_req->nr_segments = ring_req->nr_segments;
+	barrier();
+	if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
+		DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
+			pending_req->nr_segments);
+		return -EINVAL;
+	}
+
+	pending_req->cmd_len = ring_req->cmd_len;
+	barrier();
+	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
+		DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
+			pending_req->cmd_len);
+		return -EINVAL;
+	}
+	/* copy only after cmd_len was validated above */
+	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
+
+	pending_req->timeout_per_command = ring_req->timeout_per_command;
+
+	if(scsiback_gnttab_data_map(ring_req, pending_req)) {
+		DPRINTK("scsiback: invalid buffer\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Consume requests from the shared ring.  Returns 1 when more work
+ * remains (ring not drained or request pool exhausted), 0 when the ring
+ * was drained, -EACCES when the frontend published a bogus producer
+ * index (ring processing is halted until disconnect).
+ */
+static int _scsiback_do_cmd_fn(struct vscsibk_info *info)
+{
+	struct vscsiif_back_ring *ring = &info->ring;
+	vscsiif_request_t  *ring_req;
+
+	pending_req_t *pending_req;
+	RING_IDX rc, rp;
+	int err, more_to_do = 0;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	rc = ring->req_cons;
+	rp = ring->sring->req_prod;
+	/* make sure we see all requests up to rp before reading them */
+	rmb();
+
+	if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
+		rc = ring->rsp_prod_pvt;
+		pr_warning("scsiback:"
+			" Dom%d provided bogus ring requests (%#x - %#x = %u)."
+			" Halting ring processing\n",
+			   info->domid, rp, rc, rp - rc);
+		return -EACCES;
+	}
+
+	while ((rc != rp)) {
+		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
+			break;
+		/* request pool exhausted: retry after free_req() wakes us */
+		pending_req = alloc_req(info);
+		if (NULL == pending_req) {
+			more_to_do = 1;
+			break;
+		}
+
+		ring_req = RING_GET_REQUEST(ring, rc);
+		ring->req_cons = ++rc;
+
+		err = prepare_pending_reqs(info, ring_req,
+						pending_req);
+		/* dispatch on the error code if any, else on the action */
+		switch (err ?: pending_req->act) {
+		case VSCSIIF_ACT_SCSI_CDB:
+			/* The Host mode is through as for Emulation. */
+			if (info->feature == VSCSI_TYPE_HOST)
+				scsiback_cmd_exec(pending_req);
+			else
+				scsiback_req_emulation_or_cmdexec(pending_req);
+			break;
+		case VSCSIIF_ACT_SCSI_RESET:
+			scsiback_device_reset_exec(pending_req);
+			break;
+		default:
+			if(!err && printk_ratelimit())
+				pr_err("scsiback: invalid request\n");
+			scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
+						    0, pending_req);
+			break;
+		case -ENODEV:
+			/* no translation: tell the frontend it's gone */
+			scsiback_do_resp_with_sense(NULL, DID_NO_CONNECT << 16,
+						    0, pending_req);
+			break;
+		}
+
+		/* Yield point for this unbounded loop. */
+		cond_resched();
+	}
+
+	if (RING_HAS_UNCONSUMED_REQUESTS(ring))
+		more_to_do = 1;
+
+	return more_to_do;
+}
+
+/*
+ * Drain the request ring, re-checking for late arrivals before giving
+ * up.  Returns 1 when work is still pending, 0 when fully drained, or
+ * -EACCES when the frontend supplied a bogus producer index.
+ */
+static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+{
+	int rc;
+
+	for (;;) {
+		rc = _scsiback_do_cmd_fn(info);
+		if (rc)
+			break;
+		RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, rc);
+		if (!rc)
+			break;
+	}
+
+	return rc;
+}
+
+/*
+ * Per-frontend kernel thread: wait for ring notifications and for a
+ * free request slot, then process the ring.  After an -EACCES (bogus
+ * ring indexes) the thread parks until the backend is torn down.
+ */
+static int scsiback_schedule(void *data)
+{
+	struct vscsibk_info *info = (struct vscsibk_info *)data;
+
+	DPRINTK("%s\n",__FUNCTION__);
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(info->wq,
+			info->waiting_reqs || kthread_should_stop());
+		wait_event_interruptible(pending_free_wq,
+			!list_empty(&pending_free) || kthread_should_stop());
+
+		info->waiting_reqs = 0;
+		/* order the flag clear against the ring reads below */
+		smp_mb();
+
+		switch (scsiback_do_cmd_fn(info)) {
+		case 1:
+			info->waiting_reqs = 1;
+			/* fall through -- re-check the ring on the next loop */
+		case 0:
+			break;
+		case -EACCES:
+			wait_event_interruptible(info->shutdown_wq,
+						 kthread_should_stop());
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Build the worker thread name "vscsi.<domid>.<id>" into @buf (must be
+ * at least TASK_COMM_LEN bytes).
+ * NOTE(review): the sscanf() result is unchecked, so "id" is used
+ * uninitialized if the nodename does not match the expected pattern --
+ * confirm the toolstack guarantees "backend/vscsi/<domid>/<id>".  The
+ * parsed "domid" is unused; be->info->domid is printed instead.
+ */
+static void __vscsiif_name(struct backend_info *be, char *buf)
+{
+	struct xenbus_device *dev = be->dev;
+	unsigned int domid, id;
+
+	sscanf(dev->nodename, "backend/vscsi/%u/%u", &domid, &id);
+	snprintf(buf, TASK_COMM_LEN, "vscsi.%u.%u", be->info->domid, id);
+}
+
+/*
+ * Connect to the frontend: read ring-ref and event-channel from the
+ * frontend's xenstore directory, map the shared ring and start the
+ * per-frontend worker thread.  Returns 0 on success.
+ */
+static int scsiback_map(struct backend_info *be)
+{
+	struct xenbus_device *dev = be->dev;
+	unsigned int ring_ref, evtchn;
+	int err;
+	char name[TASK_COMM_LEN];
+
+	err = xenbus_gather(XBT_NIL, dev->otherend,
+			"ring-ref", "%u", &ring_ref,
+			"event-channel", "%u", &evtchn, NULL);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
+		return err;
+	}
+
+	err = scsiback_init_sring(be->info, ring_ref, evtchn);
+	if (err)
+		return err;
+
+	__vscsiif_name(be, name);
+
+	be->info->kthread = kthread_run(scsiback_schedule, be->info, name);
+	if (IS_ERR(be->info->kthread)) {
+		err = PTR_ERR(be->info->kthread);
+		be->info->kthread = NULL;
+		xenbus_dev_error(be->dev, err, "start vscsiif");
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Look up the physical scsi_device for @phy (host:channel:target:lun).
+ * On success the caller holds a device reference (scsi_device_lookup);
+ * the temporary host reference is dropped before returning.
+ * Returns NULL (with an error logged) when host or device don't exist.
+ */
+struct scsi_device *scsiback_get_scsi_device(struct ids_tuple *phy)
+{
+	struct scsi_device *sdev;
+	struct Scsi_Host *shost = scsi_host_lookup(phy->hst);
+
+	if (shost == NULL) {
+		pr_err("scsiback: host%d doesn't exist\n", phy->hst);
+		return NULL;
+	}
+
+	sdev = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun);
+	if (sdev == NULL)
+		pr_err("scsiback: %d:%d:%d:%d doesn't exist\n",
+		       phy->hst, phy->chn, phy->tgt, phy->lun);
+
+	scsi_host_put(shost);
+	return sdev;
+}
+
+/*
+ * Add a new virtual-to-physical translation entry mapping virtual ID @v
+ * onto @sdev.  Returns 0 on success, -EEXIST when the virtual ID is
+ * already mapped, -ENOMEM on allocation failure.
+ */
+static int scsiback_add_translation_entry(struct vscsibk_info *info,
+			struct scsi_device *sdev, struct ids_tuple *v)
+{
+	struct list_head *head = &(info->v2p_entry_lists);
+	struct v2p_entry *entry;
+	struct v2p_entry *new;
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+
+	/* refuse a double assignment of the same virtual ID */
+	list_for_each_entry(entry, head, l) {
+		if (entry->v.chn == v->chn && entry->v.tgt == v->tgt &&
+		    entry->v.lun == v->lun) {
+			pr_warning("scsiback: Virtual ID is already used. "
+				   "Assignment was not performed.\n");
+			err = -EEXIST;
+			goto out;
+		}
+	}
+
+	/* GFP_ATOMIC: we are inside the v2p spinlock */
+	new = kmalloc(sizeof(struct v2p_entry), GFP_ATOMIC);
+	if (new == NULL) {
+		pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__);
+		err = -ENOMEM;
+		goto out;
+	}
+	new->v = *v;
+	new->sdev = sdev;
+	list_add_tail(&new->l, head);
+
+out:
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return err;
+}
+
+/*
+ * Remove the translation entry for virtual ID @v and drop the device
+ * reference it held.  Returns 0 when an entry was found and deleted,
+ * 1 when no entry matched.
+ */
+static int scsiback_del_translation_entry(struct vscsibk_info *info,
+				struct ids_tuple *v)
+{
+	struct v2p_entry *entry;
+	struct v2p_entry *found = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry(entry, &info->v2p_entry_lists, l) {
+		if (entry->v.chn == v->chn && entry->v.tgt == v->tgt &&
+		    entry->v.lun == v->lun) {
+			found = entry;
+			break;
+		}
+	}
+
+	if (found) {
+		/* drop the reference taken when the entry was created */
+		scsi_device_put(found->sdev);
+		list_del(&found->l);
+		kfree(found);
+	}
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+	return found ? 0 : 1;
+}
+
+/*
+ * Hotplug helper: look up the physical device for @phy and register the
+ * v2p translation for @vir, publishing the resulting state (Initialised
+ * on success, Closed on failure) under @state in xenstore.  The device
+ * reference taken by scsiback_get_scsi_device() is kept by the
+ * translation entry and dropped again on every failure path.
+ */
+static void scsiback_do_add_lun(struct backend_info *be, const char *state,
+				struct ids_tuple *phy, struct ids_tuple *vir)
+{
+	struct scsi_device *sdev;
+
+	sdev = scsiback_get_scsi_device(phy);
+	if (!sdev) {
+		xenbus_printf(XBT_NIL, be->dev->nodename, state,
+			      "%d", XenbusStateClosed);
+		return;
+	}
+	if (!scsiback_add_translation_entry(be->info, sdev, vir)) {
+		if (xenbus_printf(XBT_NIL, be->dev->nodename, state,
+				  "%d", XenbusStateInitialised)) {
+			pr_err("scsiback: xenbus_printf error %s\n", state);
+			/* roll back: this also drops the device reference */
+			scsiback_del_translation_entry(be->info, vir);
+		}
+	} else {
+		scsi_device_put(sdev);
+		xenbus_printf(XBT_NIL, be->dev->nodename, state,
+			      "%d", XenbusStateClosed);
+	}
+}
+
+/*
+ * Hotplug helper: remove the v2p mapping for @vir and publish
+ * XenbusStateClosed under @state when it existed.
+ */
+static void scsiback_do_del_lun(struct backend_info *be, const char *state,
+				struct ids_tuple *vir)
+{
+	if (scsiback_del_translation_entry(be->info, vir))
+		return;
+
+	if (xenbus_printf(XBT_NIL, be->dev->nodename, state,
+			  "%d", XenbusStateClosed))
+		pr_err("scsiback: xenbus_printf error %s\n", state);
+}
+
+#define VSCSIBACK_OP_ADD_OR_DEL_LUN	1
+#define VSCSIBACK_OP_UPDATEDEV_STATE	2
+
+/*
+ * Process one vscsi-devs/<ent> xenstore entry: parse its state, p-dev
+ * ("h:c:t:l" of the physical device) and v-dev (virtual ID) nodes and
+ * perform the requested add/delete (@op ADD_OR_DEL_LUN) or state update
+ * (@op UPDATEDEV_STATE).  Parse failures publish XenbusStateClosed.
+ */
+static void scsiback_do_1lun_hotplug(struct backend_info *be, int op, char *ent)
+{
+	int err;
+	struct ids_tuple phy, vir;
+	int device_state;
+	char str[64];
+	char state[64];
+	struct xenbus_device *dev = be->dev;
+
+	/* read status */
+	snprintf(state, sizeof(state), "vscsi-devs/%s/state", ent);
+	err = xenbus_scanf(XBT_NIL, dev->nodename, state, "%u", &device_state);
+	if (XENBUS_EXIST_ERR(err))
+		return;
+
+	/* physical SCSI device */
+	snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
+	err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
+			   &phy.hst, &phy.chn, &phy.tgt, &phy.lun);
+	if (XENBUS_EXIST_ERR(err)) {
+		xenbus_printf(XBT_NIL, dev->nodename, state,
+			      "%d", XenbusStateClosed);
+		return;
+	}
+
+	/* virtual SCSI device */
+	snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", ent);
+	err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
+			   &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
+	if (XENBUS_EXIST_ERR(err)) {
+		xenbus_printf(XBT_NIL, dev->nodename, state,
+			      "%d", XenbusStateClosed);
+		return;
+	}
+
+	switch (op) {
+	case VSCSIBACK_OP_ADD_OR_DEL_LUN:
+		if (device_state == XenbusStateInitialising)
+			scsiback_do_add_lun(be, state, &phy, &vir);
+		if (device_state == XenbusStateClosing)
+			scsiback_do_del_lun(be, state, &vir);
+		break;
+
+	case VSCSIBACK_OP_UPDATEDEV_STATE:
+		if (device_state == XenbusStateInitialised) {
+			/* modify vscsi-devs/dev-x/state */
+			if (xenbus_printf(XBT_NIL, dev->nodename, state,
+					  "%d", XenbusStateConnected)) {
+				pr_err("scsiback: xenbus_printf error %s\n",
+				       str);
+				scsiback_del_translation_entry(be->info, &vir);
+				xenbus_printf(XBT_NIL, dev->nodename, state,
+					      "%d", XenbusStateClosed);
+			}
+		}
+		break;
+	/*When it is necessary, processing is added here.*/
+	default:
+		break;
+	}
+}
+
+/*
+ * Apply @op (add/delete or state update) to every vscsi-devs/* entry
+ * of this backend's xenstore node.
+ */
+static void scsiback_do_lun_hotplug(struct backend_info *be, int op)
+{
+	unsigned int i, ndir = 0;
+	char **dir;
+
+	dir = xenbus_directory(XBT_NIL, be->dev->nodename, "vscsi-devs", &ndir);
+	if (IS_ERR(dir))
+		return;
+
+	for (i = 0; i < ndir; i++)
+		scsiback_do_1lun_hotplug(be, op, dir[i]);
+
+	kfree(dir);
+}
+
+/*
+ * xenbus otherend callback: drive the backend through the xenbus state
+ * machine in response to frontend state changes (connect, LUN hotplug,
+ * reconfigure, shutdown).
+ */
+static void scsiback_frontend_changed(struct xenbus_device *dev,
+					enum xenbus_state frontend_state)
+{
+	struct backend_info *be = dev_get_drvdata(&dev->dev);
+	int err;
+
+	switch (frontend_state) {
+	case XenbusStateInitialising:
+		break;
+	case XenbusStateInitialised:
+		/* map the ring, start the worker, attach initial LUNs */
+		err = scsiback_map(be);
+		if (err)
+			break;
+
+		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
+		xenbus_switch_state(dev, XenbusStateConnected);
+
+		break;
+	case XenbusStateConnected:
+
+		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_UPDATEDEV_STATE);
+
+		if (dev->state == XenbusStateConnected)
+			break;
+
+		xenbus_switch_state(dev, XenbusStateConnected);
+
+		break;
+
+	case XenbusStateClosing:
+		scsiback_disconnect(be->info);
+		xenbus_switch_state(dev, XenbusStateClosing);
+		break;
+
+	case XenbusStateClosed:
+		xenbus_switch_state(dev, XenbusStateClosed);
+		if (xenbus_dev_is_online(dev))
+			break;
+		/* fall through if not online */
+	case XenbusStateUnknown:
+		device_unregister(&dev->dev);
+		break;
+
+	case XenbusStateReconfiguring:
+		scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
+
+		xenbus_switch_state(dev, XenbusStateReconfigured);
+
+		break;
+
+	default:
+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+					frontend_state);
+		break;
+	}
+}
+
+/*
+  Release all virtual-to-physical translation entries of this backend
+  instance: drop the scsi_device reference taken when each entry was
+  added, unlink it from the list and free it.  Runs under v2p_lock.
+*/
+static void scsiback_release_translation_entry(struct vscsibk_info *info)
+{
+	struct v2p_entry *entry, *tmp;
+	struct list_head *head = &(info->v2p_entry_lists);
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->v2p_lock, flags);
+	list_for_each_entry_safe(entry, tmp, head, l) {
+		scsi_device_put(entry->sdev);
+		list_del(&entry->l);
+		kfree(entry);
+	}
+
+	spin_unlock_irqrestore(&info->v2p_lock, flags);
+	return;
+
+}
+
+/*
+ * xenbus remove callback: disconnect from the frontend, release all
+ * translation entries and free the vscsibk_info and backend_info
+ * allocated in scsiback_probe().  Always returns 0.
+ */
+static int scsiback_remove(struct xenbus_device *dev)
+{
+	struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+	/* be->info may be NULL if probe failed before info allocation. */
+	if (be->info) {
+		scsiback_disconnect(be->info);
+		scsiback_release_translation_entry(be->info);
+		scsiback_free(be->info);
+		be->info = NULL;
+	}
+
+	kfree(be);
+	dev_set_drvdata(&dev->dev, NULL);
+
+	return 0;
+}
+
+/*
+  Initialize the translation entry list and its protecting spinlock.
+  Called once per backend instance from scsiback_probe().
+*/
+static void scsiback_init_translation_table(struct vscsibk_info *info)
+{
+	INIT_LIST_HEAD(&info->v2p_entry_lists);
+	spin_lock_init(&info->v2p_lock);
+}
+
+/*
+ * xenbus probe callback: allocate the backend_info and vscsibk_info
+ * structures, initialize the v2p translation table, read the optional
+ * "feature-host" xenstore node and switch the device to InitWait.
+ * Returns 0 on success or a negative errno (cleanup via scsiback_remove).
+ */
+static int scsiback_probe(struct xenbus_device *dev,
+			   const struct xenbus_device_id *id)
+{
+	int err;
+	unsigned val = 0;
+
+	struct backend_info *be = kzalloc(sizeof(struct backend_info),
+					  GFP_KERNEL);
+
+	DPRINTK("%p %d\n", dev, dev->otherend_id);
+
+	if (!be) {
+		xenbus_dev_fatal(dev, -ENOMEM,
+				 "allocating backend structure");
+		return -ENOMEM;
+	}
+	be->dev = dev;
+	dev_set_drvdata(&dev->dev, be);
+
+	be->info = vscsibk_info_alloc(dev->otherend_id);
+	if (IS_ERR(be->info)) {
+		err = PTR_ERR(be->info);
+		/* Clear so scsiback_remove() skips the info teardown. */
+		be->info = NULL;
+		xenbus_dev_fatal(dev, err, "creating scsihost interface");
+		goto fail;
+	}
+
+	be->info->dev = dev;
+	be->info->irq = 0;
+	be->info->feature = 0;	/*default not HOSTMODE.*/
+
+	scsiback_init_translation_table(be->info);
+
+	/* "feature-host" is optional; absence simply means val stays 0. */
+	err = xenbus_scanf(XBT_NIL, dev->nodename,
+			   "feature-host", "%d", &val);
+	if (XENBUS_EXIST_ERR(err))
+		val = 0;
+
+	if (val)
+		be->info->feature = VSCSI_TYPE_HOST;
+
+	err = xenbus_switch_state(dev, XenbusStateInitWait);
+	if (err)
+		goto fail;
+
+	return 0;
+
+fail:
+	/* NOTE(review): pr_warning() and __FUNCTION__ are deprecated in
+	 * current kernels; prefer pr_warn() and __func__. */
+	pr_warning("scsiback: %s failed\n",__FUNCTION__);
+	scsiback_remove(dev);
+
+	return err;
+}
+
+/* xenbus IDs this backend binds to; the empty string terminates the list. */
+static const struct xenbus_device_id scsiback_ids[] = {
+	{ "vscsi" },
+	{ "" }
+};
+
+/* Backend driver definition wiring probe/remove/otherend_changed. */
+static DEFINE_XENBUS_DRIVER(scsiback, ,
+	.probe			= scsiback_probe,
+	.remove			= scsiback_remove,
+	.otherend_changed	= scsiback_frontend_changed
+);
+
+/*
+ * Module init: allocate the global pending-request bookkeeping arrays,
+ * reserve ballooned pages for grant mappings, initialize the free list
+ * and register the xenbus backend driver.
+ * Returns 0 on success, -ENODEV when not running under Xen, -ENOMEM on
+ * allocation failure.
+ */
+static int __init scsiback_init(void)
+{
+	int i, mmap_pages;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	/* One grant-mapped page per possible SG segment of every request. */
+	mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;
+
+	/* NOTE(review): the two open-coded size multiplications should use
+	 * kcalloc()/kmalloc_array() to get overflow checking, matching the
+	 * pending_pages allocation below. */
+	pending_reqs          = kzalloc(sizeof(pending_reqs[0]) *
+					vscsiif_reqs, GFP_KERNEL);
+	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
+					mmap_pages, GFP_KERNEL);
+	pending_pages         = kcalloc(mmap_pages, sizeof(pending_pages[0]),
+					GFP_KERNEL);
+
+	if (!pending_reqs || !pending_grant_handles || !pending_pages)
+		goto out_of_memory;
+
+	if (alloc_xenballooned_pages(mmap_pages, pending_pages, 0))
+		goto out_of_memory;
+
+	/* No grants mapped yet: mark every handle slot invalid. */
+	for (i = 0; i < mmap_pages; i++)
+		pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;
+
+	if (scsiback_interface_init() < 0)
+		goto out_of_memory;
+
+	INIT_LIST_HEAD(&pending_free);
+
+	for (i = 0; i < vscsiif_reqs; i++)
+		list_add_tail(&pending_reqs[i].free_list, &pending_free);
+
+	if (xenbus_register_backend(&scsiback_driver))
+		goto out_interface;
+
+	scsiback_emulation_init();
+
+	return 0;
+
+out_interface:
+	scsiback_interface_exit();
+out_of_memory:
+	kfree(pending_reqs);
+	kfree(pending_grant_handles);
+	/* NOTE(review): this path is also reached when pending_pages is
+	 * NULL or alloc_xenballooned_pages() never succeeded, so the
+	 * ballooned pages are freed unconditionally here — verify
+	 * free_xenballooned_pages() tolerates that, or split the error
+	 * labels. */
+	free_xenballooned_pages(mmap_pages, pending_pages);
+	kfree(pending_pages);
+	pr_err("scsiback: %s: out of memory\n", __FUNCTION__);
+	return -ENOMEM;
+}
+
+/*
+ * Module exit path, currently compiled out (#if 0): the backend cannot
+ * yet be unloaded safely, so both scsiback_exit() and the matching
+ * module_exit() below are disabled, making the module effectively
+ * non-removable.
+ */
+#if 0
+static void __exit scsiback_exit(void)
+{
+	xenbus_unregister_driver(&scsiback_driver);
+	scsiback_interface_exit();
+	kfree(pending_reqs);
+	kfree(pending_grant_handles);
+	free_xenballooned_pages(vscsiif_reqs * VSCSIIF_SG_TABLESIZE,
+				pending_pages);
+	kfree(pending_pages);
+}
+#endif
+
+module_init(scsiback_init);
+
+#if 0
+module_exit(scsiback_exit);
+#endif
+
+MODULE_DESCRIPTION("Xen SCSI backend driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:vscsi");
-- 
1.8.4.5

^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [PATCH 4/4] add xen pvscsi maintainer
  2014-06-27 14:34 Add XEN pvSCSI support jgross
                   ` (6 preceding siblings ...)
  2014-06-27 14:34 ` [PATCH 4/4] add xen pvscsi maintainer jgross
@ 2014-06-27 14:34 ` jgross
  2014-06-27 17:13   ` Konrad Rzeszutek Wilk
  2014-06-27 17:45 ` Add XEN pvSCSI support Pasi Kärkkäinen
  8 siblings, 1 reply; 34+ messages in thread
From: jgross @ 2014-06-27 14:34 UTC (permalink / raw)
  To: JBottomley, linux-scsi, xen-devel; +Cc: Juergen Gross

From: Juergen Gross <jgross@suse.com>

Add myself as maintainer for the Xen pvSCSI stuff.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 MAINTAINERS | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index ebc8128..78c60e6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10008,6 +10008,13 @@ S:	Supported
 F:	arch/x86/pci/*xen*
 F:	drivers/pci/*xen*
 
+XEN PVSCSI DRIVERS
+M:	Juergen Gross <jgross@suse.com>
+L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
+S:	Supported
+F:	drivers/scsi/xen*
+F:	include/xen/interface/io/vscsiif.h
+
 XEN SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
-- 
1.8.4.5


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [PATCH 4/4] add xen pvscsi maintainer
  2014-06-27 14:34 Add XEN pvSCSI support jgross
                   ` (5 preceding siblings ...)
  2014-06-27 14:34 ` jgross
@ 2014-06-27 14:34 ` jgross
  2014-06-27 14:34 ` jgross
  2014-06-27 17:45 ` Add XEN pvSCSI support Pasi Kärkkäinen
  8 siblings, 0 replies; 34+ messages in thread
From: jgross @ 2014-06-27 14:34 UTC (permalink / raw)
  To: JBottomley, linux-scsi, xen-devel; +Cc: Juergen Gross

From: Juergen Gross <jgross@suse.com>

Add myself as maintainer for the Xen pvSCSI stuff.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 MAINTAINERS | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index ebc8128..78c60e6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10008,6 +10008,13 @@ S:	Supported
 F:	arch/x86/pci/*xen*
 F:	drivers/pci/*xen*
 
+XEN PVSCSI DRIVERS
+M:	Juergen Gross <jgross@suse.com>
+L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
+S:	Supported
+F:	drivers/scsi/xen*
+F:	include/xen/interface/io/vscsiif.h
+
 XEN SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
-- 
1.8.4.5

^ permalink raw reply related	[flat|nested] 34+ messages in thread

* Re: [Xen-devel] [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-27 14:34 ` [PATCH 1/4] Add XEN pvSCSI protocol description jgross
  2014-06-27 17:11   ` Konrad Rzeszutek Wilk
@ 2014-06-27 17:11   ` Konrad Rzeszutek Wilk
  2014-06-30  8:26     ` Juergen Gross
                       ` (3 more replies)
  1 sibling, 4 replies; 34+ messages in thread
From: Konrad Rzeszutek Wilk @ 2014-06-27 17:11 UTC (permalink / raw)
  To: jgross; +Cc: JBottomley, linux-scsi, xen-devel

On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
> From: Juergen Gross <jgross@suse.com>
> 
> Add the definition of pvSCSI protocol used between the pvSCSI frontend in a
> XEN domU and the pvSCSI backend in a XEN driver domain (usually Dom0).
> 
> This header was originally provided by Fujitsu for XEN based on Linux 2.6.18.
> Changes are:
> - added comment
> - adapt to Linux style guide
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
>  include/xen/interface/io/vscsiif.h | 110 +++++++++++++++++++++++++++++++++++++
>  1 file changed, 110 insertions(+)
>  create mode 100644 include/xen/interface/io/vscsiif.h
> 
> diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
> new file mode 100644
> index 0000000..f8420f3
> --- /dev/null
> +++ b/include/xen/interface/io/vscsiif.h
> @@ -0,0 +1,110 @@
> +/******************************************************************************
> + * vscsiif.h
> + *
> + * Based on the blkif.h code.
> + *
> + * This interface is to be regarded as a stable API between XEN domains
> + * running potentially different Linux kernel versions.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a copy
> + * of this software and associated documentation files (the "Software"), to
> + * deal in the Software without restriction, including without limitation the
> + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
> + * sell copies of the Software, and to permit persons to whom the Software is
> + * furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
> + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
> + * DEALINGS IN THE SOFTWARE.
> + *
> + * Copyright(c) FUJITSU Limited 2008.
> + */
> +
> +#ifndef __XEN__PUBLIC_IO_SCSI_H__
> +#define __XEN__PUBLIC_IO_SCSI_H__
> +
> +#include "ring.h"
> +#include "../grant_table.h"
> +
> +/* commands between backend and frontend */
> +#define VSCSIIF_ACT_SCSI_CDB		1	/* SCSI CDB command */
> +#define VSCSIIF_ACT_SCSI_ABORT		2	/* SCSI Device(Lun) Abort*/
> +#define VSCSIIF_ACT_SCSI_RESET		3	/* SCSI Device(Lun) Reset*/
> +#define VSCSIIF_ACT_SCSI_SG_PRESET	4	/* Preset SG elements */
> +
> +/*
> + * Maximum scatter/gather segments per request.
> + *
> + * Considering balance between allocating at least 16 "vscsiif_request"
> + * structures on one page (4096 bytes) and the number of scatter/gather
> + * elements needed, we decided to use 26 as a magic number.
> + */
> +#define VSCSIIF_SG_TABLESIZE		26
> +
> +/*
> + * based on Linux kernel 2.6.18

This being a bit more .. new, - do these sizes make sense anymore?
Should they be extended a bit? Or have support for using the
old ones (as default) and then negotiate new sizes with the kernel?
(If of course there is a difference?)
> + */
> +#define VSCSIIF_MAX_COMMAND_SIZE	16
> +#define VSCSIIF_SENSE_BUFFERSIZE	96
> +
> +struct scsiif_request_segment {
> +	grant_ref_t gref;
> +	uint16_t offset;
> +	uint16_t length;
> +};
> +typedef struct scsiif_request_segment vscsiif_segment_t;

No typedefs please.
> +
> +struct vscsiif_request {
> +	uint16_t rqid;		/* private guest value, echoed in resp  */
> +	uint8_t act;		/* command between backend and frontend */
> +	uint8_t cmd_len;
> +
> +	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
> +	uint16_t timeout_per_command;	/* The command is issued by twice
> +					   the value in Backend. */
> +	uint16_t channel, id, lun;
> +	uint16_t padding;
> +	uint8_t sc_data_direction;	/* for DMA_TO_DEVICE(1)
> +					   DMA_FROM_DEVICE(2)
> +					   DMA_NONE(3) requests */
> +	uint8_t nr_segments;		/* Number of pieces of scatter-gather */
> +
> +	vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE];
> +	uint32_t reserved[3];
> +};

Could you add a comment saying what the size should be. Just in case
somebody extends this in the future - they will know the limits.

> +typedef struct vscsiif_request vscsiif_request_t;

Ditto.
> +
> +#define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) /		\
> +				sizeof(vscsiif_segment_t))

That -4 ought to have a comment. In case the scsiif_request_segment
gets a new member or such.
> +
> +struct vscsiif_sg_list {
> +	/* First two fields must match struct vscsiif_request! */
> +	uint16_t rqid;		/* private guest value, must match main req */
> +	uint8_t act;		/* VSCSIIF_ACT_SCSI_SG_PRESET */
> +	uint8_t nr_segments;	/* Number of pieces of scatter-gather */
> +	vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE];
> +};
> +typedef struct vscsiif_sg_list vscsiif_sg_list_t;

Ditto.
> +
> +struct vscsiif_response {
> +	uint16_t rqid;
> +	uint8_t act;		/* valid only when backend supports SG_PRESET */

Same comment? First two fields must match 'struct vscsiif_request' ?

> +	uint8_t sense_len;
> +	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
> +	int32_t rslt;
> +	uint32_t residual_len;	/* request bufflen -
> +				   return the value from physical device */
> +	uint32_t reserved[36];
> +};
> +typedef struct vscsiif_response vscsiif_response_t;

I think you know what I am going to say :-)
> +
> +DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
> +
> +#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
> -- 
> 1.8.4.5
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-27 14:34 ` [PATCH 1/4] Add XEN pvSCSI protocol description jgross
@ 2014-06-27 17:11   ` Konrad Rzeszutek Wilk
  2014-06-27 17:11   ` [Xen-devel] " Konrad Rzeszutek Wilk
  1 sibling, 0 replies; 34+ messages in thread
From: Konrad Rzeszutek Wilk @ 2014-06-27 17:11 UTC (permalink / raw)
  To: jgross; +Cc: linux-scsi, JBottomley, xen-devel

On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
> From: Juergen Gross <jgross@suse.com>
> 
> Add the definition of pvSCSI protocol used between the pvSCSI frontend in a
> XEN domU and the pvSCSI backend in a XEN driver domain (usually Dom0).
> 
> This header was originally provided by Fujitsu for XEN based on Linux 2.6.18.
> Changes are:
> - added comment
> - adapt to Linux style guide
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
>  include/xen/interface/io/vscsiif.h | 110 +++++++++++++++++++++++++++++++++++++
>  1 file changed, 110 insertions(+)
>  create mode 100644 include/xen/interface/io/vscsiif.h
> 
> diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
> new file mode 100644
> index 0000000..f8420f3
> --- /dev/null
> +++ b/include/xen/interface/io/vscsiif.h
> @@ -0,0 +1,110 @@
> +/******************************************************************************
> + * vscsiif.h
> + *
> + * Based on the blkif.h code.
> + *
> + * This interface is to be regarded as a stable API between XEN domains
> + * running potentially different Linux kernel versions.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a copy
> + * of this software and associated documentation files (the "Software"), to
> + * deal in the Software without restriction, including without limitation the
> + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
> + * sell copies of the Software, and to permit persons to whom the Software is
> + * furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
> + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
> + * DEALINGS IN THE SOFTWARE.
> + *
> + * Copyright(c) FUJITSU Limited 2008.
> + */
> +
> +#ifndef __XEN__PUBLIC_IO_SCSI_H__
> +#define __XEN__PUBLIC_IO_SCSI_H__
> +
> +#include "ring.h"
> +#include "../grant_table.h"
> +
> +/* commands between backend and frontend */
> +#define VSCSIIF_ACT_SCSI_CDB		1	/* SCSI CDB command */
> +#define VSCSIIF_ACT_SCSI_ABORT		2	/* SCSI Device(Lun) Abort*/
> +#define VSCSIIF_ACT_SCSI_RESET		3	/* SCSI Device(Lun) Reset*/
> +#define VSCSIIF_ACT_SCSI_SG_PRESET	4	/* Preset SG elements */
> +
> +/*
> + * Maximum scatter/gather segments per request.
> + *
> + * Considering balance between allocating at least 16 "vscsiif_request"
> + * structures on one page (4096 bytes) and the number of scatter/gather
> + * elements needed, we decided to use 26 as a magic number.
> + */
> +#define VSCSIIF_SG_TABLESIZE		26
> +
> +/*
> + * based on Linux kernel 2.6.18

This being a bit more .. new, - do these sizes make sense anymore?
Should they be extended a bit? Or have support for using the
old ones (as default) and then negotiate new sizes with the kernel?
(If of course there is a difference?)
> + */
> +#define VSCSIIF_MAX_COMMAND_SIZE	16
> +#define VSCSIIF_SENSE_BUFFERSIZE	96
> +
> +struct scsiif_request_segment {
> +	grant_ref_t gref;
> +	uint16_t offset;
> +	uint16_t length;
> +};
> +typedef struct scsiif_request_segment vscsiif_segment_t;

No typedefs please.
> +
> +struct vscsiif_request {
> +	uint16_t rqid;		/* private guest value, echoed in resp  */
> +	uint8_t act;		/* command between backend and frontend */
> +	uint8_t cmd_len;
> +
> +	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
> +	uint16_t timeout_per_command;	/* The command is issued by twice
> +					   the value in Backend. */
> +	uint16_t channel, id, lun;
> +	uint16_t padding;
> +	uint8_t sc_data_direction;	/* for DMA_TO_DEVICE(1)
> +					   DMA_FROM_DEVICE(2)
> +					   DMA_NONE(3) requests */
> +	uint8_t nr_segments;		/* Number of pieces of scatter-gather */
> +
> +	vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE];
> +	uint32_t reserved[3];
> +};

Could you add a comment saying what the size should be. Just in case
somebody extends this in the future - they will know the limits.

> +typedef struct vscsiif_request vscsiif_request_t;

Ditto.
> +
> +#define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) /		\
> +				sizeof(vscsiif_segment_t))

That -4 ought to have a comment. In case the scsiif_request_segment
gets a new member or such.
> +
> +struct vscsiif_sg_list {
> +	/* First two fields must match struct vscsiif_request! */
> +	uint16_t rqid;		/* private guest value, must match main req */
> +	uint8_t act;		/* VSCSIIF_ACT_SCSI_SG_PRESET */
> +	uint8_t nr_segments;	/* Number of pieces of scatter-gather */
> +	vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE];
> +};
> +typedef struct vscsiif_sg_list vscsiif_sg_list_t;

Ditto.
> +
> +struct vscsiif_response {
> +	uint16_t rqid;
> +	uint8_t act;		/* valid only when backend supports SG_PRESET */

Same comment? First two fields must match 'struct vscsiif_request' ?

> +	uint8_t sense_len;
> +	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
> +	int32_t rslt;
> +	uint32_t residual_len;	/* request bufflen -
> +				   return the value from physical device */
> +	uint32_t reserved[36];
> +};
> +typedef struct vscsiif_response vscsiif_response_t;

I think you know what I am going to say :-)
> +
> +DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
> +
> +#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
> -- 
> 1.8.4.5
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 4/4] add xen pvscsi maintainer
  2014-06-27 14:34 ` jgross
@ 2014-06-27 17:13   ` Konrad Rzeszutek Wilk
  0 siblings, 0 replies; 34+ messages in thread
From: Konrad Rzeszutek Wilk @ 2014-06-27 17:13 UTC (permalink / raw)
  To: jgross; +Cc: linux-scsi, JBottomley, xen-devel

On Fri, Jun 27, 2014 at 04:34:36PM +0200, jgross@suse.com wrote:
> From: Juergen Gross <jgross@suse.com>
> 
> Add myself as maintainer for the Xen pvSCSI stuff.

Woot!

Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
>  MAINTAINERS | 7 +++++++
>  1 file changed, 7 insertions(+)
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index ebc8128..78c60e6 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -10008,6 +10008,13 @@ S:	Supported
>  F:	arch/x86/pci/*xen*
>  F:	drivers/pci/*xen*
>  
> +XEN PVSCSI DRIVERS
> +M:	Juergen Gross <jgross@suse.com>
> +L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
> +S:	Supported
> +F:	drivers/scsi/xen*
> +F:	include/xen/interface/io/vscsiif.h
> +
>  XEN SWIOTLB SUBSYSTEM
>  M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>  L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
> -- 
> 1.8.4.5
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: Add XEN pvSCSI support
  2014-06-27 14:34 Add XEN pvSCSI support jgross
                   ` (7 preceding siblings ...)
  2014-06-27 14:34 ` jgross
@ 2014-06-27 17:45 ` Pasi Kärkkäinen
  2014-06-27 17:51   ` Pasi Kärkkäinen
  2014-06-27 17:51   ` [Xen-devel] " Pasi Kärkkäinen
  8 siblings, 2 replies; 34+ messages in thread
From: Pasi Kärkkäinen @ 2014-06-27 17:45 UTC (permalink / raw)
  To: jgross; +Cc: James Harper, linux-scsi, JBottomley, xen-devel

On Fri, Jun 27, 2014 at 04:34:32PM +0200, jgross@suse.com wrote:
> This series adds XEN pvSCSI support. With pvSCSI it is possible to use physical
> SCSI devices from a XEN domain.
> 
> The support consists of a backend in the privileged Domain-0 doing the real
> I/O and a frontend in the unprivileged domU passing I/O-requests to the backend.
> 
> The code is taken (and adapted) from the original pvSCSI implementation done
> for Linux 2.6 in 2008 by Fujitsu.
> 

Great! 

I added James Harper to CC; I know he has a port of at least scsiback as well,
and he has a PVSCSI frontend driver included in the GPLPV Windows Xen PV drivers.

Btw do these patches also include the "latest" PVSCSI fixes/enhancements done in the SuSe/SLES kernel?
(for example the RESERVE/RELEASE support).

-- Pasi

> [PATCH 1/4] Add XEN pvSCSI protocol description
> [PATCH 2/4] Introduce xen-scsifront module
> [PATCH 3/4] Introduce XEN scsiback module
> [PATCH 4/4] add xen pvscsi maintainer
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [Xen-devel] Add XEN pvSCSI support
  2014-06-27 17:45 ` Add XEN pvSCSI support Pasi Kärkkäinen
  2014-06-27 17:51   ` Pasi Kärkkäinen
@ 2014-06-27 17:51   ` Pasi Kärkkäinen
  1 sibling, 0 replies; 34+ messages in thread
From: Pasi Kärkkäinen @ 2014-06-27 17:51 UTC (permalink / raw)
  To: jgross; +Cc: James Harper, linux-scsi, JBottomley, xen-devel

On Fri, Jun 27, 2014 at 08:45:25PM +0300, Pasi Kärkkäinen wrote:
> On Fri, Jun 27, 2014 at 04:34:32PM +0200, jgross@suse.com wrote:
> > This series adds XEN pvSCSI support. With pvSCSI it is possible to use physical
> > SCSI devices from a XEN domain.
> > 
> > The support consists of a backend in the privileged Domain-0 doing the real
> > I/O and a frontend in the unprivileged domU passing I/O-requests to the backend.
> > 
> > The code is taken (and adapted) from the original pvSCSI implementation done
> > for Linux 2.6 in 2008 by Fujitsu.
> > 
> 
> Great! 
> 
> I added James Harper to CC, I know he has a port of at least scsiback aswell,
> and he has a PVSCSI frontend driver included in the GPLPV Windows Xen PV drivers.
> 
> Btw do these patches also include the "latest" PVSCSI fixes/enhancements done in the SuSe/SLES kernel?
> (for example the RESERVE/RELEASE support).
> 

Never mind, I noticed that RESERVE/RELEASE are included in the xen-scsiback driver patch.

-- Pasi

--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: Add XEN pvSCSI support
  2014-06-27 17:45 ` Add XEN pvSCSI support Pasi Kärkkäinen
@ 2014-06-27 17:51   ` Pasi Kärkkäinen
  2014-06-27 17:51   ` [Xen-devel] " Pasi Kärkkäinen
  1 sibling, 0 replies; 34+ messages in thread
From: Pasi Kärkkäinen @ 2014-06-27 17:51 UTC (permalink / raw)
  To: jgross; +Cc: James Harper, JBottomley, linux-scsi, xen-devel

On Fri, Jun 27, 2014 at 08:45:25PM +0300, Pasi Kärkkäinen wrote:
> On Fri, Jun 27, 2014 at 04:34:32PM +0200, jgross@suse.com wrote:
> > This series adds XEN pvSCSI support. With pvSCSI it is possible to use physical
> > SCSI devices from a XEN domain.
> > 
> > The support consists of a backend in the privileged Domain-0 doing the real
> > I/O and a frontend in the unprivileged domU passing I/O-requests to the backend.
> > 
> > The code is taken (and adapted) from the original pvSCSI implementation done
> > for Linux 2.6 in 2008 by Fujitsu.
> > 
> 
> Great! 
> 
> I added James Harper to CC, I know he has a port of at least scsiback aswell,
> and he has a PVSCSI frontend driver included in the GPLPV Windows Xen PV drivers.
> 
> Btw do these patches also include the "latest" PVSCSI fixes/enhancements done in the SuSe/SLES kernel?
> (for example the RESERVE/RELEASE support).
> 

Never mind, I noticed that RESERVE/RELEASE are included in the xen-scsiback driver patch.

-- Pasi

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 3/4] Introduce XEN scsiback module
  2014-06-27 14:34 ` jgross
@ 2014-06-28 18:09   ` Christoph Hellwig
  2014-07-11  8:57     ` Juergen Gross
  2014-07-11  8:57     ` Juergen Gross
  2014-06-28 18:09   ` Christoph Hellwig
  1 sibling, 2 replies; 34+ messages in thread
From: Christoph Hellwig @ 2014-06-28 18:09 UTC (permalink / raw)
  To: jgross; +Cc: JBottomley, linux-scsi, xen-devel, target-devel

On Fri, Jun 27, 2014 at 04:34:35PM +0200, jgross@suse.com wrote:
> From: Juergen Gross <jgross@suse.com>
> 
> Introduces the XEN pvSCSI backend. With pvSCSI it is possible for a XEN domU
> to issue SCSI commands to a SCSI LUN assigned to that domU. The SCSI commands
> are passed to the pvSCSI backend in a driver domain (usually Dom0) which is
> owner of the physical device. This allows e.g. to use SCSI tape drives in a
> XEN domU.

This should be written against the generic target core infrastructure
in drivers/target, instead of introducing another simplistic pass
through only SCSI target.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 3/4] Introduce XEN scsiback module
  2014-06-27 14:34 ` jgross
  2014-06-28 18:09   ` Christoph Hellwig
@ 2014-06-28 18:09   ` Christoph Hellwig
  1 sibling, 0 replies; 34+ messages in thread
From: Christoph Hellwig @ 2014-06-28 18:09 UTC (permalink / raw)
  To: jgross; +Cc: target-devel, linux-scsi, JBottomley, xen-devel

On Fri, Jun 27, 2014 at 04:34:35PM +0200, jgross@suse.com wrote:
> From: Juergen Gross <jgross@suse.com>
> 
> Introduces the XEN pvSCSI backend. With pvSCSI it is possible for a XEN domU
> to issue SCSI commands to a SCSI LUN assigned to that domU. The SCSI commands
> are passed to the pvSCSI backend in a driver domain (usually Dom0) which is
> owner of the physical device. This allows e.g. to use SCSI tape drives in a
> XEN domU.

This should be written against the generic target core infrastructure
in drivers/target, instead of introducing another simplistic pass
through only SCSI target.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [Xen-devel] [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-27 17:11   ` [Xen-devel] " Konrad Rzeszutek Wilk
  2014-06-30  8:26     ` Juergen Gross
@ 2014-06-30  8:26     ` Juergen Gross
  2014-06-30 11:02     ` Jan Beulich
  2014-06-30 11:02     ` Jan Beulich
  3 siblings, 0 replies; 34+ messages in thread
From: Juergen Gross @ 2014-06-30  8:26 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk; +Cc: JBottomley, linux-scsi, xen-devel

On 06/27/2014 07:11 PM, Konrad Rzeszutek Wilk wrote:
> On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
>> From: Juergen Gross <jgross@suse.com>
>>
>> Add the definition of pvSCSI protocol used between the pvSCSI frontend in a
>> XEN domU and the pvSCSI backend in a XEN driver domain (usually Dom0).
>>
>> This header was originally provided by Fujitsu for XEN based on Linux 2.6.18.
>> Changes are:
>> - added comment
>> - adapt to Linux style guide
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> ---
>>   include/xen/interface/io/vscsiif.h | 110 +++++++++++++++++++++++++++++++++++++
>>   1 file changed, 110 insertions(+)
>>   create mode 100644 include/xen/interface/io/vscsiif.h
>>
>> diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
>> new file mode 100644
>> index 0000000..f8420f3
>> --- /dev/null
>> +++ b/include/xen/interface/io/vscsiif.h
>> @@ -0,0 +1,110 @@
>> +/******************************************************************************
>> + * vscsiif.h
>> + *
>> + * Based on the blkif.h code.
>> + *
>> + * This interface is to be regarded as a stable API between XEN domains
>> + * running potentially different Linux kernel versions.
>> + *
>> + * Permission is hereby granted, free of charge, to any person obtaining a copy
>> + * of this software and associated documentation files (the "Software"), to
>> + * deal in the Software without restriction, including without limitation the
>> + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
>> + * sell copies of the Software, and to permit persons to whom the Software is
>> + * furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice shall be included in
>> + * all copies or substantial portions of the Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
>> + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
>> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
>> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
>> + * DEALINGS IN THE SOFTWARE.
>> + *
>> + * Copyright(c) FUJITSU Limited 2008.
>> + */
>> +
>> +#ifndef __XEN__PUBLIC_IO_SCSI_H__
>> +#define __XEN__PUBLIC_IO_SCSI_H__
>> +
>> +#include "ring.h"
>> +#include "../grant_table.h"
>> +
>> +/* commands between backend and frontend */
>> +#define VSCSIIF_ACT_SCSI_CDB		1	/* SCSI CDB command */
>> +#define VSCSIIF_ACT_SCSI_ABORT		2	/* SCSI Device(Lun) Abort*/
>> +#define VSCSIIF_ACT_SCSI_RESET		3	/* SCSI Device(Lun) Reset*/
>> +#define VSCSIIF_ACT_SCSI_SG_PRESET	4	/* Preset SG elements */
>> +
>> +/*
>> + * Maximum scatter/gather segments per request.
>> + *
>> + * Considering balance between allocating at least 16 "vscsiif_request"
>> + * structures on one page (4096 bytes) and the number of scatter/gather
>> + * elements needed, we decided to use 26 as a magic number.
>> + */
>> +#define VSCSIIF_SG_TABLESIZE		26
>> +
>> +/*
>> + * based on Linux kernel 2.6.18
>
> This being a bit more .. new, - do these sizes make sense anymore?
> Should they be extended a bit? Or have support for using the
> old ones (as default) and then negotiate new sizes with the kernel?
> (If of course there is a difference?)

The sizes are still the same. Additionally the sizes can't be changed
without breaking the interface (the request structure must not change
size). Added a comment.

>> + */
>> +#define VSCSIIF_MAX_COMMAND_SIZE	16
>> +#define VSCSIIF_SENSE_BUFFERSIZE	96
>> +
>> +struct scsiif_request_segment {
>> +	grant_ref_t gref;
>> +	uint16_t offset;
>> +	uint16_t length;
>> +};
>> +typedef struct scsiif_request_segment vscsiif_segment_t;
>
> No typedefs please.

Okay.

>> +
>> +struct vscsiif_request {
>> +	uint16_t rqid;		/* private guest value, echoed in resp  */
>> +	uint8_t act;		/* command between backend and frontend */
>> +	uint8_t cmd_len;
>> +
>> +	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
>> +	uint16_t timeout_per_command;	/* The command is issued by twice
>> +					   the value in Backend. */
>> +	uint16_t channel, id, lun;
>> +	uint16_t padding;
>> +	uint8_t sc_data_direction;	/* for DMA_TO_DEVICE(1)
>> +					   DMA_FROM_DEVICE(2)
>> +					   DMA_NONE(3) requests */
>> +	uint8_t nr_segments;		/* Number of pieces of scatter-gather */
>> +
>> +	vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE];
>> +	uint32_t reserved[3];
>> +};
>
> Could you add a comment saying what the size should be. Just in case
> somebody extends this in the future - they will know the limits.

Okay.

>
>> +typedef struct vscsiif_request vscsiif_request_t;
>
> Ditto.
>> +
>> +#define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) /		\
>> +				sizeof(vscsiif_segment_t))
>
> That -4 ought to have a comment. In case the scsiif_request_segment
> gets a new member or such.

I just removed VSCSIIF_SG_LIST_SIZE and struct vscsiif_sg_list as they
are never used.

>> +
>> +struct vscsiif_sg_list {
>> +	/* First two fields must match struct vscsiif_request! */
>> +	uint16_t rqid;		/* private guest value, must match main req */
>> +	uint8_t act;		/* VSCSIIF_ACT_SCSI_SG_PRESET */
>> +	uint8_t nr_segments;	/* Number of pieces of scatter-gather */
>> +	vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE];
>> +};
>> +typedef struct vscsiif_sg_list vscsiif_sg_list_t;
>
> Ditto.
>> +
>> +struct vscsiif_response {
>> +	uint16_t rqid;
>> +	uint8_t act;		/* valid only when backend supports SG_PRESET */
>
> Same comment? First two fields must match 'struct vscsiif_request' ?

No. act has no meaning (SG_PRESET was never supported), rqid is used to
identify the request to which the response belongs. Added a comment.

>
>> +	uint8_t sense_len;
>> +	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
>> +	int32_t rslt;
>> +	uint32_t residual_len;	/* request bufflen -
>> +				   return the value from physical device */
>> +	uint32_t reserved[36];
>> +};
>> +typedef struct vscsiif_response vscsiif_response_t;
>
> I think you know what I am going to say :-)

No idea. ;-)


Juergen

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-27 17:11   ` [Xen-devel] " Konrad Rzeszutek Wilk
@ 2014-06-30  8:26     ` Juergen Gross
  2014-06-30  8:26     ` [Xen-devel] " Juergen Gross
                       ` (2 subsequent siblings)
  3 siblings, 0 replies; 34+ messages in thread
From: Juergen Gross @ 2014-06-30  8:26 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk; +Cc: linux-scsi, JBottomley, xen-devel

On 06/27/2014 07:11 PM, Konrad Rzeszutek Wilk wrote:
> On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
>> From: Juergen Gross <jgross@suse.com>
>>
>> Add the definition of pvSCSI protocol used between the pvSCSI frontend in a
>> XEN domU and the pvSCSI backend in a XEN driver domain (usually Dom0).
>>
>> This header was originally provided by Fujitsu for XEN based on Linux 2.6.18.
>> Changes are:
>> - added comment
>> - adapt to Linux style guide
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> ---
>>   include/xen/interface/io/vscsiif.h | 110 +++++++++++++++++++++++++++++++++++++
>>   1 file changed, 110 insertions(+)
>>   create mode 100644 include/xen/interface/io/vscsiif.h
>>
>> diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
>> new file mode 100644
>> index 0000000..f8420f3
>> --- /dev/null
>> +++ b/include/xen/interface/io/vscsiif.h
>> @@ -0,0 +1,110 @@
>> +/******************************************************************************
>> + * vscsiif.h
>> + *
>> + * Based on the blkif.h code.
>> + *
>> + * This interface is to be regarded as a stable API between XEN domains
>> + * running potentially different Linux kernel versions.
>> + *
>> + * Permission is hereby granted, free of charge, to any person obtaining a copy
>> + * of this software and associated documentation files (the "Software"), to
>> + * deal in the Software without restriction, including without limitation the
>> + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
>> + * sell copies of the Software, and to permit persons to whom the Software is
>> + * furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice shall be included in
>> + * all copies or substantial portions of the Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
>> + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
>> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
>> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
>> + * DEALINGS IN THE SOFTWARE.
>> + *
>> + * Copyright(c) FUJITSU Limited 2008.
>> + */
>> +
>> +#ifndef __XEN__PUBLIC_IO_SCSI_H__
>> +#define __XEN__PUBLIC_IO_SCSI_H__
>> +
>> +#include "ring.h"
>> +#include "../grant_table.h"
>> +
>> +/* commands between backend and frontend */
>> +#define VSCSIIF_ACT_SCSI_CDB		1	/* SCSI CDB command */
>> +#define VSCSIIF_ACT_SCSI_ABORT		2	/* SCSI Device(Lun) Abort*/
>> +#define VSCSIIF_ACT_SCSI_RESET		3	/* SCSI Device(Lun) Reset*/
>> +#define VSCSIIF_ACT_SCSI_SG_PRESET	4	/* Preset SG elements */
>> +
>> +/*
>> + * Maximum scatter/gather segments per request.
>> + *
>> + * Considering balance between allocating at least 16 "vscsiif_request"
>> + * structures on one page (4096 bytes) and the number of scatter/gather
>> + * elements needed, we decided to use 26 as a magic number.
>> + */
>> +#define VSCSIIF_SG_TABLESIZE		26
>> +
>> +/*
>> + * based on Linux kernel 2.6.18
>
> This being a bit more .. new, - do these sizes make sense anymore?
> Should they be extended a bit? Or have support for using the
> old ones (as default) and then negotiate new sizes with the kernel?
> (If of course there is a difference?)

The sizes are still the same. Additionally the sizes can't be changed
without breaking the interface (the request structure must not change
size). Added a comment.

>> + */
>> +#define VSCSIIF_MAX_COMMAND_SIZE	16
>> +#define VSCSIIF_SENSE_BUFFERSIZE	96
>> +
>> +struct scsiif_request_segment {
>> +	grant_ref_t gref;
>> +	uint16_t offset;
>> +	uint16_t length;
>> +};
>> +typedef struct scsiif_request_segment vscsiif_segment_t;
>
> No typedefs please.

Okay.

>> +
>> +struct vscsiif_request {
>> +	uint16_t rqid;		/* private guest value, echoed in resp  */
>> +	uint8_t act;		/* command between backend and frontend */
>> +	uint8_t cmd_len;
>> +
>> +	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
>> +	uint16_t timeout_per_command;	/* The command is issued by twice
>> +					   the value in Backend. */
>> +	uint16_t channel, id, lun;
>> +	uint16_t padding;
>> +	uint8_t sc_data_direction;	/* for DMA_TO_DEVICE(1)
>> +					   DMA_FROM_DEVICE(2)
>> +					   DMA_NONE(3) requests */
>> +	uint8_t nr_segments;		/* Number of pieces of scatter-gather */
>> +
>> +	vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE];
>> +	uint32_t reserved[3];
>> +};
>
> Could you add a comment saying what the size should be. Just in case
> somebody extends this in the future - they will know the limits.

Okay.

>
>> +typedef struct vscsiif_request vscsiif_request_t;
>
> Ditto.
>> +
>> +#define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) /		\
>> +				sizeof(vscsiif_segment_t))
>
> That -4 ought to have a comment. In case the scsiif_request_segment
> gets a new member or such.

I just removed VSCSIIF_SG_LIST_SIZE and struct vscsiif_sg_list as they
are never used.

>> +
>> +struct vscsiif_sg_list {
>> +	/* First two fields must match struct vscsiif_request! */
>> +	uint16_t rqid;		/* private guest value, must match main req */
>> +	uint8_t act;		/* VSCSIIF_ACT_SCSI_SG_PRESET */
>> +	uint8_t nr_segments;	/* Number of pieces of scatter-gather */
>> +	vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE];
>> +};
>> +typedef struct vscsiif_sg_list vscsiif_sg_list_t;
>
> Ditto.
>> +
>> +struct vscsiif_response {
>> +	uint16_t rqid;
>> +	uint8_t act;		/* valid only when backend supports SG_PRESET */
>
> Same comment? First two fields must match 'struct vscsiif_request' ?

No. act has no meaning (SG_PRESET was never supported), rqid is used to
identify the request to which the response belongs. Added a comment.

>
>> +	uint8_t sense_len;
>> +	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
>> +	int32_t rslt;
>> +	uint32_t residual_len;	/* request bufflen -
>> +				   return the value from physical device */
>> +	uint32_t reserved[36];
>> +};
>> +typedef struct vscsiif_response vscsiif_response_t;
>
> I think you know what I am going to say :-)

No idea. ;-)


Juergen

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [Xen-devel] [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-27 17:11   ` [Xen-devel] " Konrad Rzeszutek Wilk
  2014-06-30  8:26     ` Juergen Gross
  2014-06-30  8:26     ` [Xen-devel] " Juergen Gross
@ 2014-06-30 11:02     ` Jan Beulich
  2014-06-30 11:18       ` Juergen Gross
  2014-06-30 11:18       ` [Xen-devel] " Juergen Gross
  2014-06-30 11:02     ` Jan Beulich
  3 siblings, 2 replies; 34+ messages in thread
From: Jan Beulich @ 2014-06-30 11:02 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk; +Cc: xen-devel, JBottomley, Juergen Gross, linux-scsi

>>> On 27.06.14 at 19:11, <konrad.wilk@oracle.com> wrote:
> On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
>> +/*
>> + * Maximum scatter/gather segments per request.
>> + *
>> + * Considering balance between allocating at least 16 "vscsiif_request"
>> + * structures on one page (4096 bytes) and the number of scatter/gather
>> + * elements needed, we decided to use 26 as a magic number.
>> + */
>> +#define VSCSIIF_SG_TABLESIZE		26
>> +
>> +/*
>> + * based on Linux kernel 2.6.18
> 
> This being a bit more .. new, - do these sizes make sense anymore?
> Should they be extended a bit? Or have support for using the
> old ones (as default) and then negotiate new sizes with the kernel?
> (If of course there is a difference?)

As Jürgen already said (and as you should have noticed yourself) -
this is an interface definition that we can't just change. Negotiation
of larger counts is an option, which is what VSCSIIF_ACT_SCSI_SG_PRESET
is intended for (the implementation of which isn't part of this patchset
afaics, but could be made available on top of it).

Most if not all of your other comments are a little questionable too
in this context - any parts you aren't happy about would really
better be addressed towards the canonical header in the Xen tree.
(I know you had other interface headers diverge too in their Linux
incarnation, but personally I don't think this is an appropriate thing
to do, perhaps apart for pure coding style adjustments.)

Jan

Jan
--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-27 17:11   ` [Xen-devel] " Konrad Rzeszutek Wilk
                       ` (2 preceding siblings ...)
  2014-06-30 11:02     ` Jan Beulich
@ 2014-06-30 11:02     ` Jan Beulich
  3 siblings, 0 replies; 34+ messages in thread
From: Jan Beulich @ 2014-06-30 11:02 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk; +Cc: Juergen Gross, linux-scsi, JBottomley, xen-devel

>>> On 27.06.14 at 19:11, <konrad.wilk@oracle.com> wrote:
> On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
>> +/*
>> + * Maximum scatter/gather segments per request.
>> + *
>> + * Considering balance between allocating at least 16 "vscsiif_request"
>> + * structures on one page (4096 bytes) and the number of scatter/gather
>> + * elements needed, we decided to use 26 as a magic number.
>> + */
>> +#define VSCSIIF_SG_TABLESIZE		26
>> +
>> +/*
>> + * based on Linux kernel 2.6.18
> 
> This being a bit more .. new, - do these sizes make sense anymore?
> Should they be extended a bit? Or have support for using the
> old ones (as default) and then negotiate new sizes with the kernel?
> (If of course there is a difference?)

As Jürgen already said (and as you should have noticed yourself) -
this is an interface definition that we can't just change. Negotiation
of larger counts is an option, which is what VSCSIIF_ACT_SCSI_SG_PRESET
is intended for (the implementation of which isn't part of this patchset
afaics, but could be made available on top of it).

Most if not all of your other comments are a little questionable too
in this context - any parts you aren't happy about would really
better be addressed towards the canonical header in the Xen tree.
(I know you had other interface headers diverge too in their Linux
incarnation, but personally I don't think this is an appropriate thing
to do, perhaps apart for pure coding style adjustments.)

Jan

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [Xen-devel] [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-30 11:02     ` Jan Beulich
  2014-06-30 11:18       ` Juergen Gross
@ 2014-06-30 11:18       ` Juergen Gross
  2014-06-30 11:29         ` Jan Beulich
  2014-06-30 11:29         ` [Xen-devel] " Jan Beulich
  1 sibling, 2 replies; 34+ messages in thread
From: Juergen Gross @ 2014-06-30 11:18 UTC (permalink / raw)
  To: Jan Beulich, Konrad Rzeszutek Wilk
  Cc: Juergen Gross, linux-scsi, JBottomley, xen-devel

On 06/30/2014 01:02 PM, Jan Beulich wrote:
>>>> On 27.06.14 at 19:11, <konrad.wilk@oracle.com> wrote:
>> On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
>>> +/*
>>> + * Maximum scatter/gather segments per request.
>>> + *
>>> + * Considering balance between allocating at least 16 "vscsiif_request"
>>> + * structures on one page (4096 bytes) and the number of scatter/gather
>>> + * elements needed, we decided to use 26 as a magic number.
>>> + */
>>> +#define VSCSIIF_SG_TABLESIZE		26
>>> +
>>> +/*
>>> + * based on Linux kernel 2.6.18
>>
>> This being a bit more .. new, - do these sizes make sense anymore?
>> Should they be extended a bit? Or have support for using the
>> old ones (as default) and then negotiate new sizes with the kernel?
>> (If of course there is a difference?)
>
> As Jürgen already said (and as you should have noticed yourself) -
> this is an interface definition that we can't just change. Negotiation
> of larger counts is an option, which is what VSCSIIF_ACT_SCSI_SG_PRESET
> is intended for (the implementation of which isn't part of this patchset
> afaics, but could be made available on top of it).

I don't think this is the proper way to handle larger SG lists. The
VSCSIIF_ACT_SCSI_SG_PRESET option would just bump the maximum length
up to 31 (currently 26). Or was it thought to be incremental (multiple
presets for one request)? I'd rather add a way to specify SG lists
residing in an own (granted) page (or even multiple pages). This would
allow 512*26 SG entries without having to change the request structure.
The capability to handle this feature could be indicated via xenstore.

> Most if not all of your other comments are a little questionable too
> in this context - any parts you aren't happy about would really
> better be addressed towards the canonical header in the Xen tree.
> (I know you had other interface headers diverge too in their Linux
> incarnation, but personally I don't think this is an appropriate thing
> to do, perhaps apart for pure coding style adjustments.)


Juergen

--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-30 11:02     ` Jan Beulich
@ 2014-06-30 11:18       ` Juergen Gross
  2014-06-30 11:18       ` [Xen-devel] " Juergen Gross
  1 sibling, 0 replies; 34+ messages in thread
From: Juergen Gross @ 2014-06-30 11:18 UTC (permalink / raw)
  To: Jan Beulich, Konrad Rzeszutek Wilk
  Cc: Juergen Gross, JBottomley, linux-scsi, xen-devel

On 06/30/2014 01:02 PM, Jan Beulich wrote:
>>>> On 27.06.14 at 19:11, <konrad.wilk@oracle.com> wrote:
>> On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
>>> +/*
>>> + * Maximum scatter/gather segments per request.
>>> + *
>>> + * Considering balance between allocating at least 16 "vscsiif_request"
>>> + * structures on one page (4096 bytes) and the number of scatter/gather
>>> + * elements needed, we decided to use 26 as a magic number.
>>> + */
>>> +#define VSCSIIF_SG_TABLESIZE		26
>>> +
>>> +/*
>>> + * based on Linux kernel 2.6.18
>>
>> This being a bit more .. new, - do these sizes make sense anymore?
>> Should they be extended a bit? Or have support for using the
>> old ones (as default) and then negotiate new sizes with the kernel?
>> (If of course there is a difference?)
>
> As Jürgen already said (and as you should have noticed yourself) -
> this is an interface definition that we can't just change. Negotiation
> of larger counts is an option, which is what VSCSIIF_ACT_SCSI_SG_PRESET
> is intended for (the implementation of which isn't part of this patchset
> afaics, but could be made available on top of it).

I don't think this is the proper way to handle larger SG lists. The
VSCSIIF_ACT_SCSI_SG_PRESET option would just bump the maximum length
up to 31 (currently 26). Or was it thought to be incremental (multiple
presets for one request)? I'd rather add a way to specify SG lists
residing in an own (granted) page (or even multiple pages). This would
allow 512*26 SG entries without having to change the request structure.
The capability to handle this feature could be indicated via xenstore.

> Most if not all of your other comments are a little questionable too
> in this context - any parts you aren't happy about would really
> better be addressed towards the canonical header in the Xen tree.
> (I know you had other interface headers diverge too in their Linux
> incarnation, but personally I don't think this is an appropriate thing
> to do, perhaps apart for pure coding style adjustments.)


Juergen


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [Xen-devel] [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-30 11:18       ` [Xen-devel] " Juergen Gross
  2014-06-30 11:29         ` Jan Beulich
@ 2014-06-30 11:29         ` Jan Beulich
  1 sibling, 0 replies; 34+ messages in thread
From: Jan Beulich @ 2014-06-30 11:29 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk, Juergen Gross; +Cc: xen-devel, JBottomley, linux-scsi

>>> On 30.06.14 at 13:18, <JGross@suse.com> wrote:
> On 06/30/2014 01:02 PM, Jan Beulich wrote:
>>>>> On 27.06.14 at 19:11, <konrad.wilk@oracle.com> wrote:
>>> On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
>>>> +/*
>>>> + * Maximum scatter/gather segments per request.
>>>> + *
>>>> + * Considering balance between allocating at least 16 "vscsiif_request"
>>>> + * structures on one page (4096 bytes) and the number of scatter/gather
>>>> + * elements needed, we decided to use 26 as a magic number.
>>>> + */
>>>> +#define VSCSIIF_SG_TABLESIZE		26
>>>> +
>>>> +/*
>>>> + * based on Linux kernel 2.6.18
>>>
>>> This being a bit more .. new, - do these sizes make sense anymore?
>>> Should they be extended a bit? Or have support for using the
>>> old ones (as default) and then negotiate new sizes with the kernel?
>>> (If of course there is a difference?)
>>
>> As Jürgen already said (and as you should have noticed yourself) -
>> this is an interface definition that we can't just change. Negotiation
>> of larger counts is an option, which is what VSCSIIF_ACT_SCSI_SG_PRESET
>> is intended for (the implementation of which isn't part of this patchset
>> afaics, but could be made available on top of it).
> 
> I don't think this is the proper way to handle larger SG lists. The
> VSCSIIF_ACT_SCSI_SG_PRESET option would just bump the maximum length
> up to 31 (currently 26). Or was it thought to be incremental (multiple
> presets for one request)?

Yes, that's how it's to be used. See xen-vscsi-large-requests.patch
in the newer of our trees.

> I'd rather add a way to specify SG lists
> residing in an own (granted) page (or even multiple pages). This would
> allow 512*26 SG entries without having to change the request structure.
> The capability to handle this feature could be indicated via xenstore.

And yes, considering the similar changes in blkif I agree that this
would be the preferred method today. That patch, however,
predates those blkif enhancements.

Jan
--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 1/4] Add XEN pvSCSI protocol description
  2014-06-30 11:18       ` [Xen-devel] " Juergen Gross
@ 2014-06-30 11:29         ` Jan Beulich
  2014-06-30 11:29         ` [Xen-devel] " Jan Beulich
  1 sibling, 0 replies; 34+ messages in thread
From: Jan Beulich @ 2014-06-30 11:29 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk, Juergen Gross; +Cc: linux-scsi, JBottomley, xen-devel

>>> On 30.06.14 at 13:18, <JGross@suse.com> wrote:
> On 06/30/2014 01:02 PM, Jan Beulich wrote:
>>>>> On 27.06.14 at 19:11, <konrad.wilk@oracle.com> wrote:
>>> On Fri, Jun 27, 2014 at 04:34:33PM +0200, jgross@suse.com wrote:
>>>> +/*
>>>> + * Maximum scatter/gather segments per request.
>>>> + *
>>>> + * Considering balance between allocating at least 16 "vscsiif_request"
>>>> + * structures on one page (4096 bytes) and the number of scatter/gather
>>>> + * elements needed, we decided to use 26 as a magic number.
>>>> + */
>>>> +#define VSCSIIF_SG_TABLESIZE		26
>>>> +
>>>> +/*
>>>> + * based on Linux kernel 2.6.18
>>>
>>> This being a bit more .. new, - do these sizes make sense anymore?
>>> Should they be extended a bit? Or have support for using the
>>> old ones (as default) and then negotiate new sizes with the kernel?
>>> (If of course there is a difference?)
>>
>> As Jürgen already said (and as you should have noticed yourself) -
>> this is an interface definition that we can't just change. Negotiation
>> of larger counts is an option, which is what VSCSIIF_ACT_SCSI_SG_PRESET
>> is intended for (the implementation of which isn't part of this patchset
>> afaics, but could be made available on top of it).
> 
> I don't think this is the proper way to handle larger SG lists. The
> VSCSIIF_ACT_SCSI_SG_PRESET option would just bump the maximum length
> up to 31 (currently 26). Or was it thought to be incremental (multiple
> presets for one request)?

Yes, that's how it's to be used. See xen-vscsi-large-requests.patch
in the newer of our trees.

> I'd rather add a way to specify SG lists
> residing in an own (granted) page (or even multiple pages). This would
> allow 512*26 SG entries without having to change the request structure.
> The capability to handle this feature could be indicated via xenstore.

And yes, considering the similar changes in blkif I agree that this
would be the preferred method today. That patch, however,
predates those blkif enhancements.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [Xen-devel] [PATCH 2/4] Introduce xen-scsifront module
  2014-06-27 14:34 ` jgross
  2014-06-30 13:35   ` David Vrabel
@ 2014-06-30 13:35   ` David Vrabel
  2014-06-30 13:51     ` Juergen Gross
  2014-06-30 13:51     ` Juergen Gross
  1 sibling, 2 replies; 34+ messages in thread
From: David Vrabel @ 2014-06-30 13:35 UTC (permalink / raw)
  To: jgross, JBottomley, linux-scsi, xen-devel

On 27/06/14 15:34, jgross@suse.com wrote:
> 
> +/*	.resume			= scsifront_resume, */

Following on from the recent discussion on migration with front/backends
I thought I'd review how this new driver does it.

Is there an expectation that a VM with a PV scsi device cannot be
restored or migrated?

David

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 2/4] Introduce xen-scsifront module
  2014-06-27 14:34 ` jgross
@ 2014-06-30 13:35   ` David Vrabel
  2014-06-30 13:35   ` [Xen-devel] " David Vrabel
  1 sibling, 0 replies; 34+ messages in thread
From: David Vrabel @ 2014-06-30 13:35 UTC (permalink / raw)
  To: jgross, JBottomley, linux-scsi, xen-devel

On 27/06/14 15:34, jgross@suse.com wrote:
> 
> +/*	.resume			= scsifront_resume, */

Following on from the recent discussion on migration with front/backends
I thought I'd review how this new driver does it.

Is there an expectation that a VM with a PV scsi device cannot be
restored or migrated?

David

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [Xen-devel] [PATCH 2/4] Introduce xen-scsifront module
  2014-06-30 13:35   ` [Xen-devel] " David Vrabel
@ 2014-06-30 13:51     ` Juergen Gross
  2014-06-30 13:51     ` Juergen Gross
  1 sibling, 0 replies; 34+ messages in thread
From: Juergen Gross @ 2014-06-30 13:51 UTC (permalink / raw)
  To: David Vrabel, jgross, JBottomley, linux-scsi, xen-devel

On 06/30/2014 03:35 PM, David Vrabel wrote:
> On 27/06/14 15:34, jgross@suse.com wrote:
>>
>> +/*	.resume			= scsifront_resume, */
>
> Following on from the recent discussion on migration with front/backends
> I thought I'd review how this new driver does it.

"New" is an attribute I wouldn't use in this case. The driver was
introduced in 2008, this is just a rework to be able to put it in
the upstream kernel.

> Is there an expectation that a VM with a PV scsi device cannot be
> restored or migrated?


It has been so in the past.

To change this is on my todo list, but that's not top priority.


Juergen

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 2/4] Introduce xen-scsifront module
  2014-06-30 13:35   ` [Xen-devel] " David Vrabel
  2014-06-30 13:51     ` Juergen Gross
@ 2014-06-30 13:51     ` Juergen Gross
  1 sibling, 0 replies; 34+ messages in thread
From: Juergen Gross @ 2014-06-30 13:51 UTC (permalink / raw)
  To: David Vrabel, jgross, JBottomley, linux-scsi, xen-devel

On 06/30/2014 03:35 PM, David Vrabel wrote:
> On 27/06/14 15:34, jgross@suse.com wrote:
>>
>> +/*	.resume			= scsifront_resume, */
>
> Following on from the recent discussion on migration with front/backends
> I thought I'd review how this new driver does it.

"New" is an attribute I wouldn't use in this case. The driver was
introduced in 2008, this is just a rework to be able to put it in
the upstream kernel.

> Is there an expectation that a VM with a PV scsi device cannot be
> restored or migrated?


It has been so in the past.

To change this is on my todo list, but that's not top priority.


Juergen

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: Re: [PATCH 3/4] Introduce XEN scsiback module
  2014-06-28 18:09   ` Christoph Hellwig
  2014-07-11  8:57     ` Juergen Gross
@ 2014-07-11  8:57     ` Juergen Gross
  2014-07-11  9:04       ` Christoph Hellwig
  2014-07-11  9:04       ` Christoph Hellwig
  1 sibling, 2 replies; 34+ messages in thread
From: Juergen Gross @ 2014-07-11  8:57 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: JBottomley, linux-scsi, xen-devel, target-devel

On 06/28/2014 08:09 PM, Christoph Hellwig wrote:
> On Fri, Jun 27, 2014 at 04:34:35PM +0200, jgross@suse.com wrote:
>> From: Juergen Gross <jgross@suse.com>
>>
>> Introduces the XEN pvSCSI backend. With pvSCSI it is possible for a XEN domU
>> to issue SCSI commands to a SCSI LUN assigned to that domU. The SCSI commands
>> are passed to the pvSCSI backend in a driver domain (usually Dom0) which is
>> owner of the physical device. This allows e.g. to use SCSI tape drives in a
>> XEN domU.
>
> This should be written against the generic target core infrastructure
> in drivers/target, instead of introducing another simplistic pass
> through only SCSI target.
> .
>

Just to be sure: you mean something like a combined version of
tcm_vhost and virtio_scsi?


Juergen

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 3/4] Introduce XEN scsiback module
  2014-06-28 18:09   ` Christoph Hellwig
@ 2014-07-11  8:57     ` Juergen Gross
  2014-07-11  8:57     ` Juergen Gross
  1 sibling, 0 replies; 34+ messages in thread
From: Juergen Gross @ 2014-07-11  8:57 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: target-devel, linux-scsi, JBottomley, xen-devel

On 06/28/2014 08:09 PM, Christoph Hellwig wrote:
> On Fri, Jun 27, 2014 at 04:34:35PM +0200, jgross@suse.com wrote:
>> From: Juergen Gross <jgross@suse.com>
>>
>> Introduces the XEN pvSCSI backend. With pvSCSI it is possible for a XEN domU
>> to issue SCSI commands to a SCSI LUN assigned to that domU. The SCSI commands
>> are passed to the pvSCSI backend in a driver domain (usually Dom0) which is
>> owner of the physical device. This allows e.g. to use SCSI tape drives in a
>> XEN domU.
>
> This should be written against the generic target core infrastructure
> in drivers/target, instead of introducing another simplistic pass
> through only SCSI target.
> .
>

Just to be sure: you mean something like a combined version of
tcm_vhost and virtio_scsi?


Juergen

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 3/4] Introduce XEN scsiback module
  2014-07-11  8:57     ` Juergen Gross
@ 2014-07-11  9:04       ` Christoph Hellwig
  2014-07-11  9:04       ` Christoph Hellwig
  1 sibling, 0 replies; 34+ messages in thread
From: Christoph Hellwig @ 2014-07-11  9:04 UTC (permalink / raw)
  To: Juergen Gross
  Cc: Christoph Hellwig, JBottomley, linux-scsi, xen-devel, target-devel

On Fri, Jul 11, 2014 at 10:57:50AM +0200, Juergen Gross wrote:
> >This should be written against the generic target core infrastructure
> >in drivers/target, instead of introducing another simplistic pass
> >through only SCSI target.
> >.
> >
> 
> Just to be sure: you mean something like a combined version of
> tcm_vhost and virtio_scsi?

It would be roughly equivalent to tcm_vhost.  virtio_scsi is the
equivalent of scsifront in Xen.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH 3/4] Introduce XEN scsiback module
  2014-07-11  8:57     ` Juergen Gross
  2014-07-11  9:04       ` Christoph Hellwig
@ 2014-07-11  9:04       ` Christoph Hellwig
  1 sibling, 0 replies; 34+ messages in thread
From: Christoph Hellwig @ 2014-07-11  9:04 UTC (permalink / raw)
  To: Juergen Gross
  Cc: Christoph Hellwig, target-devel, linux-scsi, JBottomley, xen-devel

On Fri, Jul 11, 2014 at 10:57:50AM +0200, Juergen Gross wrote:
> >This should be written against the generic target core infrastructure
> >in drivers/target, instead of introducing another simplistic pass
> >through only SCSI target.
> >.
> >
> 
> Just to be sure: you mean something like a combined version of
> tcm_vhost and virtio_scsi?

It would be roughly equivalent to tcm_vhost.  virtio_scsi is the
equivalent of scsifront in Xen.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [Xen-devel] Add XEN pvSCSI support
  2014-08-08  7:43 jgross
@ 2014-08-08  7:47 ` Jürgen Groß
  0 siblings, 0 replies; 34+ messages in thread
From: Jürgen Groß @ 2014-08-08  7:47 UTC (permalink / raw)
  To: xen-devel, linux-scsi, target-devel, JBottomley, konrad.wilk,
	hch, david.vrabel, JBeulich

Sorry, please ignore.

One wrong patch slipped in, while one is missing.

Will resend with correct patches.


Juergen

On 08/08/2014 09:43 AM, jgross@suse.com wrote:
> This series adds XEN pvSCSI support. With pvSCSI it is possible to use physical
> SCSI devices from a XEN domain.
>
> The support consists of a backend in the privileged Domain-0 doing the real
> I/O and a frontend in the unprivileged domU passing I/O-requests to the backend.
>
> The code is taken (and adapted) from the original pvSCSI implementation done
> for Linux 2.6 in 2008 by Fujitsu.
>
> [PATCH V2 1/4] Add XEN pvSCSI protocol description
> [PATCH V2 2/4] Introduce xen-scsifront module
> [PATCH V2 3/4] Introduce XEN scsiback module
> [PATCH V2 4/4] add xen pvscsi maintainer
>
> Changes in V4:
> - Re-add define for VSCSIIF_ACT_SCSI_SG_PRESET to vscsiif.h to indicate this
>    action value should not be used in future enhancements
>
> Changes in V3:
> - added some comments to the protocol header file
> - removed the CDB emulation from xen-scsiback, handled by core target
>    infrastructure
> - several changes in xen-scsifront after comments from Christoph Hellwig
>
> Changes in V2:
> - use core target infrastructure by backend instead of pure SCSI passthrough
> - add support for larger SG lists by putting them in grant page(s)
> - add command abort capability
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel
>


^ permalink raw reply	[flat|nested] 34+ messages in thread

end of thread, other threads:[~2014-08-08  7:47 UTC | newest]

Thread overview: 34+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-06-27 14:34 Add XEN pvSCSI support jgross
2014-06-27 14:34 ` [PATCH 1/4] Add XEN pvSCSI protocol description jgross
2014-06-27 17:11   ` Konrad Rzeszutek Wilk
2014-06-27 17:11   ` [Xen-devel] " Konrad Rzeszutek Wilk
2014-06-30  8:26     ` Juergen Gross
2014-06-30  8:26     ` [Xen-devel] " Juergen Gross
2014-06-30 11:02     ` Jan Beulich
2014-06-30 11:18       ` Juergen Gross
2014-06-30 11:18       ` [Xen-devel] " Juergen Gross
2014-06-30 11:29         ` Jan Beulich
2014-06-30 11:29         ` [Xen-devel] " Jan Beulich
2014-06-30 11:02     ` Jan Beulich
2014-06-27 14:34 ` jgross
2014-06-27 14:34 ` [PATCH 2/4] Introduce xen-scsifront module jgross
2014-06-27 14:34 ` jgross
2014-06-30 13:35   ` David Vrabel
2014-06-30 13:35   ` [Xen-devel] " David Vrabel
2014-06-30 13:51     ` Juergen Gross
2014-06-30 13:51     ` Juergen Gross
2014-06-27 14:34 ` [PATCH 3/4] Introduce XEN scsiback module jgross
2014-06-27 14:34 ` jgross
2014-06-28 18:09   ` Christoph Hellwig
2014-07-11  8:57     ` Juergen Gross
2014-07-11  8:57     ` Juergen Gross
2014-07-11  9:04       ` Christoph Hellwig
2014-07-11  9:04       ` Christoph Hellwig
2014-06-28 18:09   ` Christoph Hellwig
2014-06-27 14:34 ` [PATCH 4/4] add xen pvscsi maintainer jgross
2014-06-27 14:34 ` jgross
2014-06-27 17:13   ` Konrad Rzeszutek Wilk
2014-06-27 17:45 ` Add XEN pvSCSI support Pasi Kärkkäinen
2014-06-27 17:51   ` Pasi Kärkkäinen
2014-06-27 17:51   ` [Xen-devel] " Pasi Kärkkäinen
2014-08-08  7:43 jgross
2014-08-08  7:47 ` [Xen-devel] " Jürgen Groß

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.