xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Julien Grall <julien.grall@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: ian.campbell@citrix.com, stefano.stabellini@eu.citrix.com,
	linux-kernel@vger.kernel.org,
	"Julien Grall" <julien.grall@citrix.com>,
	"David Vrabel" <david.vrabel@citrix.com>,
	"Boris Ostrovsky" <boris.ostrovsky@oracle.com>,
	linux-arm-kernel@lists.infradead.org,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH v2 05/20] block/xen-blkfront: Split blkif_queue_request in 2
Date: Thu, 9 Jul 2015 21:42:17 +0100	[thread overview]
Message-ID: <1436474552-31789-6-git-send-email-julien.grall__24912.4251342893$1436474697$gmane$org@citrix.com> (raw)
In-Reply-To: <1436474552-31789-1-git-send-email-julien.grall@citrix.com>

Currently, blkif_queue_request has 2 distinct execution paths:
    - Send a discard request
    - Send a read/write request

The function also allocates grants to use for generating the
request, although this is only needed for read/write requests.

Rather than having a single function with 2 distinct execution paths,
split the function in 2. This will also remove one level of indentation.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Roger Pau Monné <roger.pau@citrix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
---
    Changes in v2:
        - Patch added
---
 drivers/block/xen-blkfront.c | 280 +++++++++++++++++++++++--------------------
 1 file changed, 153 insertions(+), 127 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed3..7107d58 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -392,13 +392,35 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 	return 0;
 }
 
-/*
- * Generate a Xen blkfront IO request from a blk layer request.  Reads
- * and writes are handled as expected.
- *
- * @req: a request struct
- */
-static int blkif_queue_request(struct request *req)
+static int blkif_queue_discard_req(struct request *req)
+{
+	struct blkfront_info *info = req->rq_disk->private_data;
+	struct blkif_request *ring_req;
+	unsigned long id;
+
+	/* Fill out a communications ring structure. */
+	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+	id = get_id_from_freelist(info);
+	info->shadow[id].request = req;
+
+	ring_req->operation = BLKIF_OP_DISCARD;
+	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+	ring_req->u.discard.id = id;
+	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
+	if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
+		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+	else
+		ring_req->u.discard.flag = 0;
+
+	info->ring.req_prod_pvt++;
+
+	/* Keep a private copy so we can reissue requests when recovering. */
+	info->shadow[id].req = *ring_req;
+
+	return 0;
+}
+
+static int blkif_queue_rw_req(struct request *req)
 {
 	struct blkfront_info *info = req->rq_disk->private_data;
 	struct blkif_request *ring_req;
@@ -418,9 +440,6 @@ static int blkif_queue_request(struct request *req)
 	struct scatterlist *sg;
 	int nseg, max_grefs;
 
-	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
-		return 1;
-
 	max_grefs = req->nr_phys_segments;
 	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
 		/*
@@ -450,139 +469,128 @@ static int blkif_queue_request(struct request *req)
 	id = get_id_from_freelist(info);
 	info->shadow[id].request = req;
 
-	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
-		ring_req->operation = BLKIF_OP_DISCARD;
-		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
-		ring_req->u.discard.id = id;
-		ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
-		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
-			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
-		else
-			ring_req->u.discard.flag = 0;
+	BUG_ON(info->max_indirect_segments == 0 &&
+	       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	BUG_ON(info->max_indirect_segments &&
+	       req->nr_phys_segments > info->max_indirect_segments);
+	nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
+	ring_req->u.rw.id = id;
+	if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+		/*
+		 * The indirect operation can only be a BLKIF_OP_READ or
+		 * BLKIF_OP_WRITE
+		 */
+		BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+		ring_req->operation = BLKIF_OP_INDIRECT;
+		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
+			BLKIF_OP_WRITE : BLKIF_OP_READ;
+		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
+		ring_req->u.indirect.handle = info->handle;
+		ring_req->u.indirect.nr_segments = nseg;
 	} else {
-		BUG_ON(info->max_indirect_segments == 0 &&
-		       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
-		BUG_ON(info->max_indirect_segments &&
-		       req->nr_phys_segments > info->max_indirect_segments);
-		nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
-		ring_req->u.rw.id = id;
-		if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+		ring_req->u.rw.handle = info->handle;
+		ring_req->operation = rq_data_dir(req) ?
+			BLKIF_OP_WRITE : BLKIF_OP_READ;
+		if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
 			/*
-			 * The indirect operation can only be a BLKIF_OP_READ or
-			 * BLKIF_OP_WRITE
+			 * Ideally we can do an unordered flush-to-disk. In case the
+			 * backend onlysupports barriers, use that. A barrier request
+			 * a superset of FUA, so we can implement it the same
+			 * way.  (It's also a FLUSH+FUA, since it is
+			 * guaranteed ordered WRT previous writes.)
 			 */
-			BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
-			ring_req->operation = BLKIF_OP_INDIRECT;
-			ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
-				BLKIF_OP_WRITE : BLKIF_OP_READ;
-			ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
-			ring_req->u.indirect.handle = info->handle;
-			ring_req->u.indirect.nr_segments = nseg;
-		} else {
-			ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
-			ring_req->u.rw.handle = info->handle;
-			ring_req->operation = rq_data_dir(req) ?
-				BLKIF_OP_WRITE : BLKIF_OP_READ;
-			if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
-				/*
-				 * Ideally we can do an unordered flush-to-disk. In case the
-				 * backend onlysupports barriers, use that. A barrier request
-				 * a superset of FUA, so we can implement it the same
-				 * way.  (It's also a FLUSH+FUA, since it is
-				 * guaranteed ordered WRT previous writes.)
-				 */
-				switch (info->feature_flush &
-					((REQ_FLUSH|REQ_FUA))) {
-				case REQ_FLUSH|REQ_FUA:
-					ring_req->operation =
-						BLKIF_OP_WRITE_BARRIER;
-					break;
-				case REQ_FLUSH:
-					ring_req->operation =
-						BLKIF_OP_FLUSH_DISKCACHE;
-					break;
-				default:
-					ring_req->operation = 0;
-				}
+			switch (info->feature_flush &
+				((REQ_FLUSH|REQ_FUA))) {
+			case REQ_FLUSH|REQ_FUA:
+				ring_req->operation =
+					BLKIF_OP_WRITE_BARRIER;
+				break;
+			case REQ_FLUSH:
+				ring_req->operation =
+					BLKIF_OP_FLUSH_DISKCACHE;
+				break;
+			default:
+				ring_req->operation = 0;
 			}
-			ring_req->u.rw.nr_segments = nseg;
 		}
-		for_each_sg(info->shadow[id].sg, sg, nseg, i) {
-			fsect = sg->offset >> 9;
-			lsect = fsect + (sg->length >> 9) - 1;
-
-			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
-			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
-				unsigned long uninitialized_var(pfn);
-
-				if (segments)
-					kunmap_atomic(segments);
-
-				n = i / SEGS_PER_INDIRECT_FRAME;
-				if (!info->feature_persistent) {
-					struct page *indirect_page;
-
-					/* Fetch a pre-allocated page to use for indirect grefs */
-					BUG_ON(list_empty(&info->indirect_pages));
-					indirect_page = list_first_entry(&info->indirect_pages,
-					                                 struct page, lru);
-					list_del(&indirect_page->lru);
-					pfn = page_to_pfn(indirect_page);
-				}
-				gnt_list_entry = get_grant(&gref_head, pfn, info);
-				info->shadow[id].indirect_grants[n] = gnt_list_entry;
-				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
-				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
+		ring_req->u.rw.nr_segments = nseg;
+	}
+	for_each_sg(info->shadow[id].sg, sg, nseg, i) {
+		fsect = sg->offset >> 9;
+		lsect = fsect + (sg->length >> 9) - 1;
+
+		if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
+		    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
+			unsigned long uninitialized_var(pfn);
+
+			if (segments)
+				kunmap_atomic(segments);
+
+			n = i / SEGS_PER_INDIRECT_FRAME;
+			if (!info->feature_persistent) {
+				struct page *indirect_page;
+
+				/* Fetch a pre-allocated page to use for indirect grefs */
+				BUG_ON(list_empty(&info->indirect_pages));
+				indirect_page = list_first_entry(&info->indirect_pages,
+				                                 struct page, lru);
+				list_del(&indirect_page->lru);
+				pfn = page_to_pfn(indirect_page);
 			}
+			gnt_list_entry = get_grant(&gref_head, pfn, info);
+			info->shadow[id].indirect_grants[n] = gnt_list_entry;
+			segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
+			ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
+		}
 
-			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
-			ref = gnt_list_entry->gref;
+		gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
+		ref = gnt_list_entry->gref;
 
-			info->shadow[id].grants_used[i] = gnt_list_entry;
+		info->shadow[id].grants_used[i] = gnt_list_entry;
 
-			if (rq_data_dir(req) && info->feature_persistent) {
-				char *bvec_data;
-				void *shared_data;
+		if (rq_data_dir(req) && info->feature_persistent) {
+			char *bvec_data;
+			void *shared_data;
 
-				BUG_ON(sg->offset + sg->length > PAGE_SIZE);
+			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 
-				shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
-				bvec_data = kmap_atomic(sg_page(sg));
+			shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
+			bvec_data = kmap_atomic(sg_page(sg));
 
-				/*
-				 * this does not wipe data stored outside the
-				 * range sg->offset..sg->offset+sg->length.
-				 * Therefore, blkback *could* see data from
-				 * previous requests. This is OK as long as
-				 * persistent grants are shared with just one
-				 * domain. It may need refactoring if this
-				 * changes
-				 */
-				memcpy(shared_data + sg->offset,
-				       bvec_data   + sg->offset,
-				       sg->length);
+			/*
+			 * this does not wipe data stored outside the
+			 * range sg->offset..sg->offset+sg->length.
+			 * Therefore, blkback *could* see data from
+			 * previous requests. This is OK as long as
+			 * persistent grants are shared with just one
+			 * domain. It may need refactoring if this
+			 * changes
+			 */
+			memcpy(shared_data + sg->offset,
+			       bvec_data   + sg->offset,
+			       sg->length);
 
-				kunmap_atomic(bvec_data);
-				kunmap_atomic(shared_data);
-			}
-			if (ring_req->operation != BLKIF_OP_INDIRECT) {
-				ring_req->u.rw.seg[i] =
-						(struct blkif_request_segment) {
-							.gref       = ref,
-							.first_sect = fsect,
-							.last_sect  = lsect };
-			} else {
-				n = i % SEGS_PER_INDIRECT_FRAME;
-				segments[n] =
+			kunmap_atomic(bvec_data);
+			kunmap_atomic(shared_data);
+		}
+		if (ring_req->operation != BLKIF_OP_INDIRECT) {
+			ring_req->u.rw.seg[i] =
 					(struct blkif_request_segment) {
-							.gref       = ref,
-							.first_sect = fsect,
-							.last_sect  = lsect };
-			}
+						.gref       = ref,
+						.first_sect = fsect,
+						.last_sect  = lsect };
+		} else {
+			n = i % SEGS_PER_INDIRECT_FRAME;
+			segments[n] =
+				(struct blkif_request_segment) {
+						.gref       = ref,
+						.first_sect = fsect,
+						.last_sect  = lsect };
 		}
-		if (segments)
-			kunmap_atomic(segments);
 	}
+	if (segments)
+		kunmap_atomic(segments);
 
 	info->ring.req_prod_pvt++;
 
@@ -595,6 +603,24 @@ static int blkif_queue_request(struct request *req)
 	return 0;
 }
 
+/*
+ * Generate a Xen blkfront IO request from a blk layer request.  Reads
+ * and writes are handled as expected.
+ *
+ * @req: a request struct
+ */
+static int blkif_queue_request(struct request *req)
+{
+	struct blkfront_info *info = req->rq_disk->private_data;
+
+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+		return 1;
+
+	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
+		return blkif_queue_discard_req(req);
+	else
+		return blkif_queue_rw_req(req);
+}
 
 static inline void flush_requests(struct blkfront_info *info)
 {
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

  parent reply	other threads:[~2015-07-09 20:44 UTC|newest]

Thread overview: 89+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <1436474552-31789-1-git-send-email-julien.grall@citrix.com>
2015-07-09 20:42 ` [PATCH v2 01/20] xen: Add Xen specific page definition Julien Grall
2015-07-16 14:19   ` Stefano Stabellini
     [not found]   ` <alpine.DEB.2.02.1507161515420.17378@kaball.uk.xensource.com>
2015-07-16 14:52     ` Julien Grall
2015-07-24  9:28   ` David Vrabel
     [not found]   ` <55B20540.3020000@citrix.com>
2015-07-24  9:39     ` Julien Grall
     [not found]     ` <55B207C6.4020300@citrix.com>
2015-07-24  9:48       ` David Vrabel
     [not found]       ` <55B209D9.1080602@citrix.com>
2015-07-24  9:51         ` Julien Grall
     [not found]         ` <55B20ABE.7000609@citrix.com>
2015-07-24 10:34           ` David Vrabel
     [not found]           ` <55B214C5.3020501@citrix.com>
2015-07-24 10:43             ` Ian Campbell
2015-07-24 13:03             ` Julien Grall
2015-07-09 20:42 ` [PATCH v2 02/20] xen: Introduce a function to split a Linux page into Xen page Julien Grall
2015-07-16 14:23   ` Stefano Stabellini
     [not found]   ` <alpine.DEB.2.02.1507161520330.17378@kaball.uk.xensource.com>
2015-07-16 14:54     ` Julien Grall
     [not found]     ` <55A7C5A1.7060006@citrix.com>
2015-07-16 15:19       ` Andrew Cooper
2015-07-16 16:09         ` Julien Grall
2015-07-16 16:13           ` Andrew Cooper
2015-07-24  9:31   ` David Vrabel
     [not found]   ` <55B205FB.5080209@citrix.com>
2015-07-24  9:54     ` Julien Grall
     [not found]     ` <55B20B56.7020605@citrix.com>
2015-07-24 10:10       ` David Vrabel
     [not found]       ` <55B20F1F.60902@citrix.com>
2015-07-24 10:20         ` Julien Grall
2015-08-05 14:30         ` Julien Grall
     [not found]         ` <55C21DF3.2090201@citrix.com>
2015-08-05 15:50           ` David Vrabel
     [not found]           ` <55C230C9.7060506@citrix.com>
2015-08-05 16:06             ` Julien Grall
2015-07-09 20:42 ` [PATCH v2 03/20] xen/grant: Introduce helpers to split a page into grant Julien Grall
2015-07-09 20:42 ` [PATCH v2 04/20] xen/grant: Add helper gnttab_page_grant_foreign_access_ref Julien Grall
2015-07-09 20:42 ` Julien Grall [this message]
2015-07-09 20:42 ` [PATCH v2 06/20] block/xen-blkfront: Store a page rather a pfn in the grant structure Julien Grall
2015-07-09 20:42 ` [PATCH v2 07/20] block/xen-blkfront: split get_grant in 2 Julien Grall
2015-07-09 20:42 ` [PATCH v2 08/20] net/xen-netback: xenvif_gop_frag_copy: move GSO check out of the loop Julien Grall
2015-07-09 20:42 ` [PATCH v2 09/20] xen/biomerge: Don't allow biovec to be merge when Linux is not using 4KB page Julien Grall
2015-07-10 19:12   ` Konrad Rzeszutek Wilk
     [not found]   ` <20150710191245.GA31063@l.oracle.com>
2015-07-15  8:56     ` Julien Grall
2015-07-16 15:33     ` Stefano Stabellini
     [not found]     ` <alpine.DEB.2.02.1507161627010.17378@kaball.uk.xensource.com>
2015-07-16 16:15       ` Julien Grall
     [not found]       ` <55A7D8AD.1090102@citrix.com>
2015-07-16 18:30         ` Konrad Rzeszutek Wilk
2015-07-17 13:20         ` Stefano Stabellini
     [not found]         ` <alpine.DEB.2.02.1507171418260.17378@kaball.uk.xensource.com>
2015-07-17 14:44           ` Julien Grall
     [not found]           ` <55A914D5.7080900@citrix.com>
2015-07-17 14:45             ` Stefano Stabellini
     [not found]             ` <alpine.DEB.2.02.1507171545170.17378@kaball.uk.xensource.com>
2015-07-17 14:46               ` Julien Grall
2015-07-09 20:42 ` [PATCH v2 10/20] xen/xenbus: Use Xen page definition Julien Grall
2015-07-16 15:35   ` Stefano Stabellini
2015-07-24  9:49   ` David Vrabel
2015-07-09 20:42 ` [PATCH v2 11/20] tty/hvc: xen: Use xen " Julien Grall
2015-07-09 20:42 ` [PATCH v2 12/20] xen/balloon: Don't rely on the page granularity is the same for Xen and Linux Julien Grall
2015-07-17 14:03   ` Stefano Stabellini
     [not found]   ` <alpine.DEB.2.02.1507161819200.17378@kaball.uk.xensource.com>
2015-07-17 14:32     ` Julien Grall
2015-07-09 20:42 ` [PATCH v2 13/20] xen/events: fifo: Make it running on 64KB granularity Julien Grall
2015-07-09 20:42 ` [PATCH v2 14/20] xen/grant-table: " Julien Grall
2015-07-09 20:42 ` [PATCH v2 15/20] block/xen-blkfront: Make it running on 64KB page granularity Julien Grall
2015-07-21 11:06   ` Roger Pau Monné
     [not found]   ` <55AE27C2.8090803@citrix.com>
2015-07-21 13:07     ` Julien Grall
2015-07-09 20:42 ` [PATCH v2 16/20] block/xen-blkback: " Julien Grall
2015-07-09 20:42 ` [PATCH v2 17/20] net/xen-netfront: " Julien Grall
2015-07-09 20:42 ` [PATCH v2 18/20] net/xen-netback: " Julien Grall
2015-07-09 20:42 ` [PATCH v2 19/20] xen/privcmd: Add support for Linux " Julien Grall
2015-07-09 20:42 ` [PATCH v2 20/20] arm/xen: Add support for " Julien Grall
     [not found] ` <1436474552-31789-20-git-send-email-julien.grall@citrix.com>
2015-07-13 20:13   ` [PATCH v2 19/20] xen/privcmd: Add support for Linux " Boris Ostrovsky
     [not found]   ` <55A41BE4.3080104@oracle.com>
2015-07-13 22:05     ` Julien Grall
     [not found]     ` <55A43638.4030503@citrix.com>
2015-07-14 15:28       ` Boris Ostrovsky
     [not found]       ` <55A52A9E.2000400@oracle.com>
2015-07-14 15:37         ` Julien Grall
2015-07-16 17:12   ` Stefano Stabellini
     [not found]   ` <alpine.DEB.2.02.1507161707300.17378@kaball.uk.xensource.com>
2015-07-16 17:16     ` Stefano Stabellini
2015-07-17 12:50     ` Julien Grall
     [not found] ` <1436474552-31789-4-git-send-email-julien.grall@citrix.com>
2015-07-16 15:01   ` [PATCH v2 03/20] xen/grant: Introduce helpers to split a page into grant Stefano Stabellini
     [not found]   ` <alpine.DEB.2.02.1507161526030.17378@kaball.uk.xensource.com>
2015-07-16 16:07     ` Julien Grall
     [not found]     ` <55A7D6AC.5060004@citrix.com>
2015-07-17 13:10       ` Julien Grall
     [not found] ` <1436474552-31789-5-git-send-email-julien.grall@citrix.com>
2015-07-16 15:05   ` [PATCH v2 04/20] xen/grant: Add helper gnttab_page_grant_foreign_access_ref Stefano Stabellini
     [not found]   ` <alpine.DEB.2.02.1507161603490.17378@kaball.uk.xensource.com>
2015-07-16 16:12     ` Julien Grall
2015-07-24  9:35   ` David Vrabel
     [not found] ` <1436474552-31789-15-git-send-email-julien.grall@citrix.com>
2015-07-16 15:47   ` [PATCH v2 14/20] xen/grant-table: Make it running on 64KB granularity Stefano Stabellini
     [not found]   ` <alpine.DEB.2.02.1507161643380.17378@kaball.uk.xensource.com>
2015-07-16 16:23     ` Julien Grall
     [not found]     ` <55A7DA8F.2040805@citrix.com>
2015-07-17 13:37       ` Stefano Stabellini
     [not found] ` <1436474552-31789-18-git-send-email-julien.grall@citrix.com>
2015-07-20 17:26   ` [PATCH v2 17/20] net/xen-netfront: Make it running on 64KB page granularity Julien Grall
2015-07-20 17:54 ` [PATCH v2 00/20] xen/arm64: Add support for 64KB page Julien Grall
     [not found] ` <1436474552-31789-6-git-send-email-julien.grall@citrix.com>
2015-07-21  9:54   ` [PATCH v2 05/20] block/xen-blkfront: Split blkif_queue_request in 2 Roger Pau Monné
     [not found]   ` <55AE16EC.2020204@citrix.com>
2015-07-21 11:12     ` Julien Grall
     [not found] ` <1436474552-31789-7-git-send-email-julien.grall@citrix.com>
2015-07-16 15:11   ` [PATCH v2 06/20] block/xen-blkfront: Store a page rather a pfn in the grant structure Stefano Stabellini
2015-07-21 10:16   ` Roger Pau Monné
     [not found]   ` <55AE1BE7.5030102@citrix.com>
2015-07-21 11:19     ` Julien Grall
     [not found]   ` <alpine.DEB.2.02.1507161610570.17378@kaball.uk.xensource.com>
2015-07-23 17:18     ` Julien Grall
     [not found] ` <1436474552-31789-8-git-send-email-julien.grall@citrix.com>
2015-07-21 10:30   ` [PATCH v2 07/20] block/xen-blkfront: split get_grant in 2 Roger Pau Monné
     [not found]   ` <55AE1F2A.6010300@citrix.com>
2015-07-21 13:03     ` Julien Grall
     [not found] ` <1436474552-31789-12-git-send-email-julien.grall@citrix.com>
2015-07-16 15:36   ` [PATCH v2 11/20] tty/hvc: xen: Use xen page definition Stefano Stabellini
2015-07-24  9:52   ` David Vrabel
     [not found] ` <1436474552-31789-14-git-send-email-julien.grall@citrix.com>
2015-07-16 15:43   ` [PATCH v2 13/20] xen/events: fifo: Make it running on 64KB granularity Stefano Stabellini
     [not found]   ` <alpine.DEB.2.02.1507161642130.17378@kaball.uk.xensource.com>
2015-07-16 16:18     ` Julien Grall
     [not found]     ` <55A7D955.5090203@citrix.com>
2015-07-17 13:06       ` Stefano Stabellini
2015-07-24 10:36   ` David Vrabel
     [not found]   ` <55B21527.4010601@citrix.com>
2015-08-06 15:43     ` Julien Grall

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='1436474552-31789-6-git-send-email-julien.grall__24912.4251342893$1436474697$gmane$org@citrix.com' \
    --to=julien.grall@citrix.com \
    --cc=boris.ostrovsky@oracle.com \
    --cc=david.vrabel@citrix.com \
    --cc=ian.campbell@citrix.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=roger.pau@citrix.com \
    --cc=stefano.stabellini@eu.citrix.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).