linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
@ 2018-12-07  4:18 Dongli Zhang
  2018-12-07  9:39 ` [Xen-devel] " Paul Durrant
  2018-12-10 15:14 ` Roger Pau Monné
  0 siblings, 2 replies; 6+ messages in thread
From: Dongli Zhang @ 2018-12-07  4:18 UTC (permalink / raw)
  To: linux-kernel, xen-devel, linux-block; +Cc: konrad.wilk, roger.pau, axboe

The xenstore 'ring-page-order' is used globally for each blkback queue and
therefore should be read from xenstore only once. However, it is obtained
in read_per_ring_refs() which might be called multiple times during the
initialization of each blkback queue.

If the blkfront is malicious and the 'ring-page-order' is set to a different
value by blkfront each time before blkback reads it, this may end up at
the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
xen_blkif_disconnect() when frontend is destroyed.

This patch reworks connect_ring() to read xenstore 'ring-page-order' only
once.

Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
---
 drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++--------------
 1 file changed, 31 insertions(+), 18 deletions(-)

diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index a4bc74e..4a8ce20 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
 /*
  * Each ring may have multi pages, depends on "ring-page-order".
  */
-static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
+static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir,
+			      bool use_ring_page_order)
 {
 	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
 	struct pending_req *req, *n;
 	int err, i, j;
 	struct xen_blkif *blkif = ring->blkif;
 	struct xenbus_device *dev = blkif->be->dev;
-	unsigned int ring_page_order, nr_grefs, evtchn;
+	unsigned int nr_grefs, evtchn;
 
 	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
 			  &evtchn);
@@ -936,28 +937,18 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 		return err;
 	}
 
-	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
-			  &ring_page_order);
-	if (err != 1) {
+	nr_grefs = blkif->nr_ring_pages;
+
+	if (!use_ring_page_order) {
 		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
 		if (err != 1) {
 			err = -EINVAL;
 			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
 			return err;
 		}
-		nr_grefs = 1;
 	} else {
 		unsigned int i;
 
-		if (ring_page_order > xen_blkif_max_ring_order) {
-			err = -EINVAL;
-			xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
-					 dir, ring_page_order,
-					 xen_blkif_max_ring_order);
-			return err;
-		}
-
-		nr_grefs = 1 << ring_page_order;
 		for (i = 0; i < nr_grefs; i++) {
 			char ring_ref_name[RINGREF_NAME_LEN];
 
@@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 			}
 		}
 	}
-	blkif->nr_ring_pages = nr_grefs;
 
 	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
 		req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
 	size_t xspathsize;
 	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
 	unsigned int requested_num_queues = 0;
+	bool use_ring_page_order = false;
+	unsigned int ring_page_order;
 
 	pr_debug("%s %s\n", __func__, dev->otherend);
 
@@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
 		 be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
 		 pers_grants ? "persistent grants" : "");
 
+	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+			   &ring_page_order);
+
+	if (err != 1) {
+		be->blkif->nr_ring_pages = 1;
+	} else {
+		if (ring_page_order > xen_blkif_max_ring_order) {
+			err = -EINVAL;
+			xenbus_dev_fatal(dev, err,
+					 "requested ring page order %d exceed max:%d",
+					 ring_page_order,
+					 xen_blkif_max_ring_order);
+			return err;
+		}
+
+		use_ring_page_order = true;
+		be->blkif->nr_ring_pages = 1 << ring_page_order;
+	}
+
 	if (be->blkif->nr_rings == 1)
-		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend);
+		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
+					  use_ring_page_order);
 	else {
 		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
 		xspath = kmalloc(xspathsize, GFP_KERNEL);
@@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
 		for (i = 0; i < be->blkif->nr_rings; i++) {
 			memset(xspath, 0, xspathsize);
 			snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
-			err = read_per_ring_refs(&be->blkif->rings[i], xspath);
+			err = read_per_ring_refs(&be->blkif->rings[i], xspath,
+						 use_ring_page_order);
 			if (err) {
 				kfree(xspath);
 				return err;
-- 
2.7.4


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* RE: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
  2018-12-07  4:18 [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront Dongli Zhang
@ 2018-12-07  9:39 ` Paul Durrant
  2018-12-07 15:10   ` Dongli Zhang
  2018-12-10 15:14 ` Roger Pau Monné
  1 sibling, 1 reply; 6+ messages in thread
From: Paul Durrant @ 2018-12-07  9:39 UTC (permalink / raw)
  To: 'Dongli Zhang', linux-kernel, xen-devel, linux-block
  Cc: axboe, Roger Pau Monne, konrad.wilk

> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xenproject.org] On Behalf
> Of Dongli Zhang
> Sent: 07 December 2018 04:18
> To: linux-kernel@vger.kernel.org; xen-devel@lists.xenproject.org; linux-
> block@vger.kernel.org
> Cc: axboe@kernel.dk; Roger Pau Monne <roger.pau@citrix.com>;
> konrad.wilk@oracle.com
> Subject: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
> 
> The xenstore 'ring-page-order' is used globally for each blkback queue and
> therefore should be read from xenstore only once. However, it is obtained
> in read_per_ring_refs() which might be called multiple times during the
> initialization of each blkback queue.

That is certainly sub-optimal.

> 
> If the blkfront is malicious and the 'ring-page-order' is set in different
> value by blkfront every time before blkback reads it, this may end up at
> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
> xen_blkif_disconnect() when frontend is destroyed.

I can't actually see what useful function blkif->nr_ring_pages actually performs any more. Perhaps you could actually get rid of it?

> 
> This patch reworks connect_ring() to read xenstore 'ring-page-order' only
> once.

That is certainly a good thing :-)

  Paul

> 
> Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
> ---
>  drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++---------
> -----
>  1 file changed, 31 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-
> blkback/xenbus.c
> index a4bc74e..4a8ce20 100644
> --- a/drivers/block/xen-blkback/xenbus.c
> +++ b/drivers/block/xen-blkback/xenbus.c
> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
>  /*
>   * Each ring may have multi pages, depends on "ring-page-order".
>   */
> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
> *dir)
> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
> *dir,
> +			      bool use_ring_page_order)
>  {
>  	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
>  	struct pending_req *req, *n;
>  	int err, i, j;
>  	struct xen_blkif *blkif = ring->blkif;
>  	struct xenbus_device *dev = blkif->be->dev;
> -	unsigned int ring_page_order, nr_grefs, evtchn;
> +	unsigned int nr_grefs, evtchn;
> 
>  	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
>  			  &evtchn);
> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct xen_blkif_ring
> *ring, const char *dir)
>  		return err;
>  	}
> 
> -	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> -			  &ring_page_order);
> -	if (err != 1) {
> +	nr_grefs = blkif->nr_ring_pages;
> +
> +	if (!use_ring_page_order) {
>  		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
> &ring_ref[0]);
>  		if (err != 1) {
>  			err = -EINVAL;
>  			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
>  			return err;
>  		}
> -		nr_grefs = 1;
>  	} else {
>  		unsigned int i;
> 
> -		if (ring_page_order > xen_blkif_max_ring_order) {
> -			err = -EINVAL;
> -			xenbus_dev_fatal(dev, err, "%s/request %d ring page
> order exceed max:%d",
> -					 dir, ring_page_order,
> -					 xen_blkif_max_ring_order);
> -			return err;
> -		}
> -
> -		nr_grefs = 1 << ring_page_order;
>  		for (i = 0; i < nr_grefs; i++) {
>  			char ring_ref_name[RINGREF_NAME_LEN];
> 
> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring
> *ring, const char *dir)
>  			}
>  		}
>  	}
> -	blkif->nr_ring_pages = nr_grefs;
> 
>  	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
>  		req = kzalloc(sizeof(*req), GFP_KERNEL);
> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
>  	size_t xspathsize;
>  	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-
> NNN" */
>  	unsigned int requested_num_queues = 0;
> +	bool use_ring_page_order = false;
> +	unsigned int ring_page_order;
> 
>  	pr_debug("%s %s\n", __func__, dev->otherend);
> 
> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
>  		 be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
>  		 pers_grants ? "persistent grants" : "");
> 
> +	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> +			   &ring_page_order);
> +
> +	if (err != 1) {
> +		be->blkif->nr_ring_pages = 1;
> +	} else {
> +		if (ring_page_order > xen_blkif_max_ring_order) {
> +			err = -EINVAL;
> +			xenbus_dev_fatal(dev, err,
> +					 "requested ring page order %d exceed
> max:%d",
> +					 ring_page_order,
> +					 xen_blkif_max_ring_order);
> +			return err;
> +		}
> +
> +		use_ring_page_order = true;
> +		be->blkif->nr_ring_pages = 1 << ring_page_order;
> +	}
> +
>  	if (be->blkif->nr_rings == 1)
> -		return read_per_ring_refs(&be->blkif->rings[0], dev-
> >otherend);
> +		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
> +					  use_ring_page_order);
>  	else {
>  		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
>  		xspath = kmalloc(xspathsize, GFP_KERNEL);
> @@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
>  		for (i = 0; i < be->blkif->nr_rings; i++) {
>  			memset(xspath, 0, xspathsize);
>  			snprintf(xspath, xspathsize, "%s/queue-%u", dev-
> >otherend, i);
> -			err = read_per_ring_refs(&be->blkif->rings[i], xspath);
> +			err = read_per_ring_refs(&be->blkif->rings[i], xspath,
> +						 use_ring_page_order);
>  			if (err) {
>  				kfree(xspath);
>  				return err;
> --
> 2.7.4
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xenproject.org
> https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
  2018-12-07  9:39 ` [Xen-devel] " Paul Durrant
@ 2018-12-07 15:10   ` Dongli Zhang
  2018-12-07 15:15     ` Paul Durrant
  0 siblings, 1 reply; 6+ messages in thread
From: Dongli Zhang @ 2018-12-07 15:10 UTC (permalink / raw)
  To: Paul Durrant, linux-kernel, xen-devel, linux-block
  Cc: axboe, Roger Pau Monne, konrad.wilk

Hi Paul,

On 12/07/2018 05:39 PM, Paul Durrant wrote:
>> -----Original Message-----
>> From: Xen-devel [mailto:xen-devel-bounces@lists.xenproject.org] On Behalf
>> Of Dongli Zhang
>> Sent: 07 December 2018 04:18
>> To: linux-kernel@vger.kernel.org; xen-devel@lists.xenproject.org; linux-
>> block@vger.kernel.org
>> Cc: axboe@kernel.dk; Roger Pau Monne <roger.pau@citrix.com>;
>> konrad.wilk@oracle.com
>> Subject: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
>> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
>>
>> The xenstore 'ring-page-order' is used globally for each blkback queue and
>> therefore should be read from xenstore only once. However, it is obtained
>> in read_per_ring_refs() which might be called multiple times during the
>> initialization of each blkback queue.
> 
> That is certainly sub-optimal.
> 
>>
>> If the blkfront is malicious and the 'ring-page-order' is set in different
>> value by blkfront every time before blkback reads it, this may end up at
>> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
>> xen_blkif_disconnect() when frontend is destroyed.
> 
> I can't actually see what useful function blkif->nr_ring_pages actually performs any more. Perhaps you could actually get rid of it?

How about we keep it? Other than reading from xenstore, it is the only place for
us to know the value from 'ring-page-order'.

This helps calculate the initialized number of elements on all
xen_blkif_ring->pending_free lists. That's how "WARN_ON(i !=
(XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" is used to double check if
there is no leak of elements reclaimed from all xen_blkif_ring->pending_free.

It helps vmcore analysis as well. Given blkif->nr_ring_pages, we would be able
to double check if the number of ring buffer slots are correct.

I could not see any drawback leaving blkif->nr_ring_pages in the code.

Dongli Zhang

> 
>>
>> This patch reworks connect_ring() to read xenstore 'ring-page-order' only
>> once.
> 
> That is certainly a good thing :-)
> 
>   Paul
> 
>>
>> Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
>> ---
>>  drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++---------
>> -----
>>  1 file changed, 31 insertions(+), 18 deletions(-)
>>
>> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-
>> blkback/xenbus.c
>> index a4bc74e..4a8ce20 100644
>> --- a/drivers/block/xen-blkback/xenbus.c
>> +++ b/drivers/block/xen-blkback/xenbus.c
>> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
>>  /*
>>   * Each ring may have multi pages, depends on "ring-page-order".
>>   */
>> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
>> *dir)
>> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
>> *dir,
>> +			      bool use_ring_page_order)
>>  {
>>  	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
>>  	struct pending_req *req, *n;
>>  	int err, i, j;
>>  	struct xen_blkif *blkif = ring->blkif;
>>  	struct xenbus_device *dev = blkif->be->dev;
>> -	unsigned int ring_page_order, nr_grefs, evtchn;
>> +	unsigned int nr_grefs, evtchn;
>>
>>  	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
>>  			  &evtchn);
>> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct xen_blkif_ring
>> *ring, const char *dir)
>>  		return err;
>>  	}
>>
>> -	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
>> -			  &ring_page_order);
>> -	if (err != 1) {
>> +	nr_grefs = blkif->nr_ring_pages;
>> +
>> +	if (!use_ring_page_order) {
>>  		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
>> &ring_ref[0]);
>>  		if (err != 1) {
>>  			err = -EINVAL;
>>  			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
>>  			return err;
>>  		}
>> -		nr_grefs = 1;
>>  	} else {
>>  		unsigned int i;
>>
>> -		if (ring_page_order > xen_blkif_max_ring_order) {
>> -			err = -EINVAL;
>> -			xenbus_dev_fatal(dev, err, "%s/request %d ring page
>> order exceed max:%d",
>> -					 dir, ring_page_order,
>> -					 xen_blkif_max_ring_order);
>> -			return err;
>> -		}
>> -
>> -		nr_grefs = 1 << ring_page_order;
>>  		for (i = 0; i < nr_grefs; i++) {
>>  			char ring_ref_name[RINGREF_NAME_LEN];
>>
>> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring
>> *ring, const char *dir)
>>  			}
>>  		}
>>  	}
>> -	blkif->nr_ring_pages = nr_grefs;
>>
>>  	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
>>  		req = kzalloc(sizeof(*req), GFP_KERNEL);
>> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
>>  	size_t xspathsize;
>>  	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-
>> NNN" */
>>  	unsigned int requested_num_queues = 0;
>> +	bool use_ring_page_order = false;
>> +	unsigned int ring_page_order;
>>
>>  	pr_debug("%s %s\n", __func__, dev->otherend);
>>
>> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
>>  		 be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
>>  		 pers_grants ? "persistent grants" : "");
>>
>> +	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
>> +			   &ring_page_order);
>> +
>> +	if (err != 1) {
>> +		be->blkif->nr_ring_pages = 1;
>> +	} else {
>> +		if (ring_page_order > xen_blkif_max_ring_order) {
>> +			err = -EINVAL;
>> +			xenbus_dev_fatal(dev, err,
>> +					 "requested ring page order %d exceed
>> max:%d",
>> +					 ring_page_order,
>> +					 xen_blkif_max_ring_order);
>> +			return err;
>> +		}
>> +
>> +		use_ring_page_order = true;
>> +		be->blkif->nr_ring_pages = 1 << ring_page_order;
>> +	}
>> +
>>  	if (be->blkif->nr_rings == 1)
>> -		return read_per_ring_refs(&be->blkif->rings[0], dev-
>>> otherend);
>> +		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
>> +					  use_ring_page_order);
>>  	else {
>>  		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
>>  		xspath = kmalloc(xspathsize, GFP_KERNEL);
>> @@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
>>  		for (i = 0; i < be->blkif->nr_rings; i++) {
>>  			memset(xspath, 0, xspathsize);
>>  			snprintf(xspath, xspathsize, "%s/queue-%u", dev-
>>> otherend, i);
>> -			err = read_per_ring_refs(&be->blkif->rings[i], xspath);
>> +			err = read_per_ring_refs(&be->blkif->rings[i], xspath,
>> +						 use_ring_page_order);
>>  			if (err) {
>>  				kfree(xspath);
>>  				return err;
>> --
>> 2.7.4
>>
>>
>> _______________________________________________
>> Xen-devel mailing list
>> Xen-devel@lists.xenproject.org
>> https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* RE: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
  2018-12-07 15:10   ` Dongli Zhang
@ 2018-12-07 15:15     ` Paul Durrant
  2018-12-07 15:50       ` Dongli Zhang
  0 siblings, 1 reply; 6+ messages in thread
From: Paul Durrant @ 2018-12-07 15:15 UTC (permalink / raw)
  To: 'Dongli Zhang', linux-kernel, xen-devel, linux-block
  Cc: axboe, Roger Pau Monne, konrad.wilk

> -----Original Message-----
> From: Dongli Zhang [mailto:dongli.zhang@oracle.com]
> Sent: 07 December 2018 15:10
> To: Paul Durrant <Paul.Durrant@citrix.com>; linux-kernel@vger.kernel.org;
> xen-devel@lists.xenproject.org; linux-block@vger.kernel.org
> Cc: axboe@kernel.dk; Roger Pau Monne <roger.pau@citrix.com>;
> konrad.wilk@oracle.com
> Subject: Re: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
> 
> Hi Paul,
> 
> On 12/07/2018 05:39 PM, Paul Durrant wrote:
> >> -----Original Message-----
> >> From: Xen-devel [mailto:xen-devel-bounces@lists.xenproject.org] On
> Behalf
> >> Of Dongli Zhang
> >> Sent: 07 December 2018 04:18
> >> To: linux-kernel@vger.kernel.org; xen-devel@lists.xenproject.org;
> linux-
> >> block@vger.kernel.org
> >> Cc: axboe@kernel.dk; Roger Pau Monne <roger.pau@citrix.com>;
> >> konrad.wilk@oracle.com
> >> Subject: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
> >> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
> >>
> >> The xenstore 'ring-page-order' is used globally for each blkback queue
> and
> >> therefore should be read from xenstore only once. However, it is
> obtained
> >> in read_per_ring_refs() which might be called multiple times during the
> >> initialization of each blkback queue.
> >
> > That is certainly sub-optimal.
> >
> >>
> >> If the blkfront is malicious and the 'ring-page-order' is set in
> different
> >> value by blkfront every time before blkback reads it, this may end up
> at
> >> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));"
> in
> >> xen_blkif_disconnect() when frontend is destroyed.
> >
> > I can't actually see what useful function blkif->nr_ring_pages actually
> performs any more. Perhaps you could actually get rid of it?
> 
> How about we keep it? Other than reading from xenstore, it is the only
> place for
> us to know the value from 'ring-page-order'.
> 
> This helps calculate the initialized number of elements on all
> xen_blkif_ring->pending_free lists. That's how "WARN_ON(i !=
> (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" is used to double
> check if
> there is no leak of elements reclaimed from all xen_blkif_ring-
> >pending_free.
> 
> It helps vmcore analysis as well. Given blkif->nr_ring_pages, we would be
> able
> to double check if the number of ring buffer slots are correct.
> 
> I could not see any drawback leaving blkif->nr_ring_pagesin the code.

No, there's no drawback apart from space, but apart from that cross-check and, as you say, core analysis it seems to have little value.

  Paul

> 
> Dongli Zhang
> 
> >
> >>
> >> This patch reworks connect_ring() to read xenstore 'ring-page-order'
> only
> >> once.
> >
> > That is certainly a good thing :-)
> >
> >   Paul
> >
> >>
> >> Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
> >> ---
> >>  drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++------
> ---
> >> -----
> >>  1 file changed, 31 insertions(+), 18 deletions(-)
> >>
> >> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-
> >> blkback/xenbus.c
> >> index a4bc74e..4a8ce20 100644
> >> --- a/drivers/block/xen-blkback/xenbus.c
> >> +++ b/drivers/block/xen-blkback/xenbus.c
> >> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
> >>  /*
> >>   * Each ring may have multi pages, depends on "ring-page-order".
> >>   */
> >> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
> >> *dir)
> >> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
> >> *dir,
> >> +			      bool use_ring_page_order)
> >>  {
> >>  	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
> >>  	struct pending_req *req, *n;
> >>  	int err, i, j;
> >>  	struct xen_blkif *blkif = ring->blkif;
> >>  	struct xenbus_device *dev = blkif->be->dev;
> >> -	unsigned int ring_page_order, nr_grefs, evtchn;
> >> +	unsigned int nr_grefs, evtchn;
> >>
> >>  	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
> >>  			  &evtchn);
> >> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct
> xen_blkif_ring
> >> *ring, const char *dir)
> >>  		return err;
> >>  	}
> >>
> >> -	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> >> -			  &ring_page_order);
> >> -	if (err != 1) {
> >> +	nr_grefs = blkif->nr_ring_pages;
> >> +
> >> +	if (!use_ring_page_order) {
> >>  		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
> >> &ring_ref[0]);
> >>  		if (err != 1) {
> >>  			err = -EINVAL;
> >>  			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
> >>  			return err;
> >>  		}
> >> -		nr_grefs = 1;
> >>  	} else {
> >>  		unsigned int i;
> >>
> >> -		if (ring_page_order > xen_blkif_max_ring_order) {
> >> -			err = -EINVAL;
> >> -			xenbus_dev_fatal(dev, err, "%s/request %d ring page
> >> order exceed max:%d",
> >> -					 dir, ring_page_order,
> >> -					 xen_blkif_max_ring_order);
> >> -			return err;
> >> -		}
> >> -
> >> -		nr_grefs = 1 << ring_page_order;
> >>  		for (i = 0; i < nr_grefs; i++) {
> >>  			char ring_ref_name[RINGREF_NAME_LEN];
> >>
> >> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring
> >> *ring, const char *dir)
> >>  			}
> >>  		}
> >>  	}
> >> -	blkif->nr_ring_pages = nr_grefs;
> >>
> >>  	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
> >>  		req = kzalloc(sizeof(*req), GFP_KERNEL);
> >> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
> >>  	size_t xspathsize;
> >>  	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-
> >> NNN" */
> >>  	unsigned int requested_num_queues = 0;
> >> +	bool use_ring_page_order = false;
> >> +	unsigned int ring_page_order;
> >>
> >>  	pr_debug("%s %s\n", __func__, dev->otherend);
> >>
> >> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
> >>  		 be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
> >>  		 pers_grants ? "persistent grants" : "");
> >>
> >> +	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> >> +			   &ring_page_order);
> >> +
> >> +	if (err != 1) {
> >> +		be->blkif->nr_ring_pages = 1;
> >> +	} else {
> >> +		if (ring_page_order > xen_blkif_max_ring_order) {
> >> +			err = -EINVAL;
> >> +			xenbus_dev_fatal(dev, err,
> >> +					 "requested ring page order %d exceed
> >> max:%d",
> >> +					 ring_page_order,
> >> +					 xen_blkif_max_ring_order);
> >> +			return err;
> >> +		}
> >> +
> >> +		use_ring_page_order = true;
> >> +		be->blkif->nr_ring_pages = 1 << ring_page_order;
> >> +	}
> >> +
> >>  	if (be->blkif->nr_rings == 1)
> >> -		return read_per_ring_refs(&be->blkif->rings[0], dev-
> >>> otherend);
> >> +		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
> >> +					  use_ring_page_order);
> >>  	else {
> >>  		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
> >>  		xspath = kmalloc(xspathsize, GFP_KERNEL);
> >> @@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
> >>  		for (i = 0; i < be->blkif->nr_rings; i++) {
> >>  			memset(xspath, 0, xspathsize);
> >>  			snprintf(xspath, xspathsize, "%s/queue-%u", dev-
> >>> otherend, i);
> >> -			err = read_per_ring_refs(&be->blkif->rings[i], xspath);
> >> +			err = read_per_ring_refs(&be->blkif->rings[i], xspath,
> >> +						 use_ring_page_order);
> >>  			if (err) {
> >>  				kfree(xspath);
> >>  				return err;
> >> --
> >> 2.7.4
> >>
> >>
> >> _______________________________________________
> >> Xen-devel mailing list
> >> Xen-devel@lists.xenproject.org
> >> https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
  2018-12-07 15:15     ` Paul Durrant
@ 2018-12-07 15:50       ` Dongli Zhang
  0 siblings, 0 replies; 6+ messages in thread
From: Dongli Zhang @ 2018-12-07 15:50 UTC (permalink / raw)
  To: Paul Durrant, linux-kernel, xen-devel, linux-block
  Cc: axboe, Roger Pau Monne, konrad.wilk



On 12/07/2018 11:15 PM, Paul Durrant wrote:
>> -----Original Message-----
>> From: Dongli Zhang [mailto:dongli.zhang@oracle.com]
>> Sent: 07 December 2018 15:10
>> To: Paul Durrant <Paul.Durrant@citrix.com>; linux-kernel@vger.kernel.org;
>> xen-devel@lists.xenproject.org; linux-block@vger.kernel.org
>> Cc: axboe@kernel.dk; Roger Pau Monne <roger.pau@citrix.com>;
>> konrad.wilk@oracle.com
>> Subject: Re: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
>> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
>>
>> Hi Paul,
>>
>> On 12/07/2018 05:39 PM, Paul Durrant wrote:
>>>> -----Original Message-----
>>>> From: Xen-devel [mailto:xen-devel-bounces@lists.xenproject.org] On
>> Behalf
>>>> Of Dongli Zhang
>>>> Sent: 07 December 2018 04:18
>>>> To: linux-kernel@vger.kernel.org; xen-devel@lists.xenproject.org;
>> linux-
>>>> block@vger.kernel.org
>>>> Cc: axboe@kernel.dk; Roger Pau Monne <roger.pau@citrix.com>;
>>>> konrad.wilk@oracle.com
>>>> Subject: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
>>>> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
>>>>
>>>> The xenstore 'ring-page-order' is used globally for each blkback queue
>> and
>>>> therefore should be read from xenstore only once. However, it is
>> obtained
>>>> in read_per_ring_refs() which might be called multiple times during the
>>>> initialization of each blkback queue.
>>>
>>> That is certainly sub-optimal.
>>>
>>>>
>>>> If the blkfront is malicious and the 'ring-page-order' is set in
>> different
>>>> value by blkfront every time before blkback reads it, this may end up
>> at
>>>> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));"
>> in
>>>> xen_blkif_disconnect() when frontend is destroyed.
>>>
>>> I can't actually see what useful function blkif->nr_ring_pages actually
>> performs any more. Perhaps you could actually get rid of it?
>>
>> How about we keep it? Other than reading from xenstore, it is the only
>> place for
>> us to know the value from 'ring-page-order'.
>>
>> This helps calculate the initialized number of elements on all
>> xen_blkif_ring->pending_free lists. That's how "WARN_ON(i !=
>> (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" is used to double
>> check if
>> there is no leak of elements reclaimed from all xen_blkif_ring-
>>> pending_free.
>>
>> It helps vmcore analysis as well. Given blkif->nr_ring_pages, we would be
>> able
>> to double check if the number of ring buffer slots are correct.
>>
>> I could not see any drawback leaving blkif->nr_ring_pagesin the code.
> 
> No, there's no drawback apart from space, but apart from that cross-check and, as you say, core analysis it seems to have little value.
> 
>   Paul

I will not remove blkif->nr_ring_pages and leave the current patch waiting for
review.

Dongli Zhang



> 
>>
>> Dongli Zhang
>>
>>>
>>>>
>>>> This patch reworks connect_ring() to read xenstore 'ring-page-order'
>> only
>>>> once.
>>>
>>> That is certainly a good thing :-)
>>>
>>>   Paul
>>>
>>>>
>>>> Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
>>>> ---
>>>>  drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++------
>> ---
>>>> -----
>>>>  1 file changed, 31 insertions(+), 18 deletions(-)
>>>>
>>>> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-
>>>> blkback/xenbus.c
>>>> index a4bc74e..4a8ce20 100644
>>>> --- a/drivers/block/xen-blkback/xenbus.c
>>>> +++ b/drivers/block/xen-blkback/xenbus.c
>>>> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
>>>>  /*
>>>>   * Each ring may have multi pages, depends on "ring-page-order".
>>>>   */
>>>> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
>>>> *dir)
>>>> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
>>>> *dir,
>>>> +			      bool use_ring_page_order)
>>>>  {
>>>>  	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
>>>>  	struct pending_req *req, *n;
>>>>  	int err, i, j;
>>>>  	struct xen_blkif *blkif = ring->blkif;
>>>>  	struct xenbus_device *dev = blkif->be->dev;
>>>> -	unsigned int ring_page_order, nr_grefs, evtchn;
>>>> +	unsigned int nr_grefs, evtchn;
>>>>
>>>>  	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
>>>>  			  &evtchn);
>>>> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct
>> xen_blkif_ring
>>>> *ring, const char *dir)
>>>>  		return err;
>>>>  	}
>>>>
>>>> -	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
>>>> -			  &ring_page_order);
>>>> -	if (err != 1) {
>>>> +	nr_grefs = blkif->nr_ring_pages;
>>>> +
>>>> +	if (!use_ring_page_order) {
>>>>  		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
>>>> &ring_ref[0]);
>>>>  		if (err != 1) {
>>>>  			err = -EINVAL;
>>>>  			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
>>>>  			return err;
>>>>  		}
>>>> -		nr_grefs = 1;
>>>>  	} else {
>>>>  		unsigned int i;
>>>>
>>>> -		if (ring_page_order > xen_blkif_max_ring_order) {
>>>> -			err = -EINVAL;
>>>> -			xenbus_dev_fatal(dev, err, "%s/request %d ring page
>>>> order exceed max:%d",
>>>> -					 dir, ring_page_order,
>>>> -					 xen_blkif_max_ring_order);
>>>> -			return err;
>>>> -		}
>>>> -
>>>> -		nr_grefs = 1 << ring_page_order;
>>>>  		for (i = 0; i < nr_grefs; i++) {
>>>>  			char ring_ref_name[RINGREF_NAME_LEN];
>>>>
>>>> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring
>>>> *ring, const char *dir)
>>>>  			}
>>>>  		}
>>>>  	}
>>>> -	blkif->nr_ring_pages = nr_grefs;
>>>>
>>>>  	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
>>>>  		req = kzalloc(sizeof(*req), GFP_KERNEL);
>>>> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
>>>>  	size_t xspathsize;
>>>>  	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-
>>>> NNN" */
>>>>  	unsigned int requested_num_queues = 0;
>>>> +	bool use_ring_page_order = false;
>>>> +	unsigned int ring_page_order;
>>>>
>>>>  	pr_debug("%s %s\n", __func__, dev->otherend);
>>>>
>>>> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
>>>>  		 be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
>>>>  		 pers_grants ? "persistent grants" : "");
>>>>
>>>> +	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
>>>> +			   &ring_page_order);
>>>> +
>>>> +	if (err != 1) {
>>>> +		be->blkif->nr_ring_pages = 1;
>>>> +	} else {
>>>> +		if (ring_page_order > xen_blkif_max_ring_order) {
>>>> +			err = -EINVAL;
>>>> +			xenbus_dev_fatal(dev, err,
>>>> +					 "requested ring page order %d exceed
>>>> max:%d",
>>>> +					 ring_page_order,
>>>> +					 xen_blkif_max_ring_order);
>>>> +			return err;
>>>> +		}
>>>> +
>>>> +		use_ring_page_order = true;
>>>> +		be->blkif->nr_ring_pages = 1 << ring_page_order;
>>>> +	}
>>>> +
>>>>  	if (be->blkif->nr_rings == 1)
>>>> -		return read_per_ring_refs(&be->blkif->rings[0], dev-
>>>>> otherend);
>>>> +		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
>>>> +					  use_ring_page_order);
>>>>  	else {
>>>>  		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
>>>>  		xspath = kmalloc(xspathsize, GFP_KERNEL);
>>>> @@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
>>>>  		for (i = 0; i < be->blkif->nr_rings; i++) {
>>>>  			memset(xspath, 0, xspathsize);
>>>>  			snprintf(xspath, xspathsize, "%s/queue-%u", dev-
>>>>> otherend, i);
>>>> -			err = read_per_ring_refs(&be->blkif->rings[i], xspath);
>>>> +			err = read_per_ring_refs(&be->blkif->rings[i], xspath,
>>>> +						 use_ring_page_order);
>>>>  			if (err) {
>>>>  				kfree(xspath);
>>>>  				return err;
>>>> --
>>>> 2.7.4
>>>>
>>>>
>>>> _______________________________________________
>>>> Xen-devel mailing list
>>>> Xen-devel@lists.xenproject.org
>>>> https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
  2018-12-07  4:18 [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront Dongli Zhang
  2018-12-07  9:39 ` [Xen-devel] " Paul Durrant
@ 2018-12-10 15:14 ` Roger Pau Monné
  1 sibling, 0 replies; 6+ messages in thread
From: Roger Pau Monné @ 2018-12-10 15:14 UTC (permalink / raw)
  To: Dongli Zhang; +Cc: linux-kernel, xen-devel, linux-block, konrad.wilk, axboe

On Fri, Dec 07, 2018 at 12:18:04PM +0800, Dongli Zhang wrote:
> The xenstore 'ring-page-order' is used globally for each blkback queue and
> therefore should be read from xenstore only once. However, it is obtained
> in read_per_ring_refs() which might be called multiple times during the
> initialization of each blkback queue.
> 
> If the blkfront is malicious and 'ring-page-order' is set to a different
> value by blkfront each time before blkback reads it, this may end up at
> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
> xen_blkif_disconnect() when the frontend is destroyed.
> 
> This patch reworks connect_ring() to read xenstore 'ring-page-order' only
> once.
> 
> Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
> ---
>  drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++--------------
>  1 file changed, 31 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
> index a4bc74e..4a8ce20 100644
> --- a/drivers/block/xen-blkback/xenbus.c
> +++ b/drivers/block/xen-blkback/xenbus.c
> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
>  /*
>   * Each ring may have multi pages, depends on "ring-page-order".
>   */
> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir,
> +			      bool use_ring_page_order)

Why not change the order of read_per_ring_refs so that it tries to
fetch the grant references from "ring-refXX" first and then fall back
to reading the legacy grant reference if nr_ring_pages == 1?

This will avoid having to pass an extra parameter to it.

>  {
>  	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
>  	struct pending_req *req, *n;
>  	int err, i, j;
>  	struct xen_blkif *blkif = ring->blkif;
>  	struct xenbus_device *dev = blkif->be->dev;
> -	unsigned int ring_page_order, nr_grefs, evtchn;
> +	unsigned int nr_grefs, evtchn;
>  
>  	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
>  			  &evtchn);
> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
>  		return err;
>  	}
>  
> -	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> -			  &ring_page_order);
> -	if (err != 1) {
> +	nr_grefs = blkif->nr_ring_pages;
> +
> +	if (!use_ring_page_order) {
>  		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
>  		if (err != 1) {
>  			err = -EINVAL;
>  			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
>  			return err;
>  		}
> -		nr_grefs = 1;
>  	} else {
>  		unsigned int i;
>  
> -		if (ring_page_order > xen_blkif_max_ring_order) {
> -			err = -EINVAL;
> -			xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
> -					 dir, ring_page_order,
> -					 xen_blkif_max_ring_order);
> -			return err;
> -		}
> -
> -		nr_grefs = 1 << ring_page_order;
>  		for (i = 0; i < nr_grefs; i++) {
>  			char ring_ref_name[RINGREF_NAME_LEN];
>  
> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
>  			}
>  		}
>  	}
> -	blkif->nr_ring_pages = nr_grefs;
>  
>  	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
>  		req = kzalloc(sizeof(*req), GFP_KERNEL);
> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
>  	size_t xspathsize;
>  	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
>  	unsigned int requested_num_queues = 0;
> +	bool use_ring_page_order = false;
> +	unsigned int ring_page_order;
>  
>  	pr_debug("%s %s\n", __func__, dev->otherend);
>  
> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
>  		 be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
>  		 pers_grants ? "persistent grants" : "");
>  
> +	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> +			   &ring_page_order);

You can use xenbus_read_unsigned IMO, which will simplify the logic
below a little bit.

Thanks, Roger.

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2018-12-10 15:15 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-12-07  4:18 [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront Dongli Zhang
2018-12-07  9:39 ` [Xen-devel] " Paul Durrant
2018-12-07 15:10   ` Dongli Zhang
2018-12-07 15:15     ` Paul Durrant
2018-12-07 15:50       ` Dongli Zhang
2018-12-10 15:14 ` Roger Pau Monné

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).