* [PATCH] ceph: use a shared zero page rather than one per messenger
@ 2012-02-29  3:06 Alex Elder
  2012-02-29  4:18 ` Christoph Hellwig
  2012-02-29 17:40 ` [PATCH, v2] " Alex Elder
  0 siblings, 2 replies; 6+ messages in thread
From: Alex Elder @ 2012-02-29  3:06 UTC (permalink / raw)
  To: ceph-devel

Each messenger allocates a page to be used when writing zeroes
out in the event of error or other abnormal condition.  Just
allocate one at initialization time and have them all share it.

Signed-off-by: Alex Elder <elder@dreamhost.com>
---
  include/linux/ceph/messenger.h |    1 -
  net/ceph/messenger.c           |   45 +++++++++++++++++++++++++++------------
  2 files changed, 31 insertions(+), 15 deletions(-)

diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index ffbeb2c..6b5af5f 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -54,7 +54,6 @@ struct ceph_connection_operations {
  struct ceph_messenger {
  	struct ceph_entity_inst inst;    /* my name+address */
  	struct ceph_entity_addr my_enc_addr;
-	struct page *zero_page;          /* used in certain error cases */

  	bool nocrc;

diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index ad5b708..068cb7e 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -52,6 +52,9 @@ static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
  static DEFINE_SPINLOCK(addr_str_lock);
  static int last_addr_str;

+static struct page *zero_page;		/* used in certain error cases */
+static void *zero_page_address;		/* kernel virtual addr of zero_page */
+
  const char *ceph_pr_addr(const struct sockaddr_storage *ss)
  {
  	int i;
@@ -99,18 +102,43 @@ struct workqueue_struct *ceph_msgr_wq;

  int ceph_msgr_init(void)
  {
+	BUG_ON(zero_page != NULL);
+	zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
+	if (!zero_page) {
+		pr_err("msgr_init failed to alloc zero_page\n");
+		return -ENOMEM;
+	}
+	BUG_ON(zero_page_address != NULL);
+	zero_page_address = kmap(zero_page);
+
  	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
  	if (!ceph_msgr_wq) {
  		pr_err("msgr_init failed to create workqueue\n");
+
+		zero_page_address = NULL;
+		kunmap(zero_page);
+		__free_page(zero_page);
+		zero_page = NULL;
+
  		return -ENOMEM;
  	}
+
  	return 0;
  }
  EXPORT_SYMBOL(ceph_msgr_init);

  void ceph_msgr_exit(void)
  {
+	BUG_ON(ceph_msgr_wq == NULL);
  	destroy_workqueue(ceph_msgr_wq);
+
+	BUG_ON(zero_page_address == NULL);
+	zero_page_address = NULL;
+
+	BUG_ON(zero_page == NULL);
+	kunmap(zero_page);
+	__free_page(zero_page);
+	zero_page = NULL;
  }
  EXPORT_SYMBOL(ceph_msgr_exit);

@@ -835,9 +863,9 @@ static int write_partial_msg_pages(struct ceph_connection *con)
  			max_write = bv->bv_len;
  #endif
  		} else {
-			page = con->msgr->zero_page;
+			page = zero_page;
  			if (crc)
-				kaddr = page_address(con->msgr->zero_page);
+				kaddr = zero_page_address;
  		}
  		len = min_t(int, max_write - con->out_msg_pos.page_pos,
  			    total_max_write);
@@ -908,7 +936,7 @@ static int write_partial_skip(struct ceph_connection *con)

  	while (con->out_skip > 0) {
  		struct kvec iov = {
-			.iov_base = page_address(con->msgr->zero_page),
+			.iov_base = zero_page_address,
  			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
  		};

@@ -2216,15 +2244,6 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,

  	spin_lock_init(&msgr->global_seq_lock);

-	/* the zero page is needed if a request is "canceled" while the message
-	 * is being written over the socket */
-	msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
-	if (!msgr->zero_page) {
-		kfree(msgr);
-		return ERR_PTR(-ENOMEM);
-	}
-	kmap(msgr->zero_page);
-
  	if (myaddr)
  		msgr->inst.addr = *myaddr;

@@ -2241,8 +2260,6 @@ EXPORT_SYMBOL(ceph_messenger_create);
  void ceph_messenger_destroy(struct ceph_messenger *msgr)
  {
  	dout("destroy %p\n", msgr);
-	kunmap(msgr->zero_page);
-	__free_page(msgr->zero_page);
  	kfree(msgr);
  	dout("destroyed messenger %p\n", msgr);
  }
-- 
1.7.5.4



* Re: [PATCH] ceph: use a shared zero page rather than one per messenger
  2012-02-29  3:06 [PATCH] ceph: use a shared zero page rather than one per messenger Alex Elder
@ 2012-02-29  4:18 ` Christoph Hellwig
  2012-02-29 17:40 ` [PATCH, v2] " Alex Elder
  1 sibling, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2012-02-29  4:18 UTC (permalink / raw)
  To: Alex Elder; +Cc: ceph-devel

On Tue, Feb 28, 2012 at 07:06:22PM -0800, Alex Elder wrote:
> Each messenger allocates a page to be used when writing zeroes
> out in the event of error or other abnormal condition.  Just
> allocate one at initialization time and have them all share it.

Any reason you don't simply use the kernel-wide ZERO_PAGE()?
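
For reference, a minimal sketch of what using the kernel-wide zero page
looks like; ZERO_PAGE(0) is a single, globally shared always-zero page,
so nothing is allocated or freed, only pinned while a pointer is held.
The helper name below is hypothetical, not code from the thread:

	#include <linux/mm.h>
	#include <linux/highmem.h>

	static struct page *zero_page;
	static void *zero_page_address;

	/* cannot fail: no allocation takes place */
	static void zero_page_setup(void)
	{
		zero_page = ZERO_PAGE(0);		/* shared kernel zero page */
		get_page(zero_page);			/* pin while we hold it */
		zero_page_address = kmap(zero_page);	/* kernel virtual address */
	}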



* [PATCH, v2] ceph: use a shared zero page rather than one per messenger
  2012-02-29  3:06 [PATCH] ceph: use a shared zero page rather than one per messenger Alex Elder
  2012-02-29  4:18 ` Christoph Hellwig
@ 2012-02-29 17:40 ` Alex Elder
  2012-03-02 19:21   ` Sage Weil
  1 sibling, 1 reply; 6+ messages in thread
From: Alex Elder @ 2012-02-29 17:40 UTC (permalink / raw)
  To: ceph-devel

Each messenger allocates a page to be used when writing zeroes
out in the event of error or other abnormal condition.  Instead,
use the kernel ZERO_PAGE() for that purpose.

Signed-off-by: Alex Elder <elder@dreamhost.com>
---
v2:  Updated to use kernel ZERO_PAGE()

  include/linux/ceph/messenger.h |    1
  net/ceph/messenger.c           |   43 +++++++++++++++++++++++++++--------------
  2 files changed, 29 insertions(+), 15 deletions(-)

Index: b/include/linux/ceph/messenger.h
===================================================================
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -54,7 +54,6 @@ struct ceph_connection_operations {
  struct ceph_messenger {
  	struct ceph_entity_inst inst;    /* my name+address */
  	struct ceph_entity_addr my_enc_addr;
-	struct page *zero_page;          /* used in certain error cases */

  	bool nocrc;

Index: b/net/ceph/messenger.c
===================================================================
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -52,6 +52,9 @@ static char addr_str[MAX_ADDR_STR][MAX_A
  static DEFINE_SPINLOCK(addr_str_lock);
  static int last_addr_str;

+static struct page *zero_page;		/* used in certain error cases */
+static void *zero_page_address;		/* kernel virtual addr of zero_page */
+
  const char *ceph_pr_addr(const struct sockaddr_storage *ss)
  {
  	int i;
@@ -99,18 +102,41 @@ struct workqueue_struct *ceph_msgr_wq;

  int ceph_msgr_init(void)
  {
+	BUG_ON(zero_page != NULL);
+	zero_page = ZERO_PAGE(0);
+	page_cache_get(zero_page);
+
+	BUG_ON(zero_page_address != NULL);
+	zero_page_address = kmap(zero_page);
+
  	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
  	if (!ceph_msgr_wq) {
  		pr_err("msgr_init failed to create workqueue\n");
+
+		zero_page_address = NULL;
+		kunmap(zero_page);
+		page_cache_release(zero_page);
+		zero_page = NULL;
+
  		return -ENOMEM;
  	}
+
  	return 0;
  }
  EXPORT_SYMBOL(ceph_msgr_init);

  void ceph_msgr_exit(void)
  {
+	BUG_ON(ceph_msgr_wq == NULL);
  	destroy_workqueue(ceph_msgr_wq);
+
+	BUG_ON(zero_page_address == NULL);
+	zero_page_address = NULL;
+
+	BUG_ON(zero_page == NULL);
+	kunmap(zero_page);
+	page_cache_release(zero_page);
+	zero_page = NULL;
  }
  EXPORT_SYMBOL(ceph_msgr_exit);

@@ -841,9 +867,9 @@ static int write_partial_msg_pages(struc
  			max_write = bv->bv_len;
  #endif
  		} else {
-			page = con->msgr->zero_page;
+			page = zero_page;
  			if (crc)
-				kaddr = page_address(con->msgr->zero_page);
+				kaddr = zero_page_address;
  		}
  		len = min_t(int, max_write - con->out_msg_pos.page_pos,
  			    total_max_write);
@@ -914,7 +940,7 @@ static int write_partial_skip(struct cep

  	while (con->out_skip > 0) {
  		struct kvec iov = {
-			.iov_base = page_address(con->msgr->zero_page),
+			.iov_base = zero_page_address,
  			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
  		};

@@ -2222,15 +2248,6 @@ struct ceph_messenger *ceph_messenger_cr

  	spin_lock_init(&msgr->global_seq_lock);

-	/* the zero page is needed if a request is "canceled" while the message
-	 * is being written over the socket */
-	msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
-	if (!msgr->zero_page) {
-		kfree(msgr);
-		return ERR_PTR(-ENOMEM);
-	}
-	kmap(msgr->zero_page);
-
  	if (myaddr)
  		msgr->inst.addr = *myaddr;

@@ -2247,8 +2264,6 @@ EXPORT_SYMBOL(ceph_messenger_create);
  void ceph_messenger_destroy(struct ceph_messenger *msgr)
  {
  	dout("destroy %p\n", msgr);
-	kunmap(msgr->zero_page);
-	__free_page(msgr->zero_page);
  	kfree(msgr);
  	dout("destroyed messenger %p\n", msgr);
  }



* Re: [PATCH, v2] ceph: use a shared zero page rather than one per messenger
  2012-02-29 17:40 ` [PATCH, v2] " Alex Elder
@ 2012-03-02 19:21   ` Sage Weil
  2012-03-02 23:14     ` Alex Elder
  0 siblings, 1 reply; 6+ messages in thread
From: Sage Weil @ 2012-03-02 19:21 UTC (permalink / raw)
  To: Alex Elder; +Cc: ceph-devel

On Wed, 29 Feb 2012, Alex Elder wrote:

> Each messenger allocates a page to be used when writing zeroes
> out in the event of error or other abnormal condition.  Instead,
> use the kernel ZERO_PAGE() for that purpose.
> 
> Signed-off-by: Alex Elder <elder@dreamhost.com>
> ---
> v2:  Updated to use kernel ZERO_PAGE()
> 
>  include/linux/ceph/messenger.h |    1
>  net/ceph/messenger.c           |   43 +++++++++++++++++++++++++++--------------
>  2 files changed, 29 insertions(+), 15 deletions(-)
> 
> Index: b/include/linux/ceph/messenger.h
> ===================================================================
> --- a/include/linux/ceph/messenger.h
> +++ b/include/linux/ceph/messenger.h
> @@ -54,7 +54,6 @@ struct ceph_connection_operations {
>  struct ceph_messenger {
>  	struct ceph_entity_inst inst;    /* my name+address */
>  	struct ceph_entity_addr my_enc_addr;
> -	struct page *zero_page;          /* used in certain error cases */
> 
>  	bool nocrc;
> 
> Index: b/net/ceph/messenger.c
> ===================================================================
> --- a/net/ceph/messenger.c
> +++ b/net/ceph/messenger.c
> @@ -52,6 +52,9 @@ static char addr_str[MAX_ADDR_STR][MAX_A
>  static DEFINE_SPINLOCK(addr_str_lock);
>  static int last_addr_str;
> 
> +static struct page *zero_page;		/* used in certain error cases */
> +static void *zero_page_address;		/* kernel virtual addr of zero_page */
> +
>  const char *ceph_pr_addr(const struct sockaddr_storage *ss)
>  {
>  	int i;
> @@ -99,18 +102,41 @@ struct workqueue_struct *ceph_msgr_wq;
> 
>  int ceph_msgr_init(void)
>  {
> +	BUG_ON(zero_page != NULL);
> +	zero_page = ZERO_PAGE(0);
> +	page_cache_get(zero_page);
> +
> +	BUG_ON(zero_page_address != NULL);
> +	zero_page_address = kmap(zero_page);

I've always assumed that kmap() used up an expensive slot, and that pages 
should be kmapped only when needed.  Maybe this should still be done 
inside write_partial_msg_pages()?
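
For reference, a minimal sketch of that alternative, with the mapping
taken only around the access; the helper name and the crc32c() call are
assumptions for illustration, not code from the patch:

	#include <linux/highmem.h>
	#include <linux/crc32c.h>

	/* CRC a span of the zero page using only a transient mapping */
	static u32 crc_zero_page(u32 crc, unsigned int off, unsigned int len)
	{
		void *kaddr = kmap(ZERO_PAGE(0));	/* map just for the copy */

		crc = crc32c(crc, kaddr + off, len);
		kunmap(ZERO_PAGE(0));			/* give back any kmap slot */
		return crc;
	}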


> +
>  	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
>  	if (!ceph_msgr_wq) {
>  		pr_err("msgr_init failed to create workqueue\n");
> +
> +		zero_page_address = NULL;
> +		kunmap(zero_page);
> +		page_cache_release(zero_page);
> +		zero_page = NULL;
> +
>  		return -ENOMEM;
>  	}
> +
>  	return 0;
>  }
>  EXPORT_SYMBOL(ceph_msgr_init);
> 
>  void ceph_msgr_exit(void)
>  {
> +	BUG_ON(ceph_msgr_wq == NULL);
>  	destroy_workqueue(ceph_msgr_wq);
> +
> +	BUG_ON(zero_page_address == NULL);
> +	zero_page_address = NULL;
> +
> +	BUG_ON(zero_page == NULL);
> +	kunmap(zero_page);
> +	page_cache_release(zero_page);
> +	zero_page = NULL;
>  }
>  EXPORT_SYMBOL(ceph_msgr_exit);
> 
> @@ -841,9 +867,9 @@ static int write_partial_msg_pages(struc
>  			max_write = bv->bv_len;
>  #endif
>  		} else {
> -			page = con->msgr->zero_page;
> +			page = zero_page;
>  			if (crc)
> -				kaddr = page_address(con->msgr->zero_page);
> +				kaddr = zero_page_address;
>  		}
>  		len = min_t(int, max_write - con->out_msg_pos.page_pos,
>  			    total_max_write);
> @@ -914,7 +940,7 @@ static int write_partial_skip(struct cep
> 
>  	while (con->out_skip > 0) {
>  		struct kvec iov = {
> -			.iov_base = page_address(con->msgr->zero_page),
> +			.iov_base = zero_page_address,
>  			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
>  		};
> 
> @@ -2222,15 +2248,6 @@ struct ceph_messenger *ceph_messenger_cr
> 
>  	spin_lock_init(&msgr->global_seq_lock);
> 
> -	/* the zero page is needed if a request is "canceled" while the message
> -	 * is being written over the socket */
> -	msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
> -	if (!msgr->zero_page) {
> -		kfree(msgr);
> -		return ERR_PTR(-ENOMEM);
> -	}
> -	kmap(msgr->zero_page);
> -
>  	if (myaddr)
>  		msgr->inst.addr = *myaddr;
> 
> @@ -2247,8 +2264,6 @@ EXPORT_SYMBOL(ceph_messenger_create);
>  void ceph_messenger_destroy(struct ceph_messenger *msgr)
>  {
>  	dout("destroy %p\n", msgr);
> -	kunmap(msgr->zero_page);
> -	__free_page(msgr->zero_page);
>  	kfree(msgr);
>  	dout("destroyed messenger %p\n", msgr);
>  }
> 


* Re: [PATCH, v2] ceph: use a shared zero page rather than one per messenger
  2012-03-02 19:21   ` Sage Weil
@ 2012-03-02 23:14     ` Alex Elder
  2012-03-03  3:43       ` Alex Elder
  0 siblings, 1 reply; 6+ messages in thread
From: Alex Elder @ 2012-03-02 23:14 UTC (permalink / raw)
  To: Sage Weil; +Cc: ceph-devel

On 03/02/2012 11:21 AM, Sage Weil wrote:
> On Wed, 29 Feb 2012, Alex Elder wrote:
>
>> Each messenger allocates a page to be used when writing zeroes
>> out in the event of error or other abnormal condition.  Instead,
>> use the kernel ZERO_PAGE() for that purpose.
>>
>> Signed-off-by: Alex Elder <elder@dreamhost.com>
>> ---
>> v2:  Updated to use kernel ZERO_PAGE()
>>
>>   include/linux/ceph/messenger.h |    1
>>   net/ceph/messenger.c           |   43 +++++++++++++++++++++++++++--------------
>>   2 files changed, 29 insertions(+), 15 deletions(-)
>>
>> Index: b/include/linux/ceph/messenger.h
>> ===================================================================
>> --- a/include/linux/ceph/messenger.h
>> +++ b/include/linux/ceph/messenger.h
>> @@ -54,7 +54,6 @@ struct ceph_connection_operations {
>>   struct ceph_messenger {
>>   	struct ceph_entity_inst inst;    /* my name+address */
>>   	struct ceph_entity_addr my_enc_addr;
>> -	struct page *zero_page;          /* used in certain error cases */
>>
>>   	bool nocrc;
>>
>> Index: b/net/ceph/messenger.c
>> ===================================================================
>> --- a/net/ceph/messenger.c
>> +++ b/net/ceph/messenger.c
>> @@ -52,6 +52,9 @@ static char addr_str[MAX_ADDR_STR][MAX_A
>>   static DEFINE_SPINLOCK(addr_str_lock);
>>   static int last_addr_str;
>>
>> +static struct page *zero_page;		/* used in certain error cases */
>> +static void *zero_page_address;		/* kernel virtual addr of zero_page */
>> +
>>   const char *ceph_pr_addr(const struct sockaddr_storage *ss)
>>   {
>>   	int i;
>> @@ -99,18 +102,41 @@ struct workqueue_struct *ceph_msgr_wq;
>>
>>   int ceph_msgr_init(void)
>>   {
>> +	BUG_ON(zero_page != NULL);
>> +	zero_page = ZERO_PAGE(0);
>> +	page_cache_get(zero_page);
>> +
>> +	BUG_ON(zero_page_address != NULL);
>> +	zero_page_address = kmap(zero_page);
>
> I've always assumed that kmap() used up an expensive slot, and that pages
> should be kmapped only when needed. Maybe this should still be done
> inside write_partial_msg_pages()?

I think that's only an issue for high memory pages, and I doubt
the kernel zero page sits in high memory.  But you make a good
point.
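
For reference, the kmap() fast path of that era looks roughly like the
sketch below (a paraphrase of the x86 highmem code, not an exact copy),
which is why mapping a lowmem page consumes no slot:

	/* rough paraphrase of arch/x86/mm/highmem_32.c:kmap() */
	void *kmap(struct page *page)
	{
		might_sleep();
		if (!PageHighMem(page))
			return page_address(page);	/* lowmem: direct map, free */
		return kmap_high(page);			/* highmem: takes a slot */
	}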

I'll look into it a bit and if we're not guaranteed it's
"free" to do this I'll re-post with the kmap done inside where
it's needed.  I'll follow up either way.

					-Alex

>
>> +
>>   	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
>>   	if (!ceph_msgr_wq) {
>>   		pr_err("msgr_init failed to create workqueue\n");
>> +
>> +		zero_page_address = NULL;
>> +		kunmap(zero_page);
>> +		page_cache_release(zero_page);
>> +		zero_page = NULL;
>> +
>>   		return -ENOMEM;
>>   	}
>> +
>>   	return 0;
>>   }
>>   EXPORT_SYMBOL(ceph_msgr_init);
>>
>>   void ceph_msgr_exit(void)
>>   {
>> +	BUG_ON(ceph_msgr_wq == NULL);
>>   	destroy_workqueue(ceph_msgr_wq);
>> +
>> +	BUG_ON(zero_page_address == NULL);
>> +	zero_page_address = NULL;
>> +
>> +	BUG_ON(zero_page == NULL);
>> +	kunmap(zero_page);
>> +	page_cache_release(zero_page);
>> +	zero_page = NULL;
>>   }
>>   EXPORT_SYMBOL(ceph_msgr_exit);
>>
>> @@ -841,9 +867,9 @@ static int write_partial_msg_pages(struc
>>   			max_write = bv->bv_len;
>>   #endif
>>   		} else {
>> -			page = con->msgr->zero_page;
>> +			page = zero_page;
>>   			if (crc)
>> -				kaddr = page_address(con->msgr->zero_page);
>> +				kaddr = zero_page_address;
>>   		}
>>   		len = min_t(int, max_write - con->out_msg_pos.page_pos,
>>   			    total_max_write);
>> @@ -914,7 +940,7 @@ static int write_partial_skip(struct cep
>>
>>   	while (con->out_skip > 0) {
>>   		struct kvec iov = {
>> -			.iov_base = page_address(con->msgr->zero_page),
>> +			.iov_base = zero_page_address,
>>   			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
>>   		};
>>
>> @@ -2222,15 +2248,6 @@ struct ceph_messenger *ceph_messenger_cr
>>
>>   	spin_lock_init(&msgr->global_seq_lock);
>>
>> -	/* the zero page is needed if a request is "canceled" while the message
>> -	 * is being written over the socket */
>> -	msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
>> -	if (!msgr->zero_page) {
>> -		kfree(msgr);
>> -		return ERR_PTR(-ENOMEM);
>> -	}
>> -	kmap(msgr->zero_page);
>> -
>>   	if (myaddr)
>>   		msgr->inst.addr = *myaddr;
>>
>> @@ -2247,8 +2264,6 @@ EXPORT_SYMBOL(ceph_messenger_create);
>>   void ceph_messenger_destroy(struct ceph_messenger *msgr)
>>   {
>>   	dout("destroy %p\n", msgr);
>> -	kunmap(msgr->zero_page);
>> -	__free_page(msgr->zero_page);
>>   	kfree(msgr);
>>   	dout("destroyed messenger %p\n", msgr);
>>   }
>>



* Re: [PATCH, v2] ceph: use a shared zero page rather than one per messenger
  2012-03-02 23:14     ` Alex Elder
@ 2012-03-03  3:43       ` Alex Elder
  0 siblings, 0 replies; 6+ messages in thread
From: Alex Elder @ 2012-03-03  3:43 UTC (permalink / raw)
  To: Sage Weil; +Cc: ceph-devel

On 03/02/2012 03:14 PM, Alex Elder wrote:
> On 03/02/2012 11:21 AM, Sage Weil wrote:
>> On Wed, 29 Feb 2012, Alex Elder wrote:
>>
>>> Each messenger allocates a page to be used when writing zeroes
>>> out in the event of error or other abnormal condition. Instead,
>>> use the kernel ZERO_PAGE() for that purpose.
>>>
>>> Signed-off-by: Alex Elder <elder@dreamhost.com>
>>> ---
>>> v2: Updated to use kernel ZERO_PAGE()
>>>
>>> include/linux/ceph/messenger.h | 1

. . .

>>> + BUG_ON(zero_page_address != NULL);
>>> + zero_page_address = kmap(zero_page);
>>
>> I've always assumed that kmap() used up an expensive slot, and that pages
>> should be kmapped only when needed. Maybe this should still be done
>> inside write_partial_msg_pages()?
>
> I think that's only an issue for high memory pages, and I doubt
> the kernel zero page sits in high memory. But you make a good
> point.
>
> I'll look into it a bit and if we're not guaranteed it's
> "free" to do this I'll re-post with the kmap done inside where
> it's needed. I'll follow up either way.

OK, here's how I'm going to resolve this.  First, the result of this
patch makes things no worse than they were before with respect to
hanging onto a zero page mapping.  Previously, every messenger
allocated its own zero page, and mapped it and held onto that
mapping for the duration of that messenger's lifetime.

With this patch in place, we don't allocate any new page (we
use ZERO_PAGE()), and we take a single mapping that all
messengers share until the subsystem is shut down.

So basically, since things are left no worse than before, I'm
going to keep this as-is.

I will do a follow-on patch soon (after I get through committing
all those I've put out this week) that will affect the way the
zero page is handled.

						-Alex

