All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] switch rangeset's lock to rwlock
@ 2014-09-12 12:55 Jan Beulich
  2014-09-18 10:43 ` Tim Deegan
  2014-09-19 16:32 ` Konrad Rzeszutek Wilk
  0 siblings, 2 replies; 22+ messages in thread
From: Jan Beulich @ 2014-09-12 12:55 UTC (permalink / raw)
  To: xen-devel; +Cc: Ian Campbell, Keir Fraser, Ian Jackson, Tim Deegan

[-- Attachment #1: Type: text/plain, Size: 3554 bytes --]

As a general library routine, it should behave as efficiently as
possible, even if at present no significant contention is known here.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
With the widened use of rangesets I'd like to re-suggest this change
which I had posted already a couple of years back.

--- a/xen/common/rangeset.c
+++ b/xen/common/rangeset.c
@@ -28,7 +28,7 @@ struct rangeset {
 
     /* Number of ranges that can be allocated */
     long             nr_ranges;
-    spinlock_t       lock;
+    rwlock_t         lock;
 
     /* Pretty-printing name. */
     char             name[32];
@@ -120,7 +120,7 @@ int rangeset_add_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -176,7 +176,7 @@ int rangeset_add_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -188,7 +188,7 @@ int rangeset_remove_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -244,7 +244,7 @@ int rangeset_remove_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -256,10 +256,10 @@ int rangeset_contains_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, s);
     contains = (x && (x->e >= e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return contains;
 }
@@ -272,10 +272,10 @@ int rangeset_overlaps_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, e);
     overlaps = (x && (s <= x->e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return overlaps;
 }
@@ -287,13 +287,13 @@ int rangeset_report_ranges(
     struct range *x;
     int rc = 0;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x) )
         if ( x->e >= s )
             rc = cb(max(x->s, s), min(x->e, e), ctxt);
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return rc;
 }
@@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
     if ( r == NULL )
         return NULL;
 
-    spin_lock_init(&r->lock);
+    rwlock_init(&r->lock);
     INIT_LIST_HEAD(&r->range_list);
     r->nr_ranges = -1;
 
@@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
 
     if ( a < b )
     {
-        spin_lock(&a->lock);
-        spin_lock(&b->lock);
+        write_lock(&a->lock);
+        write_lock(&b->lock);
     }
     else
     {
-        spin_lock(&b->lock);
-        spin_lock(&a->lock);
+        write_lock(&b->lock);
+        write_lock(&a->lock);
     }
 
     list_splice_init(&a->range_list, &tmp);
     list_splice_init(&b->range_list, &a->range_list);
     list_splice(&tmp, &b->range_list);
 
-    spin_unlock(&a->lock);
-    spin_unlock(&b->lock);
+    write_unlock(&a->lock);
+    write_unlock(&b->lock);
 }
 
 /*****************************
@@ -446,7 +446,7 @@ void rangeset_printk(
     int nr_printed = 0;
     struct range *x;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     printk("%-10s {", r->name);
 
@@ -465,7 +465,7 @@ void rangeset_printk(
 
     printk(" }");
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 }
 
 void rangeset_domain_printk(




[-- Attachment #2: rangesets-rwlock.patch --]
[-- Type: text/plain, Size: 3584 bytes --]

switch rangeset's lock to rwlock

As a general library routine, it should behave as efficiently as
possible, even if at present no significant contention is known here.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
With the widened use of rangesets I'd like to re-suggest this change
which I had posted already a couple of years back.

--- a/xen/common/rangeset.c
+++ b/xen/common/rangeset.c
@@ -28,7 +28,7 @@ struct rangeset {
 
     /* Number of ranges that can be allocated */
     long             nr_ranges;
-    spinlock_t       lock;
+    rwlock_t         lock;
 
     /* Pretty-printing name. */
     char             name[32];
@@ -120,7 +120,7 @@ int rangeset_add_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -176,7 +176,7 @@ int rangeset_add_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -188,7 +188,7 @@ int rangeset_remove_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -244,7 +244,7 @@ int rangeset_remove_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -256,10 +256,10 @@ int rangeset_contains_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, s);
     contains = (x && (x->e >= e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return contains;
 }
@@ -272,10 +272,10 @@ int rangeset_overlaps_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, e);
     overlaps = (x && (s <= x->e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return overlaps;
 }
@@ -287,13 +287,13 @@ int rangeset_report_ranges(
     struct range *x;
     int rc = 0;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x) )
         if ( x->e >= s )
             rc = cb(max(x->s, s), min(x->e, e), ctxt);
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return rc;
 }
@@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
     if ( r == NULL )
         return NULL;
 
-    spin_lock_init(&r->lock);
+    rwlock_init(&r->lock);
     INIT_LIST_HEAD(&r->range_list);
     r->nr_ranges = -1;
 
@@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
 
     if ( a < b )
     {
-        spin_lock(&a->lock);
-        spin_lock(&b->lock);
+        write_lock(&a->lock);
+        write_lock(&b->lock);
     }
     else
     {
-        spin_lock(&b->lock);
-        spin_lock(&a->lock);
+        write_lock(&b->lock);
+        write_lock(&a->lock);
     }
 
     list_splice_init(&a->range_list, &tmp);
     list_splice_init(&b->range_list, &a->range_list);
     list_splice(&tmp, &b->range_list);
 
-    spin_unlock(&a->lock);
-    spin_unlock(&b->lock);
+    write_unlock(&a->lock);
+    write_unlock(&b->lock);
 }
 
 /*****************************
@@ -446,7 +446,7 @@ void rangeset_printk(
     int nr_printed = 0;
     struct range *x;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     printk("%-10s {", r->name);
 
@@ -465,7 +465,7 @@ void rangeset_printk(
 
     printk(" }");
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 }
 
 void rangeset_domain_printk(

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-12 12:55 [PATCH] switch rangeset's lock to rwlock Jan Beulich
@ 2014-09-18 10:43 ` Tim Deegan
  2014-09-18 12:15   ` Jan Beulich
  2014-09-19 16:32 ` Konrad Rzeszutek Wilk
  1 sibling, 1 reply; 22+ messages in thread
From: Tim Deegan @ 2014-09-18 10:43 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Ian Campbell, xen-devel, Keir Fraser, Ian Jackson

At 13:55 +0100 on 12 Sep (1410526507), Jan Beulich wrote:
> As a general library routine, it should behave as efficiently as
> possible, even if at present no significant contention is known here.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> With the widened use of rangesets I'd like to re-suggest this change
> which I had posted already a couple of years back.

Is this addressing an actual (measured) problem or just seems like a
good idea?  If the latter maybe keep it until after 4.5?

Tim.

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-18 10:43 ` Tim Deegan
@ 2014-09-18 12:15   ` Jan Beulich
  2014-09-18 13:02     ` Tim Deegan
  0 siblings, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2014-09-18 12:15 UTC (permalink / raw)
  To: Tim Deegan; +Cc: Ian Campbell, xen-devel, Keir Fraser, Ian Jackson

>>> On 18.09.14 at 12:43, <tim@xen.org> wrote:
> At 13:55 +0100 on 12 Sep (1410526507), Jan Beulich wrote:
>> As a general library routine, it should behave as efficiently as
>> possible, even if at present no significant contention is known here.
>> 
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> With the widened use of rangesets I'd like to re-suggest this change
>> which I had posted already a couple of years back.
> 
> Is this addressing an actual (measured) problem or just seems like a
> good idea?  If the latter maybe keep it until after 4.5?

The latter. And yes, I have no problem keeping it until after 4.5,
it's just that the multi-ioreq-server's extended use of rangesets
(as said elsewhere) would seem to make this a reasonable fit for
4.5.

Jan

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-18 12:15   ` Jan Beulich
@ 2014-09-18 13:02     ` Tim Deegan
  2014-09-18 13:32       ` Jan Beulich
  0 siblings, 1 reply; 22+ messages in thread
From: Tim Deegan @ 2014-09-18 13:02 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Ian Campbell, xen-devel, Keir Fraser, Ian Jackson

At 13:15 +0100 on 18 Sep (1411042524), Jan Beulich wrote:
> >>> On 18.09.14 at 12:43, <tim@xen.org> wrote:
> > At 13:55 +0100 on 12 Sep (1410526507), Jan Beulich wrote:
> >> As a general library routine, it should behave as efficiently as
> >> possible, even if at present no significant contention is known here.
> >> 
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> ---
> >> With the widened use of rangesets I'd like to re-suggest this change
> >> which I had posted already a couple of years back.
> > 
> > Is this addressing an actual (measured) problem or just seems like a
> > good idea?  If the latter maybe keep it until after 4.5?
> 
> The latter. And yes, I have no problem keeping it until after 4.5,
> it's just that the multi-ioreq-server's extended use of rangesets
> (as said elsewhere) would seem to make this a reasonable fit for
> 4.5.

Well, I think it's a question for the release manager, then. :)

I wouldn't be inclined to tinker with them unless we had a measurement
justifying the change.  After all, the rangeset operations are not
long-running ones and we'd be adding some overhead.

Tim.

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-18 13:02     ` Tim Deegan
@ 2014-09-18 13:32       ` Jan Beulich
  2014-09-18 14:52         ` Paul Durrant
                           ` (2 more replies)
  0 siblings, 3 replies; 22+ messages in thread
From: Jan Beulich @ 2014-09-18 13:32 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk, Tim Deegan
  Cc: Ian Campbell, xen-devel, Keir Fraser, Ian Jackson

>>> On 18.09.14 at 15:02, <tim@xen.org> wrote:
> At 13:15 +0100 on 18 Sep (1411042524), Jan Beulich wrote:
>> >>> On 18.09.14 at 12:43, <tim@xen.org> wrote:
>> > At 13:55 +0100 on 12 Sep (1410526507), Jan Beulich wrote:
>> >> As a general library routine, it should behave as efficiently as
>> >> possible, even if at present no significant contention is known here.
>> >> 
>> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> >> ---
>> >> With the widened use of rangesets I'd like to re-suggest this change
>> >> which I had posted already a couple of years back.
>> > 
>> > Is this addressing an actual (measured) problem or just seems like a
>> > good idea?  If the latter maybe keep it until after 4.5?
>> 
>> The latter. And yes, I have no problem keeping it until after 4.5,
>> it's just that the multi-ioreq-server's extended use of rangesets
>> (as said elsewhere) would seem to make this a reasonable fit for
>> 4.5.
> 
> Well, I think it's a question for the release manager, then. :)

Konrad?

> I wouldn't be inclined to tinker with them unless we had a measurement
> justifying the change.  After all, the rangeset operations are not
> long-running ones and we'd be adding some overhead.

Well, that's a pretty weak argument: The vHPET lock doesn't
protect long running operations either, yet its conversion to rwlock
did yield a significant improvement. But yes, unless rangesets
participate in something that may get used heavily by a guest,
changing the lock kind would likely not have a significant effect.
Otoh figuring out that lock contention is a problem involves a non-
negligible amount of work, preemption of which seems at least
desirable.

In fact, in the latest runs with those many-vCPU Windows guests I
see signs of contention even on vpt's pl_time_lock (8 out of 64 vCPU-s
racing for it).

Jan

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-18 13:32       ` Jan Beulich
@ 2014-09-18 14:52         ` Paul Durrant
  2014-09-19 16:33         ` Konrad Rzeszutek Wilk
  2014-09-22  9:42         ` Ian Campbell
  2 siblings, 0 replies; 22+ messages in thread
From: Paul Durrant @ 2014-09-18 14:52 UTC (permalink / raw)
  To: Jan Beulich, Konrad Rzeszutek Wilk, Tim (Xen.org)
  Cc: xen-devel, Keir (Xen.org), Ian Campbell, Ian Jackson

> -----Original Message-----
> From: xen-devel-bounces@lists.xen.org [mailto:xen-devel-
> bounces@lists.xen.org] On Behalf Of Jan Beulich
> Sent: 18 September 2014 14:33
> To: Konrad Rzeszutek Wilk; Tim (Xen.org)
> Cc: Ian Campbell; xen-devel; Keir (Xen.org); Ian Jackson
> Subject: Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
> 
> >>> On 18.09.14 at 15:02, <tim@xen.org> wrote:
> > At 13:15 +0100 on 18 Sep (1411042524), Jan Beulich wrote:
> >> >>> On 18.09.14 at 12:43, <tim@xen.org> wrote:
> >> > At 13:55 +0100 on 12 Sep (1410526507), Jan Beulich wrote:
> >> >> As a general library routine, it should behave as efficiently as
> >> >> possible, even if at present no significant contention is known here.
> >> >>
> >> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> >> ---
> >> >> With the widened use of rangesets I'd like to re-suggest this change
> >> >> which I had posted already a couple of years back.
> >> >
> >> > Is this addressing an actual (measured) problem or just seems like a
> >> > good idea?  If the latter maybe keep it until after 4.5?
> >>
> >> The latter. And yes, I have no problem keeping it until after 4.5,
> >> it's just that the multi-ioreq-server's extended use of rangesets
> >> (as said elsewhere) would seem to make this a reasonable fit for
> >> 4.5.
> >
> > Well, I think it's a question for the release manager, then. :)
> 
> Konrad?
> 
> > I wouldn't be inclined to tinker with them unless we had a measurement
> > justifying the change.  After all, the rangeset operations are not
> > long-running ones and we'd be adding some overhead.
> 
> Well, that's a pretty weak argument: The vHPET lock doesn't
> protect long running operations either, yet its conversion to rwlock
> did yield a significant improvement. But yes, unless rangesets
> participate in something that may get used heavily by a guest,
> changing the lock kind would likely not have a significant effect.

The ioreq server code uses rangesets so multiple vcpus all hitting an aperture registered by an ioreq server could contend.

  Paul

> Otoh figuring out that lock contention is a problem involves a non-
> negligible amount of work, preemption of which seems at least
> desirable.
> 
> In fact, in the latest runs with those many-vCPU Windows guests I
> see signs of contention even on vpt's pl_time_lock (8 out of 64 vCPU-s
> racing for it).
> 
> Jan
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-12 12:55 [PATCH] switch rangeset's lock to rwlock Jan Beulich
  2014-09-18 10:43 ` Tim Deegan
@ 2014-09-19 16:32 ` Konrad Rzeszutek Wilk
  2014-09-30  8:50   ` Jan Beulich
  1 sibling, 1 reply; 22+ messages in thread
From: Konrad Rzeszutek Wilk @ 2014-09-19 16:32 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Ian Campbell, xen-devel, Keir Fraser, Ian Jackson, Tim Deegan

On Fri, Sep 12, 2014 at 01:55:07PM +0100, Jan Beulich wrote:
> As a general library routine, it should behave as efficiently as
> possible, even if at present no significant contention is known here.
> 

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

I am comfortable with this going to Xen 4.5.

> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> With the widened use of rangesets I'd like to re-suggest this change
> which I had posted already a couple of years back.
> 
> --- a/xen/common/rangeset.c
> +++ b/xen/common/rangeset.c
> @@ -28,7 +28,7 @@ struct rangeset {
>  
>      /* Number of ranges that can be allocated */
>      long             nr_ranges;
> -    spinlock_t       lock;
> +    rwlock_t         lock;
>  
>      /* Pretty-printing name. */
>      char             name[32];
> @@ -120,7 +120,7 @@ int rangeset_add_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    write_lock(&r->lock);
>  
>      x = find_range(r, s);
>      y = find_range(r, e);
> @@ -176,7 +176,7 @@ int rangeset_add_range(
>      }
>  
>   out:
> -    spin_unlock(&r->lock);
> +    write_unlock(&r->lock);
>      return rc;
>  }
>  
> @@ -188,7 +188,7 @@ int rangeset_remove_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    write_lock(&r->lock);
>  
>      x = find_range(r, s);
>      y = find_range(r, e);
> @@ -244,7 +244,7 @@ int rangeset_remove_range(
>      }
>  
>   out:
> -    spin_unlock(&r->lock);
> +    write_unlock(&r->lock);
>      return rc;
>  }
>  
> @@ -256,10 +256,10 @@ int rangeset_contains_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>      x = find_range(r, s);
>      contains = (x && (x->e >= e));
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  
>      return contains;
>  }
> @@ -272,10 +272,10 @@ int rangeset_overlaps_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>      x = find_range(r, e);
>      overlaps = (x && (s <= x->e));
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  
>      return overlaps;
>  }
> @@ -287,13 +287,13 @@ int rangeset_report_ranges(
>      struct range *x;
>      int rc = 0;
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>  
>      for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x) )
>          if ( x->e >= s )
>              rc = cb(max(x->s, s), min(x->e, e), ctxt);
>  
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  
>      return rc;
>  }
> @@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
>      if ( r == NULL )
>          return NULL;
>  
> -    spin_lock_init(&r->lock);
> +    rwlock_init(&r->lock);
>      INIT_LIST_HEAD(&r->range_list);
>      r->nr_ranges = -1;
>  
> @@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
>  
>      if ( a < b )
>      {
> -        spin_lock(&a->lock);
> -        spin_lock(&b->lock);
> +        write_lock(&a->lock);
> +        write_lock(&b->lock);
>      }
>      else
>      {
> -        spin_lock(&b->lock);
> -        spin_lock(&a->lock);
> +        write_lock(&b->lock);
> +        write_lock(&a->lock);
>      }
>  
>      list_splice_init(&a->range_list, &tmp);
>      list_splice_init(&b->range_list, &a->range_list);
>      list_splice(&tmp, &b->range_list);
>  
> -    spin_unlock(&a->lock);
> -    spin_unlock(&b->lock);
> +    write_unlock(&a->lock);
> +    write_unlock(&b->lock);
>  }
>  
>  /*****************************
> @@ -446,7 +446,7 @@ void rangeset_printk(
>      int nr_printed = 0;
>      struct range *x;
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>  
>      printk("%-10s {", r->name);
>  
> @@ -465,7 +465,7 @@ void rangeset_printk(
>  
>      printk(" }");
>  
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  }
>  
>  void rangeset_domain_printk(
> 
> 
> 

> switch rangeset's lock to rwlock
> 
> As a general library routine, it should behave as efficiently as
> possible, even if at present no significant contention is known here.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> With the widened use of rangesets I'd like to re-suggest this change
> which I had posted already a couple of years back.
> 
> --- a/xen/common/rangeset.c
> +++ b/xen/common/rangeset.c
> @@ -28,7 +28,7 @@ struct rangeset {
>  
>      /* Number of ranges that can be allocated */
>      long             nr_ranges;
> -    spinlock_t       lock;
> +    rwlock_t         lock;
>  
>      /* Pretty-printing name. */
>      char             name[32];
> @@ -120,7 +120,7 @@ int rangeset_add_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    write_lock(&r->lock);
>  
>      x = find_range(r, s);
>      y = find_range(r, e);
> @@ -176,7 +176,7 @@ int rangeset_add_range(
>      }
>  
>   out:
> -    spin_unlock(&r->lock);
> +    write_unlock(&r->lock);
>      return rc;
>  }
>  
> @@ -188,7 +188,7 @@ int rangeset_remove_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    write_lock(&r->lock);
>  
>      x = find_range(r, s);
>      y = find_range(r, e);
> @@ -244,7 +244,7 @@ int rangeset_remove_range(
>      }
>  
>   out:
> -    spin_unlock(&r->lock);
> +    write_unlock(&r->lock);
>      return rc;
>  }
>  
> @@ -256,10 +256,10 @@ int rangeset_contains_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>      x = find_range(r, s);
>      contains = (x && (x->e >= e));
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  
>      return contains;
>  }
> @@ -272,10 +272,10 @@ int rangeset_overlaps_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>      x = find_range(r, e);
>      overlaps = (x && (s <= x->e));
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  
>      return overlaps;
>  }
> @@ -287,13 +287,13 @@ int rangeset_report_ranges(
>      struct range *x;
>      int rc = 0;
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>  
>      for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x) )
>          if ( x->e >= s )
>              rc = cb(max(x->s, s), min(x->e, e), ctxt);
>  
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  
>      return rc;
>  }
> @@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
>      if ( r == NULL )
>          return NULL;
>  
> -    spin_lock_init(&r->lock);
> +    rwlock_init(&r->lock);
>      INIT_LIST_HEAD(&r->range_list);
>      r->nr_ranges = -1;
>  
> @@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
>  
>      if ( a < b )
>      {
> -        spin_lock(&a->lock);
> -        spin_lock(&b->lock);
> +        write_lock(&a->lock);
> +        write_lock(&b->lock);
>      }
>      else
>      {
> -        spin_lock(&b->lock);
> -        spin_lock(&a->lock);
> +        write_lock(&b->lock);
> +        write_lock(&a->lock);
>      }
>  
>      list_splice_init(&a->range_list, &tmp);
>      list_splice_init(&b->range_list, &a->range_list);
>      list_splice(&tmp, &b->range_list);
>  
> -    spin_unlock(&a->lock);
> -    spin_unlock(&b->lock);
> +    write_unlock(&a->lock);
> +    write_unlock(&b->lock);
>  }
>  
>  /*****************************
> @@ -446,7 +446,7 @@ void rangeset_printk(
>      int nr_printed = 0;
>      struct range *x;
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>  
>      printk("%-10s {", r->name);
>  
> @@ -465,7 +465,7 @@ void rangeset_printk(
>  
>      printk(" }");
>  
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  }
>  
>  void rangeset_domain_printk(

> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-18 13:32       ` Jan Beulich
  2014-09-18 14:52         ` Paul Durrant
@ 2014-09-19 16:33         ` Konrad Rzeszutek Wilk
  2014-09-22  9:42         ` Ian Campbell
  2 siblings, 0 replies; 22+ messages in thread
From: Konrad Rzeszutek Wilk @ 2014-09-19 16:33 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Ian Campbell, Ian Jackson, Keir Fraser, Tim Deegan, xen-devel

On Thu, Sep 18, 2014 at 02:32:35PM +0100, Jan Beulich wrote:
> >>> On 18.09.14 at 15:02, <tim@xen.org> wrote:
> > At 13:15 +0100 on 18 Sep (1411042524), Jan Beulich wrote:
> >> >>> On 18.09.14 at 12:43, <tim@xen.org> wrote:
> >> > At 13:55 +0100 on 12 Sep (1410526507), Jan Beulich wrote:
> >> >> As a general library routine, it should behave as efficiently as
> >> >> possible, even if at present no significant contention is known here.
> >> >> 
> >> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> >> ---
> >> >> With the widened use of rangesets I'd like to re-suggest this change
> >> >> which I had posted already a couple of years back.
> >> > 
> >> > Is this addressing an actual (measured) problem or just seems like a
> >> > good idea?  If the latter maybe keep it until after 4.5?
> >> 
> >> The latter. And yes, I have no problem keeping it until after 4.5,
> >> it's just that the multi-ioreq-server's extended use of rangesets
> >> (as said elsewhere) would seem to make this a reasonable fit for
> >> 4.5.
> > 
> > Well, I think it's a question for the release manager, then. :)
> 
> Konrad?

I am comfortable with it. Responded with an Reviewed-by on the patch
itself and Ack for Xen 4.5.

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-18 13:32       ` Jan Beulich
  2014-09-18 14:52         ` Paul Durrant
  2014-09-19 16:33         ` Konrad Rzeszutek Wilk
@ 2014-09-22  9:42         ` Ian Campbell
  2014-09-22 10:34           ` Jan Beulich
  2 siblings, 1 reply; 22+ messages in thread
From: Ian Campbell @ 2014-09-22  9:42 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Ian Jackson, Keir Fraser, Tim Deegan, xen-devel

On Thu, 2014-09-18 at 14:32 +0100, Jan Beulich wrote:
> >>> On 18.09.14 at 15:02, <tim@xen.org> wrote:
> > At 13:15 +0100 on 18 Sep (1411042524), Jan Beulich wrote:
> >> >>> On 18.09.14 at 12:43, <tim@xen.org> wrote:
> >> > At 13:55 +0100 on 12 Sep (1410526507), Jan Beulich wrote:
> >> >> As a general library routine, it should behave as efficiently as
> >> >> possible, even if at present no significant contention is known here.
> >> >> 
> >> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> >> ---
> >> >> With the widened use of rangesets I'd like to re-suggest this change
> >> >> which I had posted already a couple of years back.
> >> > 
> >> > Is this addressing an actual (measured) problem or just seems like a
> >> > good idea?  If the latter maybe keep it until after 4.5?
> >> 
> >> The latter. And yes, I have no problem keeping it until after 4.5,
> >> it's just that the multi-ioreq-server's extended use of rangesets
> >> (as said elsewhere) would seem to make this a reasonable fit for
> >> 4.5.
> > 
> > Well, I think it's a question for the release manager, then. :)
> 
> Konrad?
> 
> > I wouldn't be inclined to tinker with them unless we had a measurement
> > justifying the change.  After all, the rangeset operations are not
> > long-running ones and we'd be adding some overhead.
> 
> Well, that's a pretty weak argument: The vHPET lock doesn't
> protect long running operations either, yet its conversion to rwlock
> did yield a significant improvement.

That implies the measurement Tim is asking about...

>  But yes, unless rangesets
> participate in something that may get used heavily by a guest,
> changing the lock kind would likely not have a significant effect.
> Otoh figuring out that lock contention is a problem involves a non-
> negligible amount of work, preemption of which seems at least
> desirable.
> 
> In fact, in the latest runs with those many-vCPU Windows guests I
> see signs of contention even on vpt's pl_time_lock (8 out of 64 vCPU-s
> racing for it).

... as does this, but that isn't affected by this change, is it?

I suppose constructing a test for the rangeset getting hit by e.g. the
ioreq server would be tricky to do unless you happen to already have a
disaggregated qemu setup (which I suppose you don't).

What are the potential negative effects if our gut feeling about the
access patterns of the rangesets is wrong? Is the rwlock potentially
slower than the existing lock for the read or uncontended or some other
case?

Ian.

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-22  9:42         ` Ian Campbell
@ 2014-09-22 10:34           ` Jan Beulich
  0 siblings, 0 replies; 22+ messages in thread
From: Jan Beulich @ 2014-09-22 10:34 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, Tim Deegan, Keir Fraser, Ian Jackson

>>> On 22.09.14 at 11:42, <Ian.Campbell@eu.citrix.com> wrote:
> On Thu, 2014-09-18 at 14:32 +0100, Jan Beulich wrote:
>> >>> On 18.09.14 at 15:02, <tim@xen.org> wrote:
>> > I wouldn't be inclined to tinker with them unless we had a measurement
>> > justifying the change.  After all, the rangeset operations are not
>> > long-running ones and we'd be adding some overhead.
>> 
>> Well, that's a pretty weak argument: The vHPET lock doesn't
>> protect long running operations either, yet its conversion to rwlock
>> did yield a significant improvement.
> 
> That implies the measurement Tim is asking about...
> 
>>  But yes, unless rangesets
>> participate in something that may get used heavily by a guest,
>> changing the lock kind would likely not have a significant effect.
>> Otoh figuring out that lock contention is a problem involves a non-
>> negligible amount of work, preemption of which seems at least
>> desirable.
>> 
>> In fact, in the latest runs with those many-vCPU Windows guests I
>> see signs of contention even on vpt's pl_time_lock (8 out of 64 vCPU-s
>> racing for it).
> 
> ... as does this, but that isn't affected by this change, is it?

No, it was merely another statement towards a locked region being
short running not being an argument for there not being potential
(severe) contention.

> I suppose constructing a test for the rangeset getting hit by e.g. the
> ioreq server would be tricky to do unless you happen to already have a
> disaggregated qemu setup (which I suppose you don't).

Correct.

> What are the potential negative effects if our gut feeling about the
> access patterns of the rangesets is wrong? Is the rwlock potentially
> slower than the existing lock for the read or uncontended or some other
> case?

The only downside is that the raw lock structure is 4 bytes instead
of 2 for the simple spin lock.

Jan

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-19 16:32 ` Konrad Rzeszutek Wilk
@ 2014-09-30  8:50   ` Jan Beulich
  2014-09-30 12:01     ` Tim Deegan
  0 siblings, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2014-09-30  8:50 UTC (permalink / raw)
  To: Ian Campbell, Ian Jackson, Keir Fraser, Tim Deegan; +Cc: xen-devel

>>> On 19.09.14 at 18:32, <konrad.wilk@oracle.com> wrote:
> On Fri, Sep 12, 2014 at 01:55:07PM +0100, Jan Beulich wrote:
>> As a general library routine, it should behave as efficiently as
>> possible, even if at present no significant contention is known here.
>> 
> 
> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> 
> I am comfortable with this going to Xen 4.5.

Anyone of you wanting to ack this then, or should I nevertheless
postpone it until after 4.5?

Jan

>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> With the widened use of rangesets I'd like to re-suggest this change
>> which I had posted already a couple of years back.
>> 
>> --- a/xen/common/rangeset.c
>> +++ b/xen/common/rangeset.c
>> @@ -28,7 +28,7 @@ struct rangeset {
>>  
>>      /* Number of ranges that can be allocated */
>>      long             nr_ranges;
>> -    spinlock_t       lock;
>> +    rwlock_t         lock;
>>  
>>      /* Pretty-printing name. */
>>      char             name[32];
>> @@ -120,7 +120,7 @@ int rangeset_add_range(
>>  
>>      ASSERT(s <= e);
>>  
>> -    spin_lock(&r->lock);
>> +    write_lock(&r->lock);
>>  
>>      x = find_range(r, s);
>>      y = find_range(r, e);
>> @@ -176,7 +176,7 @@ int rangeset_add_range(
>>      }
>>  
>>   out:
>> -    spin_unlock(&r->lock);
>> +    write_unlock(&r->lock);
>>      return rc;
>>  }
>>  
>> @@ -188,7 +188,7 @@ int rangeset_remove_range(
>>  
>>      ASSERT(s <= e);
>>  
>> -    spin_lock(&r->lock);
>> +    write_lock(&r->lock);
>>  
>>      x = find_range(r, s);
>>      y = find_range(r, e);
>> @@ -244,7 +244,7 @@ int rangeset_remove_range(
>>      }
>>  
>>   out:
>> -    spin_unlock(&r->lock);
>> +    write_unlock(&r->lock);
>>      return rc;
>>  }
>>  
>> @@ -256,10 +256,10 @@ int rangeset_contains_range(
>>  
>>      ASSERT(s <= e);
>>  
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>      x = find_range(r, s);
>>      contains = (x && (x->e >= e));
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>  
>>      return contains;
>>  }
>> @@ -272,10 +272,10 @@ int rangeset_overlaps_range(
>>  
>>      ASSERT(s <= e);
>>  
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>      x = find_range(r, e);
>>      overlaps = (x && (s <= x->e));
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>  
>>      return overlaps;
>>  }
>> @@ -287,13 +287,13 @@ int rangeset_report_ranges(
>>      struct range *x;
>>      int rc = 0;
>>  
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>  
>>      for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x) )
>>          if ( x->e >= s )
>>              rc = cb(max(x->s, s), min(x->e, e), ctxt);
>>  
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>  
>>      return rc;
>>  }
>> @@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
>>      if ( r == NULL )
>>          return NULL;
>>  
>> -    spin_lock_init(&r->lock);
>> +    rwlock_init(&r->lock);
>>      INIT_LIST_HEAD(&r->range_list);
>>      r->nr_ranges = -1;
>>  
>> @@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
>>  
>>      if ( a < b )
>>      {
>> -        spin_lock(&a->lock);
>> -        spin_lock(&b->lock);
>> +        write_lock(&a->lock);
>> +        write_lock(&b->lock);
>>      }
>>      else
>>      {
>> -        spin_lock(&b->lock);
>> -        spin_lock(&a->lock);
>> +        write_lock(&b->lock);
>> +        write_lock(&a->lock);
>>      }
>>  
>>      list_splice_init(&a->range_list, &tmp);
>>      list_splice_init(&b->range_list, &a->range_list);
>>      list_splice(&tmp, &b->range_list);
>>  
>> -    spin_unlock(&a->lock);
>> -    spin_unlock(&b->lock);
>> +    write_unlock(&a->lock);
>> +    write_unlock(&b->lock);
>>  }
>>  
>>  /*****************************
>> @@ -446,7 +446,7 @@ void rangeset_printk(
>>      int nr_printed = 0;
>>      struct range *x;
>>  
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>  
>>      printk("%-10s {", r->name);
>>  
>> @@ -465,7 +465,7 @@ void rangeset_printk(
>>  
>>      printk(" }");
>>  
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>  }
>>  
>>  void rangeset_domain_printk(
>> 
>> 
>> 
> 
>> switch rangeset's lock to rwlock
>> 
>> As a general library routine, it should behave as efficiently as
>> possible, even if at present no significant contention is known here.
>> 
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> With the widened use of rangesets I'd like to re-suggest this change
>> which I had posted already a couple of years back.
>> 
>> --- a/xen/common/rangeset.c
>> +++ b/xen/common/rangeset.c
>> @@ -28,7 +28,7 @@ struct rangeset {
>>  
>>      /* Number of ranges that can be allocated */
>>      long             nr_ranges;
>> -    spinlock_t       lock;
>> +    rwlock_t         lock;
>>  
>>      /* Pretty-printing name. */
>>      char             name[32];
>> @@ -120,7 +120,7 @@ int rangeset_add_range(
>>  
>>      ASSERT(s <= e);
>>  
>> -    spin_lock(&r->lock);
>> +    write_lock(&r->lock);
>>  
>>      x = find_range(r, s);
>>      y = find_range(r, e);
>> @@ -176,7 +176,7 @@ int rangeset_add_range(
>>      }
>>  
>>   out:
>> -    spin_unlock(&r->lock);
>> +    write_unlock(&r->lock);
>>      return rc;
>>  }
>>  
>> @@ -188,7 +188,7 @@ int rangeset_remove_range(
>>  
>>      ASSERT(s <= e);
>>  
>> -    spin_lock(&r->lock);
>> +    write_lock(&r->lock);
>>  
>>      x = find_range(r, s);
>>      y = find_range(r, e);
>> @@ -244,7 +244,7 @@ int rangeset_remove_range(
>>      }
>>  
>>   out:
>> -    spin_unlock(&r->lock);
>> +    write_unlock(&r->lock);
>>      return rc;
>>  }
>>  
>> @@ -256,10 +256,10 @@ int rangeset_contains_range(
>>  
>>      ASSERT(s <= e);
>>  
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>      x = find_range(r, s);
>>      contains = (x && (x->e >= e));
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>  
>>      return contains;
>>  }
>> @@ -272,10 +272,10 @@ int rangeset_overlaps_range(
>>  
>>      ASSERT(s <= e);
>>  
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>      x = find_range(r, e);
>>      overlaps = (x && (s <= x->e));
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>  
>>      return overlaps;
>>  }
>> @@ -287,13 +287,13 @@ int rangeset_report_ranges(
>>      struct range *x;
>>      int rc = 0;
>>  
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>  
>>      for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x) )
>>          if ( x->e >= s )
>>              rc = cb(max(x->s, s), min(x->e, e), ctxt);
>>  
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>  
>>      return rc;
>>  }
>> @@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
>>      if ( r == NULL )
>>          return NULL;
>>  
>> -    spin_lock_init(&r->lock);
>> +    rwlock_init(&r->lock);
>>      INIT_LIST_HEAD(&r->range_list);
>>      r->nr_ranges = -1;
>>  
>> @@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
>>  
>>      if ( a < b )
>>      {
>> -        spin_lock(&a->lock);
>> -        spin_lock(&b->lock);
>> +        write_lock(&a->lock);
>> +        write_lock(&b->lock);
>>      }
>>      else
>>      {
>> -        spin_lock(&b->lock);
>> -        spin_lock(&a->lock);
>> +        write_lock(&b->lock);
>> +        write_lock(&a->lock);
>>      }
>>  
>>      list_splice_init(&a->range_list, &tmp);
>>      list_splice_init(&b->range_list, &a->range_list);
>>      list_splice(&tmp, &b->range_list);
>>  
>> -    spin_unlock(&a->lock);
>> -    spin_unlock(&b->lock);
>> +    write_unlock(&a->lock);
>> +    write_unlock(&b->lock);
>>  }
>>  
>>  /*****************************
>> @@ -446,7 +446,7 @@ void rangeset_printk(
>>      int nr_printed = 0;
>>      struct range *x;
>>  
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>  
>>      printk("%-10s {", r->name);
>>  
>> @@ -465,7 +465,7 @@ void rangeset_printk(
>>  
>>      printk(" }");
>>  
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>  }
>>  
>>  void rangeset_domain_printk(
> 
>> _______________________________________________
>> Xen-devel mailing list
>> Xen-devel@lists.xen.org 
>> http://lists.xen.org/xen-devel 

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-30  8:50   ` Jan Beulich
@ 2014-09-30 12:01     ` Tim Deegan
  2014-09-30 20:53       ` Keir Fraser
  0 siblings, 1 reply; 22+ messages in thread
From: Tim Deegan @ 2014-09-30 12:01 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Ian Campbell, Keir Fraser, Ian Jackson, xen-devel

At 09:50 +0100 on 30 Sep (1412067005), Jan Beulich wrote:
> >>> On 19.09.14 at 18:32, <konrad.wilk@oracle.com> wrote:
> > On Fri, Sep 12, 2014 at 01:55:07PM +0100, Jan Beulich wrote:
> >> As a general library routine, it should behave as efficiently as
> >> possible, even if at present no significant contention is known here.
> >> 
> > 
> > Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> > 
> > I am comfortable with this going to Xen 4.5.
> 
> Anyone of you wanting to ack this then, or should I nevertheless
> postpone it until after 4.5?

If Konrad's happy I am too. :) 
Acked-by: Tim Deegan <tim@xen.org>

Tim.

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-30 12:01     ` Tim Deegan
@ 2014-09-30 20:53       ` Keir Fraser
  2014-10-01  8:57         ` Jan Beulich
  0 siblings, 1 reply; 22+ messages in thread
From: Keir Fraser @ 2014-09-30 20:53 UTC (permalink / raw)
  To: Tim Deegan; +Cc: Keir Fraser, Ian Jackson, Ian Campbell, Jan Beulich, xen-devel


[-- Attachment #1.1: Type: text/plain, Size: 20373 bytes --]

Do the searches ever get long enough that a read lock helps? If any of 
the rangesets is getting large and making the searches slow then it 
would be quite easy to switch from linked list to red-black tree?

I don't mind using a rwlock here though.
Acked-by: Keir Fraser <keir@xen.org>

  -- Keir
> Tim Deegan <mailto:tim@xen.org>
> 30 September 2014 13:01
>
> If Konrad's happy I am too. :)
> Acked-by: Tim Deegan <tim@xen.org>
>
> Tim.
>
> Jan Beulich <mailto:JBeulich@suse.com>
> 30 September 2014 09:50
>>>> On 19.09.14 at 18:32,<konrad.wilk@oracle.com>  wrote:
>> On Fri, Sep 12, 2014 at 01:55:07PM +0100, Jan Beulich wrote:
>>> As a general library routine, it should behave as efficiently as
>>> possible, even if at present no significant contention is known here.
>>>
>> Reviewed-by: Konrad Rzeszutek Wilk<konrad.wilk@oracle.com>
>>
>> I am comfortable with this going to Xen 4.5.
>
> Anyone of you wanting to ack this then, or should I nevertheless
> postpone it until after 4.5?
>
> Jan
>
>>> Signed-off-by: Jan Beulich<jbeulich@suse.com>
>>> ---
>>> With the widened use of rangesets I'd like to re-suggest this change
>>> which I had posted already a couple of years back.
>>>
>>> --- a/xen/common/rangeset.c
>>> +++ b/xen/common/rangeset.c
>>> @@ -28,7 +28,7 @@ struct rangeset {
>>>
>>>       /* Number of ranges that can be allocated */
>>>       long             nr_ranges;
>>> -    spinlock_t       lock;
>>> +    rwlock_t         lock;
>>>
>>>       /* Pretty-printing name. */
>>>       char             name[32];
>>> @@ -120,7 +120,7 @@ int rangeset_add_range(
>>>
>>>       ASSERT(s<= e);
>>>
>>> -    spin_lock(&r->lock);
>>> +    write_lock(&r->lock);
>>>
>>>       x = find_range(r, s);
>>>       y = find_range(r, e);
>>> @@ -176,7 +176,7 @@ int rangeset_add_range(
>>>       }
>>>
>>>    out:
>>> -    spin_unlock(&r->lock);
>>> +    write_unlock(&r->lock);
>>>       return rc;
>>>   }
>>>
>>> @@ -188,7 +188,7 @@ int rangeset_remove_range(
>>>
>>>       ASSERT(s<= e);
>>>
>>> -    spin_lock(&r->lock);
>>> +    write_lock(&r->lock);
>>>
>>>       x = find_range(r, s);
>>>       y = find_range(r, e);
>>> @@ -244,7 +244,7 @@ int rangeset_remove_range(
>>>       }
>>>
>>>    out:
>>> -    spin_unlock(&r->lock);
>>> +    write_unlock(&r->lock);
>>>       return rc;
>>>   }
>>>
>>> @@ -256,10 +256,10 @@ int rangeset_contains_range(
>>>
>>>       ASSERT(s<= e);
>>>
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>       x = find_range(r, s);
>>>       contains = (x&&  (x->e>= e));
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>>
>>>       return contains;
>>>   }
>>> @@ -272,10 +272,10 @@ int rangeset_overlaps_range(
>>>
>>>       ASSERT(s<= e);
>>>
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>       x = find_range(r, e);
>>>       overlaps = (x&&  (s<= x->e));
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>>
>>>       return overlaps;
>>>   }
>>> @@ -287,13 +287,13 @@ int rangeset_report_ranges(
>>>       struct range *x;
>>>       int rc = 0;
>>>
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>
>>>       for ( x = find_range(r, s); x&&  (x->s<= e)&&  !rc; x = next_range(r, x) )
>>>           if ( x->e>= s )
>>>               rc = cb(max(x->s, s), min(x->e, e), ctxt);
>>>
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>>
>>>       return rc;
>>>   }
>>> @@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
>>>       if ( r == NULL )
>>>           return NULL;
>>>
>>> -    spin_lock_init(&r->lock);
>>> +    rwlock_init(&r->lock);
>>>       INIT_LIST_HEAD(&r->range_list);
>>>       r->nr_ranges = -1;
>>>
>>> @@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
>>>
>>>       if ( a<  b )
>>>       {
>>> -        spin_lock(&a->lock);
>>> -        spin_lock(&b->lock);
>>> +        write_lock(&a->lock);
>>> +        write_lock(&b->lock);
>>>       }
>>>       else
>>>       {
>>> -        spin_lock(&b->lock);
>>> -        spin_lock(&a->lock);
>>> +        write_lock(&b->lock);
>>> +        write_lock(&a->lock);
>>>       }
>>>
>>>       list_splice_init(&a->range_list,&tmp);
>>>       list_splice_init(&b->range_list,&a->range_list);
>>>       list_splice(&tmp,&b->range_list);
>>>
>>> -    spin_unlock(&a->lock);
>>> -    spin_unlock(&b->lock);
>>> +    write_unlock(&a->lock);
>>> +    write_unlock(&b->lock);
>>>   }
>>>
>>>   /*****************************
>>> @@ -446,7 +446,7 @@ void rangeset_printk(
>>>       int nr_printed = 0;
>>>       struct range *x;
>>>
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>
>>>       printk("%-10s {", r->name);
>>>
>>> @@ -465,7 +465,7 @@ void rangeset_printk(
>>>
>>>       printk(" }");
>>>
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>>   }
>>>
>>>   void rangeset_domain_printk(
>>>
>>>
>>>
>>> switch rangeset's lock to rwlock
>>>
>>> As a general library routine, it should behave as efficiently as
>>> possible, even if at present no significant contention is known here.
>>>
>>> Signed-off-by: Jan Beulich<jbeulich@suse.com>
>>> ---
>>> With the widened use of rangesets I'd like to re-suggest this change
>>> which I had posted already a couple of years back.
>>>
>>> --- a/xen/common/rangeset.c
>>> +++ b/xen/common/rangeset.c
>>> @@ -28,7 +28,7 @@ struct rangeset {
>>>
>>>       /* Number of ranges that can be allocated */
>>>       long             nr_ranges;
>>> -    spinlock_t       lock;
>>> +    rwlock_t         lock;
>>>
>>>       /* Pretty-printing name. */
>>>       char             name[32];
>>> @@ -120,7 +120,7 @@ int rangeset_add_range(
>>>
>>>       ASSERT(s<= e);
>>>
>>> -    spin_lock(&r->lock);
>>> +    write_lock(&r->lock);
>>>
>>>       x = find_range(r, s);
>>>       y = find_range(r, e);
>>> @@ -176,7 +176,7 @@ int rangeset_add_range(
>>>       }
>>>
>>>    out:
>>> -    spin_unlock(&r->lock);
>>> +    write_unlock(&r->lock);
>>>       return rc;
>>>   }
>>>
>>> @@ -188,7 +188,7 @@ int rangeset_remove_range(
>>>
>>>       ASSERT(s<= e);
>>>
>>> -    spin_lock(&r->lock);
>>> +    write_lock(&r->lock);
>>>
>>>       x = find_range(r, s);
>>>       y = find_range(r, e);
>>> @@ -244,7 +244,7 @@ int rangeset_remove_range(
>>>       }
>>>
>>>    out:
>>> -    spin_unlock(&r->lock);
>>> +    write_unlock(&r->lock);
>>>       return rc;
>>>   }
>>>
>>> @@ -256,10 +256,10 @@ int rangeset_contains_range(
>>>
>>>       ASSERT(s<= e);
>>>
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>       x = find_range(r, s);
>>>       contains = (x&&  (x->e>= e));
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>>
>>>       return contains;
>>>   }
>>> @@ -272,10 +272,10 @@ int rangeset_overlaps_range(
>>>
>>>       ASSERT(s<= e);
>>>
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>       x = find_range(r, e);
>>>       overlaps = (x&&  (s<= x->e));
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>>
>>>       return overlaps;
>>>   }
>>> @@ -287,13 +287,13 @@ int rangeset_report_ranges(
>>>       struct range *x;
>>>       int rc = 0;
>>>
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>
>>>       for ( x = find_range(r, s); x&&  (x->s<= e)&&  !rc; x = next_range(r, x) )
>>>           if ( x->e>= s )
>>>               rc = cb(max(x->s, s), min(x->e, e), ctxt);
>>>
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>>
>>>       return rc;
>>>   }
>>> @@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
>>>       if ( r == NULL )
>>>           return NULL;
>>>
>>> -    spin_lock_init(&r->lock);
>>> +    rwlock_init(&r->lock);
>>>       INIT_LIST_HEAD(&r->range_list);
>>>       r->nr_ranges = -1;
>>>
>>> @@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
>>>
>>>       if ( a<  b )
>>>       {
>>> -        spin_lock(&a->lock);
>>> -        spin_lock(&b->lock);
>>> +        write_lock(&a->lock);
>>> +        write_lock(&b->lock);
>>>       }
>>>       else
>>>       {
>>> -        spin_lock(&b->lock);
>>> -        spin_lock(&a->lock);
>>> +        write_lock(&b->lock);
>>> +        write_lock(&a->lock);
>>>       }
>>>
>>>       list_splice_init(&a->range_list,&tmp);
>>>       list_splice_init(&b->range_list,&a->range_list);
>>>       list_splice(&tmp,&b->range_list);
>>>
>>> -    spin_unlock(&a->lock);
>>> -    spin_unlock(&b->lock);
>>> +    write_unlock(&a->lock);
>>> +    write_unlock(&b->lock);
>>>   }
>>>
>>>   /*****************************
>>> @@ -446,7 +446,7 @@ void rangeset_printk(
>>>       int nr_printed = 0;
>>>       struct range *x;
>>>
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>
>>>       printk("%-10s {", r->name);
>>>
>>> @@ -465,7 +465,7 @@ void rangeset_printk(
>>>
>>>       printk(" }");
>>>
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>>   }
>>>
>>>   void rangeset_domain_printk(
>>> _______________________________________________
>>> Xen-devel mailing list
>>> Xen-devel@lists.xen.org
>>> http://lists.xen.org/xen-devel
>
>
> Konrad Rzeszutek Wilk <mailto:konrad.wilk@oracle.com>
> 19 September 2014 17:32
> On Fri, Sep 12, 2014 at 01:55:07PM +0100, Jan Beulich wrote:
>> As a general library routine, it should behave as efficiently as
>> possible, even if at present no significant contention is known here.
>>
>
> Reviewed-by: Konrad Rzeszutek Wilk<konrad.wilk@oracle.com>
>
> I am comfortable with this going to Xen 4.5.
>
>> Signed-off-by: Jan Beulich<jbeulich@suse.com>
>> ---
>> With the widened use of rangesets I'd like to re-suggest this change
>> which I had posted already a couple of years back.
>>
>> --- a/xen/common/rangeset.c
>> +++ b/xen/common/rangeset.c
>> @@ -28,7 +28,7 @@ struct rangeset {
>>
>>       /* Number of ranges that can be allocated */
>>       long             nr_ranges;
>> -    spinlock_t       lock;
>> +    rwlock_t         lock;
>>
>>       /* Pretty-printing name. */
>>       char             name[32];
>> @@ -120,7 +120,7 @@ int rangeset_add_range(
>>
>>       ASSERT(s<= e);
>>
>> -    spin_lock(&r->lock);
>> +    write_lock(&r->lock);
>>
>>       x = find_range(r, s);
>>       y = find_range(r, e);
>> @@ -176,7 +176,7 @@ int rangeset_add_range(
>>       }
>>
>>    out:
>> -    spin_unlock(&r->lock);
>> +    write_unlock(&r->lock);
>>       return rc;
>>   }
>>
>> @@ -188,7 +188,7 @@ int rangeset_remove_range(
>>
>>       ASSERT(s<= e);
>>
>> -    spin_lock(&r->lock);
>> +    write_lock(&r->lock);
>>
>>       x = find_range(r, s);
>>       y = find_range(r, e);
>> @@ -244,7 +244,7 @@ int rangeset_remove_range(
>>       }
>>
>>    out:
>> -    spin_unlock(&r->lock);
>> +    write_unlock(&r->lock);
>>       return rc;
>>   }
>>
>> @@ -256,10 +256,10 @@ int rangeset_contains_range(
>>
>>       ASSERT(s<= e);
>>
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>       x = find_range(r, s);
>>       contains = (x&&  (x->e>= e));
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>
>>       return contains;
>>   }
>> @@ -272,10 +272,10 @@ int rangeset_overlaps_range(
>>
>>       ASSERT(s<= e);
>>
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>       x = find_range(r, e);
>>       overlaps = (x&&  (s<= x->e));
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>
>>       return overlaps;
>>   }
>> @@ -287,13 +287,13 @@ int rangeset_report_ranges(
>>       struct range *x;
>>       int rc = 0;
>>
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>
>>       for ( x = find_range(r, s); x&&  (x->s<= e)&&  !rc; x = next_range(r, x) )
>>           if ( x->e>= s )
>>               rc = cb(max(x->s, s), min(x->e, e), ctxt);
>>
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>
>>       return rc;
>>   }
>> @@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
>>       if ( r == NULL )
>>           return NULL;
>>
>> -    spin_lock_init(&r->lock);
>> +    rwlock_init(&r->lock);
>>       INIT_LIST_HEAD(&r->range_list);
>>       r->nr_ranges = -1;
>>
>> @@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
>>
>>       if ( a<  b )
>>       {
>> -        spin_lock(&a->lock);
>> -        spin_lock(&b->lock);
>> +        write_lock(&a->lock);
>> +        write_lock(&b->lock);
>>       }
>>       else
>>       {
>> -        spin_lock(&b->lock);
>> -        spin_lock(&a->lock);
>> +        write_lock(&b->lock);
>> +        write_lock(&a->lock);
>>       }
>>
>>       list_splice_init(&a->range_list,&tmp);
>>       list_splice_init(&b->range_list,&a->range_list);
>>       list_splice(&tmp,&b->range_list);
>>
>> -    spin_unlock(&a->lock);
>> -    spin_unlock(&b->lock);
>> +    write_unlock(&a->lock);
>> +    write_unlock(&b->lock);
>>   }
>>
>>   /*****************************
>> @@ -446,7 +446,7 @@ void rangeset_printk(
>>       int nr_printed = 0;
>>       struct range *x;
>>
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>
>>       printk("%-10s {", r->name);
>>
>> @@ -465,7 +465,7 @@ void rangeset_printk(
>>
>>       printk(" }");
>>
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>   }
>>
>>   void rangeset_domain_printk(
>>
>>
>>
>
>> switch rangeset's lock to rwlock
>>
>> As a general library routine, it should behave as efficiently as
>> possible, even if at present no significant contention is known here.
>>
>> Signed-off-by: Jan Beulich<jbeulich@suse.com>
>> ---
>> With the widened use of rangesets I'd like to re-suggest this change
>> which I had posted already a couple of years back.
>>
>> --- a/xen/common/rangeset.c
>> +++ b/xen/common/rangeset.c
>> @@ -28,7 +28,7 @@ struct rangeset {
>>
>>       /* Number of ranges that can be allocated */
>>       long             nr_ranges;
>> -    spinlock_t       lock;
>> +    rwlock_t         lock;
>>
>>       /* Pretty-printing name. */
>>       char             name[32];
>> @@ -120,7 +120,7 @@ int rangeset_add_range(
>>
>>       ASSERT(s<= e);
>>
>> -    spin_lock(&r->lock);
>> +    write_lock(&r->lock);
>>
>>       x = find_range(r, s);
>>       y = find_range(r, e);
>> @@ -176,7 +176,7 @@ int rangeset_add_range(
>>       }
>>
>>    out:
>> -    spin_unlock(&r->lock);
>> +    write_unlock(&r->lock);
>>       return rc;
>>   }
>>
>> @@ -188,7 +188,7 @@ int rangeset_remove_range(
>>
>>       ASSERT(s<= e);
>>
>> -    spin_lock(&r->lock);
>> +    write_lock(&r->lock);
>>
>>       x = find_range(r, s);
>>       y = find_range(r, e);
>> @@ -244,7 +244,7 @@ int rangeset_remove_range(
>>       }
>>
>>    out:
>> -    spin_unlock(&r->lock);
>> +    write_unlock(&r->lock);
>>       return rc;
>>   }
>>
>> @@ -256,10 +256,10 @@ int rangeset_contains_range(
>>
>>       ASSERT(s<= e);
>>
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>       x = find_range(r, s);
>>       contains = (x&&  (x->e>= e));
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>
>>       return contains;
>>   }
>> @@ -272,10 +272,10 @@ int rangeset_overlaps_range(
>>
>>       ASSERT(s<= e);
>>
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>       x = find_range(r, e);
>>       overlaps = (x&&  (s<= x->e));
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>
>>       return overlaps;
>>   }
>> @@ -287,13 +287,13 @@ int rangeset_report_ranges(
>>       struct range *x;
>>       int rc = 0;
>>
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>
>>       for ( x = find_range(r, s); x&&  (x->s<= e)&&  !rc; x = next_range(r, x) )
>>           if ( x->e>= s )
>>               rc = cb(max(x->s, s), min(x->e, e), ctxt);
>>
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>
>>       return rc;
>>   }
>> @@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
>>       if ( r == NULL )
>>           return NULL;
>>
>> -    spin_lock_init(&r->lock);
>> +    rwlock_init(&r->lock);
>>       INIT_LIST_HEAD(&r->range_list);
>>       r->nr_ranges = -1;
>>
>> @@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
>>
>>       if ( a<  b )
>>       {
>> -        spin_lock(&a->lock);
>> -        spin_lock(&b->lock);
>> +        write_lock(&a->lock);
>> +        write_lock(&b->lock);
>>       }
>>       else
>>       {
>> -        spin_lock(&b->lock);
>> -        spin_lock(&a->lock);
>> +        write_lock(&b->lock);
>> +        write_lock(&a->lock);
>>       }
>>
>>       list_splice_init(&a->range_list,&tmp);
>>       list_splice_init(&b->range_list,&a->range_list);
>>       list_splice(&tmp,&b->range_list);
>>
>> -    spin_unlock(&a->lock);
>> -    spin_unlock(&b->lock);
>> +    write_unlock(&a->lock);
>> +    write_unlock(&b->lock);
>>   }
>>
>>   /*****************************
>> @@ -446,7 +446,7 @@ void rangeset_printk(
>>       int nr_printed = 0;
>>       struct range *x;
>>
>> -    spin_lock(&r->lock);
>> +    read_lock(&r->lock);
>>
>>       printk("%-10s {", r->name);
>>
>> @@ -465,7 +465,7 @@ void rangeset_printk(
>>
>>       printk(" }");
>>
>> -    spin_unlock(&r->lock);
>> +    read_unlock(&r->lock);
>>   }
>>
>>   void rangeset_domain_printk(
>
>> _______________________________________________
>> Xen-devel mailing list
>> Xen-devel@lists.xen.org
>> http://lists.xen.org/xen-devel
>
> Jan Beulich <mailto:JBeulich@suse.com>
> 12 September 2014 13:55
> As a general library routine, it should behave as efficiently as
> possible, even if at present no significant contention is known here.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> With the widened use of rangesets I'd like to re-suggest this change
> which I had posted already a couple of years back.
>
> --- a/xen/common/rangeset.c
> +++ b/xen/common/rangeset.c
> @@ -28,7 +28,7 @@ struct rangeset {
>
> /* Number of ranges that can be allocated */
> long nr_ranges;
> - spinlock_t lock;
> + rwlock_t lock;
>
> /* Pretty-printing name. */
> char name[32];
> @@ -120,7 +120,7 @@ int rangeset_add_range(
>
> ASSERT(s <= e);
>
> - spin_lock(&r->lock);
> + write_lock(&r->lock);
>
> x = find_range(r, s);
> y = find_range(r, e);
> @@ -176,7 +176,7 @@ int rangeset_add_range(
> }
>
> out:
> - spin_unlock(&r->lock);
> + write_unlock(&r->lock);
> return rc;
> }
>
> @@ -188,7 +188,7 @@ int rangeset_remove_range(
>
> ASSERT(s <= e);
>
> - spin_lock(&r->lock);
> + write_lock(&r->lock);
>
> x = find_range(r, s);
> y = find_range(r, e);
> @@ -244,7 +244,7 @@ int rangeset_remove_range(
> }
>
> out:
> - spin_unlock(&r->lock);
> + write_unlock(&r->lock);
> return rc;
> }
>
> @@ -256,10 +256,10 @@ int rangeset_contains_range(
>
> ASSERT(s <= e);
>
> - spin_lock(&r->lock);
> + read_lock(&r->lock);
> x = find_range(r, s);
> contains = (x && (x->e >= e));
> - spin_unlock(&r->lock);
> + read_unlock(&r->lock);
>
> return contains;
> }
> @@ -272,10 +272,10 @@ int rangeset_overlaps_range(
>
> ASSERT(s <= e);
>
> - spin_lock(&r->lock);
> + read_lock(&r->lock);
> x = find_range(r, e);
> overlaps = (x && (s <= x->e));
> - spin_unlock(&r->lock);
> + read_unlock(&r->lock);
>
> return overlaps;
> }
> @@ -287,13 +287,13 @@ int rangeset_report_ranges(
> struct range *x;
> int rc = 0;
>
> - spin_lock(&r->lock);
> + read_lock(&r->lock);
>
> for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, 
> x) )
> if ( x->e >= s )
> rc = cb(max(x->s, s), min(x->e, e), ctxt);
>
> - spin_unlock(&r->lock);
> + read_unlock(&r->lock);
>
> return rc;
> }
> @@ -331,7 +331,7 @@ struct rangeset *rangeset_new(
> if ( r == NULL )
> return NULL;
>
> - spin_lock_init(&r->lock);
> + rwlock_init(&r->lock);
> INIT_LIST_HEAD(&r->range_list);
> r->nr_ranges = -1;
>
> @@ -414,21 +414,21 @@ void rangeset_swap(struct rangeset *a, s
>
> if ( a < b )
> {
> - spin_lock(&a->lock);
> - spin_lock(&b->lock);
> + write_lock(&a->lock);
> + write_lock(&b->lock);
> }
> else
> {
> - spin_lock(&b->lock);
> - spin_lock(&a->lock);
> + write_lock(&b->lock);
> + write_lock(&a->lock);
> }
>
> list_splice_init(&a->range_list, &tmp);
> list_splice_init(&b->range_list, &a->range_list);
> list_splice(&tmp, &b->range_list);
>
> - spin_unlock(&a->lock);
> - spin_unlock(&b->lock);
> + write_unlock(&a->lock);
> + write_unlock(&b->lock);
> }
>
> /*****************************
> @@ -446,7 +446,7 @@ void rangeset_printk(
> int nr_printed = 0;
> struct range *x;
>
> - spin_lock(&r->lock);
> + read_lock(&r->lock);
>
> printk("%-10s {", r->name);
>
> @@ -465,7 +465,7 @@ void rangeset_printk(
>
> printk(" }");
>
> - spin_unlock(&r->lock);
> + read_unlock(&r->lock);
> }
>
> void rangeset_domain_printk(
>
>
>

[-- Attachment #1.2.1: Type: text/html, Size: 26859 bytes --]

[-- Attachment #1.2.2: compose-unknown-contact.jpg --]
[-- Type: image/jpeg, Size: 770 bytes --]

[-- Attachment #2: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-09-30 20:53       ` Keir Fraser
@ 2014-10-01  8:57         ` Jan Beulich
  2014-10-01  9:31           ` Keir Fraser
  0 siblings, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2014-10-01  8:57 UTC (permalink / raw)
  To: Keir Fraser; +Cc: Keir Fraser, Ian Jackson, Tim Deegan, Ian Campbell, xen-devel

>>> On 30.09.14 at 22:53, <keir.xen@gmail.com> wrote:
> Do the searches ever get long enough that a read lock helps? If any of 
> the rangesets is getting large and making the searches slow then it 
> would be quite easy to switch from linked list to red-black tree?

As noted elsewhere, even very brief locking periods can cause
convoys for many-vCPU guests. One case where we observe
this is hvm_get_guest_time_fixed() (without clear route for
mitigation as converting to rw lock is not an option here, and I
didn't get around to try out whether eliminating the lock
altogether in favor of atomic CPU operations would make this
any better).

Jan

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2014-10-01  8:57         ` Jan Beulich
@ 2014-10-01  9:31           ` Keir Fraser
  0 siblings, 0 replies; 22+ messages in thread
From: Keir Fraser @ 2014-10-01  9:31 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Keir Fraser, Ian Jackson, Tim Deegan, Ian Campbell, xen-devel

Jan Beulich wrote:
>>>> On 30.09.14 at 22:53,<keir.xen@gmail.com>  wrote:
>> Do the searches ever get long enough that a read lock helps? If any of
>> the rangesets is getting large and making the searches slow then it
>> would be quite easy to switch from linked list to red-black tree?
>
> As noted elsewhere, even very brief locking periods can cause
> convoys for many-vCPU guests. One case where we observe
> this is hvm_get_guest_time_fixed() (without clear route for
> mitigation as converting to rw lock is not an option here, and I
> didn't get around to try out whether eliminating the lock
> altogether in favor of atomic CPU operations would make this
> any better).

Given how rarely most of these rangesets get updated, it would be nice 
to let the guest itself have lock-free access at the expense of having 
to pause it to make modifications. Although, does a guest ever modify 
any of its own rangesets? I'm not sure that ever happens.

  -- Keir

> Jan
>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2011-03-25 20:52     ` Keir Fraser
@ 2011-03-30 22:44       ` Jeremy Fitzhardinge
  0 siblings, 0 replies; 22+ messages in thread
From: Jeremy Fitzhardinge @ 2011-03-30 22:44 UTC (permalink / raw)
  To: Keir Fraser; +Cc: Dan Magenheimer, xen-devel, Jan Beulich

On 03/25/2011 01:52 PM, Keir Fraser wrote:
> On 25/03/2011 17:52, "Dan Magenheimer" <dan.magenheimer@oracle.com> wrote:
>
>> Tmem (in Xen) does use rwlocks.  After hearing from Jeremy that
>> Linux maintainers wouldn't approve of new users of rwlocks, I
>> redid the locking structure in the in-Linux-kernel version of tmem
>> to avoid using them.  I am fairly sure that the same approach used in
>> zcache can be used in Xen, but have not tried, and it's likely
>> to be a fairly big coding/testing effort that I can't undertake
>> right now.
>>
>> I am also fairly sure that the current Xen tmem locking structure
>> is not suitable for switching to normal spinlocks nor RCU,
>> but am far from an expert in this area.
> Why would a normal spinlock not work?

Should work; any rwlock use that can't be mechanically replaced with
plain spinlocks is buggy anyway.

    J

>  -- Keir
>
>> Dan
>>
>>> -----Original Message-----
>>> From: Keir Fraser [mailto:keir.xen@gmail.com]
>>> Sent: Friday, March 25, 2011 11:09 AM
>>> To: Jan Beulich; xen-devel@lists.xensource.com
>>> Subject: Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
>>>
>>> I'd rather get rid of rwlocks altogether and use RCU in any cases where
>>> we
>>> really have contention. Rwlocks don't help unless the read-side
>>> critical
>>> sections are large enough to amortise the cache ping-pong cost of the
>>> locking/unlocking operations. And in Xen we have very few if any
>>> significantly sized critical sections.
>>>
>>> I need to double check, but I believe we have only a couple of rwlock
>>> users
>>> now, and none of the read-side critical sections are large, so in that
>>> case
>>> I suggest we switch them to use spinlocks and kill our rwlock
>>> implementation.
>>>
>>>  -- Keir
>>>
>>> On 25/03/2011 16:49, "Jan Beulich" <JBeulich@novell.com> wrote:
>>>
>>>> As a general library routine, it should behave as efficiently as
>>>> possible, even if at present no significant contention is known here.
>>>>
>>>> Signed-off-by: Jan Beulich <jbeulich@novell.com>
>>>>
>>>> --- a/xen/common/rangeset.c
>>>> +++ b/xen/common/rangeset.c
>>>> @@ -25,7 +25,7 @@ struct rangeset {
>>>>
>>>>      /* Ordered list of ranges contained in this set, and protecting
>>> lock. */
>>>>      struct list_head range_list;
>>>> -    spinlock_t       lock;
>>>> +    rwlock_t         lock;
>>>>
>>>>      /* Pretty-printing name. */
>>>>      char             name[32];
>>>> @@ -103,7 +103,7 @@ int rangeset_add_range(
>>>>
>>>>      ASSERT(s <= e);
>>>>
>>>> -    spin_lock(&r->lock);
>>>> +    write_lock(&r->lock);
>>>>
>>>>      x = find_range(r, s);
>>>>      y = find_range(r, e);
>>>> @@ -159,7 +159,7 @@ int rangeset_add_range(
>>>>      }
>>>>
>>>>   out:
>>>> -    spin_unlock(&r->lock);
>>>> +    write_unlock(&r->lock);
>>>>      return rc;
>>>>  }
>>>>
>>>> @@ -175,7 +175,7 @@ int rangeset_remove_range(
>>>>
>>>>      ASSERT(s <= e);
>>>>
>>>> -    spin_lock(&r->lock);
>>>> +    write_lock(&r->lock);
>>>>
>>>>      x = find_range(r, s);
>>>>      y = find_range(r, e);
>>>> @@ -231,7 +231,7 @@ int rangeset_remove_range(
>>>>      }
>>>>
>>>>   out:
>>>> -    spin_unlock(&r->lock);
>>>> +    write_unlock(&r->lock);
>>>>      return rc;
>>>>  }
>>>>
>>>> @@ -243,10 +243,10 @@ int rangeset_contains_range(
>>>>
>>>>      ASSERT(s <= e);
>>>>
>>>> -    spin_lock(&r->lock);
>>>> +    read_lock(&r->lock);
>>>>      x = find_range(r, s);
>>>>      contains = (x && (x->e >= e));
>>>> -    spin_unlock(&r->lock);
>>>> +    read_unlock(&r->lock);
>>>>
>>>>      return contains;
>>>>  }
>>>> @@ -259,10 +259,10 @@ int rangeset_overlaps_range(
>>>>
>>>>      ASSERT(s <= e);
>>>>
>>>> -    spin_lock(&r->lock);
>>>> +    read_lock(&r->lock);
>>>>      x = find_range(r, e);
>>>>      overlaps = (x && (s <= x->e));
>>>> -    spin_unlock(&r->lock);
>>>> +    read_unlock(&r->lock);
>>>>
>>>>      return overlaps;
>>>>  }
>>>> @@ -274,13 +274,13 @@ int rangeset_report_ranges(
>>>>      struct range *x;
>>>>      int rc = 0;
>>>>
>>>> -    spin_lock(&r->lock);
>>>> +    read_lock(&r->lock);
>>>>
>>>>      for ( x = find_range(r, s); x && (x->s <= e) && !rc; x =
>>> next_range(r, x)
>>>> )
>>>>          if ( x->e >= s )
>>>>              rc = cb(max(x->s, s), min(x->e, e), ctxt);
>>>>
>>>> -    spin_unlock(&r->lock);
>>>> +    read_unlock(&r->lock);
>>>>
>>>>      return rc;
>>>>  }
>>>> @@ -318,7 +318,7 @@ struct rangeset *rangeset_new(
>>>>      if ( r == NULL )
>>>>          return NULL;
>>>>
>>>> -    spin_lock_init(&r->lock);
>>>> +    rwlock_init(&r->lock);
>>>>      INIT_LIST_HEAD(&r->range_list);
>>>>
>>>>      BUG_ON(flags & ~RANGESETF_prettyprint_hex);
>>>> @@ -403,7 +403,7 @@ void rangeset_printk(
>>>>      int nr_printed = 0;
>>>>      struct range *x;
>>>>
>>>> -    spin_lock(&r->lock);
>>>> +    read_lock(&r->lock);
>>>>
>>>>      printk("%-10s {", r->name);
>>>>
>>>> @@ -422,7 +422,7 @@ void rangeset_printk(
>>>>
>>>>      printk(" }");
>>>>
>>>> -    spin_unlock(&r->lock);
>>>> +    read_unlock(&r->lock);
>>>>  }
>>>>
>>>>  void rangeset_domain_printk(
>>>>
>>>>
>>>>
>>>> _______________________________________________
>>>> Xen-devel mailing list
>>>> Xen-devel@lists.xensource.com
>>>> http://lists.xensource.com/xen-devel
>>>
>>>
>>> _______________________________________________
>>> Xen-devel mailing list
>>> Xen-devel@lists.xensource.com
>>> http://lists.xensource.com/xen-devel
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel
>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2011-03-28  8:23   ` Jan Beulich
@ 2011-03-28  8:54     ` Keir Fraser
  0 siblings, 0 replies; 22+ messages in thread
From: Keir Fraser @ 2011-03-28  8:54 UTC (permalink / raw)
  To: Jan Beulich, xen-devel

On 28/03/2011 09:23, "Jan Beulich" <JBeulich@novell.com> wrote:

> As I said in the description, the rangeset code is of general library
> kind, and hence shouldn't make assumptions on the sort of data
> stored in rangesets. While I agree that in many cases the read
> side critical section would be small, there can be exceptions.

Because of rangeset_report_ranges()? Well, we could equally say that it is
not nice to be executing a callback in a spinlock critical section. For
example, the __copy_to_guest_offset() in the current sole callback handler
will be invalid for any guest using xenpaging (when that works properly)
since paging work could be done in there. Not a problem right now of course,
not least because the callback is executed only for dom0.

For that one iterator function I'm sure we could devise a method for
executing callbacks with no lock held, without resorting to RCU. Again, this
would be preferable to using rwlocks.

> Using RCU in this kind of a leaf, independent of almost anything
> else library routine would seem odd.

Callers wouldn't see the RCU-ness.

Not that I'm arguing to make rangesets use RCU, or avoid holding a lock
during report_ranges callbacks. I think concurrency bottlenecks in that code
are a non-issue.

> The only reason to stay with spinlocks was imo if you indeed
> wanted to knock off rwlocks altogether, which would now seem
> contrary to your c/s 23099:612171ff82ea.

Well they're only used now in code that is outside my personal areas of
interest. I'd prefer more sensible locking strategies to be used, but I'm
not going to remove code from Xen as an alternative, nor am I going to do
the work myself. So rwlocks remain in their current limited uses.

>> I need to double check, but I believe we have only a couple of rwlock users
>> now, and none of the read-side critical sections are large, so in that case
>> I suggest we switch them to use spinlocks and kill our rwlock
>> implementation.
> 
> Indeed, there's only tmem without the two patches I had sent. One
> of the reasons for putting them together was to actually have some
> *active* users of rwlocks again, so the code wouldn't have too good
> chances to bit-rot.
> 
> Further, I've taken note of a few other locks that may be
> candidates for conversion to rwlocks (pcidevs_lock being the
> most prominent example), requiring closer inspection and possibly
> code adjustments other than the mere lock kind conversion.

There'd be a bigger win in fragmenting pcidevs_lock, I'm sure. It looks like
it would be easy to do, it covers all sorts of stuff at the moment.

 -- Keir

> Jan
> 

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2011-03-25 17:08 ` Keir Fraser
  2011-03-25 17:52   ` Dan Magenheimer
@ 2011-03-28  8:23   ` Jan Beulich
  2011-03-28  8:54     ` Keir Fraser
  1 sibling, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2011-03-28  8:23 UTC (permalink / raw)
  To: Keir Fraser, xen-devel

>>> On 25.03.11 at 18:08, Keir Fraser <keir.xen@gmail.com> wrote:
> I'd rather get rid of rwlocks altogether and use RCU in any cases where we
> really have contention. Rwlocks don't help unless the read-side critical
> sections are large enough to amortise the cache ping-pong cost of the
> locking/unlocking operations. And in Xen we have very few if any
> significantly sized critical sections.

As I said in the description, the rangeset code is of general library
kind, and hence shouldn't make assumptions on the sort of data
stored in rangesets. While I agree that in many cases the read
side critical section would be small, there can be exceptions.

Using RCU in this kind of a leaf, independent of almost anything
else library routine would seem odd.

The only reason to stay with spinlocks was imo if you indeed
wanted to knock off rwlocks altogether, which would now seem
contrary to your c/s 23099:612171ff82ea.

> I need to double check, but I believe we have only a couple of rwlock users
> now, and none of the read-side critical sections are large, so in that case
> I suggest we switch them to use spinlocks and kill our rwlock
> implementation.

Indeed, there's only tmem without the two patches I had sent. One
of the reasons for putting them together was to actually have some
*active* users of rwlocks again, so the code wouldn't have too good
chances to bit-rot.

Further, I've taken note of a few other locks that may be
candidates for conversion to rwlocks (pcidevs_lock being the
most prominent example), requiring closer inspection and possibly
code adjustments other than the mere lock kind conversion.

Jan

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2011-03-25 17:52   ` Dan Magenheimer
@ 2011-03-25 20:52     ` Keir Fraser
  2011-03-30 22:44       ` Jeremy Fitzhardinge
  0 siblings, 1 reply; 22+ messages in thread
From: Keir Fraser @ 2011-03-25 20:52 UTC (permalink / raw)
  To: Dan Magenheimer, Jan Beulich, xen-devel

On 25/03/2011 17:52, "Dan Magenheimer" <dan.magenheimer@oracle.com> wrote:

> Tmem (in Xen) does use rwlocks.  After hearing from Jeremy that
> Linux maintainers wouldn't approve of new users of rwlocks, I
> redid the locking structure in the in-Linux-kernel version of tmem
> to avoid using them.  I am fairly sure that the same approach used in
> zcache can be used in Xen, but have not tried, and it's likely
> to be a fairly big coding/testing effort that I can't undertake
> right now.
> 
> I am also fairly sure that the current Xen tmem locking structure
> is not suitable for switching to normal spinlocks nor RCU,
> but am far from an expert in this area.

Why would a normal spinlock not work?

 -- Keir

> Dan
> 
>> -----Original Message-----
>> From: Keir Fraser [mailto:keir.xen@gmail.com]
>> Sent: Friday, March 25, 2011 11:09 AM
>> To: Jan Beulich; xen-devel@lists.xensource.com
>> Subject: Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
>> 
>> I'd rather get rid of rwlocks altogether and use RCU in any cases where
>> we
>> really have contention. Rwlocks don't help unless the read-side
>> critical
>> sections are large enough to amortise the cache ping-pong cost of the
>> locking/unlocking operations. And in Xen we have very few if any
>> significantly sized critical sections.
>> 
>> I need to double check, but I believe we have only a couple of rwlock
>> users
>> now, and none of the read-side critical sections are large, so in that
>> case
>> I suggest we switch them to use spinlocks and kill our rwlock
>> implementation.
>> 
>>  -- Keir
>> 
>> On 25/03/2011 16:49, "Jan Beulich" <JBeulich@novell.com> wrote:
>> 
>>> As a general library routine, it should behave as efficiently as
>>> possible, even if at present no significant contention is known here.
>>> 
>>> Signed-off-by: Jan Beulich <jbeulich@novell.com>
>>> 
>>> --- a/xen/common/rangeset.c
>>> +++ b/xen/common/rangeset.c
>>> @@ -25,7 +25,7 @@ struct rangeset {
>>> 
>>>      /* Ordered list of ranges contained in this set, and protecting
>> lock. */
>>>      struct list_head range_list;
>>> -    spinlock_t       lock;
>>> +    rwlock_t         lock;
>>> 
>>>      /* Pretty-printing name. */
>>>      char             name[32];
>>> @@ -103,7 +103,7 @@ int rangeset_add_range(
>>> 
>>>      ASSERT(s <= e);
>>> 
>>> -    spin_lock(&r->lock);
>>> +    write_lock(&r->lock);
>>> 
>>>      x = find_range(r, s);
>>>      y = find_range(r, e);
>>> @@ -159,7 +159,7 @@ int rangeset_add_range(
>>>      }
>>> 
>>>   out:
>>> -    spin_unlock(&r->lock);
>>> +    write_unlock(&r->lock);
>>>      return rc;
>>>  }
>>> 
>>> @@ -175,7 +175,7 @@ int rangeset_remove_range(
>>> 
>>>      ASSERT(s <= e);
>>> 
>>> -    spin_lock(&r->lock);
>>> +    write_lock(&r->lock);
>>> 
>>>      x = find_range(r, s);
>>>      y = find_range(r, e);
>>> @@ -231,7 +231,7 @@ int rangeset_remove_range(
>>>      }
>>> 
>>>   out:
>>> -    spin_unlock(&r->lock);
>>> +    write_unlock(&r->lock);
>>>      return rc;
>>>  }
>>> 
>>> @@ -243,10 +243,10 @@ int rangeset_contains_range(
>>> 
>>>      ASSERT(s <= e);
>>> 
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>      x = find_range(r, s);
>>>      contains = (x && (x->e >= e));
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>> 
>>>      return contains;
>>>  }
>>> @@ -259,10 +259,10 @@ int rangeset_overlaps_range(
>>> 
>>>      ASSERT(s <= e);
>>> 
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>>      x = find_range(r, e);
>>>      overlaps = (x && (s <= x->e));
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>> 
>>>      return overlaps;
>>>  }
>>> @@ -274,13 +274,13 @@ int rangeset_report_ranges(
>>>      struct range *x;
>>>      int rc = 0;
>>> 
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>> 
>>>      for ( x = find_range(r, s); x && (x->s <= e) && !rc; x =
>> next_range(r, x)
>>> )
>>>          if ( x->e >= s )
>>>              rc = cb(max(x->s, s), min(x->e, e), ctxt);
>>> 
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>> 
>>>      return rc;
>>>  }
>>> @@ -318,7 +318,7 @@ struct rangeset *rangeset_new(
>>>      if ( r == NULL )
>>>          return NULL;
>>> 
>>> -    spin_lock_init(&r->lock);
>>> +    rwlock_init(&r->lock);
>>>      INIT_LIST_HEAD(&r->range_list);
>>> 
>>>      BUG_ON(flags & ~RANGESETF_prettyprint_hex);
>>> @@ -403,7 +403,7 @@ void rangeset_printk(
>>>      int nr_printed = 0;
>>>      struct range *x;
>>> 
>>> -    spin_lock(&r->lock);
>>> +    read_lock(&r->lock);
>>> 
>>>      printk("%-10s {", r->name);
>>> 
>>> @@ -422,7 +422,7 @@ void rangeset_printk(
>>> 
>>>      printk(" }");
>>> 
>>> -    spin_unlock(&r->lock);
>>> +    read_unlock(&r->lock);
>>>  }
>>> 
>>>  void rangeset_domain_printk(
>>> 
>>> 
>>> 
>>> _______________________________________________
>>> Xen-devel mailing list
>>> Xen-devel@lists.xensource.com
>>> http://lists.xensource.com/xen-devel
>> 
>> 
>> 
>> _______________________________________________
>> Xen-devel mailing list
>> Xen-devel@lists.xensource.com
>> http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* RE: [PATCH] switch rangeset's lock to rwlock
  2011-03-25 17:08 ` Keir Fraser
@ 2011-03-25 17:52   ` Dan Magenheimer
  2011-03-25 20:52     ` Keir Fraser
  2011-03-28  8:23   ` Jan Beulich
  1 sibling, 1 reply; 22+ messages in thread
From: Dan Magenheimer @ 2011-03-25 17:52 UTC (permalink / raw)
  To: Keir Fraser, Jan Beulich, xen-devel

Tmem (in Xen) does use rwlocks.  After hearing from Jeremy that
Linux maintainers wouldn't approve of new users of rwlocks, I
redid the locking structure in the in-Linux-kernel version of tmem
to avoid using them.  I am fairly sure that the same approach used in
zcache can be used in Xen, but have not tried, and it's likely
to be a fairly big coding/testing effort that I can't undertake
right now.

I am also fairly sure that the current Xen tmem locking structure
is not suitable for switching to normal spinlocks nor RCU,
but am far from an expert in this area.

Dan

> -----Original Message-----
> From: Keir Fraser [mailto:keir.xen@gmail.com]
> Sent: Friday, March 25, 2011 11:09 AM
> To: Jan Beulich; xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
> 
> I'd rather get rid of rwlocks altogether and use RCU in any cases where
> we
> really have contention. Rwlocks don't help unless the read-side
> critical
> sections are large enough to amortise the cache ping-pong cost of the
> locking/unlocking operations. And in Xen we have very few if any
> significantly sized critical sections.
> 
> I need to double check, but I believe we have only a couple of rwlock
> users
> now, and none of the read-side critical sections are large, so in that
> case
> I suggest we switch them to use spinlocks and kill our rwlock
> implementation.
> 
>  -- Keir
> 
> On 25/03/2011 16:49, "Jan Beulich" <JBeulich@novell.com> wrote:
> 
> > As a general library routine, it should behave as efficiently as
> > possible, even if at present no significant contention is known here.
> >
> > Signed-off-by: Jan Beulich <jbeulich@novell.com>
> >
> > --- a/xen/common/rangeset.c
> > +++ b/xen/common/rangeset.c
> > @@ -25,7 +25,7 @@ struct rangeset {
> >
> >      /* Ordered list of ranges contained in this set, and protecting
> lock. */
> >      struct list_head range_list;
> > -    spinlock_t       lock;
> > +    rwlock_t         lock;
> >
> >      /* Pretty-printing name. */
> >      char             name[32];
> > @@ -103,7 +103,7 @@ int rangeset_add_range(
> >
> >      ASSERT(s <= e);
> >
> > -    spin_lock(&r->lock);
> > +    write_lock(&r->lock);
> >
> >      x = find_range(r, s);
> >      y = find_range(r, e);
> > @@ -159,7 +159,7 @@ int rangeset_add_range(
> >      }
> >
> >   out:
> > -    spin_unlock(&r->lock);
> > +    write_unlock(&r->lock);
> >      return rc;
> >  }
> >
> > @@ -175,7 +175,7 @@ int rangeset_remove_range(
> >
> >      ASSERT(s <= e);
> >
> > -    spin_lock(&r->lock);
> > +    write_lock(&r->lock);
> >
> >      x = find_range(r, s);
> >      y = find_range(r, e);
> > @@ -231,7 +231,7 @@ int rangeset_remove_range(
> >      }
> >
> >   out:
> > -    spin_unlock(&r->lock);
> > +    write_unlock(&r->lock);
> >      return rc;
> >  }
> >
> > @@ -243,10 +243,10 @@ int rangeset_contains_range(
> >
> >      ASSERT(s <= e);
> >
> > -    spin_lock(&r->lock);
> > +    read_lock(&r->lock);
> >      x = find_range(r, s);
> >      contains = (x && (x->e >= e));
> > -    spin_unlock(&r->lock);
> > +    read_unlock(&r->lock);
> >
> >      return contains;
> >  }
> > @@ -259,10 +259,10 @@ int rangeset_overlaps_range(
> >
> >      ASSERT(s <= e);
> >
> > -    spin_lock(&r->lock);
> > +    read_lock(&r->lock);
> >      x = find_range(r, e);
> >      overlaps = (x && (s <= x->e));
> > -    spin_unlock(&r->lock);
> > +    read_unlock(&r->lock);
> >
> >      return overlaps;
> >  }
> > @@ -274,13 +274,13 @@ int rangeset_report_ranges(
> >      struct range *x;
> >      int rc = 0;
> >
> > -    spin_lock(&r->lock);
> > +    read_lock(&r->lock);
> >
> >      for ( x = find_range(r, s); x && (x->s <= e) && !rc; x =
> next_range(r, x)
> > )
> >          if ( x->e >= s )
> >              rc = cb(max(x->s, s), min(x->e, e), ctxt);
> >
> > -    spin_unlock(&r->lock);
> > +    read_unlock(&r->lock);
> >
> >      return rc;
> >  }
> > @@ -318,7 +318,7 @@ struct rangeset *rangeset_new(
> >      if ( r == NULL )
> >          return NULL;
> >
> > -    spin_lock_init(&r->lock);
> > +    rwlock_init(&r->lock);
> >      INIT_LIST_HEAD(&r->range_list);
> >
> >      BUG_ON(flags & ~RANGESETF_prettyprint_hex);
> > @@ -403,7 +403,7 @@ void rangeset_printk(
> >      int nr_printed = 0;
> >      struct range *x;
> >
> > -    spin_lock(&r->lock);
> > +    read_lock(&r->lock);
> >
> >      printk("%-10s {", r->name);
> >
> > @@ -422,7 +422,7 @@ void rangeset_printk(
> >
> >      printk(" }");
> >
> > -    spin_unlock(&r->lock);
> > +    read_unlock(&r->lock);
> >  }
> >
> >  void rangeset_domain_printk(
> >
> >
> >
> > _______________________________________________
> > Xen-devel mailing list
> > Xen-devel@lists.xensource.com
> > http://lists.xensource.com/xen-devel
> 
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] switch rangeset's lock to rwlock
  2011-03-25 16:49 Jan Beulich
@ 2011-03-25 17:08 ` Keir Fraser
  2011-03-25 17:52   ` Dan Magenheimer
  2011-03-28  8:23   ` Jan Beulich
  0 siblings, 2 replies; 22+ messages in thread
From: Keir Fraser @ 2011-03-25 17:08 UTC (permalink / raw)
  To: Jan Beulich, xen-devel

I'd rather get rid of rwlocks altogether and use RCU in any cases where we
really have contention. Rwlocks don't help unless the read-side critical
sections are large enough to amortise the cache ping-pong cost of the
locking/unlocking operations. And in Xen we have very few if any
significantly sized critical sections.

I need to double check, but I believe we have only a couple of rwlock users
now, and none of the read-side critical sections are large, so in that case
I suggest we switch them to use spinlocks and kill our rwlock
implementation.

 -- Keir

On 25/03/2011 16:49, "Jan Beulich" <JBeulich@novell.com> wrote:

> As a general library routine, it should behave as efficiently as
> possible, even if at present no significant contention is known here.
> 
> Signed-off-by: Jan Beulich <jbeulich@novell.com>
> 
> --- a/xen/common/rangeset.c
> +++ b/xen/common/rangeset.c
> @@ -25,7 +25,7 @@ struct rangeset {
>  
>      /* Ordered list of ranges contained in this set, and protecting lock. */
>      struct list_head range_list;
> -    spinlock_t       lock;
> +    rwlock_t         lock;
>  
>      /* Pretty-printing name. */
>      char             name[32];
> @@ -103,7 +103,7 @@ int rangeset_add_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    write_lock(&r->lock);
>  
>      x = find_range(r, s);
>      y = find_range(r, e);
> @@ -159,7 +159,7 @@ int rangeset_add_range(
>      }
>  
>   out:
> -    spin_unlock(&r->lock);
> +    write_unlock(&r->lock);
>      return rc;
>  }
>  
> @@ -175,7 +175,7 @@ int rangeset_remove_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    write_lock(&r->lock);
>  
>      x = find_range(r, s);
>      y = find_range(r, e);
> @@ -231,7 +231,7 @@ int rangeset_remove_range(
>      }
>  
>   out:
> -    spin_unlock(&r->lock);
> +    write_unlock(&r->lock);
>      return rc;
>  }
>  
> @@ -243,10 +243,10 @@ int rangeset_contains_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>      x = find_range(r, s);
>      contains = (x && (x->e >= e));
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  
>      return contains;
>  }
> @@ -259,10 +259,10 @@ int rangeset_overlaps_range(
>  
>      ASSERT(s <= e);
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>      x = find_range(r, e);
>      overlaps = (x && (s <= x->e));
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  
>      return overlaps;
>  }
> @@ -274,13 +274,13 @@ int rangeset_report_ranges(
>      struct range *x;
>      int rc = 0;
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>  
>      for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x)
> )
>          if ( x->e >= s )
>              rc = cb(max(x->s, s), min(x->e, e), ctxt);
>  
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  
>      return rc;
>  }
> @@ -318,7 +318,7 @@ struct rangeset *rangeset_new(
>      if ( r == NULL )
>          return NULL;
>  
> -    spin_lock_init(&r->lock);
> +    rwlock_init(&r->lock);
>      INIT_LIST_HEAD(&r->range_list);
>  
>      BUG_ON(flags & ~RANGESETF_prettyprint_hex);
> @@ -403,7 +403,7 @@ void rangeset_printk(
>      int nr_printed = 0;
>      struct range *x;
>  
> -    spin_lock(&r->lock);
> +    read_lock(&r->lock);
>  
>      printk("%-10s {", r->name);
>  
> @@ -422,7 +422,7 @@ void rangeset_printk(
>  
>      printk(" }");
>  
> -    spin_unlock(&r->lock);
> +    read_unlock(&r->lock);
>  }
>  
>  void rangeset_domain_printk(
> 
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH] switch rangeset's lock to rwlock
@ 2011-03-25 16:49 Jan Beulich
  2011-03-25 17:08 ` Keir Fraser
  0 siblings, 1 reply; 22+ messages in thread
From: Jan Beulich @ 2011-03-25 16:49 UTC (permalink / raw)
  To: xen-devel

[-- Attachment #1: Type: text/plain, Size: 2795 bytes --]

As a general library routine, it should behave as efficiently as
possible, even if at present no significant contention is known here.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

--- a/xen/common/rangeset.c
+++ b/xen/common/rangeset.c
@@ -25,7 +25,7 @@ struct rangeset {
 
     /* Ordered list of ranges contained in this set, and protecting lock. */
     struct list_head range_list;
-    spinlock_t       lock;
+    rwlock_t         lock;
 
     /* Pretty-printing name. */
     char             name[32];
@@ -103,7 +103,7 @@ int rangeset_add_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -159,7 +159,7 @@ int rangeset_add_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -175,7 +175,7 @@ int rangeset_remove_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -231,7 +231,7 @@ int rangeset_remove_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -243,10 +243,10 @@ int rangeset_contains_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, s);
     contains = (x && (x->e >= e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return contains;
 }
@@ -259,10 +259,10 @@ int rangeset_overlaps_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, e);
     overlaps = (x && (s <= x->e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return overlaps;
 }
@@ -274,13 +274,13 @@ int rangeset_report_ranges(
     struct range *x;
     int rc = 0;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x) )
         if ( x->e >= s )
             rc = cb(max(x->s, s), min(x->e, e), ctxt);
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return rc;
 }
@@ -318,7 +318,7 @@ struct rangeset *rangeset_new(
     if ( r == NULL )
         return NULL;
 
-    spin_lock_init(&r->lock);
+    rwlock_init(&r->lock);
     INIT_LIST_HEAD(&r->range_list);
 
     BUG_ON(flags & ~RANGESETF_prettyprint_hex);
@@ -403,7 +403,7 @@ void rangeset_printk(
     int nr_printed = 0;
     struct range *x;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     printk("%-10s {", r->name);
 
@@ -422,7 +422,7 @@ void rangeset_printk(
 
     printk(" }");
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 }
 
 void rangeset_domain_printk(




[-- Attachment #2: rangesets-rwlock.patch --]
[-- Type: text/plain, Size: 2789 bytes --]

As a general library routine, it should behave as efficiently as
possible, even if at present no significant contention is known here.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

--- a/xen/common/rangeset.c
+++ b/xen/common/rangeset.c
@@ -25,7 +25,7 @@ struct rangeset {
 
     /* Ordered list of ranges contained in this set, and protecting lock. */
     struct list_head range_list;
-    spinlock_t       lock;
+    rwlock_t         lock;
 
     /* Pretty-printing name. */
     char             name[32];
@@ -103,7 +103,7 @@ int rangeset_add_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -159,7 +159,7 @@ int rangeset_add_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -175,7 +175,7 @@ int rangeset_remove_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -231,7 +231,7 @@ int rangeset_remove_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -243,10 +243,10 @@ int rangeset_contains_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, s);
     contains = (x && (x->e >= e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return contains;
 }
@@ -259,10 +259,10 @@ int rangeset_overlaps_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, e);
     overlaps = (x && (s <= x->e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return overlaps;
 }
@@ -274,13 +274,13 @@ int rangeset_report_ranges(
     struct range *x;
     int rc = 0;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x) )
         if ( x->e >= s )
             rc = cb(max(x->s, s), min(x->e, e), ctxt);
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return rc;
 }
@@ -318,7 +318,7 @@ struct rangeset *rangeset_new(
     if ( r == NULL )
         return NULL;
 
-    spin_lock_init(&r->lock);
+    rwlock_init(&r->lock);
     INIT_LIST_HEAD(&r->range_list);
 
     BUG_ON(flags & ~RANGESETF_prettyprint_hex);
@@ -403,7 +403,7 @@ void rangeset_printk(
     int nr_printed = 0;
     struct range *x;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     printk("%-10s {", r->name);
 
@@ -422,7 +422,7 @@ void rangeset_printk(
 
     printk(" }");
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 }
 
 void rangeset_domain_printk(

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2014-10-01  9:31 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-09-12 12:55 [PATCH] switch rangeset's lock to rwlock Jan Beulich
2014-09-18 10:43 ` Tim Deegan
2014-09-18 12:15   ` Jan Beulich
2014-09-18 13:02     ` Tim Deegan
2014-09-18 13:32       ` Jan Beulich
2014-09-18 14:52         ` Paul Durrant
2014-09-19 16:33         ` Konrad Rzeszutek Wilk
2014-09-22  9:42         ` Ian Campbell
2014-09-22 10:34           ` Jan Beulich
2014-09-19 16:32 ` Konrad Rzeszutek Wilk
2014-09-30  8:50   ` Jan Beulich
2014-09-30 12:01     ` Tim Deegan
2014-09-30 20:53       ` Keir Fraser
2014-10-01  8:57         ` Jan Beulich
2014-10-01  9:31           ` Keir Fraser
  -- strict thread matches above, loose matches on Subject: below --
2011-03-25 16:49 Jan Beulich
2011-03-25 17:08 ` Keir Fraser
2011-03-25 17:52   ` Dan Magenheimer
2011-03-25 20:52     ` Keir Fraser
2011-03-30 22:44       ` Jeremy Fitzhardinge
2011-03-28  8:23   ` Jan Beulich
2011-03-28  8:54     ` Keir Fraser

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.