On 20.06.19 03:03, John Snow wrote:
> Signed-off-by: John Snow
> ---
>  util/hbitmap.c | 22 +++++++++++++++++++++-
>  1 file changed, 21 insertions(+), 1 deletion(-)
> 
> diff --git a/util/hbitmap.c b/util/hbitmap.c
> index 45d1725daf..0d6724b7bc 100644
> --- a/util/hbitmap.c
> +++ b/util/hbitmap.c
> @@ -777,7 +777,17 @@ void hbitmap_truncate(HBitmap *hb, uint64_t size)
>  
>  bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
>  {
> -    return (a->size == b->size) && (a->granularity == b->granularity);
> +    return (a->size == b->size);
> +}
> +
> +static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
> +{
> +    uint64_t offset = 0;
> +    uint64_t count = src->orig_size;
> +
> +    while (hbitmap_next_dirty_area(src, &offset, &count)) {
> +        hbitmap_set(dst, offset, count);
> +    }
>  }
>  
>  /**
> @@ -804,6 +814,16 @@ bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
>          return true;
>      }
>  
> +    if (a->size != b->size) {

Don’t you mean s/size/granularity/?

Right now, this is dead code, which leads me to ask for a test.
(Well, no, I would’ve asked anyway.)

Max

> +        if (a != result) {
> +            hbitmap_sparse_merge(result, a);
> +        }
> +        if (b != result) {
> +            hbitmap_sparse_merge(result, b);
> +        }
> +        return true;
> +    }
> +
>      /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
>       * It may be possible to improve running times for sparsely populated maps
>       * by using hbitmap_iter_next, but this is suboptimal for dense maps.
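
For reference, a minimal sketch (not the posted patch) of what the suggested
s/size/granularity/ substitution would look like in hbitmap_merge(), assuming
hbitmap_can_merge() already guarantees a->size == b->size so that only the
granularities can differ at this point:

    /* Sketch only: take the sparse path when the granularities differ,
     * because the word-by-word fast merge further down assumes both
     * bitmaps have an identical layout. */
    if (a->granularity != b->granularity) {
        if (a != result) {
            hbitmap_sparse_merge(result, a);
        }
        if (b != result) {
            hbitmap_sparse_merge(result, b);
        }
        return true;
    }

With that condition the new branch becomes reachable for same-size bitmaps of
different granularity, which is presumably what dropping the granularity check
from hbitmap_can_merge() was meant to enable.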