* [PATCH v4 0/22] pack bitmaps
@ 2013-12-21 13:56 Jeff King
  2013-12-21 13:59 ` [PATCH v4 01/23] sha1write: make buffer const-correct Jeff King
                   ` (25 more replies)
  0 siblings, 26 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:56 UTC (permalink / raw)
  To: git

Here's the v4 re-roll of the pack bitmap series.

The changes from v3 are:

 - reworked the add_object_entry refactoring (see patch 11, which is
   new, and patch 12, which builds on it in a more natural way)

 - better error/die reporting from write_reused_pack

 - added Ramsay's PRIx64 compat fix

 - fixed a use-after-free in the warning message of open_pack_bitmap_1

 - minor typo/thinko fixes from Thomas in docs and tests

Interdiff is below.

  [01/23]: sha1write: make buffer const-correct
  [02/23]: revindex: Export new APIs
  [03/23]: pack-objects: Refactor the packing list
  [04/23]: pack-objects: factor out name_hash
  [05/23]: revision: allow setting custom limiter function
  [06/23]: sha1_file: export `git_open_noatime`
  [07/23]: compat: add endianness helpers
  [08/23]: ewah: compressed bitmap implementation
  [09/23]: documentation: add documentation for the bitmap format
  [10/23]: pack-bitmap: add support for bitmap indexes
  [11/23]: pack-objects: split add_object_entry
  [12/23]: pack-objects: use bitmaps when packing objects
  [13/23]: rev-list: add bitmap mode to speed up object lists
  [14/23]: pack-objects: implement bitmap writing
  [15/23]: repack: stop using magic number for ARRAY_SIZE(exts)
  [16/23]: repack: turn exts array into array-of-struct
  [17/23]: repack: handle optional files created by pack-objects
  [18/23]: repack: consider bitmaps when performing repacks
  [19/23]: count-objects: recognize .bitmap in garbage-checking
  [20/23]: t: add basic bitmap functionality tests
  [21/23]: t/perf: add tests for pack bitmaps
  [22/23]: pack-bitmap: implement optional name_hash cache
  [23/23]: compat/mingw.h: Fix the MinGW and msvc builds

---
diff --git a/Documentation/config.txt b/Documentation/config.txt
index e6d3922..499a3c4 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -1866,7 +1866,7 @@ pack.useBitmaps::
 
 pack.writebitmaps::
 	When true, git will write a bitmap index when packing all
-	objects to disk (e.g., as when `git repack -a` is run).  This
+	objects to disk (e.g., when `git repack -a` is run).  This
 	index can speed up the "counting objects" phase of subsequent
 	packs created for clones and fetches, at the cost of some disk
 	space and extra time spent on the initial repack.  Defaults to
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 4504789..fd74197 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -72,11 +72,6 @@ static unsigned long cache_max_small_delta_size = 1000;
 
 static unsigned long window_memory_limit = 0;
 
-enum {
-	OBJECT_ENTRY_EXCLUDE = (1 << 0),
-	OBJECT_ENTRY_NO_TRY_DELTA = (1 << 1)
-};
-
 /*
  * stats
  */
@@ -712,21 +707,20 @@ static struct object_entry **compute_write_order(void)
 
 static off_t write_reused_pack(struct sha1file *f)
 {
-	uint8_t buffer[8192];
+	unsigned char buffer[8192];
 	off_t to_write;
 	int fd;
 
 	if (!is_pack_valid(reuse_packfile))
-		return 0;
+		die("packfile is invalid: %s", reuse_packfile->pack_name);
 
 	fd = git_open_noatime(reuse_packfile->pack_name);
 	if (fd < 0)
-		return 0;
+		die_errno("unable to open packfile for reuse: %s",
+			  reuse_packfile->pack_name);
 
-	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1) {
-		close(fd);
-		return 0;
-	}
+	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
+		die_errno("unable to seek in reused packfile");
 
 	if (reuse_packfile_offset < 0)
 		reuse_packfile_offset = reuse_packfile->pack_size - 20;
@@ -736,10 +730,8 @@ static off_t write_reused_pack(struct sha1file *f)
 	while (to_write) {
 		int read_pack = xread(fd, buffer, sizeof(buffer));
 
-		if (read_pack <= 0) {
-			close(fd);
-			return 0;
-		}
+		if (read_pack <= 0)
+			die_errno("unable to read from reused packfile");
 
 		if (read_pack > to_write)
 			read_pack = to_write;
@@ -785,9 +777,6 @@ static void write_pack_file(void)
 			assert(pack_to_stdout);
 
 			packfile_size = write_reused_pack(f);
-			if (!packfile_size)
-				die_errno("failed to re-use existing pack");
-
 			offset += packfile_size;
 		}
 
@@ -909,86 +898,143 @@ static int no_try_delta(const char *path)
 	return 0;
 }
 
-static int add_object_entry_1(const unsigned char *sha1, enum object_type type,
-			      int flags, uint32_t name_hash,
-			      struct packed_git *found_pack, off_t found_offset)
+/*
+ * When adding an object, check whether we have already added it
+ * to our packing list. If so, we can skip. However, if we are
+ * being asked to exclude it, but the previous mention was to include
+ * it, make sure to adjust its flags and tweak our numbers accordingly.
+ *
+ * As an optimization, we pass out the index position where we would have
+ * found the item, since that saves us from having to look it up again a
+ * few lines later when we want to add the new entry.
+ */
+static int have_duplicate_entry(const unsigned char *sha1,
+				int exclude,
+				uint32_t *index_pos)
 {
 	struct object_entry *entry;
-	struct packed_git *p;
-	uint32_t index_pos;
-	int exclude = (flags & OBJECT_ENTRY_EXCLUDE);
-
-	entry = packlist_find(&to_pack, sha1, &index_pos);
-	if (entry) {
-		if (exclude) {
-			if (!entry->preferred_base)
-				nr_result--;
-			entry->preferred_base = 1;
-		}
+
+	entry = packlist_find(&to_pack, sha1, index_pos);
+	if (!entry)
 		return 0;
+
+	if (exclude) {
+		if (!entry->preferred_base)
+			nr_result--;
+		entry->preferred_base = 1;
 	}
 
+	return 1;
+}
+
+/*
+ * Check whether we want the object in the pack (e.g., we do not want
+ * objects found in non-local stores if the "--local" option was used).
+ *
+ * As a side effect of this check, we will find the packed version of this
+ * object, if any. We therefore pass out the pack information to avoid having
+ * to look it up again later.
+ */
+static int want_object_in_pack(const unsigned char *sha1,
+			       int exclude,
+			       struct packed_git **found_pack,
+			       off_t *found_offset)
+{
+	struct packed_git *p;
+
 	if (!exclude && local && has_loose_object_nonlocal(sha1))
 		return 0;
 
-	if (!found_pack) {
-		for (p = packed_git; p; p = p->next) {
-			off_t offset = find_pack_entry_one(sha1, p);
-			if (offset) {
-				if (!found_pack) {
-					if (!is_pack_valid(p)) {
-						warning("packfile %s cannot be accessed", p->pack_name);
-						continue;
-					}
-					found_offset = offset;
-					found_pack = p;
+	*found_pack = NULL;
+	*found_offset = 0;
+
+	for (p = packed_git; p; p = p->next) {
+		off_t offset = find_pack_entry_one(sha1, p);
+		if (offset) {
+			if (!*found_pack) {
+				if (!is_pack_valid(p)) {
+					warning("packfile %s cannot be accessed", p->pack_name);
+					continue;
 				}
-				if (exclude)
-					break;
-				if (incremental)
-					return 0;
-				if (local && !p->pack_local)
-					return 0;
-				if (ignore_packed_keep && p->pack_local && p->pack_keep)
-					return 0;
+				*found_offset = offset;
+				*found_pack = p;
 			}
+			if (exclude)
+				return 1;
+			if (incremental)
+				return 0;
+			if (local && !p->pack_local)
+				return 0;
+			if (ignore_packed_keep && p->pack_local && p->pack_keep)
+				return 0;
 		}
 	}
 
+	return 1;
+}
+
+static void create_object_entry(const unsigned char *sha1,
+				enum object_type type,
+				uint32_t hash,
+				int exclude,
+				int no_try_delta,
+				uint32_t index_pos,
+				struct packed_git *found_pack,
+				off_t found_offset)
+{
+	struct object_entry *entry;
+
 	entry = packlist_alloc(&to_pack, sha1, index_pos);
-	entry->hash = name_hash;
+	entry->hash = hash;
 	if (type)
 		entry->type = type;
 	if (exclude)
 		entry->preferred_base = 1;
 	else
 		nr_result++;
-
-	if (flags & OBJECT_ENTRY_NO_TRY_DELTA)
-		entry->no_try_delta = 1;
-
 	if (found_pack) {
 		entry->in_pack = found_pack;
 		entry->in_pack_offset = found_offset;
 	}
 
-	display_progress(progress_state, to_pack.nr_objects);
-
-	return 1;
+	entry->no_try_delta = no_try_delta;
 }
 
 static int add_object_entry(const unsigned char *sha1, enum object_type type,
 			    const char *name, int exclude)
 {
-	int flags = 0;
+	struct packed_git *found_pack;
+	off_t found_offset;
+	uint32_t index_pos;
 
-	if (exclude)
-		flags |= OBJECT_ENTRY_EXCLUDE;
+	if (have_duplicate_entry(sha1, exclude, &index_pos))
+		return 0;
+
+	if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset))
+		return 0;
+
+	create_object_entry(sha1, type, pack_name_hash(name),
+			    exclude, name && no_try_delta(name),
+			    index_pos, found_pack, found_offset);
 
-	if (name && no_try_delta(name))
-		flags |= OBJECT_ENTRY_NO_TRY_DELTA;
+	display_progress(progress_state, to_pack.nr_objects);
+	return 1;
+}
 
-	return add_object_entry_1(sha1, type, flags, pack_name_hash(name), NULL, 0);
+static int add_object_entry_from_bitmap(const unsigned char *sha1,
+					enum object_type type,
+					int flags, uint32_t name_hash,
+					struct packed_git *pack, off_t offset)
+{
+	uint32_t index_pos;
+
+	if (have_duplicate_entry(sha1, 0, &index_pos))
+		return 0;
+
+	create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);
+
+	display_progress(progress_state, to_pack.nr_objects);
+	return 1;
 }
 
 struct pbase_tree_cache {
@@ -2397,7 +2443,7 @@ static int get_object_list_from_bitmap(struct rev_info *revs)
 		}
 	}
 
-	traverse_bitmap_commit_list(&add_object_entry_1);
+	traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
 	return 0;
 }
 
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 078f7c6..ae0b57b 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -266,7 +266,7 @@ static int open_pack_bitmap_1(struct packed_git *packfile)
 	}
 
 	if (bitmap_git.pack) {
-		warning("ignoring extra bitmap file: %s", idx_name);
+		warning("ignoring extra bitmap file: %s", packfile->pack_name);
 		close(fd);
 		return -1;
 	}
diff --git a/pack-bitmap.h b/pack-bitmap.h
index e4e1a57..8b7f4e9 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -19,7 +19,7 @@ struct bitmap_disk_header {
 	unsigned char checksum[20];
 };
 
-static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};;
+static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};
 
 #define NEEDS_BITMAP (1u<<22)
 
diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh
index 8811fc4..685d46f 100755
--- a/t/perf/p5310-pack-bitmaps.sh
+++ b/t/perf/p5310-pack-bitmaps.sh
@@ -22,7 +22,7 @@ test_perf 'simulated clone' '
 '
 
 test_perf 'simulated fetch' '
-	have=$(git rev-list HEAD --until=1.week.ago -1) &&
+	have=$(git rev-list HEAD~100 -1) &&
 	{
 		echo HEAD &&
 		echo ^$have
@@ -31,7 +31,7 @@ test_perf 'simulated fetch' '
 
 test_expect_success 'create partial bitmap state' '
 	# pick a commit to represent the repo tip in the past
-	cutoff=$(git rev-list HEAD --until=1.week.ago -1) &&
+	cutoff=$(git rev-list HEAD~100 -1) &&
 	orig_tip=$(git rev-parse HEAD) &&
 
 	# now kill off all of the refs and pretend we had
diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
index 2c2632f..d3a3afa 100755
--- a/t/t5310-pack-bitmaps.sh
+++ b/t/t5310-pack-bitmaps.sh
@@ -127,9 +127,9 @@ test_expect_success JGIT 'we can read jgit bitmaps' '
 '
 
 test_expect_success JGIT 'jgit can read our bitmaps' '
-	git clone . compat-us.git &&
+	git clone . compat-us &&
 	(
-		cd compat-us.git &&
+		cd compat-us &&
 		git repack -adb &&
 		# jgit gc will barf if it does not like our bitmaps
 		jgit gc

* [PATCH v4 01/23] sha1write: make buffer const-correct
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
@ 2013-12-21 13:59 ` Jeff King
  2013-12-22  9:06   ` Christian Couder
  2013-12-21 13:59 ` [PATCH v4 02/23] revindex: Export new APIs Jeff King
                   ` (24 subsequent siblings)
  25 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:59 UTC (permalink / raw)
  To: git

We are passed a "void *" and write it out without ever
touching it; let's indicate that by using "const".

Signed-off-by: Jeff King <peff@peff.net>
---
 csum-file.c | 6 +++---
 csum-file.h | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/csum-file.c b/csum-file.c
index 53f5375..465971c 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -11,7 +11,7 @@
 #include "progress.h"
 #include "csum-file.h"
 
-static void flush(struct sha1file *f, void *buf, unsigned int count)
+static void flush(struct sha1file *f, const void *buf, unsigned int count)
 {
 	if (0 <= f->check_fd && count)  {
 		unsigned char check_buffer[8192];
@@ -86,13 +86,13 @@ int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags)
 	return fd;
 }
 
-int sha1write(struct sha1file *f, void *buf, unsigned int count)
+int sha1write(struct sha1file *f, const void *buf, unsigned int count)
 {
 	while (count) {
 		unsigned offset = f->offset;
 		unsigned left = sizeof(f->buffer) - offset;
 		unsigned nr = count > left ? left : count;
-		void *data;
+		const void *data;
 
 		if (f->do_crc)
 			f->crc32 = crc32(f->crc32, buf, nr);
diff --git a/csum-file.h b/csum-file.h
index 3b540bd..9dedb03 100644
--- a/csum-file.h
+++ b/csum-file.h
@@ -34,7 +34,7 @@ extern struct sha1file *sha1fd(int fd, const char *name);
 extern struct sha1file *sha1fd_check(const char *name);
 extern struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp);
 extern int sha1close(struct sha1file *, unsigned char *, unsigned int);
-extern int sha1write(struct sha1file *, void *, unsigned int);
+extern int sha1write(struct sha1file *, const void *, unsigned int);
 extern void sha1flush(struct sha1file *f);
 extern void crc32_begin(struct sha1file *);
 extern uint32_t crc32_end(struct sha1file *);
-- 
1.8.5.1.399.g900e7cd
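
For illustration, a minimal sketch of the kind of caller this enables
(write_magic() below is invented for the example, not part of the
patch): with the const-correct signature, read-only data can be handed
to sha1write() without casting away const.

    #include "cache.h"
    #include "csum-file.h"

    /*
     * Hypothetical caller, for illustration only: a const buffer can
     * now be passed straight to sha1write().
     */
    static void write_magic(struct sha1file *f)
    {
        static const char magic[4] = { 'P', 'A', 'C', 'K' };
        sha1write(f, magic, sizeof(magic));
    }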

* [PATCH v4 02/23] revindex: Export new APIs
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
  2013-12-21 13:59 ` [PATCH v4 01/23] sha1write: make buffer const-correct Jeff King
@ 2013-12-21 13:59 ` Jeff King
  2013-12-21 13:59 ` [PATCH v4 03/23] pack-objects: Refactor the packing list Jeff King
                   ` (23 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:59 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

Allow users to efficiently look up consecutive entries that are expected
to be found in the same revindex by exporting `find_revindex_position`:
this function takes a pointer to the revindex itself, instead of looking
up the proper revindex for a given packfile on each call.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 pack-revindex.c | 38 +++++++++++++++++++++++++-------------
 pack-revindex.h |  8 ++++++++
 2 files changed, 33 insertions(+), 13 deletions(-)

diff --git a/pack-revindex.c b/pack-revindex.c
index b4d2b35..0bb13b1 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -16,11 +16,6 @@
  * get the object sha1 from the main index.
  */
 
-struct pack_revindex {
-	struct packed_git *p;
-	struct revindex_entry *revindex;
-};
-
 static struct pack_revindex *pack_revindex;
 static int pack_revindex_hashsz;
 
@@ -201,15 +196,14 @@ static void create_pack_revindex(struct pack_revindex *rix)
 	sort_revindex(rix->revindex, num_ent, p->pack_size);
 }
 
-struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
+struct pack_revindex *revindex_for_pack(struct packed_git *p)
 {
 	int num;
-	unsigned lo, hi;
 	struct pack_revindex *rix;
-	struct revindex_entry *revindex;
 
 	if (!pack_revindex_hashsz)
 		init_pack_revindex();
+
 	num = pack_revindex_ix(p);
 	if (num < 0)
 		die("internal error: pack revindex fubar");
@@ -217,21 +211,39 @@ struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
 	rix = &pack_revindex[num];
 	if (!rix->revindex)
 		create_pack_revindex(rix);
-	revindex = rix->revindex;
 
-	lo = 0;
-	hi = p->num_objects + 1;
+	return rix;
+}
+
+int find_revindex_position(struct pack_revindex *pridx, off_t ofs)
+{
+	int lo = 0;
+	int hi = pridx->p->num_objects + 1;
+	struct revindex_entry *revindex = pridx->revindex;
+
 	do {
 		unsigned mi = lo + (hi - lo) / 2;
 		if (revindex[mi].offset == ofs) {
-			return revindex + mi;
+			return mi;
 		} else if (ofs < revindex[mi].offset)
 			hi = mi;
 		else
 			lo = mi + 1;
 	} while (lo < hi);
+
 	error("bad offset for revindex");
-	return NULL;
+	return -1;
+}
+
+struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
+{
+	struct pack_revindex *pridx = revindex_for_pack(p);
+	int pos = find_revindex_position(pridx, ofs);
+
+	if (pos < 0)
+		return NULL;
+
+	return pridx->revindex + pos;
 }
 
 void discard_revindex(void)
diff --git a/pack-revindex.h b/pack-revindex.h
index 8d5027a..866ca9c 100644
--- a/pack-revindex.h
+++ b/pack-revindex.h
@@ -6,6 +6,14 @@ struct revindex_entry {
 	unsigned int nr;
 };
 
+struct pack_revindex {
+	struct packed_git *p;
+	struct revindex_entry *revindex;
+};
+
+struct pack_revindex *revindex_for_pack(struct packed_git *p);
+int find_revindex_position(struct pack_revindex *pridx, off_t ofs);
+
 struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs);
 void discard_revindex(void);
 
-- 
1.8.5.1.399.g900e7cd
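
As a rough usage sketch (show_positions() below is invented for the
example): a caller that expects several offsets to fall in the same
pack can resolve the revindex once with revindex_for_pack() and then
query each offset with find_revindex_position(), instead of paying the
per-call pack lookup of find_pack_revindex().

    #include "cache.h"
    #include "pack-revindex.h"

    /* Illustration only: batch lookups against a single pack. */
    static void show_positions(struct packed_git *p,
                               const off_t *offsets, unsigned nr)
    {
        struct pack_revindex *rix = revindex_for_pack(p);
        unsigned i;

        for (i = 0; i < nr; i++) {
            int pos = find_revindex_position(rix, offsets[i]);
            if (pos < 0)
                continue; /* error already reported */
            printf("%"PRIuMAX" -> object #%u\n",
                   (uintmax_t)offsets[i], rix->revindex[pos].nr);
        }
    }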

* [PATCH v4 03/23] pack-objects: Refactor the packing list
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
  2013-12-21 13:59 ` [PATCH v4 01/23] sha1write: make buffer const-correct Jeff King
  2013-12-21 13:59 ` [PATCH v4 02/23] revindex: Export new APIs Jeff King
@ 2013-12-21 13:59 ` Jeff King
  2013-12-21 13:59 ` [PATCH v4 04/23] pack-objects: factor out name_hash Jeff King
                   ` (22 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:59 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

The hash table that stores the packing list for a given `pack-objects`
run was tightly coupled to the pack-objects code.

In this commit, we refactor the hash table and the underlying storage
array into a `packing_data` struct. The functionality for accessing and
adding entries to the packing list is hence accessible from other parts
of Git besides the `pack-objects` builtin.

This refactoring is a requirement for further patches in this series
that will require accessing the commit packing list from outside of
`pack-objects`.

The hash table implementation has been minimally altered: we now
use table sizes which are always a power of two, to ensure a uniform
index distribution in the array.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Makefile               |   2 +
 builtin/pack-objects.c | 175 +++++++++++--------------------------------------
 pack-objects.c         | 111 +++++++++++++++++++++++++++++++
 pack-objects.h         |  47 +++++++++++++
 4 files changed, 200 insertions(+), 135 deletions(-)
 create mode 100644 pack-objects.c
 create mode 100644 pack-objects.h

diff --git a/Makefile b/Makefile
index af847f8..48ff0bd 100644
--- a/Makefile
+++ b/Makefile
@@ -694,6 +694,7 @@ LIB_H += notes-merge.h
 LIB_H += notes-utils.h
 LIB_H += notes.h
 LIB_H += object.h
+LIB_H += pack-objects.h
 LIB_H += pack-revindex.h
 LIB_H += pack.h
 LIB_H += parse-options.h
@@ -831,6 +832,7 @@ LIB_OBJS += notes-merge.o
 LIB_OBJS += notes-utils.o
 LIB_OBJS += object.o
 LIB_OBJS += pack-check.o
+LIB_OBJS += pack-objects.o
 LIB_OBJS += pack-revindex.o
 LIB_OBJS += pack-write.o
 LIB_OBJS += pager.o
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 36273dd..f3f0cf9 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -14,6 +14,7 @@
 #include "diff.h"
 #include "revision.h"
 #include "list-objects.h"
+#include "pack-objects.h"
 #include "progress.h"
 #include "refs.h"
 #include "streaming.h"
@@ -25,42 +26,15 @@ static const char *pack_usage[] = {
 	NULL
 };
 
-struct object_entry {
-	struct pack_idx_entry idx;
-	unsigned long size;	/* uncompressed size */
-	struct packed_git *in_pack; 	/* already in pack */
-	off_t in_pack_offset;
-	struct object_entry *delta;	/* delta base object */
-	struct object_entry *delta_child; /* deltified objects who bases me */
-	struct object_entry *delta_sibling; /* other deltified objects who
-					     * uses the same base as me
-					     */
-	void *delta_data;	/* cached delta (uncompressed) */
-	unsigned long delta_size;	/* delta data size (uncompressed) */
-	unsigned long z_delta_size;	/* delta data size (compressed) */
-	enum object_type type;
-	enum object_type in_pack_type;	/* could be delta */
-	uint32_t hash;			/* name hint hash */
-	unsigned char in_pack_header_size;
-	unsigned preferred_base:1; /*
-				    * we do not pack this, but is available
-				    * to be used as the base object to delta
-				    * objects against.
-				    */
-	unsigned no_try_delta:1;
-	unsigned tagged:1; /* near the very tip of refs */
-	unsigned filled:1; /* assigned write-order */
-};
-
 /*
- * Objects we are going to pack are collected in objects array (dynamically
- * expanded).  nr_objects & nr_alloc controls this array.  They are stored
- * in the order we see -- typically rev-list --objects order that gives us
- * nice "minimum seek" order.
+ * Objects we are going to pack are collected in the `to_pack` structure.
+ * It contains an array (dynamically expanded) of the object data, and a map
+ * that can resolve SHA1s to their position in the array.
  */
-static struct object_entry *objects;
+static struct packing_data to_pack;
+
 static struct pack_idx_entry **written_list;
-static uint32_t nr_objects, nr_alloc, nr_result, nr_written;
+static uint32_t nr_result, nr_written;
 
 static int non_empty;
 static int reuse_delta = 1, reuse_object = 1;
@@ -90,21 +64,11 @@ static unsigned long cache_max_small_delta_size = 1000;
 static unsigned long window_memory_limit = 0;
 
 /*
- * The object names in objects array are hashed with this hashtable,
- * to help looking up the entry by object name.
- * This hashtable is built after all the objects are seen.
- */
-static int *object_ix;
-static int object_ix_hashsz;
-static struct object_entry *locate_object_entry(const unsigned char *sha1);
-
-/*
  * stats
  */
 static uint32_t written, written_delta;
 static uint32_t reused, reused_delta;
 
-
 static void *get_delta(struct object_entry *entry)
 {
 	unsigned long size, base_size, delta_size;
@@ -553,12 +517,12 @@ static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
 		       void *cb_data)
 {
 	unsigned char peeled[20];
-	struct object_entry *entry = locate_object_entry(sha1);
+	struct object_entry *entry = packlist_find(&to_pack, sha1, NULL);
 
 	if (entry)
 		entry->tagged = 1;
 	if (!peel_ref(path, peeled)) {
-		entry = locate_object_entry(peeled);
+		entry = packlist_find(&to_pack, peeled, NULL);
 		if (entry)
 			entry->tagged = 1;
 	}
@@ -633,9 +597,10 @@ static struct object_entry **compute_write_order(void)
 {
 	unsigned int i, wo_end, last_untagged;
 
-	struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));
+	struct object_entry **wo = xmalloc(to_pack.nr_objects * sizeof(*wo));
+	struct object_entry *objects = to_pack.objects;
 
-	for (i = 0; i < nr_objects; i++) {
+	for (i = 0; i < to_pack.nr_objects; i++) {
 		objects[i].tagged = 0;
 		objects[i].filled = 0;
 		objects[i].delta_child = NULL;
@@ -647,7 +612,7 @@ static struct object_entry **compute_write_order(void)
 	 * Make sure delta_sibling is sorted in the original
 	 * recency order.
 	 */
-	for (i = nr_objects; i > 0;) {
+	for (i = to_pack.nr_objects; i > 0;) {
 		struct object_entry *e = &objects[--i];
 		if (!e->delta)
 			continue;
@@ -665,7 +630,7 @@ static struct object_entry **compute_write_order(void)
 	 * Give the objects in the original recency order until
 	 * we see a tagged tip.
 	 */
-	for (i = wo_end = 0; i < nr_objects; i++) {
+	for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
 		if (objects[i].tagged)
 			break;
 		add_to_write_order(wo, &wo_end, &objects[i]);
@@ -675,7 +640,7 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * Then fill all the tagged tips.
 	 */
-	for (; i < nr_objects; i++) {
+	for (; i < to_pack.nr_objects; i++) {
 		if (objects[i].tagged)
 			add_to_write_order(wo, &wo_end, &objects[i]);
 	}
@@ -683,7 +648,7 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * And then all remaining commits and tags.
 	 */
-	for (i = last_untagged; i < nr_objects; i++) {
+	for (i = last_untagged; i < to_pack.nr_objects; i++) {
 		if (objects[i].type != OBJ_COMMIT &&
 		    objects[i].type != OBJ_TAG)
 			continue;
@@ -693,7 +658,7 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * And then all the trees.
 	 */
-	for (i = last_untagged; i < nr_objects; i++) {
+	for (i = last_untagged; i < to_pack.nr_objects; i++) {
 		if (objects[i].type != OBJ_TREE)
 			continue;
 		add_to_write_order(wo, &wo_end, &objects[i]);
@@ -702,13 +667,13 @@ static struct object_entry **compute_write_order(void)
 	/*
 	 * Finally all the rest in really tight order
 	 */
-	for (i = last_untagged; i < nr_objects; i++) {
+	for (i = last_untagged; i < to_pack.nr_objects; i++) {
 		if (!objects[i].filled)
 			add_family_to_write_order(wo, &wo_end, &objects[i]);
 	}
 
-	if (wo_end != nr_objects)
-		die("ordered %u objects, expected %"PRIu32, wo_end, nr_objects);
+	if (wo_end != to_pack.nr_objects)
+		die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);
 
 	return wo;
 }
@@ -724,7 +689,7 @@ static void write_pack_file(void)
 
 	if (progress > pack_to_stdout)
 		progress_state = start_progress("Writing objects", nr_result);
-	written_list = xmalloc(nr_objects * sizeof(*written_list));
+	written_list = xmalloc(to_pack.nr_objects * sizeof(*written_list));
 	write_order = compute_write_order();
 
 	do {
@@ -740,7 +705,7 @@ static void write_pack_file(void)
 		if (!offset)
 			die_errno("unable to write pack header");
 		nr_written = 0;
-		for (; i < nr_objects; i++) {
+		for (; i < to_pack.nr_objects; i++) {
 			struct object_entry *e = write_order[i];
 			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
 				break;
@@ -803,7 +768,7 @@ static void write_pack_file(void)
 			written_list[j]->offset = (off_t)-1;
 		}
 		nr_remaining -= nr_written;
-	} while (nr_remaining && i < nr_objects);
+	} while (nr_remaining && i < to_pack.nr_objects);
 
 	free(written_list);
 	free(write_order);
@@ -813,53 +778,6 @@ static void write_pack_file(void)
 			written, nr_result);
 }
 
-static int locate_object_entry_hash(const unsigned char *sha1)
-{
-	int i;
-	unsigned int ui;
-	memcpy(&ui, sha1, sizeof(unsigned int));
-	i = ui % object_ix_hashsz;
-	while (0 < object_ix[i]) {
-		if (!hashcmp(sha1, objects[object_ix[i] - 1].idx.sha1))
-			return i;
-		if (++i == object_ix_hashsz)
-			i = 0;
-	}
-	return -1 - i;
-}
-
-static struct object_entry *locate_object_entry(const unsigned char *sha1)
-{
-	int i;
-
-	if (!object_ix_hashsz)
-		return NULL;
-
-	i = locate_object_entry_hash(sha1);
-	if (0 <= i)
-		return &objects[object_ix[i]-1];
-	return NULL;
-}
-
-static void rehash_objects(void)
-{
-	uint32_t i;
-	struct object_entry *oe;
-
-	object_ix_hashsz = nr_objects * 3;
-	if (object_ix_hashsz < 1024)
-		object_ix_hashsz = 1024;
-	object_ix = xrealloc(object_ix, sizeof(int) * object_ix_hashsz);
-	memset(object_ix, 0, sizeof(int) * object_ix_hashsz);
-	for (i = 0, oe = objects; i < nr_objects; i++, oe++) {
-		int ix = locate_object_entry_hash(oe->idx.sha1);
-		if (0 <= ix)
-			continue;
-		ix = -1 - ix;
-		object_ix[ix] = i + 1;
-	}
-}
-
 static uint32_t name_hash(const char *name)
 {
 	uint32_t c, hash = 0;
@@ -908,13 +826,12 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 	struct object_entry *entry;
 	struct packed_git *p, *found_pack = NULL;
 	off_t found_offset = 0;
-	int ix;
 	uint32_t hash = name_hash(name);
+	uint32_t index_pos;
 
-	ix = nr_objects ? locate_object_entry_hash(sha1) : -1;
-	if (ix >= 0) {
+	entry = packlist_find(&to_pack, sha1, &index_pos);
+	if (entry) {
 		if (exclude) {
-			entry = objects + object_ix[ix] - 1;
 			if (!entry->preferred_base)
 				nr_result--;
 			entry->preferred_base = 1;
@@ -947,14 +864,7 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 		}
 	}
 
-	if (nr_objects >= nr_alloc) {
-		nr_alloc = (nr_alloc  + 1024) * 3 / 2;
-		objects = xrealloc(objects, nr_alloc * sizeof(*entry));
-	}
-
-	entry = objects + nr_objects++;
-	memset(entry, 0, sizeof(*entry));
-	hashcpy(entry->idx.sha1, sha1);
+	entry = packlist_alloc(&to_pack, sha1, index_pos);
 	entry->hash = hash;
 	if (type)
 		entry->type = type;
@@ -967,12 +877,7 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 		entry->in_pack_offset = found_offset;
 	}
 
-	if (object_ix_hashsz * 3 <= nr_objects * 4)
-		rehash_objects();
-	else
-		object_ix[-1 - ix] = nr_objects;
-
-	display_progress(progress_state, nr_objects);
+	display_progress(progress_state, to_pack.nr_objects);
 
 	if (name && no_try_delta(name))
 		entry->no_try_delta = 1;
@@ -1329,7 +1234,7 @@ static void check_object(struct object_entry *entry)
 			break;
 		}
 
-		if (base_ref && (base_entry = locate_object_entry(base_ref))) {
+		if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
 			/*
 			 * If base_ref was set above that means we wish to
 			 * reuse delta data, and we even found that base
@@ -1403,12 +1308,12 @@ static void get_object_details(void)
 	uint32_t i;
 	struct object_entry **sorted_by_offset;
 
-	sorted_by_offset = xcalloc(nr_objects, sizeof(struct object_entry *));
-	for (i = 0; i < nr_objects; i++)
-		sorted_by_offset[i] = objects + i;
-	qsort(sorted_by_offset, nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);
+	sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
+	for (i = 0; i < to_pack.nr_objects; i++)
+		sorted_by_offset[i] = to_pack.objects + i;
+	qsort(sorted_by_offset, to_pack.nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);
 
-	for (i = 0; i < nr_objects; i++) {
+	for (i = 0; i < to_pack.nr_objects; i++) {
 		struct object_entry *entry = sorted_by_offset[i];
 		check_object(entry);
 		if (big_file_threshold < entry->size)
@@ -2034,7 +1939,7 @@ static int add_ref_tag(const char *path, const unsigned char *sha1, int flag, vo
 
 	if (!prefixcmp(path, "refs/tags/") && /* is a tag? */
 	    !peel_ref(path, peeled)        && /* peelable? */
-	    locate_object_entry(peeled))      /* object packed? */
+	    packlist_find(&to_pack, peeled, NULL))      /* object packed? */
 		add_object_entry(sha1, OBJ_TAG, NULL, 0);
 	return 0;
 }
@@ -2057,14 +1962,14 @@ static void prepare_pack(int window, int depth)
 	if (!pack_to_stdout)
 		do_check_packed_object_crc = 1;
 
-	if (!nr_objects || !window || !depth)
+	if (!to_pack.nr_objects || !window || !depth)
 		return;
 
-	delta_list = xmalloc(nr_objects * sizeof(*delta_list));
+	delta_list = xmalloc(to_pack.nr_objects * sizeof(*delta_list));
 	nr_deltas = n = 0;
 
-	for (i = 0; i < nr_objects; i++) {
-		struct object_entry *entry = objects + i;
+	for (i = 0; i < to_pack.nr_objects; i++) {
+		struct object_entry *entry = to_pack.objects + i;
 
 		if (entry->delta)
 			/* This happens if we decided to reuse existing
@@ -2342,7 +2247,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
 
 		for (i = 0; i < p->num_objects; i++) {
 			sha1 = nth_packed_object_sha1(p, i);
-			if (!locate_object_entry(sha1) &&
+			if (!packlist_find(&to_pack, sha1, NULL) &&
 				!has_sha1_pack_kept_or_nonlocal(sha1))
 				if (force_object_loose(sha1, p->mtime))
 					die("unable to force loose object");
diff --git a/pack-objects.c b/pack-objects.c
new file mode 100644
index 0000000..d01d851
--- /dev/null
+++ b/pack-objects.c
@@ -0,0 +1,111 @@
+#include "cache.h"
+#include "object.h"
+#include "pack.h"
+#include "pack-objects.h"
+
+static uint32_t locate_object_entry_hash(struct packing_data *pdata,
+					 const unsigned char *sha1,
+					 int *found)
+{
+	uint32_t i, hash, mask = (pdata->index_size - 1);
+
+	memcpy(&hash, sha1, sizeof(uint32_t));
+	i = hash & mask;
+
+	while (pdata->index[i] > 0) {
+		uint32_t pos = pdata->index[i] - 1;
+
+		if (!hashcmp(sha1, pdata->objects[pos].idx.sha1)) {
+			*found = 1;
+			return i;
+		}
+
+		i = (i + 1) & mask;
+	}
+
+	*found = 0;
+	return i;
+}
+
+static inline uint32_t closest_pow2(uint32_t v)
+{
+	v = v - 1;
+	v |= v >> 1;
+	v |= v >> 2;
+	v |= v >> 4;
+	v |= v >> 8;
+	v |= v >> 16;
+	return v + 1;
+}
+
+static void rehash_objects(struct packing_data *pdata)
+{
+	uint32_t i;
+	struct object_entry *entry;
+
+	pdata->index_size = closest_pow2(pdata->nr_objects * 3);
+	if (pdata->index_size < 1024)
+		pdata->index_size = 1024;
+
+	pdata->index = xrealloc(pdata->index, sizeof(uint32_t) * pdata->index_size);
+	memset(pdata->index, 0, sizeof(int) * pdata->index_size);
+
+	entry = pdata->objects;
+
+	for (i = 0; i < pdata->nr_objects; i++) {
+		int found;
+		uint32_t ix = locate_object_entry_hash(pdata, entry->idx.sha1, &found);
+
+		if (found)
+			die("BUG: Duplicate object in hash");
+
+		pdata->index[ix] = i + 1;
+		entry++;
+	}
+}
+
+struct object_entry *packlist_find(struct packing_data *pdata,
+				   const unsigned char *sha1,
+				   uint32_t *index_pos)
+{
+	uint32_t i;
+	int found;
+
+	if (!pdata->index_size)
+		return NULL;
+
+	i = locate_object_entry_hash(pdata, sha1, &found);
+
+	if (index_pos)
+		*index_pos = i;
+
+	if (!found)
+		return NULL;
+
+	return &pdata->objects[pdata->index[i] - 1];
+}
+
+struct object_entry *packlist_alloc(struct packing_data *pdata,
+				    const unsigned char *sha1,
+				    uint32_t index_pos)
+{
+	struct object_entry *new_entry;
+
+	if (pdata->nr_objects >= pdata->nr_alloc) {
+		pdata->nr_alloc = (pdata->nr_alloc  + 1024) * 3 / 2;
+		pdata->objects = xrealloc(pdata->objects,
+					  pdata->nr_alloc * sizeof(*new_entry));
+	}
+
+	new_entry = pdata->objects + pdata->nr_objects++;
+
+	memset(new_entry, 0, sizeof(*new_entry));
+	hashcpy(new_entry->idx.sha1, sha1);
+
+	if (pdata->index_size * 3 <= pdata->nr_objects * 4)
+		rehash_objects(pdata);
+	else
+		pdata->index[index_pos] = pdata->nr_objects;
+
+	return new_entry;
+}
diff --git a/pack-objects.h b/pack-objects.h
new file mode 100644
index 0000000..f528215
--- /dev/null
+++ b/pack-objects.h
@@ -0,0 +1,47 @@
+#ifndef PACK_OBJECTS_H
+#define PACK_OBJECTS_H
+
+struct object_entry {
+	struct pack_idx_entry idx;
+	unsigned long size;	/* uncompressed size */
+	struct packed_git *in_pack;	/* already in pack */
+	off_t in_pack_offset;
+	struct object_entry *delta;	/* delta base object */
+	struct object_entry *delta_child; /* deltified objects who bases me */
+	struct object_entry *delta_sibling; /* other deltified objects who
+					     * uses the same base as me
+					     */
+	void *delta_data;	/* cached delta (uncompressed) */
+	unsigned long delta_size;	/* delta data size (uncompressed) */
+	unsigned long z_delta_size;	/* delta data size (compressed) */
+	enum object_type type;
+	enum object_type in_pack_type;	/* could be delta */
+	uint32_t hash;			/* name hint hash */
+	unsigned char in_pack_header_size;
+	unsigned preferred_base:1; /*
+				    * we do not pack this, but is available
+				    * to be used as the base object to delta
+				    * objects against.
+				    */
+	unsigned no_try_delta:1;
+	unsigned tagged:1; /* near the very tip of refs */
+	unsigned filled:1; /* assigned write-order */
+};
+
+struct packing_data {
+	struct object_entry *objects;
+	uint32_t nr_objects, nr_alloc;
+
+	int32_t *index;
+	uint32_t index_size;
+};
+
+struct object_entry *packlist_alloc(struct packing_data *pdata,
+				    const unsigned char *sha1,
+				    uint32_t index_pos);
+
+struct object_entry *packlist_find(struct packing_data *pdata,
+				   const unsigned char *sha1,
+				   uint32_t *index_pos);
+
+#endif
-- 
1.8.5.1.399.g900e7cd
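
As a rough sketch of what the refactoring allows (the names below are
invented for the example): any file that includes pack-objects.h can
now keep its own packing list through the packlist_find() and
packlist_alloc() pair.

    #include "cache.h"
    #include "pack.h"
    #include "pack-objects.h"

    /* Illustration only: a private packing list outside the builtin. */
    static struct packing_data my_list;

    static struct object_entry *add_once(const unsigned char *sha1)
    {
        uint32_t pos = 0;
        struct object_entry *entry;

        entry = packlist_find(&my_list, sha1, &pos);
        if (entry)
            return entry;   /* already present */
        return packlist_alloc(&my_list, sha1, pos);
    }

Because the table size is always a power of two, locate_object_entry_hash()
can reduce the hash with a simple mask (hash & (index_size - 1)) rather
than a modulo; since those bits come straight from a SHA-1, the resulting
indexes stay uniformly distributed.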

* [PATCH v4 04/23] pack-objects: factor out name_hash
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (2 preceding siblings ...)
  2013-12-21 13:59 ` [PATCH v4 03/23] pack-objects: Refactor the packing list Jeff King
@ 2013-12-21 13:59 ` Jeff King
  2013-12-21 13:59 ` [PATCH v4 05/23] revision: allow setting custom limiter function Jeff King
                   ` (21 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:59 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

As the pack-objects system grows beyond the single
pack-objects.c file, more parts (like the soon-to-exist
bitmap code) will need to compute hashes for matching
deltas. Factor out name_hash to make it available to other
files.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 builtin/pack-objects.c | 24 ++----------------------
 pack-objects.h         | 20 ++++++++++++++++++++
 2 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index f3f0cf9..faf746b 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -778,26 +778,6 @@ static void write_pack_file(void)
 			written, nr_result);
 }
 
-static uint32_t name_hash(const char *name)
-{
-	uint32_t c, hash = 0;
-
-	if (!name)
-		return 0;
-
-	/*
-	 * This effectively just creates a sortable number from the
-	 * last sixteen non-whitespace characters. Last characters
-	 * count "most", so things that end in ".c" sort together.
-	 */
-	while ((c = *name++) != 0) {
-		if (isspace(c))
-			continue;
-		hash = (hash >> 2) + (c << 24);
-	}
-	return hash;
-}
-
 static void setup_delta_attr_check(struct git_attr_check *check)
 {
 	static struct git_attr *attr_delta;
@@ -826,7 +806,7 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 	struct object_entry *entry;
 	struct packed_git *p, *found_pack = NULL;
 	off_t found_offset = 0;
-	uint32_t hash = name_hash(name);
+	uint32_t hash = pack_name_hash(name);
 	uint32_t index_pos;
 
 	entry = packlist_find(&to_pack, sha1, &index_pos);
@@ -1082,7 +1062,7 @@ static void add_preferred_base_object(const char *name)
 {
 	struct pbase_tree *it;
 	int cmplen;
-	unsigned hash = name_hash(name);
+	unsigned hash = pack_name_hash(name);
 
 	if (!num_preferred_base || check_pbase_path(hash))
 		return;
diff --git a/pack-objects.h b/pack-objects.h
index f528215..90ad0a8 100644
--- a/pack-objects.h
+++ b/pack-objects.h
@@ -44,4 +44,24 @@ struct object_entry *packlist_find(struct packing_data *pdata,
 				   const unsigned char *sha1,
 				   uint32_t *index_pos);
 
+static inline uint32_t pack_name_hash(const char *name)
+{
+	uint32_t c, hash = 0;
+
+	if (!name)
+		return 0;
+
+	/*
+	 * This effectively just creates a sortable number from the
+	 * last sixteen non-whitespace characters. Last characters
+	 * count "most", so things that end in ".c" sort together.
+	 */
+	while ((c = *name++) != 0) {
+		if (isspace(c))
+			continue;
+		hash = (hash >> 2) + (c << 24);
+	}
+	return hash;
+}
+
 #endif
-- 
1.8.5.1.399.g900e7cd
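
A tiny sketch of the now-shared helper in use (show_name_hashes() is
made up for the example); it simply prints a few hashes so the behaviour
described in the comment inside pack_name_hash() can be inspected.

    #include "cache.h"
    #include "pack.h"
    #include "pack-objects.h"

    /* Illustration only: inspect a few name hashes. */
    static void show_name_hashes(void)
    {
        const char *names[] = {
            "builtin/pack-objects.c",
            "pack-objects.c",
            "Makefile",
        };
        int i;

        for (i = 0; i < 3; i++)
            printf("%08"PRIx32" %s\n", pack_name_hash(names[i]), names[i]);
    }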

* [PATCH v4 05/23] revision: allow setting custom limiter function
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (3 preceding siblings ...)
  2013-12-21 13:59 ` [PATCH v4 04/23] pack-objects: factor out name_hash Jeff King
@ 2013-12-21 13:59 ` Jeff King
  2013-12-21 13:59 ` [PATCH v4 06/23] sha1_file: export `git_open_noatime` Jeff King
                   ` (20 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:59 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

This commit enables users of `struct rev_info` to perform custom limiting
during a revision walk (i.e. `get_revision`).

If the field `include_check` has been set to a callback, this callback
will be invoked once for each commit before it is added to the "pending"
list of the revwalk. If the include check returns 0, the commit will be
marked as added but won't be pushed to the pending list, effectively
limiting the walk.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 revision.c | 4 ++++
 revision.h | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/revision.c b/revision.c
index 0173e01..cddd605 100644
--- a/revision.c
+++ b/revision.c
@@ -779,6 +779,10 @@ static int add_parents_to_list(struct rev_info *revs, struct commit *commit,
 		return 0;
 	commit->object.flags |= ADDED;
 
+	if (revs->include_check &&
+	    !revs->include_check(commit, revs->include_check_data))
+		return 0;
+
 	/*
 	 * If the commit is uninteresting, don't try to
 	 * prune parents - we want the maximal uninteresting
diff --git a/revision.h b/revision.h
index e7f1d21..9957f3c 100644
--- a/revision.h
+++ b/revision.h
@@ -168,6 +168,8 @@ struct rev_info {
 	unsigned long min_age;
 	int min_parents;
 	int max_parents;
+	int (*include_check)(struct commit *, void *);
+	void *include_check_data;
 
 	/* diff info for patches and for paths limiting */
 	struct diff_options diffopt;
-- 
1.8.5.1.399.g900e7cd
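
A minimal sketch of installing the hook (the callback, cutoff and
setup function below are invented for the example):

    #include "cache.h"
    #include "commit.h"
    #include "revision.h"

    /*
     * Illustration only: commits older than the cutoff are still
     * marked as added, but are not pushed onto the pending list, so
     * the walk is limited and does not continue past them.
     */
    static int newer_than(struct commit *commit, void *data)
    {
        unsigned long cutoff = *(unsigned long *)data;
        return commit->date >= cutoff;
    }

    static void limit_walk_by_date(struct rev_info *revs, unsigned long *cutoff)
    {
        revs->include_check = newer_than;
        revs->include_check_data = cutoff;
    }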

* [PATCH v4 06/23] sha1_file: export `git_open_noatime`
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (4 preceding siblings ...)
  2013-12-21 13:59 ` [PATCH v4 05/23] revision: allow setting custom limiter function Jeff King
@ 2013-12-21 13:59 ` Jeff King
  2013-12-21 13:59 ` [PATCH v4 07/23] compat: add endianness helpers Jeff King
                   ` (19 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:59 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

The `git_open_noatime` helper can be of general interest for other
consumers of git's different on-disk formats.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 cache.h     | 1 +
 sha1_file.c | 4 +---
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/cache.h b/cache.h
index 5e3fc72..f2e5aa7 100644
--- a/cache.h
+++ b/cache.h
@@ -780,6 +780,7 @@ extern int hash_sha1_file(const void *buf, unsigned long len, const char *type,
 extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1);
 extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *);
 extern int force_object_loose(const unsigned char *sha1, time_t mtime);
+extern int git_open_noatime(const char *name);
 extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size);
 extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
 extern int parse_sha1_header(const char *hdr, unsigned long *sizep);
diff --git a/sha1_file.c b/sha1_file.c
index f80bbe4..4714bd8 100644
--- a/sha1_file.c
+++ b/sha1_file.c
@@ -239,8 +239,6 @@ char *sha1_pack_index_name(const unsigned char *sha1)
 struct alternate_object_database *alt_odb_list;
 static struct alternate_object_database **alt_odb_tail;
 
-static int git_open_noatime(const char *name);
-
 /*
  * Prepare alternate object database registry.
  *
@@ -1357,7 +1355,7 @@ int check_sha1_signature(const unsigned char *sha1, void *map,
 	return hashcmp(sha1, real_sha1) ? -1 : 0;
 }
 
-static int git_open_noatime(const char *name)
+int git_open_noatime(const char *name)
 {
 	static int sha1_file_open_flag = O_NOATIME;
 
-- 
1.8.5.1.399.g900e7cd
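
The interdiff in the cover letter already shows one new caller:
write_reused_pack() opens the on-disk packfile with git_open_noatime()
before streaming it back out. A minimal sketch of the same pattern
(open_data_file() is invented for the example):

    #include "cache.h"

    /*
     * Illustration only: open a read-only data file without updating
     * its atime; the O_NOATIME fallback handling lives inside
     * git_open_noatime() itself.
     */
    static int open_data_file(const char *path)
    {
        int fd = git_open_noatime(path);
        if (fd < 0)
            die_errno("unable to open '%s'", path);
        return fd;
    }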

* [PATCH v4 07/23] compat: add endianness helpers
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (5 preceding siblings ...)
  2013-12-21 13:59 ` [PATCH v4 06/23] sha1_file: export `git_open_noatime` Jeff King
@ 2013-12-21 13:59 ` Jeff King
  2013-12-21 13:59 ` [PATCH v4 08/23] ewah: compressed bitmap implementation Jeff King
                   ` (18 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:59 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

The POSIX standard doesn't currently define a `ntohll`/`htonll`
function pair to perform network-to-host and host-to-network
swaps of 64-bit data. These 64-bit swaps are necessary for the on-disk
storage of EWAH bitmaps if they are not in native byte order.

Many thanks to Ramsay Jones <ramsay@ramsay1.demon.co.uk> and
Torsten Bögershausen <tboegi@web.de> for cygwin/mingw/msvc
portability fixes.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 compat/bswap.h | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 75 insertions(+), 1 deletion(-)

diff --git a/compat/bswap.h b/compat/bswap.h
index 5061214..c18a78e 100644
--- a/compat/bswap.h
+++ b/compat/bswap.h
@@ -17,7 +17,20 @@ static inline uint32_t default_swab32(uint32_t val)
 		((val & 0x000000ff) << 24));
 }
 
+static inline uint64_t default_bswap64(uint64_t val)
+{
+	return (((val & (uint64_t)0x00000000000000ffULL) << 56) |
+		((val & (uint64_t)0x000000000000ff00ULL) << 40) |
+		((val & (uint64_t)0x0000000000ff0000ULL) << 24) |
+		((val & (uint64_t)0x00000000ff000000ULL) <<  8) |
+		((val & (uint64_t)0x000000ff00000000ULL) >>  8) |
+		((val & (uint64_t)0x0000ff0000000000ULL) >> 24) |
+		((val & (uint64_t)0x00ff000000000000ULL) >> 40) |
+		((val & (uint64_t)0xff00000000000000ULL) >> 56));
+}
+
 #undef bswap32
+#undef bswap64
 
 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
 
@@ -32,15 +45,42 @@ static inline uint32_t git_bswap32(uint32_t x)
 	return result;
 }
 
+#define bswap64 git_bswap64
+#if defined(__x86_64__)
+static inline uint64_t git_bswap64(uint64_t x)
+{
+	uint64_t result;
+	if (__builtin_constant_p(x))
+		result = default_bswap64(x);
+	else
+		__asm__("bswap %q0" : "=r" (result) : "0" (x));
+	return result;
+}
+#else
+static inline uint64_t git_bswap64(uint64_t x)
+{
+	union { uint64_t i64; uint32_t i32[2]; } tmp, result;
+	if (__builtin_constant_p(x))
+		result.i64 = default_bswap64(x);
+	else {
+		tmp.i64 = x;
+		result.i32[0] = git_bswap32(tmp.i32[1]);
+		result.i32[1] = git_bswap32(tmp.i32[0]);
+	}
+	return result.i64;
+}
+#endif
+
 #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
 
 #include <stdlib.h>
 
 #define bswap32(x) _byteswap_ulong(x)
+#define bswap64(x) _byteswap_uint64(x)
 
 #endif
 
-#ifdef bswap32
+#if defined(bswap32)
 
 #undef ntohl
 #undef htonl
@@ -48,3 +88,37 @@ static inline uint32_t git_bswap32(uint32_t x)
 #define htonl(x) bswap32(x)
 
 #endif
+
+#if defined(bswap64)
+
+#undef ntohll
+#undef htonll
+#define ntohll(x) bswap64(x)
+#define htonll(x) bswap64(x)
+
+#else
+
+#undef ntohll
+#undef htonll
+
+#if !defined(__BYTE_ORDER)
+# if defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
+#  define __BYTE_ORDER BYTE_ORDER
+#  define __LITTLE_ENDIAN LITTLE_ENDIAN
+#  define __BIG_ENDIAN BIG_ENDIAN
+# endif
+#endif
+
+#if !defined(__BYTE_ORDER)
+# error "Cannot determine endianness"
+#endif
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+# define ntohll(n) (n)
+# define htonll(n) (n)
+#else
+# define ntohll(n) default_bswap64(n)
+# define htonll(n) default_bswap64(n)
+#endif
+
+#endif
-- 
1.8.5.1.399.g900e7cd
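
A small sketch of the intended use (the helper names are made up for
the example): 64-bit words are converted to network byte order before
being written out and converted back when read, so the on-disk EWAH
data is independent of host byte order.

    #include "git-compat-util.h"

    /* Illustration only: round-trip a 64-bit word through disk order. */
    static void store_be64(unsigned char *out, uint64_t v)
    {
        uint64_t be = htonll(v);
        memcpy(out, &be, sizeof(be));
    }

    static uint64_t load_be64(const unsigned char *in)
    {
        uint64_t be;
        memcpy(&be, in, sizeof(be));
        return ntohll(be);
    }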

* [PATCH v4 08/23] ewah: compressed bitmap implementation
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (6 preceding siblings ...)
  2013-12-21 13:59 ` [PATCH v4 07/23] compat: add endianness helpers Jeff King
@ 2013-12-21 13:59 ` Jeff King
  2014-01-23  2:05   ` Jonathan Nieder
  2013-12-21 13:59 ` [PATCH v4 09/23] documentation: add documentation for the bitmap format Jeff King
                   ` (17 subsequent siblings)
  25 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:59 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

EWAH is a word-aligned compressed variant of a bitset (i.e. a data
structure that acts as a 0-indexed boolean array for many entries).

It uses a 64-bit run-length encoding (RLE) compression scheme,
trading some compression for better processing speed.

The goal of this word-aligned implementation is not to achieve
the best compression, but rather to improve query processing time.
As it stands right now, this EWAH implementation will always be more
efficient storage-wise than its uncompressed alternative.

EWAH arrays will be used as the on-disk format to store reachability
bitmaps for all objects in a repository while keeping reasonable sizes,
in the same way that JGit does.

This EWAH implementation is a mostly straightforward port of the
original `javaewah` library that JGit currently uses. The library is
self-contained and has been embedded whole (4 files) inside the `ewah`
folder to ease redistribution.

The library is re-licensed under the GPLv2 with the permission of Daniel
Lemire, the original author. The source code for the C version can
be found on GitHub:

	https://github.com/vmg/libewok

The original Java implementation can also be found on GitHub:

	https://github.com/lemire/javaewah

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Makefile           |  11 +-
 ewah/bitmap.c      | 221 ++++++++++++++++
 ewah/ewah_bitmap.c | 726 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 ewah/ewah_io.c     | 193 ++++++++++++++
 ewah/ewah_rlw.c    | 115 +++++++++
 ewah/ewok.h        | 235 +++++++++++++++++
 ewah/ewok_rlw.h    | 114 +++++++++
 7 files changed, 1613 insertions(+), 2 deletions(-)
 create mode 100644 ewah/bitmap.c
 create mode 100644 ewah/ewah_bitmap.c
 create mode 100644 ewah/ewah_io.c
 create mode 100644 ewah/ewah_rlw.c
 create mode 100644 ewah/ewok.h
 create mode 100644 ewah/ewok_rlw.h

diff --git a/Makefile b/Makefile
index 48ff0bd..64a1ed7 100644
--- a/Makefile
+++ b/Makefile
@@ -667,6 +667,8 @@ LIB_H += diff.h
 LIB_H += diffcore.h
 LIB_H += dir.h
 LIB_H += exec_cmd.h
+LIB_H += ewah/ewok.h
+LIB_H += ewah/ewok_rlw.h
 LIB_H += fetch-pack.h
 LIB_H += fmt-merge-msg.h
 LIB_H += fsck.h
@@ -800,6 +802,10 @@ LIB_OBJS += dir.o
 LIB_OBJS += editor.o
 LIB_OBJS += entry.o
 LIB_OBJS += environment.o
+LIB_OBJS += ewah/bitmap.o
+LIB_OBJS += ewah/ewah_bitmap.o
+LIB_OBJS += ewah/ewah_io.o
+LIB_OBJS += ewah/ewah_rlw.o
 LIB_OBJS += exec_cmd.o
 LIB_OBJS += fetch-pack.o
 LIB_OBJS += fsck.o
@@ -2474,8 +2480,9 @@ profile-clean:
 	$(RM) $(addsuffix *.gcno,$(addprefix $(PROFILE_DIR)/, $(object_dirs)))
 
 clean: profile-clean coverage-clean
-	$(RM) *.o *.res block-sha1/*.o ppc/*.o compat/*.o compat/*/*.o xdiff/*.o vcs-svn/*.o \
-		builtin/*.o $(LIB_FILE) $(XDIFF_LIB) $(VCSSVN_LIB)
+	$(RM) *.o *.res block-sha1/*.o ppc/*.o compat/*.o compat/*/*.o
+	$(RM) xdiff/*.o vcs-svn/*.o ewah/*.o builtin/*.o
+	$(RM) $(LIB_FILE) $(XDIFF_LIB) $(VCSSVN_LIB)
 	$(RM) $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) git$X
 	$(RM) $(TEST_PROGRAMS) $(NO_INSTALL)
 	$(RM) -r bin-wrappers $(dep_dirs)
diff --git a/ewah/bitmap.c b/ewah/bitmap.c
new file mode 100644
index 0000000..710e58c
--- /dev/null
+++ b/ewah/bitmap.c
@@ -0,0 +1,221 @@
+/**
+ * Copyright 2013, GitHub, Inc
+ * Copyright 2009-2013, Daniel Lemire, Cliff Moon,
+ *	David McIntosh, Robert Becho, Google Inc. and Veronika Zenz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "git-compat-util.h"
+#include "ewok.h"
+
+#define MASK(x) ((eword_t)1 << (x % BITS_IN_WORD))
+#define BLOCK(x) (x / BITS_IN_WORD)
+
+struct bitmap *bitmap_new(void)
+{
+	struct bitmap *bitmap = ewah_malloc(sizeof(struct bitmap));
+	bitmap->words = ewah_calloc(32, sizeof(eword_t));
+	bitmap->word_alloc = 32;
+	return bitmap;
+}
+
+void bitmap_set(struct bitmap *self, size_t pos)
+{
+	size_t block = BLOCK(pos);
+
+	if (block >= self->word_alloc) {
+		size_t old_size = self->word_alloc;
+		self->word_alloc = block * 2;
+		self->words = ewah_realloc(self->words,
+			self->word_alloc * sizeof(eword_t));
+
+		memset(self->words + old_size, 0x0,
+			(self->word_alloc - old_size) * sizeof(eword_t));
+	}
+
+	self->words[block] |= MASK(pos);
+}
+
+void bitmap_clear(struct bitmap *self, size_t pos)
+{
+	size_t block = BLOCK(pos);
+
+	if (block < self->word_alloc)
+		self->words[block] &= ~MASK(pos);
+}
+
+int bitmap_get(struct bitmap *self, size_t pos)
+{
+	size_t block = BLOCK(pos);
+	return block < self->word_alloc &&
+		(self->words[block] & MASK(pos)) != 0;
+}
+
+struct ewah_bitmap *bitmap_to_ewah(struct bitmap *bitmap)
+{
+	struct ewah_bitmap *ewah = ewah_new();
+	size_t i, running_empty_words = 0;
+	eword_t last_word = 0;
+
+	for (i = 0; i < bitmap->word_alloc; ++i) {
+		if (bitmap->words[i] == 0) {
+			running_empty_words++;
+			continue;
+		}
+
+		if (last_word != 0)
+			ewah_add(ewah, last_word);
+
+		if (running_empty_words > 0) {
+			ewah_add_empty_words(ewah, 0, running_empty_words);
+			running_empty_words = 0;
+		}
+
+		last_word = bitmap->words[i];
+	}
+
+	ewah_add(ewah, last_word);
+	return ewah;
+}
+
+struct bitmap *ewah_to_bitmap(struct ewah_bitmap *ewah)
+{
+	struct bitmap *bitmap = bitmap_new();
+	struct ewah_iterator it;
+	eword_t blowup;
+	size_t i = 0;
+
+	ewah_iterator_init(&it, ewah);
+
+	while (ewah_iterator_next(&blowup, &it)) {
+		if (i >= bitmap->word_alloc) {
+			bitmap->word_alloc *= 1.5;
+			bitmap->words = ewah_realloc(
+				bitmap->words, bitmap->word_alloc * sizeof(eword_t));
+		}
+
+		bitmap->words[i++] = blowup;
+	}
+
+	bitmap->word_alloc = i;
+	return bitmap;
+}
+
+void bitmap_and_not(struct bitmap *self, struct bitmap *other)
+{
+	const size_t count = (self->word_alloc < other->word_alloc) ?
+		self->word_alloc : other->word_alloc;
+
+	size_t i;
+
+	for (i = 0; i < count; ++i)
+		self->words[i] &= ~other->words[i];
+}
+
+void bitmap_or_ewah(struct bitmap *self, struct ewah_bitmap *other)
+{
+	size_t original_size = self->word_alloc;
+	size_t other_final = (other->bit_size / BITS_IN_WORD) + 1;
+	size_t i = 0;
+	struct ewah_iterator it;
+	eword_t word;
+
+	if (self->word_alloc < other_final) {
+		self->word_alloc = other_final;
+		self->words = ewah_realloc(self->words,
+			self->word_alloc * sizeof(eword_t));
+		memset(self->words + original_size, 0x0,
+			(self->word_alloc - original_size) * sizeof(eword_t));
+	}
+
+	ewah_iterator_init(&it, other);
+
+	while (ewah_iterator_next(&word, &it))
+		self->words[i++] |= word;
+}
+
+void bitmap_each_bit(struct bitmap *self, ewah_callback callback, void *data)
+{
+	size_t pos = 0, i;
+
+	for (i = 0; i < self->word_alloc; ++i) {
+		eword_t word = self->words[i];
+		uint32_t offset;
+
+		if (word == (eword_t)~0) {
+			for (offset = 0; offset < BITS_IN_WORD; ++offset)
+				callback(pos++, data);
+		} else {
+			for (offset = 0; offset < BITS_IN_WORD; ++offset) {
+				if ((word >> offset) == 0)
+					break;
+
+				offset += ewah_bit_ctz64(word >> offset);
+				callback(pos + offset, data);
+			}
+			pos += BITS_IN_WORD;
+		}
+	}
+}
+
+size_t bitmap_popcount(struct bitmap *self)
+{
+	size_t i, count = 0;
+
+	for (i = 0; i < self->word_alloc; ++i)
+		count += ewah_bit_popcount64(self->words[i]);
+
+	return count;
+}
+
+int bitmap_equals(struct bitmap *self, struct bitmap *other)
+{
+	struct bitmap *big, *small;
+	size_t i;
+
+	if (self->word_alloc < other->word_alloc) {
+		small = self;
+		big = other;
+	} else {
+		small = other;
+		big = self;
+	}
+
+	for (i = 0; i < small->word_alloc; ++i) {
+		if (small->words[i] != big->words[i])
+			return 0;
+	}
+
+	for (; i < big->word_alloc; ++i) {
+		if (big->words[i] != 0)
+			return 0;
+	}
+
+	return 1;
+}
+
+void bitmap_reset(struct bitmap *bitmap)
+{
+	memset(bitmap->words, 0x0, bitmap->word_alloc * sizeof(eword_t));
+}
+
+void bitmap_free(struct bitmap *bitmap)
+{
+	if (bitmap == NULL)
+		return;
+
+	free(bitmap->words);
+	free(bitmap);
+}
diff --git a/ewah/ewah_bitmap.c b/ewah/ewah_bitmap.c
new file mode 100644
index 0000000..f104b87
--- /dev/null
+++ b/ewah/ewah_bitmap.c
@@ -0,0 +1,726 @@
+/**
+ * Copyright 2013, GitHub, Inc
+ * Copyright 2009-2013, Daniel Lemire, Cliff Moon,
+ *	David McIntosh, Robert Becho, Google Inc. and Veronika Zenz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "git-compat-util.h"
+#include "ewok.h"
+#include "ewok_rlw.h"
+
+static inline size_t min_size(size_t a, size_t b)
+{
+	return a < b ? a : b;
+}
+
+static inline size_t max_size(size_t a, size_t b)
+{
+	return a > b ? a : b;
+}
+
+static inline void buffer_grow(struct ewah_bitmap *self, size_t new_size)
+{
+	size_t rlw_offset = (uint8_t *)self->rlw - (uint8_t *)self->buffer;
+
+	if (self->alloc_size >= new_size)
+		return;
+
+	self->alloc_size = new_size;
+	self->buffer = ewah_realloc(self->buffer,
+		self->alloc_size * sizeof(eword_t));
+	self->rlw = self->buffer + (rlw_offset / sizeof(eword_t));
+}
+
+static inline void buffer_push(struct ewah_bitmap *self, eword_t value)
+{
+	if (self->buffer_size + 1 >= self->alloc_size)
+		buffer_grow(self, self->buffer_size * 3 / 2);
+
+	self->buffer[self->buffer_size++] = value;
+}
+
+static void buffer_push_rlw(struct ewah_bitmap *self, eword_t value)
+{
+	buffer_push(self, value);
+	self->rlw = self->buffer + self->buffer_size - 1;
+}
+
+static size_t add_empty_words(struct ewah_bitmap *self, int v, size_t number)
+{
+	size_t added = 0;
+	eword_t runlen, can_add;
+
+	if (rlw_get_run_bit(self->rlw) != v && rlw_size(self->rlw) == 0) {
+		rlw_set_run_bit(self->rlw, v);
+	} else if (rlw_get_literal_words(self->rlw) != 0 ||
+			rlw_get_run_bit(self->rlw) != v) {
+		buffer_push_rlw(self, 0);
+		if (v) rlw_set_run_bit(self->rlw, v);
+		added++;
+	}
+
+	runlen = rlw_get_running_len(self->rlw);
+	can_add = min_size(number, RLW_LARGEST_RUNNING_COUNT - runlen);
+
+	rlw_set_running_len(self->rlw, runlen + can_add);
+	number -= can_add;
+
+	while (number >= RLW_LARGEST_RUNNING_COUNT) {
+		buffer_push_rlw(self, 0);
+		added++;
+		if (v) rlw_set_run_bit(self->rlw, v);
+		rlw_set_running_len(self->rlw, RLW_LARGEST_RUNNING_COUNT);
+		number -= RLW_LARGEST_RUNNING_COUNT;
+	}
+
+	if (number > 0) {
+		buffer_push_rlw(self, 0);
+		added++;
+
+		if (v) rlw_set_run_bit(self->rlw, v);
+		rlw_set_running_len(self->rlw, number);
+	}
+
+	return added;
+}
+
+size_t ewah_add_empty_words(struct ewah_bitmap *self, int v, size_t number)
+{
+	if (number == 0)
+		return 0;
+
+	self->bit_size += number * BITS_IN_WORD;
+	return add_empty_words(self, v, number);
+}
+
+static size_t add_literal(struct ewah_bitmap *self, eword_t new_data)
+{
+	eword_t current_num = rlw_get_literal_words(self->rlw);
+
+	if (current_num >= RLW_LARGEST_LITERAL_COUNT) {
+		buffer_push_rlw(self, 0);
+
+		rlw_set_literal_words(self->rlw, 1);
+		buffer_push(self, new_data);
+		return 2;
+	}
+
+	rlw_set_literal_words(self->rlw, current_num + 1);
+
+	/* sanity check */
+	assert(rlw_get_literal_words(self->rlw) == current_num + 1);
+
+	buffer_push(self, new_data);
+	return 1;
+}
+
+void ewah_add_dirty_words(
+	struct ewah_bitmap *self, const eword_t *buffer,
+	size_t number, int negate)
+{
+	size_t literals, can_add;
+
+	while (1) {
+		literals = rlw_get_literal_words(self->rlw);
+		can_add = min_size(number, RLW_LARGEST_LITERAL_COUNT - literals);
+
+		rlw_set_literal_words(self->rlw, literals + can_add);
+
+		if (self->buffer_size + can_add >= self->alloc_size)
+			buffer_grow(self, (self->buffer_size + can_add) * 3 / 2);
+
+		if (negate) {
+			size_t i;
+			for (i = 0; i < can_add; ++i)
+				self->buffer[self->buffer_size++] = ~buffer[i];
+		} else {
+			memcpy(self->buffer + self->buffer_size,
+				buffer, can_add * sizeof(eword_t));
+			self->buffer_size += can_add;
+		}
+
+		self->bit_size += can_add * BITS_IN_WORD;
+
+		if (number - can_add == 0)
+			break;
+
+		buffer_push_rlw(self, 0);
+		buffer += can_add;
+		number -= can_add;
+	}
+}
+
+static size_t add_empty_word(struct ewah_bitmap *self, int v)
+{
+	int no_literal = (rlw_get_literal_words(self->rlw) == 0);
+	eword_t run_len = rlw_get_running_len(self->rlw);
+
+	if (no_literal && run_len == 0) {
+		rlw_set_run_bit(self->rlw, v);
+		assert(rlw_get_run_bit(self->rlw) == v);
+	}
+
+	if (no_literal && rlw_get_run_bit(self->rlw) == v &&
+		run_len < RLW_LARGEST_RUNNING_COUNT) {
+		rlw_set_running_len(self->rlw, run_len + 1);
+		assert(rlw_get_running_len(self->rlw) == run_len + 1);
+		return 0;
+	} else {
+		buffer_push_rlw(self, 0);
+
+		assert(rlw_get_running_len(self->rlw) == 0);
+		assert(rlw_get_run_bit(self->rlw) == 0);
+		assert(rlw_get_literal_words(self->rlw) == 0);
+
+		rlw_set_run_bit(self->rlw, v);
+		assert(rlw_get_run_bit(self->rlw) == v);
+
+		rlw_set_running_len(self->rlw, 1);
+		assert(rlw_get_running_len(self->rlw) == 1);
+		assert(rlw_get_literal_words(self->rlw) == 0);
+		return 1;
+	}
+}
+
+size_t ewah_add(struct ewah_bitmap *self, eword_t word)
+{
+	self->bit_size += BITS_IN_WORD;
+
+	if (word == 0)
+		return add_empty_word(self, 0);
+
+	if (word == (eword_t)(~0))
+		return add_empty_word(self, 1);
+
+	return add_literal(self, word);
+}
+
+void ewah_set(struct ewah_bitmap *self, size_t i)
+{
+	const size_t dist =
+		(i + BITS_IN_WORD) / BITS_IN_WORD -
+		(self->bit_size + BITS_IN_WORD - 1) / BITS_IN_WORD;
+
+	assert(i >= self->bit_size);
+
+	self->bit_size = i + 1;
+
+	if (dist > 0) {
+		if (dist > 1)
+			add_empty_words(self, 0, dist - 1);
+
+		add_literal(self, (eword_t)1 << (i % BITS_IN_WORD));
+		return;
+	}
+
+	if (rlw_get_literal_words(self->rlw) == 0) {
+		rlw_set_running_len(self->rlw,
+			rlw_get_running_len(self->rlw) - 1);
+		add_literal(self, (eword_t)1 << (i % BITS_IN_WORD));
+		return;
+	}
+
+	self->buffer[self->buffer_size - 1] |=
+		((eword_t)1 << (i % BITS_IN_WORD));
+
+	/* check if we just completed a stream of 1s */
+	if (self->buffer[self->buffer_size - 1] == (eword_t)(~0)) {
+		self->buffer[--self->buffer_size] = 0;
+		rlw_set_literal_words(self->rlw,
+			rlw_get_literal_words(self->rlw) - 1);
+		add_empty_word(self, 1);
+	}
+}
+
+void ewah_each_bit(struct ewah_bitmap *self, void (*callback)(size_t, void*), void *payload)
+{
+	size_t pos = 0;
+	size_t pointer = 0;
+	size_t k;
+
+	while (pointer < self->buffer_size) {
+		eword_t *word = &self->buffer[pointer];
+
+		if (rlw_get_run_bit(word)) {
+			size_t len = rlw_get_running_len(word) * BITS_IN_WORD;
+			for (k = 0; k < len; ++k, ++pos)
+				callback(pos, payload);
+		} else {
+			pos += rlw_get_running_len(word) * BITS_IN_WORD;
+		}
+
+		++pointer;
+
+		for (k = 0; k < rlw_get_literal_words(word); ++k) {
+			int c;
+
+			/* todo: zero count optimization */
+			for (c = 0; c < BITS_IN_WORD; ++c, ++pos) {
+				if ((self->buffer[pointer] & ((eword_t)1 << c)) != 0)
+					callback(pos, payload);
+			}
+
+			++pointer;
+		}
+	}
+}
+
+struct ewah_bitmap *ewah_new(void)
+{
+	struct ewah_bitmap *self;
+
+	self = ewah_malloc(sizeof(struct ewah_bitmap));
+	if (self == NULL)
+		return NULL;
+
+	self->buffer = ewah_malloc(32 * sizeof(eword_t));
+	self->alloc_size = 32;
+
+	ewah_clear(self);
+	return self;
+}
+
+void ewah_clear(struct ewah_bitmap *self)
+{
+	self->buffer_size = 1;
+	self->buffer[0] = 0;
+	self->bit_size = 0;
+	self->rlw = self->buffer;
+}
+
+void ewah_free(struct ewah_bitmap *self)
+{
+	if (!self)
+		return;
+
+	if (self->alloc_size)
+		free(self->buffer);
+
+	free(self);
+}
+
+static void read_new_rlw(struct ewah_iterator *it)
+{
+	const eword_t *word = NULL;
+
+	it->literals = 0;
+	it->compressed = 0;
+
+	while (1) {
+		word = &it->buffer[it->pointer];
+
+		it->rl = rlw_get_running_len(word);
+		it->lw = rlw_get_literal_words(word);
+		it->b = rlw_get_run_bit(word);
+
+		if (it->rl || it->lw)
+			return;
+
+		if (it->pointer < it->buffer_size - 1) {
+			it->pointer++;
+		} else {
+			it->pointer = it->buffer_size;
+			return;
+		}
+	}
+}
+
+int ewah_iterator_next(eword_t *next, struct ewah_iterator *it)
+{
+	if (it->pointer >= it->buffer_size)
+		return 0;
+
+	if (it->compressed < it->rl) {
+		it->compressed++;
+		*next = it->b ? (eword_t)(~0) : 0;
+	} else {
+		assert(it->literals < it->lw);
+
+		it->literals++;
+		it->pointer++;
+
+		assert(it->pointer < it->buffer_size);
+
+		*next = it->buffer[it->pointer];
+	}
+
+	if (it->compressed == it->rl && it->literals == it->lw) {
+		if (++it->pointer < it->buffer_size)
+			read_new_rlw(it);
+	}
+
+	return 1;
+}
+
+void ewah_iterator_init(struct ewah_iterator *it, struct ewah_bitmap *parent)
+{
+	it->buffer = parent->buffer;
+	it->buffer_size = parent->buffer_size;
+	it->pointer = 0;
+
+	it->lw = 0;
+	it->rl = 0;
+	it->compressed = 0;
+	it->literals = 0;
+	it->b = 0;
+
+	if (it->pointer < it->buffer_size)
+		read_new_rlw(it);
+}
+
+void ewah_dump(struct ewah_bitmap *self)
+{
+	size_t i;
+	fprintf(stderr, "%"PRIuMAX" bits | %"PRIuMAX" words | ",
+		(uintmax_t)self->bit_size, (uintmax_t)self->buffer_size);
+
+	for (i = 0; i < self->buffer_size; ++i)
+		fprintf(stderr, "%016"PRIx64" ", (uint64_t)self->buffer[i]);
+
+	fprintf(stderr, "\n");
+}
+
+void ewah_not(struct ewah_bitmap *self)
+{
+	size_t pointer = 0;
+
+	while (pointer < self->buffer_size) {
+		eword_t *word = &self->buffer[pointer];
+		size_t literals, k;
+
+		rlw_xor_run_bit(word);
+		++pointer;
+
+		literals = rlw_get_literal_words(word);
+		for (k = 0; k < literals; ++k) {
+			self->buffer[pointer] = ~self->buffer[pointer];
+			++pointer;
+		}
+	}
+}
+
+void ewah_xor(
+	struct ewah_bitmap *ewah_i,
+	struct ewah_bitmap *ewah_j,
+	struct ewah_bitmap *out)
+{
+	struct rlw_iterator rlw_i;
+	struct rlw_iterator rlw_j;
+	size_t literals;
+
+	rlwit_init(&rlw_i, ewah_i);
+	rlwit_init(&rlw_j, ewah_j);
+
+	while (rlwit_word_size(&rlw_i) > 0 && rlwit_word_size(&rlw_j) > 0) {
+		while (rlw_i.rlw.running_len > 0 || rlw_j.rlw.running_len > 0) {
+			struct rlw_iterator *prey, *predator;
+			size_t index;
+			int negate_words;
+
+			if (rlw_i.rlw.running_len < rlw_j.rlw.running_len) {
+				prey = &rlw_i;
+				predator = &rlw_j;
+			} else {
+				prey = &rlw_j;
+				predator = &rlw_i;
+			}
+
+			negate_words = !!predator->rlw.running_bit;
+			index = rlwit_discharge(prey, out,
+				predator->rlw.running_len, negate_words);
+
+			ewah_add_empty_words(out, negate_words,
+				predator->rlw.running_len - index);
+
+			rlwit_discard_first_words(predator,
+				predator->rlw.running_len);
+		}
+
+		literals = min_size(
+			rlw_i.rlw.literal_words,
+			rlw_j.rlw.literal_words);
+
+		if (literals) {
+			size_t k;
+
+			for (k = 0; k < literals; ++k) {
+				ewah_add(out,
+					rlw_i.buffer[rlw_i.literal_word_start + k] ^
+					rlw_j.buffer[rlw_j.literal_word_start + k]
+				);
+			}
+
+			rlwit_discard_first_words(&rlw_i, literals);
+			rlwit_discard_first_words(&rlw_j, literals);
+		}
+	}
+
+	if (rlwit_word_size(&rlw_i) > 0)
+		rlwit_discharge(&rlw_i, out, ~0, 0);
+	else
+		rlwit_discharge(&rlw_j, out, ~0, 0);
+
+	out->bit_size = max_size(ewah_i->bit_size, ewah_j->bit_size);
+}
+
+void ewah_and(
+	struct ewah_bitmap *ewah_i,
+	struct ewah_bitmap *ewah_j,
+	struct ewah_bitmap *out)
+{
+	struct rlw_iterator rlw_i;
+	struct rlw_iterator rlw_j;
+	size_t literals;
+
+	rlwit_init(&rlw_i, ewah_i);
+	rlwit_init(&rlw_j, ewah_j);
+
+	while (rlwit_word_size(&rlw_i) > 0 && rlwit_word_size(&rlw_j) > 0) {
+		while (rlw_i.rlw.running_len > 0 || rlw_j.rlw.running_len > 0) {
+			struct rlw_iterator *prey, *predator;
+
+			if (rlw_i.rlw.running_len < rlw_j.rlw.running_len) {
+				prey = &rlw_i;
+				predator = &rlw_j;
+			} else {
+				prey = &rlw_j;
+				predator = &rlw_i;
+			}
+
+			if (predator->rlw.running_bit == 0) {
+				ewah_add_empty_words(out, 0,
+					predator->rlw.running_len);
+				rlwit_discard_first_words(prey,
+					predator->rlw.running_len);
+				rlwit_discard_first_words(predator,
+					predator->rlw.running_len);
+			} else {
+				size_t index = rlwit_discharge(prey, out,
+					predator->rlw.running_len, 0);
+				ewah_add_empty_words(out, 0,
+					predator->rlw.running_len - index);
+				rlwit_discard_first_words(predator,
+					predator->rlw.running_len);
+			}
+		}
+
+		literals = min_size(
+			rlw_i.rlw.literal_words,
+			rlw_j.rlw.literal_words);
+
+		if (literals) {
+			size_t k;
+
+			for (k = 0; k < literals; ++k) {
+				ewah_add(out,
+					rlw_i.buffer[rlw_i.literal_word_start + k] &
+					rlw_j.buffer[rlw_j.literal_word_start + k]
+				);
+			}
+
+			rlwit_discard_first_words(&rlw_i, literals);
+			rlwit_discard_first_words(&rlw_j, literals);
+		}
+	}
+
+	if (rlwit_word_size(&rlw_i) > 0)
+		rlwit_discharge_empty(&rlw_i, out);
+	else
+		rlwit_discharge_empty(&rlw_j, out);
+
+	out->bit_size = max_size(ewah_i->bit_size, ewah_j->bit_size);
+}
+
+void ewah_and_not(
+	struct ewah_bitmap *ewah_i,
+	struct ewah_bitmap *ewah_j,
+	struct ewah_bitmap *out)
+{
+	struct rlw_iterator rlw_i;
+	struct rlw_iterator rlw_j;
+	size_t literals;
+
+	rlwit_init(&rlw_i, ewah_i);
+	rlwit_init(&rlw_j, ewah_j);
+
+	while (rlwit_word_size(&rlw_i) > 0 && rlwit_word_size(&rlw_j) > 0) {
+		while (rlw_i.rlw.running_len > 0 || rlw_j.rlw.running_len > 0) {
+			struct rlw_iterator *prey, *predator;
+
+			if (rlw_i.rlw.running_len < rlw_j.rlw.running_len) {
+				prey = &rlw_i;
+				predator = &rlw_j;
+			} else {
+				prey = &rlw_j;
+				predator = &rlw_i;
+			}
+
+			if ((predator->rlw.running_bit && prey == &rlw_i) ||
+				(!predator->rlw.running_bit && prey != &rlw_i)) {
+				ewah_add_empty_words(out, 0,
+					predator->rlw.running_len);
+				rlwit_discard_first_words(prey,
+					predator->rlw.running_len);
+				rlwit_discard_first_words(predator,
+					predator->rlw.running_len);
+			} else {
+				size_t index;
+				int negate_words;
+
+				negate_words = (&rlw_i != prey);
+				index = rlwit_discharge(prey, out,
+					predator->rlw.running_len, negate_words);
+				ewah_add_empty_words(out, negate_words,
+					predator->rlw.running_len - index);
+				rlwit_discard_first_words(predator,
+					predator->rlw.running_len);
+			}
+		}
+
+		literals = min_size(
+			rlw_i.rlw.literal_words,
+			rlw_j.rlw.literal_words);
+
+		if (literals) {
+			size_t k;
+
+			for (k = 0; k < literals; ++k) {
+				ewah_add(out,
+					rlw_i.buffer[rlw_i.literal_word_start + k] &
+					~(rlw_j.buffer[rlw_j.literal_word_start + k])
+				);
+			}
+
+			rlwit_discard_first_words(&rlw_i, literals);
+			rlwit_discard_first_words(&rlw_j, literals);
+		}
+	}
+
+	if (rlwit_word_size(&rlw_i) > 0)
+		rlwit_discharge(&rlw_i, out, ~0, 0);
+	else
+		rlwit_discharge_empty(&rlw_j, out);
+
+	out->bit_size = max_size(ewah_i->bit_size, ewah_j->bit_size);
+}
+
+void ewah_or(
+	struct ewah_bitmap *ewah_i,
+	struct ewah_bitmap *ewah_j,
+	struct ewah_bitmap *out)
+{
+	struct rlw_iterator rlw_i;
+	struct rlw_iterator rlw_j;
+	size_t literals;
+
+	rlwit_init(&rlw_i, ewah_i);
+	rlwit_init(&rlw_j, ewah_j);
+
+	while (rlwit_word_size(&rlw_i) > 0 && rlwit_word_size(&rlw_j) > 0) {
+		while (rlw_i.rlw.running_len > 0 || rlw_j.rlw.running_len > 0) {
+			struct rlw_iterator *prey, *predator;
+
+			if (rlw_i.rlw.running_len < rlw_j.rlw.running_len) {
+				prey = &rlw_i;
+				predator = &rlw_j;
+			} else {
+				prey = &rlw_j;
+				predator = &rlw_i;
+			}
+
+			if (predator->rlw.running_bit) {
+				ewah_add_empty_words(out, 0,
+					predator->rlw.running_len);
+				rlwit_discard_first_words(prey,
+					predator->rlw.running_len);
+				rlwit_discard_first_words(predator,
+					predator->rlw.running_len);
+			} else {
+				size_t index = rlwit_discharge(prey, out,
+					predator->rlw.running_len, 0);
+				ewah_add_empty_words(out, 0,
+					predator->rlw.running_len - index);
+				rlwit_discard_first_words(predator,
+					predator->rlw.running_len);
+			}
+		}
+
+		literals = min_size(
+			rlw_i.rlw.literal_words,
+			rlw_j.rlw.literal_words);
+
+		if (literals) {
+			size_t k;
+
+			for (k = 0; k < literals; ++k) {
+				ewah_add(out,
+					rlw_i.buffer[rlw_i.literal_word_start + k] |
+					rlw_j.buffer[rlw_j.literal_word_start + k]
+				);
+			}
+
+			rlwit_discard_first_words(&rlw_i, literals);
+			rlwit_discard_first_words(&rlw_j, literals);
+		}
+	}
+
+	if (rlwit_word_size(&rlw_i) > 0)
+		rlwit_discharge(&rlw_i, out, ~0, 0);
+	else
+		rlwit_discharge(&rlw_j, out, ~0, 0);
+
+	out->bit_size = max_size(ewah_i->bit_size, ewah_j->bit_size);
+}
+
+
+#define BITMAP_POOL_MAX 16
+static struct ewah_bitmap *bitmap_pool[BITMAP_POOL_MAX];
+static size_t bitmap_pool_size;
+
+struct ewah_bitmap *ewah_pool_new(void)
+{
+	if (bitmap_pool_size)
+		return bitmap_pool[--bitmap_pool_size];
+
+	return ewah_new();
+}
+
+void ewah_pool_free(struct ewah_bitmap *self)
+{
+	if (self == NULL)
+		return;
+
+	if (bitmap_pool_size == BITMAP_POOL_MAX ||
+		self->alloc_size == 0) {
+		ewah_free(self);
+		return;
+	}
+
+	ewah_clear(self);
+	bitmap_pool[bitmap_pool_size++] = self;
+}
+
+uint32_t ewah_checksum(struct ewah_bitmap *self)
+{
+	const uint8_t *p = (uint8_t *)self->buffer;
+	uint32_t crc = (uint32_t)self->bit_size;
+	size_t size = self->buffer_size * sizeof(eword_t);
+
+	while (size--)
+		crc = (crc << 5) - crc + (uint32_t)*p++;
+
+	return crc;
+}
diff --git a/ewah/ewah_io.c b/ewah/ewah_io.c
new file mode 100644
index 0000000..aed0da6
--- /dev/null
+++ b/ewah/ewah_io.c
@@ -0,0 +1,193 @@
+/**
+ * Copyright 2013, GitHub, Inc
+ * Copyright 2009-2013, Daniel Lemire, Cliff Moon,
+ *	David McIntosh, Robert Becho, Google Inc. and Veronika Zenz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "git-compat-util.h"
+#include "ewok.h"
+
+int ewah_serialize_native(struct ewah_bitmap *self, int fd)
+{
+	uint32_t write32;
+	size_t to_write = self->buffer_size * 8;
+
+	/* 32 bit -- bit size for the map */
+	write32 = (uint32_t)self->bit_size;
+	if (write(fd, &write32, 4) != 4)
+		return -1;
+
+	/** 32 bit -- number of compressed 64-bit words */
+	write32 = (uint32_t)self->buffer_size;
+	if (write(fd, &write32, 4) != 4)
+		return -1;
+
+	if (write(fd, self->buffer, to_write) != to_write)
+		return -1;
+
+	/** 32 bit -- position for the RLW */
+	write32 = self->rlw - self->buffer;
+	if (write(fd, &write32, 4) != 4)
+		return -1;
+
+	return (3 * 4) + to_write;
+}
+
+int ewah_serialize_to(struct ewah_bitmap *self,
+		      int (*write_fun)(void *, const void *, size_t),
+		      void *data)
+{
+	size_t i;
+	eword_t dump[2048];
+	const size_t words_per_dump = sizeof(dump) / sizeof(eword_t);
+	uint32_t bitsize, word_count, rlw_pos;
+
+	const eword_t *buffer;
+	size_t words_left;
+
+	/* 32 bit -- bit size for the map */
+	bitsize =  htonl((uint32_t)self->bit_size);
+	if (write_fun(data, &bitsize, 4) != 4)
+		return -1;
+
+	/** 32 bit -- number of compressed 64-bit words */
+	word_count =  htonl((uint32_t)self->buffer_size);
+	if (write_fun(data, &word_count, 4) != 4)
+		return -1;
+
+	/** 64 bit x N -- compressed words */
+	buffer = self->buffer;
+	words_left = self->buffer_size;
+
+	while (words_left >= words_per_dump) {
+		for (i = 0; i < words_per_dump; ++i, ++buffer)
+			dump[i] = htonll(*buffer);
+
+		if (write_fun(data, dump, sizeof(dump)) != sizeof(dump))
+			return -1;
+
+		words_left -= words_per_dump;
+	}
+
+	if (words_left) {
+		for (i = 0; i < words_left; ++i, ++buffer)
+			dump[i] = htonll(*buffer);
+
+		if (write_fun(data, dump, words_left * 8) != words_left * 8)
+			return -1;
+	}
+
+	/** 32 bit -- position for the RLW */
+	rlw_pos = (uint8_t*)self->rlw - (uint8_t *)self->buffer;
+	rlw_pos = htonl(rlw_pos / sizeof(eword_t));
+
+	if (write_fun(data, &rlw_pos, 4) != 4)
+		return -1;
+
+	return (3 * 4) + (self->buffer_size * 8);
+}
+
+static int write_helper(void *fd, const void *buf, size_t len)
+{
+	return write((intptr_t)fd, buf, len);
+}
+
+int ewah_serialize(struct ewah_bitmap *self, int fd)
+{
+	return ewah_serialize_to(self, write_helper, (void *)(intptr_t)fd);
+}
+
+int ewah_read_mmap(struct ewah_bitmap *self, void *map, size_t len)
+{
+	uint32_t *read32 = map;
+	eword_t *read64;
+	size_t i;
+
+	self->bit_size = ntohl(*read32++);
+	self->buffer_size = self->alloc_size = ntohl(*read32++);
+	self->buffer = ewah_realloc(self->buffer,
+		self->alloc_size * sizeof(eword_t));
+
+	if (!self->buffer)
+		return -1;
+
+	for (i = 0, read64 = (void *)read32; i < self->buffer_size; ++i)
+		self->buffer[i] = ntohll(*read64++);
+
+	read32 = (void *)read64;
+	self->rlw = self->buffer + ntohl(*read32++);
+
+	return (3 * 4) + (self->buffer_size * 8);
+}
+
+int ewah_deserialize(struct ewah_bitmap *self, int fd)
+{
+	size_t i;
+	eword_t dump[2048];
+	const size_t words_per_dump = sizeof(dump) / sizeof(eword_t);
+	uint32_t bitsize, word_count, rlw_pos;
+
+	eword_t *buffer = NULL;
+	size_t words_left;
+
+	ewah_clear(self);
+
+	/* 32 bit -- bit size for the map */
+	if (read(fd, &bitsize, 4) != 4)
+		return -1;
+
+	self->bit_size = (size_t)ntohl(bitsize);
+
+	/** 32 bit -- number of compressed 64-bit words */
+	if (read(fd, &word_count, 4) != 4)
+		return -1;
+
+	self->buffer_size = self->alloc_size = (size_t)ntohl(word_count);
+	self->buffer = ewah_realloc(self->buffer,
+		self->alloc_size * sizeof(eword_t));
+
+	if (!self->buffer)
+		return -1;
+
+	/** 64 bit x N -- compressed words */
+	buffer = self->buffer;
+	words_left = self->buffer_size;
+
+	while (words_left >= words_per_dump) {
+		if (read(fd, dump, sizeof(dump)) != sizeof(dump))
+			return -1;
+
+		for (i = 0; i < words_per_dump; ++i, ++buffer)
+			*buffer = ntohll(dump[i]);
+
+		words_left -= words_per_dump;
+	}
+
+	if (words_left) {
+		if (read(fd, dump, words_left * 8) != words_left * 8)
+			return -1;
+
+		for (i = 0; i < words_left; ++i, ++buffer)
+			*buffer = ntohll(dump[i]);
+	}
+
+	/** 32 bit -- position for the RLW */
+	if (read(fd, &rlw_pos, 4) != 4)
+		return -1;
+
+	self->rlw = self->buffer + ntohl(rlw_pos);
+	return 0;
+}
diff --git a/ewah/ewah_rlw.c b/ewah/ewah_rlw.c
new file mode 100644
index 0000000..c723f1a
--- /dev/null
+++ b/ewah/ewah_rlw.c
@@ -0,0 +1,115 @@
+/**
+ * Copyright 2013, GitHub, Inc
+ * Copyright 2009-2013, Daniel Lemire, Cliff Moon,
+ *	David McIntosh, Robert Becho, Google Inc. and Veronika Zenz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include "git-compat-util.h"
+#include "ewok.h"
+#include "ewok_rlw.h"
+
+static inline int next_word(struct rlw_iterator *it)
+{
+	if (it->pointer >= it->size)
+		return 0;
+
+	it->rlw.word = &it->buffer[it->pointer];
+	it->pointer += rlw_get_literal_words(it->rlw.word) + 1;
+
+	it->rlw.literal_words = rlw_get_literal_words(it->rlw.word);
+	it->rlw.running_len = rlw_get_running_len(it->rlw.word);
+	it->rlw.running_bit = rlw_get_run_bit(it->rlw.word);
+	it->rlw.literal_word_offset = 0;
+
+	return 1;
+}
+
+void rlwit_init(struct rlw_iterator *it, struct ewah_bitmap *from_ewah)
+{
+	it->buffer = from_ewah->buffer;
+	it->size = from_ewah->buffer_size;
+	it->pointer = 0;
+
+	next_word(it);
+
+	it->literal_word_start = rlwit_literal_words(it) +
+		it->rlw.literal_word_offset;
+}
+
+void rlwit_discard_first_words(struct rlw_iterator *it, size_t x)
+{
+	while (x > 0) {
+		size_t discard;
+
+		if (it->rlw.running_len > x) {
+			it->rlw.running_len -= x;
+			return;
+		}
+
+		x -= it->rlw.running_len;
+		it->rlw.running_len = 0;
+
+		discard = (x > it->rlw.literal_words) ? it->rlw.literal_words : x;
+
+		it->literal_word_start += discard;
+		it->rlw.literal_words -= discard;
+		x -= discard;
+
+		if (x > 0 || rlwit_word_size(it) == 0) {
+			if (!next_word(it))
+				break;
+
+			it->literal_word_start =
+				rlwit_literal_words(it) + it->rlw.literal_word_offset;
+		}
+	}
+}
+
+size_t rlwit_discharge(
+	struct rlw_iterator *it, struct ewah_bitmap *out, size_t max, int negate)
+{
+	size_t index = 0;
+
+	while (index < max && rlwit_word_size(it) > 0) {
+		size_t pd, pl = it->rlw.running_len;
+
+		if (index + pl > max)
+			pl = max - index;
+
+		ewah_add_empty_words(out, it->rlw.running_bit ^ negate, pl);
+		index += pl;
+
+		pd = it->rlw.literal_words;
+		if (pd + index > max)
+			pd = max - index;
+
+		ewah_add_dirty_words(out,
+			it->buffer + it->literal_word_start, pd, negate);
+
+		rlwit_discard_first_words(it, pd + pl);
+		index += pd;
+	}
+
+	return index;
+}
+
+void rlwit_discharge_empty(struct rlw_iterator *it, struct ewah_bitmap *out)
+{
+	while (rlwit_word_size(it) > 0) {
+		ewah_add_empty_words(out, 0, rlwit_word_size(it));
+		rlwit_discard_first_words(it, rlwit_word_size(it));
+	}
+}
diff --git a/ewah/ewok.h b/ewah/ewok.h
new file mode 100644
index 0000000..619afaa
--- /dev/null
+++ b/ewah/ewok.h
@@ -0,0 +1,235 @@
+/**
+ * Copyright 2013, GitHub, Inc
+ * Copyright 2009-2013, Daniel Lemire, Cliff Moon,
+ *	David McIntosh, Robert Becho, Google Inc. and Veronika Zenz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef __EWOK_BITMAP_H__
+#define __EWOK_BITMAP_H__
+
+#ifndef ewah_malloc
+#	define ewah_malloc xmalloc
+#endif
+#ifndef ewah_realloc
+#	define ewah_realloc xrealloc
+#endif
+#ifndef ewah_calloc
+#	define ewah_calloc xcalloc
+#endif
+
+typedef uint64_t eword_t;
+#define BITS_IN_WORD (sizeof(eword_t) * 8)
+
+/**
+ * Do not use __builtin_popcountll. The GCC implementation
+ * is notoriously slow on all platforms.
+ *
+ * See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36041
+ */
+static inline uint32_t ewah_bit_popcount64(uint64_t x)
+{
+	x = (x & 0x5555555555555555ULL) + ((x >>  1) & 0x5555555555555555ULL);
+	x = (x & 0x3333333333333333ULL) + ((x >>  2) & 0x3333333333333333ULL);
+	x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >>  4) & 0x0F0F0F0F0F0F0F0FULL);
+	return (x * 0x0101010101010101ULL) >> 56;
+}
+
+#ifdef __GNUC__
+#define ewah_bit_ctz64(x) __builtin_ctzll(x)
+#else
+static inline int ewah_bit_ctz64(uint64_t x)
+{
+	int n = 0;
+	if ((x & 0xffffffff) == 0) { x >>= 32; n += 32; }
+	if ((x &     0xffff) == 0) { x >>= 16; n += 16; }
+	if ((x &       0xff) == 0) { x >>=  8; n +=  8; }
+	if ((x &        0xf) == 0) { x >>=  4; n +=  4; }
+	if ((x &        0x3) == 0) { x >>=  2; n +=  2; }
+	if ((x &        0x1) == 0) { x >>=  1; n +=  1; }
+	return n + !x;
+}
+#endif
+
+struct ewah_bitmap {
+	eword_t *buffer;
+	size_t buffer_size;
+	size_t alloc_size;
+	size_t bit_size;
+	eword_t *rlw;
+};
+
+typedef void (*ewah_callback)(size_t pos, void *);
+
+struct ewah_bitmap *ewah_pool_new(void);
+void ewah_pool_free(struct ewah_bitmap *self);
+
+/**
+ * Allocate a new EWAH Compressed bitmap
+ */
+struct ewah_bitmap *ewah_new(void);
+
+/**
+ * Clear all the bits in the bitmap. Does not free or resize
+ * memory.
+ */
+void ewah_clear(struct ewah_bitmap *self);
+
+/**
+ * Free all the memory of the bitmap
+ */
+void ewah_free(struct ewah_bitmap *self);
+
+int ewah_serialize_to(struct ewah_bitmap *self,
+		      int (*write_fun)(void *out, const void *buf, size_t len),
+		      void *out);
+int ewah_serialize(struct ewah_bitmap *self, int fd);
+int ewah_serialize_native(struct ewah_bitmap *self, int fd);
+
+int ewah_deserialize(struct ewah_bitmap *self, int fd);
+int ewah_read_mmap(struct ewah_bitmap *self, void *map, size_t len);
+int ewah_read_mmap_native(struct ewah_bitmap *self, void *map, size_t len);
+
+uint32_t ewah_checksum(struct ewah_bitmap *self);
+
+/**
+ * Logical not (bitwise negation) in-place on the bitmap
+ *
+ * This operation is linear time based on the size of the bitmap.
+ */
+void ewah_not(struct ewah_bitmap *self);
+
+/**
+ * Call the given callback with the position of every single bit
+ * that has been set on the bitmap.
+ *
+ * This is an efficient operation that does not fully decompress
+ * the bitmap.
+ */
+void ewah_each_bit(struct ewah_bitmap *self, ewah_callback callback, void *payload);
+
+/**
+ * Set a given bit on the bitmap.
+ *
+ * The bit at position `pos` will be set to true. Because of the
+ * way that the bitmap is compressed, a set bit cannot be unset
+ * later on.
+ *
+ * Furthermore, since the bitmap uses streaming compression, bits
+ * can only be set incrementally.
+ *
+ * E.g.
+ *		ewah_set(bitmap, 1); // ok
+ *		ewah_set(bitmap, 76); // ok
+ *		ewah_set(bitmap, 77); // ok
+ *		ewah_set(bitmap, 8712800127); // ok
+ *		ewah_set(bitmap, 25); // failed, assert raised
+ */
+void ewah_set(struct ewah_bitmap *self, size_t i);
+
+struct ewah_iterator {
+	const eword_t *buffer;
+	size_t buffer_size;
+
+	size_t pointer;
+	eword_t compressed, literals;
+	eword_t rl, lw;
+	int b;
+};
+
+/**
+ * Initialize a new iterator to run through the bitmap in uncompressed form.
+ *
+ * The iterator can be stack allocated. The underlying bitmap must not be freed
+ * before the iteration is over.
+ *
+ * E.g.
+ *
+ *		struct ewah_bitmap *bitmap = ewah_new();
+ *		struct ewah_iterator it;
+ *
+ *		ewah_iterator_init(&it, bitmap);
+ */
+void ewah_iterator_init(struct ewah_iterator *it, struct ewah_bitmap *parent);
+
+/**
+ * Yield every single word in the bitmap in uncompressed form: that is,
+ * yield single 64-bit words where each bit represents an actual
+ * bit from the bitmap.
+ *
+ * Return: true if a word was yielded, false if there are no words left
+ */
+int ewah_iterator_next(eword_t *next, struct ewah_iterator *it);
+
+void ewah_or(
+	struct ewah_bitmap *ewah_i,
+	struct ewah_bitmap *ewah_j,
+	struct ewah_bitmap *out);
+
+void ewah_and_not(
+	struct ewah_bitmap *ewah_i,
+	struct ewah_bitmap *ewah_j,
+	struct ewah_bitmap *out);
+
+void ewah_xor(
+	struct ewah_bitmap *ewah_i,
+	struct ewah_bitmap *ewah_j,
+	struct ewah_bitmap *out);
+
+void ewah_and(
+	struct ewah_bitmap *ewah_i,
+	struct ewah_bitmap *ewah_j,
+	struct ewah_bitmap *out);
+
+void ewah_dump(struct ewah_bitmap *self);
+
+/**
+ * Direct word access
+ */
+size_t ewah_add_empty_words(struct ewah_bitmap *self, int v, size_t number);
+void ewah_add_dirty_words(
+	struct ewah_bitmap *self, const eword_t *buffer, size_t number, int negate);
+size_t ewah_add(struct ewah_bitmap *self, eword_t word);
+
+
+/**
+ * Uncompressed, old-school bitmap that can be efficiently compressed
+ * into an `ewah_bitmap`.
+ */
+struct bitmap {
+	eword_t *words;
+	size_t word_alloc;
+};
+
+struct bitmap *bitmap_new(void);
+void bitmap_set(struct bitmap *self, size_t pos);
+void bitmap_clear(struct bitmap *self, size_t pos);
+int bitmap_get(struct bitmap *self, size_t pos);
+void bitmap_reset(struct bitmap *self);
+void bitmap_free(struct bitmap *self);
+int bitmap_equals(struct bitmap *self, struct bitmap *other);
+int bitmap_is_subset(struct bitmap *self, struct bitmap *super);
+
+struct ewah_bitmap * bitmap_to_ewah(struct bitmap *bitmap);
+struct bitmap *ewah_to_bitmap(struct ewah_bitmap *ewah);
+
+void bitmap_and_not(struct bitmap *self, struct bitmap *other);
+void bitmap_or_ewah(struct bitmap *self, struct ewah_bitmap *other);
+void bitmap_or(struct bitmap *self, const struct bitmap *other);
+
+void bitmap_each_bit(struct bitmap *self, ewah_callback callback, void *data);
+size_t bitmap_popcount(struct bitmap *self);
+
+#endif
diff --git a/ewah/ewok_rlw.h b/ewah/ewok_rlw.h
new file mode 100644
index 0000000..63efdf9
--- /dev/null
+++ b/ewah/ewok_rlw.h
@@ -0,0 +1,114 @@
+/**
+ * Copyright 2013, GitHub, Inc
+ * Copyright 2009-2013, Daniel Lemire, Cliff Moon,
+ *	David McIntosh, Robert Becho, Google Inc. and Veronika Zenz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#ifndef __EWOK_RLW_H__
+#define __EWOK_RLW_H__
+
+#define RLW_RUNNING_BITS (sizeof(eword_t) * 4)
+#define RLW_LITERAL_BITS (sizeof(eword_t) * 8 - 1 - RLW_RUNNING_BITS)
+
+#define RLW_LARGEST_RUNNING_COUNT (((eword_t)1 << RLW_RUNNING_BITS) - 1)
+#define RLW_LARGEST_LITERAL_COUNT (((eword_t)1 << RLW_LITERAL_BITS) - 1)
+
+#define RLW_LARGEST_RUNNING_COUNT_SHIFT (RLW_LARGEST_RUNNING_COUNT << 1)
+
+#define RLW_RUNNING_LEN_PLUS_BIT (((eword_t)1 << (RLW_RUNNING_BITS + 1)) - 1)
+
+static int rlw_get_run_bit(const eword_t *word)
+{
+	return *word & (eword_t)1;
+}
+
+static inline void rlw_set_run_bit(eword_t *word, int b)
+{
+	if (b) {
+		*word |= (eword_t)1;
+	} else {
+		*word &= (eword_t)(~1);
+	}
+}
+
+static inline void rlw_xor_run_bit(eword_t *word)
+{
+	if (*word & 1) {
+		*word &= (eword_t)(~1);
+	} else {
+		*word |= (eword_t)1;
+	}
+}
+
+static inline void rlw_set_running_len(eword_t *word, eword_t l)
+{
+	*word |= RLW_LARGEST_RUNNING_COUNT_SHIFT;
+	*word &= (l << 1) | (~RLW_LARGEST_RUNNING_COUNT_SHIFT);
+}
+
+static inline eword_t rlw_get_running_len(const eword_t *word)
+{
+	return (*word >> 1) & RLW_LARGEST_RUNNING_COUNT;
+}
+
+static inline eword_t rlw_get_literal_words(const eword_t *word)
+{
+	return *word >> (1 + RLW_RUNNING_BITS);
+}
+
+static inline void rlw_set_literal_words(eword_t *word, eword_t l)
+{
+	*word |= ~RLW_RUNNING_LEN_PLUS_BIT;
+	*word &= (l << (RLW_RUNNING_BITS + 1)) | RLW_RUNNING_LEN_PLUS_BIT;
+}
+
+static inline eword_t rlw_size(const eword_t *self)
+{
+	return rlw_get_running_len(self) + rlw_get_literal_words(self);
+}
+
+struct rlw_iterator {
+	const eword_t *buffer;
+	size_t size;
+	size_t pointer;
+	size_t literal_word_start;
+
+	struct {
+		const eword_t *word;
+		int literal_words;
+		int running_len;
+		int literal_word_offset;
+		int running_bit;
+	} rlw;
+};
+
+void rlwit_init(struct rlw_iterator *it, struct ewah_bitmap *bitmap);
+void rlwit_discard_first_words(struct rlw_iterator *it, size_t x);
+size_t rlwit_discharge(
+	struct rlw_iterator *it, struct ewah_bitmap *out, size_t max, int negate);
+void rlwit_discharge_empty(struct rlw_iterator *it, struct ewah_bitmap *out);
+
+static inline size_t rlwit_word_size(struct rlw_iterator *it)
+{
+	return it->rlw.running_len + it->rlw.literal_words;
+}
+
+static inline size_t rlwit_literal_words(struct rlw_iterator *it)
+{
+	return it->pointer - it->rlw.literal_words;
+}
+
+#endif
-- 
1.8.5.1.399.g900e7cd



* [PATCH v4 09/23] documentation: add documentation for the bitmap format
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (7 preceding siblings ...)
  2013-12-21 13:59 ` [PATCH v4 08/23] ewah: compressed bitmap implementation Jeff King
@ 2013-12-21 13:59 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 10/23] pack-bitmap: add support for bitmap indexes Jeff King
                   ` (16 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 13:59 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

This is the technical documentation for the JGit-compatible Bitmap v1
on-disk format.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Documentation/technical/bitmap-format.txt | 131 ++++++++++++++++++++++++++++++
 1 file changed, 131 insertions(+)
 create mode 100644 Documentation/technical/bitmap-format.txt

diff --git a/Documentation/technical/bitmap-format.txt b/Documentation/technical/bitmap-format.txt
new file mode 100644
index 0000000..7a86bd7
--- /dev/null
+++ b/Documentation/technical/bitmap-format.txt
@@ -0,0 +1,131 @@
+GIT bitmap v1 format
+====================
+
+	- A header appears at the beginning:
+
+		4-byte signature: {'B', 'I', 'T', 'M'}
+
+		2-byte version number (network byte order)
+			The current implementation only supports version 1
+			of the bitmap index (the same one as JGit).
+
+		2-byte flags (network byte order)
+
+			The following flags are supported:
+
+			- BITMAP_OPT_FULL_DAG (0x1) REQUIRED
+			This flag must always be present. It implies that the bitmap
+			index has been generated for a packfile with full closure
+			(i.e. where every single object in the packfile can find
+			 its parent links inside the same packfile). This is a
+			requirement for the bitmap index format, also present in JGit,
+			that greatly reduces the complexity of the implementation.
+
+		4-byte entry count (network byte order)
+
+			The total count of entries (bitmapped commits) in this bitmap index.
+
+		20-byte checksum
+
+			The SHA1 checksum of the pack this bitmap index belongs to.
+
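+		As a non-normative illustration (not part of the format
+		specification; the struct name is arbitrary), the header can
+		be pictured as the following C struct, read field by field.
+		There is no padding on disk, and multi-byte fields are stored
+		in network byte order:
+
+			struct bitmap_disk_header {
+				char magic[4];              /* "BITM" */
+				uint16_t version;           /* currently 1 */
+				uint16_t options;           /* flag bits */
+				uint32_t entry_count;       /* bitmapped commits */
+				unsigned char checksum[20]; /* pack SHA1 */
+			};
+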
+	- 4 EWAH bitmaps that act as type indexes
+
+		Type indexes are serialized after the hash cache in the shape
+		of four EWAH bitmaps stored consecutively (see Appendix A for
+		the serialization format of an EWAH bitmap).
+
+		There is a bitmap for each Git object type, stored in the following
+		order:
+
+			- Commits
+			- Trees
+			- Blobs
+			- Tags
+
+		In each bitmap, the `n`th bit is set to true if the `n`th object
+		in the packfile is of that type.
+
+		The obvious consequence is that the OR of all 4 bitmaps will result
+		in a full set (all bits set), and the AND of all 4 bitmaps will
+		result in an empty bitmap (no bits set).
+
+	- N entries with compressed bitmaps, one for each indexed commit
+
+		Where `N` is the total number of entries in this bitmap index.
+		Each entry contains the following:
+
+		- 4-byte object position (network byte order)
+			The position **in the index for the packfile** where the
+			bitmap for this commit is found.
+
+		- 1-byte XOR-offset
+			The xor offset used to compress this bitmap. For an entry
+			in position `x`, an XOR offset of `y` means that the actual
+			bitmap representing this commit is obtained by XORing the
+			bitmap for this entry with the bitmap in entry `x-y` (i.e.
+			the bitmap `y` entries before this one).
+
+			Note that this compression can be recursive. In order to
+			XOR this entry with a previous one, the previous entry needs
+			to be decompressed first, and so on.
+
+			The hard-limit for this offset is 160 (an entry can only be
+			xor'ed against one of the 160 entries preceding it). This
+			number is always positive, and hence entries are always xor'ed
+			with **previous** bitmaps, not bitmaps that will come afterwards
+			in the index.
+
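+			As a non-normative example: if entry 12 stores an XOR
+			offset of 3, its on-disk bitmap must be XORed with the
+			already-resolved bitmap of entry 9; if entry 9 in turn
+			stores an offset of 2, entry 7 has to be resolved
+			first, and so on, until an entry with an offset of 0
+			(stored verbatim) is reached.
+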
+		- 1-byte flags for this bitmap
+			At the moment the only available flag is `0x1`, which hints
+			that this bitmap can be re-used when rebuilding bitmap indexes
+			for the repository.
+
+		- The compressed bitmap itself, see Appendix A.
+
+== Appendix A: Serialization format for an EWAH bitmap
+
+Ewah bitmaps are serialized in the same protocol as the JAVAEWAH
+library, making them backwards compatible with the JGit
+implementation:
+
+	- 4-byte number of bits of the resulting UNCOMPRESSED bitmap
+
+	- 4-byte number of words of the COMPRESSED bitmap, when stored
+
+	- N x 8-byte words, as specified by the previous field
+
+		This is the actual content of the compressed bitmap.
+
+	- 4-byte position of the current RLW for the compressed
+		bitmap
+
+All words are stored in network byte order for their corresponding
+sizes.
+
+The compressed bitmap is stored in a form of run-length encoding, as
+follows.  It consists of a concatenation of an arbitrary number of
+chunks.  Each chunk consists of one or more 64-bit words:
+
+     H  L_1  L_2  L_3 .... L_M
+
+H is called RLW (run length word).  It consists of (from lower to higher
+order bits):
+
+     - 1 bit: the repeated bit B
+
+     - 32 bits: repetition count K (unsigned)
+
+     - 31 bits: literal word count M (unsigned)
+
+The bitstream represented by the above chunk is then:
+
+     - K repetitions of B
+
+     - The bits stored in `L_1` through `L_M`.  Within a word, bits at
+       lower order come earlier in the stream than those at higher
+       order.
+
+The next word after `L_M` (if any) must again be a RLW, for the next
+chunk.  For efficient appending to the bitstream, the EWAH stores a
+pointer to the last RLW in the stream.
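+
+As a non-normative worked example: a chunk whose RLW has B = 0, K = 2
+and M = 1, followed by the single literal word
+L_1 = 0x0000000000000005, describes two clean words (128 zero bits;
+the running length counts 64-bit words, as implemented in
+ewah/ewah_bitmap.c) followed by one literal word in which bits 0 and 2
+are set.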
-- 
1.8.5.1.399.g900e7cd


* [PATCH v4 10/23] pack-bitmap: add support for bitmap indexes
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (8 preceding siblings ...)
  2013-12-21 13:59 ` [PATCH v4 09/23] documentation: add documentation for the bitmap format Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 11/23] pack-objects: split add_object_entry Jeff King
                   ` (15 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

A bitmap index is a `.bitmap` file that can be found inside
`$GIT_DIR/objects/pack/`, next to its corresponding packfile, and
contains precalculated reachability information for selected commits.
The full specification of the format for these bitmap indexes can be found
in `Documentation/technical/bitmap-format.txt`.

For a given commit SHA1, if it happens to be available in the bitmap
index, its bitmap will represent every single object that is reachable
from the commit itself. The nth bit in the bitmap corresponds to the
nth object in the packfile; if it is set to 1, that object is reachable.

By using the bitmaps available in the index, this commit implements
several new functions:

	- `prepare_bitmap_git`
	- `prepare_bitmap_walk`
	- `traverse_bitmap_commit_list`
	- `reuse_partial_packfile_from_bitmap`

The `prepare_bitmap_walk` function tries to build a bitmap of all the
objects that can be reached from the commit roots of a given `rev_info`
struct by using the following algorithm:

- If all the interesting commits for a revision walk are available in
the index, the resulting reachability bitmap is the bitwise OR of all
the individual bitmaps.

- When the full set of WANTs is not available in the index, we perform a
partial revision walk, using the commits that don't have bitmaps as
roots and limiting the walk as soon as we reach a commit that has a
corresponding bitmap. The bitmap OR'ed earlier from all the indexed
commits can now be completed as this walk progresses, so the end result
is the full reachability list.

- For revision walks with a HAVEs set (a set of commits that are deemed
uninteresting), we first apply the same method as for the WANTs, but
using our HAVEs as roots, in order to obtain a full reachability bitmap
of all the uninteresting commits. This bitmap can then be used to:

	a) limit the subsequent walk when building the WANTs bitmap
	b) find the final set of interesting commits by performing an
	   AND-NOT of the WANTs and the HAVEs (sketched below).
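
For illustration only (this is not code from this patch), the low-level
ewah primitives added earlier in the series can be combined to express
that AND-NOT step in a minimal sketch; show_object_at() is a made-up
callback:

	#include "git-compat-util.h"
	#include "ewah/ewok.h"

	/* hypothetical callback: report one set bit (object position) */
	static void show_object_at(size_t pos, void *payload)
	{
		printf("send object #%"PRIuMAX"\n", (uintmax_t)pos);
	}

	static void demo(void)
	{
		struct ewah_bitmap *want = ewah_new();
		struct ewah_bitmap *have = ewah_new();
		struct ewah_bitmap *result = ewah_new();

		/* ewah_set() requires strictly increasing positions */
		ewah_set(want, 0);
		ewah_set(want, 3);
		ewah_set(want, 70);
		ewah_set(have, 3);

		/* reachable from the WANTs, minus everything in the HAVEs */
		ewah_and_not(want, have, result);
		ewah_each_bit(result, show_object_at, NULL); /* bits 0 and 70 */

		ewah_free(want);
		ewah_free(have);
		ewah_free(result);
	}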

If `prepare_bitmap_walk` runs successfully, the resulting bitmap is
stored and the equivalent of a `traverse_commit_list` call can be
performed by using `traverse_bitmap_commit_list`; the bitmap version
of this call yields the objects straight from the packfile index
(without having to look them up or parse them) and hence is several
orders of magnitude faster.

As an extra optimization, when `prepare_bitmap_walk` succeeds, the
`reuse_partial_packfile_from_bitmap` call can be attempted: it will find
the number of objects at the beginning of the on-disk packfile that can
be reused as-is, and return an offset into the packfile. The source
packfile can then be loaded and the bytes up to `offset` can be written
directly to the result without having to consider the entries inside the
packfile individually.

If the `prepare_bitmap_walk` call fails (e.g. because no bitmap files
are available), the `rev_info` struct is left untouched, and can be used
to perform a manual rev-walk using `traverse_commit_list`.
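
As a rough caller-side sketch of that control flow (the exact
prototypes are the ones declared in pack-bitmap.h by this patch; the
signatures and callbacks shown here are simplified assumptions):

	if (prepare_bitmap_walk(&revs) < 0) {
		/* no usable bitmap index: fall back to the regular walk */
		traverse_commit_list(&revs, show_commit, show_object, NULL);
	} else {
		/* objects are enumerated straight from the pack index */
		traverse_bitmap_commit_list(show_reachable);
	}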

Hence, this new set of functions is a generic API that allows us to
perform the equivalent of

	git rev-list --objects [roots...] [^uninteresting...]

for any set of commits, even if they don't have specific bitmaps
generated for them.

In further patches, we'll use this bitmap traversal optimization to
speed up the `pack-objects` and `rev-list` commands.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Makefile      |   2 +
 khash.h       | 338 ++++++++++++++++++++
 pack-bitmap.c | 970 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 pack-bitmap.h |  43 +++
 4 files changed, 1353 insertions(+)
 create mode 100644 khash.h
 create mode 100644 pack-bitmap.c
 create mode 100644 pack-bitmap.h

diff --git a/Makefile b/Makefile
index 64a1ed7..b983d78 100644
--- a/Makefile
+++ b/Makefile
@@ -699,6 +699,7 @@ LIB_H += object.h
 LIB_H += pack-objects.h
 LIB_H += pack-revindex.h
 LIB_H += pack.h
+LIB_H += pack-bitmap.h
 LIB_H += parse-options.h
 LIB_H += patch-ids.h
 LIB_H += pathspec.h
@@ -837,6 +838,7 @@ LIB_OBJS += notes-cache.o
 LIB_OBJS += notes-merge.o
 LIB_OBJS += notes-utils.o
 LIB_OBJS += object.o
+LIB_OBJS += pack-bitmap.o
 LIB_OBJS += pack-check.o
 LIB_OBJS += pack-objects.o
 LIB_OBJS += pack-revindex.o
diff --git a/khash.h b/khash.h
new file mode 100644
index 0000000..57ff603
--- /dev/null
+++ b/khash.h
@@ -0,0 +1,338 @@
+/* The MIT License
+
+   Copyright (c) 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk>
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be
+   included in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.
+*/
+
+#ifndef __AC_KHASH_H
+#define __AC_KHASH_H
+
+#define AC_VERSION_KHASH_H "0.2.8"
+
+typedef uint32_t khint32_t;
+typedef uint64_t khint64_t;
+
+typedef khint32_t khint_t;
+typedef khint_t khiter_t;
+
+#define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2)
+#define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1)
+#define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3)
+#define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1)))
+#define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1)))
+#define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1)))
+#define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1))
+
+#define __ac_fsize(m) ((m) < 16? 1 : (m)>>4)
+
+#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
+
+static inline khint_t __ac_X31_hash_string(const char *s)
+{
+	khint_t h = (khint_t)*s;
+	if (h) for (++s ; *s; ++s) h = (h << 5) - h + (khint_t)*s;
+	return h;
+}
+
+#define kh_str_hash_func(key) __ac_X31_hash_string(key)
+#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
+
+static const double __ac_HASH_UPPER = 0.77;
+
+#define __KHASH_TYPE(name, khkey_t, khval_t) \
+	typedef struct { \
+		khint_t n_buckets, size, n_occupied, upper_bound; \
+		khint32_t *flags; \
+		khkey_t *keys; \
+		khval_t *vals; \
+	} kh_##name##_t;
+
+#define __KHASH_PROTOTYPES(name, khkey_t, khval_t)	 					\
+	extern kh_##name##_t *kh_init_##name(void);							\
+	extern void kh_destroy_##name(kh_##name##_t *h);					\
+	extern void kh_clear_##name(kh_##name##_t *h);						\
+	extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); 	\
+	extern int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \
+	extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
+	extern void kh_del_##name(kh_##name##_t *h, khint_t x);
+
+#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
+	SCOPE kh_##name##_t *kh_init_##name(void) {							\
+		return (kh_##name##_t*)xcalloc(1, sizeof(kh_##name##_t));		\
+	}																	\
+	SCOPE void kh_destroy_##name(kh_##name##_t *h)						\
+	{																	\
+		if (h) {														\
+			free((void *)h->keys); free(h->flags);					\
+			free((void *)h->vals);										\
+			free(h);													\
+		}																\
+	}																	\
+	SCOPE void kh_clear_##name(kh_##name##_t *h)						\
+	{																	\
+		if (h && h->flags) {											\
+			memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \
+			h->size = h->n_occupied = 0;								\
+		}																\
+	}																	\
+	SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) 	\
+	{																	\
+		if (h->n_buckets) {												\
+			khint_t k, i, last, mask, step = 0; \
+			mask = h->n_buckets - 1;									\
+			k = __hash_func(key); i = k & mask;							\
+			last = i; \
+			while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
+				i = (i + (++step)) & mask; \
+				if (i == last) return h->n_buckets;						\
+			}															\
+			return __ac_iseither(h->flags, i)? h->n_buckets : i;		\
+		} else return 0;												\
+	}																	\
+	SCOPE int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
+	{ /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. */ \
+		khint32_t *new_flags = NULL;										\
+		khint_t j = 1;													\
+		{																\
+			kroundup32(new_n_buckets); 									\
+			if (new_n_buckets < 4) new_n_buckets = 4;					\
+			if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0;	/* requested size is too small */ \
+			else { /* hash table size to be changed (shrink or expand); rehash */ \
+				new_flags = (khint32_t*)xmalloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t));	\
+				if (!new_flags) return -1;								\
+				memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
+				if (h->n_buckets < new_n_buckets) {	/* expand */		\
+					khkey_t *new_keys = (khkey_t*)xrealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
+					if (!new_keys) return -1;							\
+					h->keys = new_keys;									\
+					if (kh_is_map) {									\
+						khval_t *new_vals = (khval_t*)xrealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
+						if (!new_vals) return -1;						\
+						h->vals = new_vals;								\
+					}													\
+				} /* otherwise shrink */								\
+			}															\
+		}																\
+		if (j) { /* rehashing is needed */								\
+			for (j = 0; j != h->n_buckets; ++j) {						\
+				if (__ac_iseither(h->flags, j) == 0) {					\
+					khkey_t key = h->keys[j];							\
+					khval_t val;										\
+					khint_t new_mask;									\
+					new_mask = new_n_buckets - 1; 						\
+					if (kh_is_map) val = h->vals[j];					\
+					__ac_set_isdel_true(h->flags, j);					\
+					while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
+						khint_t k, i, step = 0; \
+						k = __hash_func(key);							\
+						i = k & new_mask;								\
+						while (!__ac_isempty(new_flags, i)) i = (i + (++step)) & new_mask; \
+						__ac_set_isempty_false(new_flags, i);			\
+						if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \
+							{ khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \
+							if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \
+							__ac_set_isdel_true(h->flags, i); /* mark it as deleted in the old hash table */ \
+						} else { /* write the element and jump out of the loop */ \
+							h->keys[i] = key;							\
+							if (kh_is_map) h->vals[i] = val;			\
+							break;										\
+						}												\
+					}													\
+				}														\
+			}															\
+			if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
+				h->keys = (khkey_t*)xrealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
+				if (kh_is_map) h->vals = (khval_t*)xrealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
+			}															\
+			free(h->flags); /* free the working space */				\
+			h->flags = new_flags;										\
+			h->n_buckets = new_n_buckets;								\
+			h->n_occupied = h->size;									\
+			h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \
+		}																\
+		return 0;														\
+	}																	\
+	SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
+	{																	\
+		khint_t x;														\
+		if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
+			if (h->n_buckets > (h->size<<1)) {							\
+				if (kh_resize_##name(h, h->n_buckets - 1) < 0) { /* clear "deleted" elements */ \
+					*ret = -1; return h->n_buckets;						\
+				}														\
+			} else if (kh_resize_##name(h, h->n_buckets + 1) < 0) { /* expand the hash table */ \
+				*ret = -1; return h->n_buckets;							\
+			}															\
+		} /* TODO: to implement automatically shrinking; resize() already support shrinking */ \
+		{																\
+			khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \
+			x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \
+			if (__ac_isempty(h->flags, i)) x = i; /* for speed up */	\
+			else {														\
+				last = i; \
+				while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
+					if (__ac_isdel(h->flags, i)) site = i;				\
+					i = (i + (++step)) & mask; \
+					if (i == last) { x = site; break; }					\
+				}														\
+				if (x == h->n_buckets) {								\
+					if (__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \
+					else x = i;											\
+				}														\
+			}															\
+		}																\
+		if (__ac_isempty(h->flags, x)) { /* not present at all */		\
+			h->keys[x] = key;											\
+			__ac_set_isboth_false(h->flags, x);							\
+			++h->size; ++h->n_occupied;									\
+			*ret = 1;													\
+		} else if (__ac_isdel(h->flags, x)) { /* deleted */				\
+			h->keys[x] = key;											\
+			__ac_set_isboth_false(h->flags, x);							\
+			++h->size;													\
+			*ret = 2;													\
+		} else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
+		return x;														\
+	}																	\
+	SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x)				\
+	{																	\
+		if (x != h->n_buckets && !__ac_iseither(h->flags, x)) {			\
+			__ac_set_isdel_true(h->flags, x);							\
+			--h->size;													\
+		}																\
+	}
+
+#define KHASH_DECLARE(name, khkey_t, khval_t)		 					\
+	__KHASH_TYPE(name, khkey_t, khval_t) 								\
+	__KHASH_PROTOTYPES(name, khkey_t, khval_t)
+
+#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
+	__KHASH_TYPE(name, khkey_t, khval_t) 								\
+	__KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
+
+#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
+	KHASH_INIT2(name, static inline, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
+
+/* Other convenient macros... */
+
+/*! @function
+  @abstract     Test whether a bucket contains data.
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  x     Iterator to the bucket [khint_t]
+  @return       1 if containing data; 0 otherwise [int]
+ */
+#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))
+
+/*! @function
+  @abstract     Get key given an iterator
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  x     Iterator to the bucket [khint_t]
+  @return       Key [type of keys]
+ */
+#define kh_key(h, x) ((h)->keys[x])
+
+/*! @function
+  @abstract     Get value given an iterator
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  x     Iterator to the bucket [khint_t]
+  @return       Value [type of values]
+  @discussion   For hash sets, calling this results in segfault.
+ */
+#define kh_val(h, x) ((h)->vals[x])
+
+/*! @function
+  @abstract     Alias of kh_val()
+ */
+#define kh_value(h, x) ((h)->vals[x])
+
+/*! @function
+  @abstract     Get the start iterator
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @return       The start iterator [khint_t]
+ */
+#define kh_begin(h) (khint_t)(0)
+
+/*! @function
+  @abstract     Get the end iterator
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @return       The end iterator [khint_t]
+ */
+#define kh_end(h) ((h)->n_buckets)
+
+/*! @function
+  @abstract     Get the number of elements in the hash table
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @return       Number of elements in the hash table [khint_t]
+ */
+#define kh_size(h) ((h)->size)
+
+/*! @function
+  @abstract     Get the number of buckets in the hash table
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @return       Number of buckets in the hash table [khint_t]
+ */
+#define kh_n_buckets(h) ((h)->n_buckets)
+
+/*! @function
+  @abstract     Iterate over the entries in the hash table
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  kvar  Variable to which key will be assigned
+  @param  vvar  Variable to which value will be assigned
+  @param  code  Block of code to execute
+ */
+#define kh_foreach(h, kvar, vvar, code) { khint_t __i;		\
+	for (__i = kh_begin(h); __i != kh_end(h); ++__i) {		\
+		if (!kh_exist(h,__i)) continue;						\
+		(kvar) = kh_key(h,__i);								\
+		(vvar) = kh_val(h,__i);								\
+		code;												\
+	} }
+
+/*! @function
+  @abstract     Iterate over the values in the hash table
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  vvar  Variable to which value will be assigned
+  @param  code  Block of code to execute
+ */
+#define kh_foreach_value(h, vvar, code) { khint_t __i;		\
+	for (__i = kh_begin(h); __i != kh_end(h); ++__i) {		\
+		if (!kh_exist(h,__i)) continue;						\
+		(vvar) = kh_val(h,__i);								\
+		code;												\
+	} }
+
+static inline khint_t __kh_oid_hash(const unsigned char *oid)
+{
+	khint_t hash;
+	memcpy(&hash, oid, sizeof(hash));
+	return hash;
+}
+
+#define __kh_oid_cmp(a, b) (hashcmp(a, b) == 0)
+
+KHASH_INIT(sha1, const unsigned char *, void *, 1, __kh_oid_hash, __kh_oid_cmp)
+typedef kh_sha1_t khash_sha1;
+
+KHASH_INIT(sha1_pos, const unsigned char *, int, 1, __kh_oid_hash, __kh_oid_cmp)
+typedef kh_sha1_pos_t khash_sha1_pos;
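+
+/*
+ * Minimal usage sketch (illustrative only; `oid` and `pos` are made-up
+ * local variables, not part of the API in this header). A positive
+ * `ret` from kh_put means the key was newly inserted:
+ *
+ *	unsigned char oid[20] = { 0 };
+ *	int pos = 0, ret;
+ *	khash_sha1_pos *map = kh_init_sha1_pos();
+ *	khiter_t k = kh_put_sha1_pos(map, oid, &ret);
+ *	if (ret > 0)
+ *		kh_value(map, k) = pos;
+ *	k = kh_get_sha1_pos(map, oid);
+ *	if (k < kh_end(map))
+ *		pos = kh_value(map, k);
+ */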
+
+#endif /* __AC_KHASH_H */
diff --git a/pack-bitmap.c b/pack-bitmap.c
new file mode 100644
index 0000000..33e7482
--- /dev/null
+++ b/pack-bitmap.c
@@ -0,0 +1,970 @@
+#include "cache.h"
+#include "commit.h"
+#include "tag.h"
+#include "diff.h"
+#include "revision.h"
+#include "progress.h"
+#include "list-objects.h"
+#include "pack.h"
+#include "pack-bitmap.h"
+#include "pack-revindex.h"
+#include "pack-objects.h"
+
+/*
+ * An entry on the bitmap index, representing the bitmap for a given
+ * commit.
+ */
+struct stored_bitmap {
+	unsigned char sha1[20];
+	struct ewah_bitmap *root;
+	struct stored_bitmap *xor;
+	int flags;
+};
+
+/*
+ * The currently active bitmap index. By design, repositories only have
+ * a single bitmap index available (the index for the biggest packfile in
+ * the repository), since bitmap indexes need full closure.
+ *
+ * If there is more than one bitmap index available (e.g. because of alternates),
+ * the active bitmap index is the largest one.
+ */
+static struct bitmap_index {
+	/* Packfile to which this bitmap index belongs */
+	struct packed_git *pack;
+
+	/* reverse index for the packfile */
+	struct pack_revindex *reverse_index;
+
+	/*
+	 * Mark the first `reuse_objects` in the packfile as reused:
+	 * they will be sent as-is without using them for repacking
+	 * calculations
+	 */
+	uint32_t reuse_objects;
+
+	/* mmapped buffer of the whole bitmap index */
+	unsigned char *map;
+	size_t map_size; /* size of the mmaped buffer */
+	size_t map_pos; /* current position when loading the index */
+
+	/*
+	 * Type indexes.
+	 *
+	 * Each bitmap marks which objects in the packfile are of the given
+	 * type. This provides type information when yielding the objects from
+	 * the packfile during a walk, which allows for better delta bases.
+	 */
+	struct ewah_bitmap *commits;
+	struct ewah_bitmap *trees;
+	struct ewah_bitmap *blobs;
+	struct ewah_bitmap *tags;
+
+	/* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */
+	khash_sha1 *bitmaps;
+
+	/* Number of bitmapped commits */
+	uint32_t entry_count;
+
+	/*
+	 * Extended index.
+	 *
+	 * When trying to perform bitmap operations with objects that are not
+	 * packed in `pack`, these objects are added to this "fake index" and
+	 * are assumed to appear at the end of the packfile for all operations
+	 */
+	struct eindex {
+		struct object **objects;
+		uint32_t *hashes;
+		uint32_t count, alloc;
+		khash_sha1_pos *positions;
+	} ext_index;
+
+	/* Bitmap result of the last performed walk */
+	struct bitmap *result;
+
+	/* Version of the bitmap index */
+	unsigned int version;
+
+	unsigned loaded : 1;
+
+} bitmap_git;
+
+static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st)
+{
+	struct ewah_bitmap *parent;
+	struct ewah_bitmap *composed;
+
+	if (st->xor == NULL)
+		return st->root;
+
+	composed = ewah_pool_new();
+	parent = lookup_stored_bitmap(st->xor);
+	ewah_xor(st->root, parent, composed);
+
+	ewah_pool_free(st->root);
+	st->root = composed;
+	st->xor = NULL;
+
+	return composed;
+}
+
+/*
+ * Read a bitmap from the current read position on the mmaped
+ * index, and increase the read position accordingly
+ */
+static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index)
+{
+	struct ewah_bitmap *b = ewah_pool_new();
+
+	int bitmap_size = ewah_read_mmap(b,
+		index->map + index->map_pos,
+		index->map_size - index->map_pos);
+
+	if (bitmap_size < 0) {
+		error("Failed to load bitmap index (corrupted?)");
+		ewah_pool_free(b);
+		return NULL;
+	}
+
+	index->map_pos += bitmap_size;
+	return b;
+}
+
+static int load_bitmap_header(struct bitmap_index *index)
+{
+	struct bitmap_disk_header *header = (void *)index->map;
+
+	if (index->map_size < sizeof(*header) + 20)
+		return error("Corrupted bitmap index (missing header data)");
+
+	if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0)
+		return error("Corrupted bitmap index file (wrong header)");
+
+	index->version = ntohs(header->version);
+	if (index->version != 1)
+		return error("Unsupported version for bitmap index file (%d)", index->version);
+
+	/* Parse known bitmap format options */
+	{
+		uint32_t flags = ntohs(header->options);
+
+		if ((flags & BITMAP_OPT_FULL_DAG) == 0)
+			return error("Unsupported options for bitmap index file "
+				"(Git requires BITMAP_OPT_FULL_DAG)");
+	}
+
+	index->entry_count = ntohl(header->entry_count);
+	index->map_pos += sizeof(*header);
+	return 0;
+}
+
+static struct stored_bitmap *store_bitmap(struct bitmap_index *index,
+					  struct ewah_bitmap *root,
+					  const unsigned char *sha1,
+					  struct stored_bitmap *xor_with,
+					  int flags)
+{
+	struct stored_bitmap *stored;
+	khiter_t hash_pos;
+	int ret;
+
+	stored = xmalloc(sizeof(struct stored_bitmap));
+	stored->root = root;
+	stored->xor = xor_with;
+	stored->flags = flags;
+	hashcpy(stored->sha1, sha1);
+
+	hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret);
+
+	/* A 0 return code means the insertion succeeded with no changes,
+	 * because the SHA1 already existed in the map. This is bad; there
+	 * shouldn't be duplicate commits in the index */
+	if (ret == 0) {
+		error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1));
+		return NULL;
+	}
+
+	kh_value(index->bitmaps, hash_pos) = stored;
+	return stored;
+}
+
+static int load_bitmap_entries_v1(struct bitmap_index *index)
+{
+	static const size_t MAX_XOR_OFFSET = 160;
+
+	uint32_t i;
+	struct stored_bitmap **recent_bitmaps;
+	struct bitmap_disk_entry *entry;
+
+	recent_bitmaps = xcalloc(MAX_XOR_OFFSET, sizeof(struct stored_bitmap *));
+
+	for (i = 0; i < index->entry_count; ++i) {
+		int xor_offset, flags;
+		struct ewah_bitmap *bitmap = NULL;
+		struct stored_bitmap *xor_bitmap = NULL;
+		uint32_t commit_idx_pos;
+		const unsigned char *sha1;
+
+		entry = (struct bitmap_disk_entry *)(index->map + index->map_pos);
+		index->map_pos += sizeof(struct bitmap_disk_entry);
+
+		commit_idx_pos = ntohl(entry->object_pos);
+		sha1 = nth_packed_object_sha1(index->pack, commit_idx_pos);
+
+		xor_offset = (int)entry->xor_offset;
+		flags = (int)entry->flags;
+
+		bitmap = read_bitmap_1(index);
+		if (!bitmap)
+			return -1;
+
+		if (xor_offset > MAX_XOR_OFFSET || xor_offset > i)
+			return error("Corrupted bitmap pack index");
+
+		if (xor_offset > 0) {
+			xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET];
+
+			if (xor_bitmap == NULL)
+				return error("Invalid XOR offset in bitmap pack index");
+		}
+
+		recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap(
+			index, bitmap, sha1, xor_bitmap, flags);
+	}
+
+	return 0;
+}
+
+static int open_pack_bitmap_1(struct packed_git *packfile)
+{
+	int fd;
+	struct stat st;
+	char *idx_name;
+
+	if (open_pack_index(packfile))
+		return -1;
+
+	idx_name = pack_bitmap_filename(packfile);
+	fd = git_open_noatime(idx_name);
+	free(idx_name);
+
+	if (fd < 0)
+		return -1;
+
+	if (fstat(fd, &st)) {
+		close(fd);
+		return -1;
+	}
+
+	if (bitmap_git.pack) {
+		warning("ignoring extra bitmap file: %s", packfile->pack_name);
+		close(fd);
+		return -1;
+	}
+
+	bitmap_git.pack = packfile;
+	bitmap_git.map_size = xsize_t(st.st_size);
+	bitmap_git.map = xmmap(NULL, bitmap_git.map_size, PROT_READ, MAP_PRIVATE, fd, 0);
+	bitmap_git.map_pos = 0;
+	close(fd);
+
+	if (load_bitmap_header(&bitmap_git) < 0) {
+		munmap(bitmap_git.map, bitmap_git.map_size);
+		bitmap_git.map = NULL;
+		bitmap_git.map_size = 0;
+		return -1;
+	}
+
+	return 0;
+}
+
+static int load_pack_bitmap(void)
+{
+	assert(bitmap_git.map && !bitmap_git.loaded);
+
+	bitmap_git.bitmaps = kh_init_sha1();
+	bitmap_git.ext_index.positions = kh_init_sha1_pos();
+	bitmap_git.reverse_index = revindex_for_pack(bitmap_git.pack);
+
+	if (!(bitmap_git.commits = read_bitmap_1(&bitmap_git)) ||
+		!(bitmap_git.trees = read_bitmap_1(&bitmap_git)) ||
+		!(bitmap_git.blobs = read_bitmap_1(&bitmap_git)) ||
+		!(bitmap_git.tags = read_bitmap_1(&bitmap_git)))
+		goto failed;
+
+	if (load_bitmap_entries_v1(&bitmap_git) < 0)
+		goto failed;
+
+	bitmap_git.loaded = 1;
+	return 0;
+
+failed:
+	munmap(bitmap_git.map, bitmap_git.map_size);
+	bitmap_git.map = NULL;
+	bitmap_git.map_size = 0;
+	return -1;
+}
+
+char *pack_bitmap_filename(struct packed_git *p)
+{
+	char *idx_name;
+	int len;
+
+	len = strlen(p->pack_name) - strlen(".pack");
+	idx_name = xmalloc(len + strlen(".bitmap") + 1);
+
+	memcpy(idx_name, p->pack_name, len);
+	memcpy(idx_name + len, ".bitmap", strlen(".bitmap") + 1);
+
+	return idx_name;
+}
+
+static int open_pack_bitmap(void)
+{
+	struct packed_git *p;
+	int ret = -1;
+
+	assert(!bitmap_git.map && !bitmap_git.loaded);
+
+	prepare_packed_git();
+	for (p = packed_git; p; p = p->next) {
+		if (open_pack_bitmap_1(p) == 0)
+			ret = 0;
+	}
+
+	return ret;
+}
+
+int prepare_bitmap_git(void)
+{
+	if (bitmap_git.loaded)
+		return 0;
+
+	if (!open_pack_bitmap())
+		return load_pack_bitmap();
+
+	return -1;
+}
+
+struct include_data {
+	struct bitmap *base;
+	struct bitmap *seen;
+};
+
+static inline int bitmap_position_extended(const unsigned char *sha1)
+{
+	khash_sha1_pos *positions = bitmap_git.ext_index.positions;
+	khiter_t pos = kh_get_sha1_pos(positions, sha1);
+
+	if (pos < kh_end(positions)) {
+		int bitmap_pos = kh_value(positions, pos);
+		return bitmap_pos + bitmap_git.pack->num_objects;
+	}
+
+	return -1;
+}
+
+static inline int bitmap_position_packfile(const unsigned char *sha1)
+{
+	off_t offset = find_pack_entry_one(sha1, bitmap_git.pack);
+	if (!offset)
+		return -1;
+
+	return find_revindex_position(bitmap_git.reverse_index, offset);
+}
+
+static int bitmap_position(const unsigned char *sha1)
+{
+	int pos = bitmap_position_packfile(sha1);
+	return (pos >= 0) ? pos : bitmap_position_extended(sha1);
+}
+
+static int ext_index_add_object(struct object *object, const char *name)
+{
+	struct eindex *eindex = &bitmap_git.ext_index;
+
+	khiter_t hash_pos;
+	int hash_ret;
+	int bitmap_pos;
+
+	hash_pos = kh_put_sha1_pos(eindex->positions, object->sha1, &hash_ret);
+	if (hash_ret > 0) {
+		if (eindex->count >= eindex->alloc) {
+			eindex->alloc = (eindex->alloc + 16) * 3 / 2;
+			eindex->objects = xrealloc(eindex->objects,
+				eindex->alloc * sizeof(struct object *));
+			eindex->hashes = xrealloc(eindex->hashes,
+				eindex->alloc * sizeof(uint32_t));
+		}
+
+		bitmap_pos = eindex->count;
+		eindex->objects[eindex->count] = object;
+		eindex->hashes[eindex->count] = pack_name_hash(name);
+		kh_value(eindex->positions, hash_pos) = bitmap_pos;
+		eindex->count++;
+	} else {
+		bitmap_pos = kh_value(eindex->positions, hash_pos);
+	}
+
+	return bitmap_pos + bitmap_git.pack->num_objects;
+}
+
+static void show_object(struct object *object, const struct name_path *path,
+			const char *last, void *data)
+{
+	struct bitmap *base = data;
+	int bitmap_pos;
+
+	bitmap_pos = bitmap_position(object->sha1);
+
+	if (bitmap_pos < 0) {
+		char *name = path_name(path, last);
+		bitmap_pos = ext_index_add_object(object, name);
+		free(name);
+	}
+
+	bitmap_set(base, bitmap_pos);
+}
+
+static void show_commit(struct commit *commit, void *data)
+{
+}
+
+static int add_to_include_set(struct include_data *data,
+			      const unsigned char *sha1,
+			      int bitmap_pos)
+{
+	khiter_t hash_pos;
+
+	if (data->seen && bitmap_get(data->seen, bitmap_pos))
+		return 0;
+
+	if (bitmap_get(data->base, bitmap_pos))
+		return 0;
+
+	hash_pos = kh_get_sha1(bitmap_git.bitmaps, sha1);
+	if (hash_pos < kh_end(bitmap_git.bitmaps)) {
+		struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, hash_pos);
+		bitmap_or_ewah(data->base, lookup_stored_bitmap(st));
+		return 0;
+	}
+
+	bitmap_set(data->base, bitmap_pos);
+	return 1;
+}
+
+static int should_include(struct commit *commit, void *_data)
+{
+	struct include_data *data = _data;
+	int bitmap_pos;
+
+	bitmap_pos = bitmap_position(commit->object.sha1);
+	if (bitmap_pos < 0)
+		bitmap_pos = ext_index_add_object((struct object *)commit, NULL);
+
+	if (!add_to_include_set(data, commit->object.sha1, bitmap_pos)) {
+		struct commit_list *parent = commit->parents;
+
+		while (parent) {
+			parent->item->object.flags |= SEEN;
+			parent = parent->next;
+		}
+
+		return 0;
+	}
+
+	return 1;
+}
+
+static struct bitmap *find_objects(struct rev_info *revs,
+				   struct object_list *roots,
+				   struct bitmap *seen)
+{
+	struct bitmap *base = NULL;
+	int needs_walk = 0;
+
+	struct object_list *not_mapped = NULL;
+
+	/*
+	 * Go through all the roots for the walk. The ones that have bitmaps
+	 * in the bitmap index will be `or`ed together to form an initial
+	 * global reachability analysis.
+	 *
+	 * The ones without bitmaps in the index will be stored in the
+	 * `not_mapped` list for further processing.
+	 */
+	while (roots) {
+		struct object *object = roots->item;
+		roots = roots->next;
+
+		if (object->type == OBJ_COMMIT) {
+			khiter_t pos = kh_get_sha1(bitmap_git.bitmaps, object->sha1);
+
+			if (pos < kh_end(bitmap_git.bitmaps)) {
+				struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos);
+				struct ewah_bitmap *or_with = lookup_stored_bitmap(st);
+
+				if (base == NULL)
+					base = ewah_to_bitmap(or_with);
+				else
+					bitmap_or_ewah(base, or_with);
+
+				object->flags |= SEEN;
+				continue;
+			}
+		}
+
+		object_list_insert(object, &not_mapped);
+	}
+
+	/*
+	 * Best case scenario: We found bitmaps for all the roots,
+	 * so the resulting `or` bitmap has the full reachability analysis
+	 */
+	if (not_mapped == NULL)
+		return base;
+
+	roots = not_mapped;
+
+	/*
+	 * Let's iterate through all the roots that don't have bitmaps to
+	 * check if we can determine them to be reachable from the existing
+	 * global bitmap.
+	 *
+	 * If we cannot find them in the existing global bitmap, we'll need
+	 * to push them to an actual walk and run it until we can confirm
+	 * they are reachable
+	 */
+	while (roots) {
+		struct object *object = roots->item;
+		int pos;
+
+		roots = roots->next;
+		pos = bitmap_position(object->sha1);
+
+		if (pos < 0 || base == NULL || !bitmap_get(base, pos)) {
+			object->flags &= ~UNINTERESTING;
+			add_pending_object(revs, object, "");
+			needs_walk = 1;
+		} else {
+			object->flags |= SEEN;
+		}
+	}
+
+	if (needs_walk) {
+		struct include_data incdata;
+
+		if (base == NULL)
+			base = bitmap_new();
+
+		incdata.base = base;
+		incdata.seen = seen;
+
+		revs->include_check = should_include;
+		revs->include_check_data = &incdata;
+
+		if (prepare_revision_walk(revs))
+			die("revision walk setup failed");
+
+		traverse_commit_list(revs, show_commit, show_object, base);
+	}
+
+	return base;
+}
+
+static void show_extended_objects(struct bitmap *objects,
+				  show_reachable_fn show_reach)
+{
+	struct eindex *eindex = &bitmap_git.ext_index;
+	uint32_t i;
+
+	for (i = 0; i < eindex->count; ++i) {
+		struct object *obj;
+
+		if (!bitmap_get(objects, bitmap_git.pack->num_objects + i))
+			continue;
+
+		obj = eindex->objects[i];
+		show_reach(obj->sha1, obj->type, 0, eindex->hashes[i], NULL, 0);
+	}
+}
+
+static void show_objects_for_type(
+	struct bitmap *objects,
+	struct ewah_bitmap *type_filter,
+	enum object_type object_type,
+	show_reachable_fn show_reach)
+{
+	size_t pos = 0, i = 0;
+	uint32_t offset;
+
+	struct ewah_iterator it;
+	eword_t filter;
+
+	if (bitmap_git.reuse_objects == bitmap_git.pack->num_objects)
+		return;
+
+	ewah_iterator_init(&it, type_filter);
+
+	while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) {
+		eword_t word = objects->words[i] & filter;
+
+		for (offset = 0; offset < BITS_IN_WORD; ++offset) {
+			const unsigned char *sha1;
+			struct revindex_entry *entry;
+			uint32_t hash = 0;
+
+			if ((word >> offset) == 0)
+				break;
+
+			offset += ewah_bit_ctz64(word >> offset);
+
+			if (pos + offset < bitmap_git.reuse_objects)
+				continue;
+
+			entry = &bitmap_git.reverse_index->revindex[pos + offset];
+			sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr);
+
+			show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset);
+		}
+
+		pos += BITS_IN_WORD;
+		i++;
+	}
+}
+
+static int in_bitmapped_pack(struct object_list *roots)
+{
+	while (roots) {
+		struct object *object = roots->item;
+		roots = roots->next;
+
+		if (find_pack_entry_one(object->sha1, bitmap_git.pack) > 0)
+			return 1;
+	}
+
+	return 0;
+}
+
+int prepare_bitmap_walk(struct rev_info *revs)
+{
+	unsigned int i;
+	unsigned int pending_nr = revs->pending.nr;
+	struct object_array_entry *pending_e = revs->pending.objects;
+
+	struct object_list *wants = NULL;
+	struct object_list *haves = NULL;
+
+	struct bitmap *wants_bitmap = NULL;
+	struct bitmap *haves_bitmap = NULL;
+
+	if (!bitmap_git.loaded) {
+		/* try to open a bitmapped pack, but don't parse it yet
+		 * because we may not need to use it */
+		if (open_pack_bitmap() < 0)
+			return -1;
+	}
+
+	for (i = 0; i < pending_nr; ++i) {
+		struct object *object = pending_e[i].item;
+
+		if (object->type == OBJ_NONE)
+			parse_object_or_die(object->sha1, NULL);
+
+		while (object->type == OBJ_TAG) {
+			struct tag *tag = (struct tag *) object;
+
+			if (object->flags & UNINTERESTING)
+				object_list_insert(object, &haves);
+			else
+				object_list_insert(object, &wants);
+
+			if (!tag->tagged)
+				die("bad tag");
+			object = parse_object_or_die(tag->tagged->sha1, NULL);
+		}
+
+		if (object->flags & UNINTERESTING)
+			object_list_insert(object, &haves);
+		else
+			object_list_insert(object, &wants);
+	}
+
+	/*
+	 * if we have a HAVES list, but none of those haves is contained
+	 * in the packfile that has a bitmap, we don't have anything to
+	 * optimize here
+	 */
+	if (haves && !in_bitmapped_pack(haves))
+		return -1;
+
+	/* if we don't want anything, we're done here */
+	if (!wants)
+		return -1;
+
+	/*
+	 * now we're going to use bitmaps, so load the actual bitmap entries
+	 * from disk. this is the point of no return; after this the rev_list
+	 * becomes invalidated and we must perform the revwalk through bitmaps
+	 */
+	if (!bitmap_git.loaded && load_pack_bitmap() < 0)
+		return -1;
+
+	revs->pending.nr = 0;
+	revs->pending.alloc = 0;
+	revs->pending.objects = NULL;
+
+	if (haves) {
+		haves_bitmap = find_objects(revs, haves, NULL);
+		reset_revision_walk();
+
+		if (haves_bitmap == NULL)
+			die("BUG: failed to perform bitmap walk");
+	}
+
+	wants_bitmap = find_objects(revs, wants, haves_bitmap);
+
+	if (!wants_bitmap)
+		die("BUG: failed to perform bitmap walk");
+
+	if (haves_bitmap)
+		bitmap_and_not(wants_bitmap, haves_bitmap);
+
+	bitmap_git.result = wants_bitmap;
+
+	bitmap_free(haves_bitmap);
+	return 0;
+}
+
+int reuse_partial_packfile_from_bitmap(struct packed_git **packfile,
+				       uint32_t *entries,
+				       off_t *up_to)
+{
+	/*
+	 * Reuse the packfile content if we need more than
+	 * 90% of its objects
+	 */
+	static const double REUSE_PERCENT = 0.9;
+
+	struct bitmap *result = bitmap_git.result;
+	uint32_t reuse_threshold;
+	uint32_t i, reuse_objects = 0;
+
+	assert(result);
+
+	for (i = 0; i < result->word_alloc; ++i) {
+		if (result->words[i] != (eword_t)~0) {
+			reuse_objects += ewah_bit_ctz64(~result->words[i]);
+			break;
+		}
+
+		reuse_objects += BITS_IN_WORD;
+	}
+
+#ifdef GIT_BITMAP_DEBUG
+	{
+		const unsigned char *sha1;
+		struct revindex_entry *entry;
+
+		entry = &bitmap_git.reverse_index->revindex[reuse_objects];
+		sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr);
+
+		fprintf(stderr, "Failed to reuse at %d (%016llx)\n",
+			reuse_objects, result->words[i]);
+		fprintf(stderr, " %s\n", sha1_to_hex(sha1));
+	}
+#endif
+
+	if (!reuse_objects)
+		return -1;
+
+	if (reuse_objects >= bitmap_git.pack->num_objects) {
+		bitmap_git.reuse_objects = *entries = bitmap_git.pack->num_objects;
+		*up_to = -1; /* reuse the full pack */
+		*packfile = bitmap_git.pack;
+		return 0;
+	}
+
+	reuse_threshold = bitmap_popcount(bitmap_git.result) * REUSE_PERCENT;
+
+	if (reuse_objects < reuse_threshold)
+		return -1;
+
+	bitmap_git.reuse_objects = *entries = reuse_objects;
+	*up_to = bitmap_git.reverse_index->revindex[reuse_objects].offset;
+	*packfile = bitmap_git.pack;
+
+	return 0;
+}
+
+void traverse_bitmap_commit_list(show_reachable_fn show_reachable)
+{
+	assert(bitmap_git.result);
+
+	show_objects_for_type(bitmap_git.result, bitmap_git.commits,
+		OBJ_COMMIT, show_reachable);
+	show_objects_for_type(bitmap_git.result, bitmap_git.trees,
+		OBJ_TREE, show_reachable);
+	show_objects_for_type(bitmap_git.result, bitmap_git.blobs,
+		OBJ_BLOB, show_reachable);
+	show_objects_for_type(bitmap_git.result, bitmap_git.tags,
+		OBJ_TAG, show_reachable);
+
+	show_extended_objects(bitmap_git.result, show_reachable);
+
+	bitmap_free(bitmap_git.result);
+	bitmap_git.result = NULL;
+}
+
+static uint32_t count_object_type(struct bitmap *objects,
+				  enum object_type type)
+{
+	struct eindex *eindex = &bitmap_git.ext_index;
+
+	uint32_t i = 0, count = 0;
+	struct ewah_iterator it;
+	eword_t filter;
+
+	switch (type) {
+	case OBJ_COMMIT:
+		ewah_iterator_init(&it, bitmap_git.commits);
+		break;
+
+	case OBJ_TREE:
+		ewah_iterator_init(&it, bitmap_git.trees);
+		break;
+
+	case OBJ_BLOB:
+		ewah_iterator_init(&it, bitmap_git.blobs);
+		break;
+
+	case OBJ_TAG:
+		ewah_iterator_init(&it, bitmap_git.tags);
+		break;
+
+	default:
+		return 0;
+	}
+
+	while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) {
+		eword_t word = objects->words[i++] & filter;
+		count += ewah_bit_popcount64(word);
+	}
+
+	for (i = 0; i < eindex->count; ++i) {
+		if (eindex->objects[i]->type == type &&
+			bitmap_get(objects, bitmap_git.pack->num_objects + i))
+			count++;
+	}
+
+	return count;
+}
+
+void count_bitmap_commit_list(uint32_t *commits, uint32_t *trees,
+			      uint32_t *blobs, uint32_t *tags)
+{
+	assert(bitmap_git.result);
+
+	if (commits)
+		*commits = count_object_type(bitmap_git.result, OBJ_COMMIT);
+
+	if (trees)
+		*trees = count_object_type(bitmap_git.result, OBJ_TREE);
+
+	if (blobs)
+		*blobs = count_object_type(bitmap_git.result, OBJ_BLOB);
+
+	if (tags)
+		*tags = count_object_type(bitmap_git.result, OBJ_TAG);
+}
+
+struct bitmap_test_data {
+	struct bitmap *base;
+	struct progress *prg;
+	size_t seen;
+};
+
+static void test_show_object(struct object *object,
+			     const struct name_path *path,
+			     const char *last, void *data)
+{
+	struct bitmap_test_data *tdata = data;
+	int bitmap_pos;
+
+	bitmap_pos = bitmap_position(object->sha1);
+	if (bitmap_pos < 0)
+		die("Object not in bitmap: %s\n", sha1_to_hex(object->sha1));
+
+	bitmap_set(tdata->base, bitmap_pos);
+	display_progress(tdata->prg, ++tdata->seen);
+}
+
+static void test_show_commit(struct commit *commit, void *data)
+{
+	struct bitmap_test_data *tdata = data;
+	int bitmap_pos;
+
+	bitmap_pos = bitmap_position(commit->object.sha1);
+	if (bitmap_pos < 0)
+		die("Object not in bitmap: %s\n", sha1_to_hex(commit->object.sha1));
+
+	bitmap_set(tdata->base, bitmap_pos);
+	display_progress(tdata->prg, ++tdata->seen);
+}
+
+void test_bitmap_walk(struct rev_info *revs)
+{
+	struct object *root;
+	struct bitmap *result = NULL;
+	khiter_t pos;
+	size_t result_popcnt;
+	struct bitmap_test_data tdata;
+
+	if (prepare_bitmap_git())
+		die("failed to load bitmap indexes");
+
+	if (revs->pending.nr != 1)
+		die("you must specify exactly one commit to test");
+
+	fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n",
+		bitmap_git.version, bitmap_git.entry_count);
+
+	root = revs->pending.objects[0].item;
+	pos = kh_get_sha1(bitmap_git.bitmaps, root->sha1);
+
+	if (pos < kh_end(bitmap_git.bitmaps)) {
+		struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos);
+		struct ewah_bitmap *bm = lookup_stored_bitmap(st);
+
+		fprintf(stderr, "Found bitmap for %s. %d bits / %08x checksum\n",
+			sha1_to_hex(root->sha1), (int)bm->bit_size, ewah_checksum(bm));
+
+		result = ewah_to_bitmap(bm);
+	}
+
+	if (result == NULL)
+		die("Commit %s doesn't have an indexed bitmap", sha1_to_hex(root->sha1));
+
+	revs->tag_objects = 1;
+	revs->tree_objects = 1;
+	revs->blob_objects = 1;
+
+	result_popcnt = bitmap_popcount(result);
+
+	if (prepare_revision_walk(revs))
+		die("revision walk setup failed");
+
+	tdata.base = bitmap_new();
+	tdata.prg = start_progress("Verifying bitmap entries", result_popcnt);
+	tdata.seen = 0;
+
+	traverse_commit_list(revs, &test_show_commit, &test_show_object, &tdata);
+
+	stop_progress(&tdata.prg);
+
+	if (bitmap_equals(result, tdata.base))
+		fprintf(stderr, "OK!\n");
+	else
+		fprintf(stderr, "Mismatch!\n");
+}
diff --git a/pack-bitmap.h b/pack-bitmap.h
new file mode 100644
index 0000000..b4510d5
--- /dev/null
+++ b/pack-bitmap.h
@@ -0,0 +1,43 @@
+#ifndef PACK_BITMAP_H
+#define PACK_BITMAP_H
+
+#include "ewah/ewok.h"
+#include "khash.h"
+
+struct bitmap_disk_entry {
+	uint32_t object_pos;
+	uint8_t xor_offset;
+	uint8_t flags;
+} __attribute__((packed));
+
+struct bitmap_disk_header {
+	char magic[4];
+	uint16_t version;
+	uint16_t options;
+	uint32_t entry_count;
+	unsigned char checksum[20];
+};
+
+static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};
+
+enum pack_bitmap_opts {
+	BITMAP_OPT_FULL_DAG = 1
+};
+
+typedef int (*show_reachable_fn)(
+	const unsigned char *sha1,
+	enum object_type type,
+	int flags,
+	uint32_t hash,
+	struct packed_git *found_pack,
+	off_t found_offset);
+
+int prepare_bitmap_git(void);
+void count_bitmap_commit_list(uint32_t *commits, uint32_t *trees, uint32_t *blobs, uint32_t *tags);
+void traverse_bitmap_commit_list(show_reachable_fn show_reachable);
+void test_bitmap_walk(struct rev_info *revs);
+char *pack_bitmap_filename(struct packed_git *p);
+int prepare_bitmap_walk(struct rev_info *revs);
+int reuse_partial_packfile_from_bitmap(struct packed_git **packfile, uint32_t *entries, off_t *up_to);
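+
+/*
+ * Illustrative call sequence; a sketch only. `revs`, `pack`, `nr`,
+ * `up_to` and `show_reachable` stand in for caller-provided pieces,
+ * and `reuse_pack_prefix()` is a placeholder for whatever the caller
+ * does with the reusable prefix of the packfile:
+ *
+ *	if (!prepare_bitmap_walk(revs)) {
+ *		if (!reuse_partial_packfile_from_bitmap(&pack, &nr, &up_to))
+ *			reuse_pack_prefix(pack, nr, up_to);
+ *		traverse_bitmap_commit_list(show_reachable);
+ *	}
+ */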
+
+#endif
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 11/23] pack-objects: split add_object_entry
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (9 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 10/23] pack-bitmap: add support for bitmap indexes Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 12/23] pack-objects: use bitmaps when packing objects Jeff King
                   ` (14 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

This function actually does three things:

  1. Check whether we've already added the object to our
     packing list.

  2. Check whether the object meets our criteria for adding.

  3. Actually add the object to our packing list.

It's a little hard to see these three phases, because they
happen linearly in the rather long function. Instead, this
patch breaks them up into three separate helper functions.

The result is a little easier to follow, though it
unfortunately suffers from some optimization
interdependencies between the stages (e.g., during step 3 we
use the packing list index from step 1 and the packfile
information from step 2).

More importantly, though, the various parts can be
composed differently, as they will be in the next patch.

Signed-off-by: Jeff King <peff@peff.net>
---
 builtin/pack-objects.c | 98 +++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 78 insertions(+), 20 deletions(-)

diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index faf746b..13b171d 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -800,41 +800,69 @@ static int no_try_delta(const char *path)
 	return 0;
 }
 
-static int add_object_entry(const unsigned char *sha1, enum object_type type,
-			    const char *name, int exclude)
+/*
+ * When adding an object, check whether we have already added it
+ * to our packing list. If so, we can skip. However, if we are
+ * being asked to exclude it, but the previous mention was to include
+ * it, make sure to adjust its flags and tweak our numbers accordingly.
+ *
+ * As an optimization, we pass out the index position where we would have
+ * found the item, since that saves us from having to look it up again a
+ * few lines later when we want to add the new entry.
+ */
+static int have_duplicate_entry(const unsigned char *sha1,
+				int exclude,
+				uint32_t *index_pos)
 {
 	struct object_entry *entry;
-	struct packed_git *p, *found_pack = NULL;
-	off_t found_offset = 0;
-	uint32_t hash = pack_name_hash(name);
-	uint32_t index_pos;
 
-	entry = packlist_find(&to_pack, sha1, &index_pos);
-	if (entry) {
-		if (exclude) {
-			if (!entry->preferred_base)
-				nr_result--;
-			entry->preferred_base = 1;
-		}
+	entry = packlist_find(&to_pack, sha1, index_pos);
+	if (!entry)
 		return 0;
+
+	if (exclude) {
+		if (!entry->preferred_base)
+			nr_result--;
+		entry->preferred_base = 1;
 	}
 
+	return 1;
+}
+
+/*
+ * Check whether we want the object in the pack (e.g., we do not want
+ * objects found in non-local stores if the "--local" option was used).
+ *
+ * As a side effect of this check, we will find the packed version of this
+ * object, if any. We therefore pass out the pack information to avoid having
+ * to look it up again later.
+ */
+static int want_object_in_pack(const unsigned char *sha1,
+			       int exclude,
+			       struct packed_git **found_pack,
+			       off_t *found_offset)
+{
+	struct packed_git *p;
+
 	if (!exclude && local && has_loose_object_nonlocal(sha1))
 		return 0;
 
+	*found_pack = NULL;
+	*found_offset = 0;
+
 	for (p = packed_git; p; p = p->next) {
 		off_t offset = find_pack_entry_one(sha1, p);
 		if (offset) {
-			if (!found_pack) {
+			if (!*found_pack) {
 				if (!is_pack_valid(p)) {
 					warning("packfile %s cannot be accessed", p->pack_name);
 					continue;
 				}
-				found_offset = offset;
-				found_pack = p;
+				*found_offset = offset;
+				*found_pack = p;
 			}
 			if (exclude)
-				break;
+				return 1;
 			if (incremental)
 				return 0;
 			if (local && !p->pack_local)
@@ -844,6 +872,20 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 		}
 	}
 
+	return 1;
+}
+
+static void create_object_entry(const unsigned char *sha1,
+				enum object_type type,
+				uint32_t hash,
+				int exclude,
+				int no_try_delta,
+				uint32_t index_pos,
+				struct packed_git *found_pack,
+				off_t found_offset)
+{
+	struct object_entry *entry;
+
 	entry = packlist_alloc(&to_pack, sha1, index_pos);
 	entry->hash = hash;
 	if (type)
@@ -857,11 +899,27 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 		entry->in_pack_offset = found_offset;
 	}
 
-	display_progress(progress_state, to_pack.nr_objects);
+	entry->no_try_delta = no_try_delta;
+}
+
+static int add_object_entry(const unsigned char *sha1, enum object_type type,
+			    const char *name, int exclude)
+{
+	struct packed_git *found_pack;
+	off_t found_offset;
+	uint32_t index_pos;
 
-	if (name && no_try_delta(name))
-		entry->no_try_delta = 1;
+	if (have_duplicate_entry(sha1, exclude, &index_pos))
+		return 0;
 
+	if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset))
+		return 0;
+
+	create_object_entry(sha1, type, pack_name_hash(name),
+			    exclude, name && no_try_delta(name),
+			    index_pos, found_pack, found_offset);
+
+	display_progress(progress_state, to_pack.nr_objects);
 	return 1;
 }
 
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 12/23] pack-objects: use bitmaps when packing objects
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (10 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 11/23] pack-objects: split add_object_entry Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 13/23] rev-list: add bitmap mode to speed up object lists Jeff King
                   ` (13 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

In this patch, we use the bitmap API to perform the `Counting Objects`
phase in pack-objects, rather than a traditional walk through the object
graph. For a reasonably-packed large repo, the time to fetch and clone
is often dominated by the full-object revision walk during the Counting
Objects phase. Using bitmaps can reduce the CPU time required on the
server (and therefore start sending the actual pack data with less
delay).

For bitmaps to be used, the following must be true:

  1. We must be packing to stdout (as a normal `pack-objects` from
     `upload-pack` would do).

  2. There must be a .bitmap index containing at least one of the
     "have" objects that the client is asking for.

  3. Bitmaps must be enabled (they are enabled by default, but can be
     disabled by setting `pack.usebitmaps` to false, or by using
     `--no-use-bitmap-index` on the command-line).

If any of these is not true, we fall back to doing a normal walk of the
object graph.
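
For instance, either of the following invocations falls back to the
traditional walk (illustrative commands, using the config key and
command-line flag described above):

    $ git -c pack.usebitmaps=false pack-objects --all --stdout </dev/null >/dev/null
    $ git pack-objects --all --stdout --no-use-bitmap-index </dev/null >/dev/null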

Here are some sample timings from a full pack of `torvalds/linux` (i.e.
something very similar to what would be generated for a clone of the
repository) that show the speedup produced by various
methods:

    [existing graph traversal]
    $ time git pack-objects --all --stdout --no-use-bitmap-index \
			    </dev/null >/dev/null
    Counting objects: 3237103, done.
    Compressing objects: 100% (508752/508752), done.
    Total 3237103 (delta 2699584), reused 3237103 (delta 2699584)

    real    0m44.111s
    user    0m42.396s
    sys     0m3.544s

    [bitmaps only, without partial pack reuse; note that
     pack reuse is automatic, so timing this required a
     patch to disable it]
    $ time git pack-objects --all --stdout </dev/null >/dev/null
    Counting objects: 3237103, done.
    Compressing objects: 100% (508752/508752), done.
    Total 3237103 (delta 2699584), reused 3237103 (delta 2699584)

    real    0m5.413s
    user    0m5.604s
    sys     0m1.804s

    [bitmaps with pack reuse (what you get with this patch)]
    $ time git pack-objects --all --stdout </dev/null >/dev/null
    Reusing existing pack: 3237103, done.
    Total 3237103 (delta 0), reused 0 (delta 0)

    real    0m1.636s
    user    0m1.460s
    sys     0m0.172s

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Documentation/config.txt |   6 +++
 builtin/pack-objects.c   | 107 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 113 insertions(+)

diff --git a/Documentation/config.txt b/Documentation/config.txt
index ab26963..a981369 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -1858,6 +1858,12 @@ pack.packSizeLimit::
 	Common unit suffixes of 'k', 'm', or 'g' are
 	supported.
 
+pack.useBitmaps::
+	When true, git will use pack bitmaps (if available) when packing
+	to stdout (e.g., during the server side of a fetch). Defaults to
+	true. You should not generally need to turn this off unless
+	you are debugging pack bitmaps.
+
 pager.<cmd>::
 	If the value is boolean, turns on or off pagination of the
 	output of a particular Git subcommand when writing to a tty.
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 13b171d..030d894 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -19,6 +19,7 @@
 #include "refs.h"
 #include "streaming.h"
 #include "thread-utils.h"
+#include "pack-bitmap.h"
 
 static const char *pack_usage[] = {
 	N_("git pack-objects --stdout [options...] [< ref-list | < object-list]"),
@@ -57,6 +58,12 @@ static struct progress *progress_state;
 static int pack_compression_level = Z_DEFAULT_COMPRESSION;
 static int pack_compression_seen;
 
+static struct packed_git *reuse_packfile;
+static uint32_t reuse_packfile_objects;
+static off_t reuse_packfile_offset;
+
+static int use_bitmap_index = 1;
+
 static unsigned long delta_cache_size = 0;
 static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
 static unsigned long cache_max_small_delta_size = 1000;
@@ -678,6 +685,46 @@ static struct object_entry **compute_write_order(void)
 	return wo;
 }
 
+static off_t write_reused_pack(struct sha1file *f)
+{
+	unsigned char buffer[8192];
+	off_t to_write;
+	int fd;
+
+	if (!is_pack_valid(reuse_packfile))
+		die("packfile is invalid: %s", reuse_packfile->pack_name);
+
+	fd = git_open_noatime(reuse_packfile->pack_name);
+	if (fd < 0)
+		die_errno("unable to open packfile for reuse: %s",
+			  reuse_packfile->pack_name);
+
+	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
+		die_errno("unable to seek in reused packfile");
+
+	if (reuse_packfile_offset < 0)
+		reuse_packfile_offset = reuse_packfile->pack_size - 20;
+
+	to_write = reuse_packfile_offset - sizeof(struct pack_header);
+
+	while (to_write) {
+		int read_pack = xread(fd, buffer, sizeof(buffer));
+
+		if (read_pack <= 0)
+			die_errno("unable to read from reused packfile");
+
+		if (read_pack > to_write)
+			read_pack = to_write;
+
+		sha1write(f, buffer, read_pack);
+		to_write -= read_pack;
+	}
+
+	close(fd);
+	written += reuse_packfile_objects;
+	return reuse_packfile_offset - sizeof(struct pack_header);
+}
+
 static void write_pack_file(void)
 {
 	uint32_t i = 0, j;
@@ -704,6 +751,15 @@ static void write_pack_file(void)
 		offset = write_pack_header(f, nr_remaining);
 		if (!offset)
 			die_errno("unable to write pack header");
+
+		if (reuse_packfile) {
+			off_t packfile_size;
+			assert(pack_to_stdout);
+
+			packfile_size = write_reused_pack(f);
+			offset += packfile_size;
+		}
+
 		nr_written = 0;
 		for (; i < to_pack.nr_objects; i++) {
 			struct object_entry *e = write_order[i];
@@ -923,6 +979,22 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
 	return 1;
 }
 
+static int add_object_entry_from_bitmap(const unsigned char *sha1,
+					enum object_type type,
+					int flags, uint32_t name_hash,
+					struct packed_git *pack, off_t offset)
+{
+	uint32_t index_pos;
+
+	if (have_duplicate_entry(sha1, 0, &index_pos))
+		return 0;
+
+	create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);
+
+	display_progress(progress_state, to_pack.nr_objects);
+	return 1;
+}
+
 struct pbase_tree_cache {
 	unsigned char sha1[20];
 	int ref;
@@ -2085,6 +2157,10 @@ static int git_pack_config(const char *k, const char *v, void *cb)
 		cache_max_small_delta_size = git_config_int(k, v);
 		return 0;
 	}
+	if (!strcmp(k, "pack.usebitmaps")) {
+		use_bitmap_index = git_config_bool(k, v);
+		return 0;
+	}
 	if (!strcmp(k, "pack.threads")) {
 		delta_search_threads = git_config_int(k, v);
 		if (delta_search_threads < 0)
@@ -2293,6 +2369,29 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
 	}
 }
 
+static int get_object_list_from_bitmap(struct rev_info *revs)
+{
+	if (prepare_bitmap_walk(revs) < 0)
+		return -1;
+
+	if (!reuse_partial_packfile_from_bitmap(
+			&reuse_packfile,
+			&reuse_packfile_objects,
+			&reuse_packfile_offset)) {
+		assert(reuse_packfile_objects);
+		nr_result += reuse_packfile_objects;
+
+		if (progress) {
+			fprintf(stderr, "Reusing existing pack: %d, done.\n",
+				reuse_packfile_objects);
+			fflush(stderr);
+		}
+	}
+
+	traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
+	return 0;
+}
+
 static void get_object_list(int ac, const char **av)
 {
 	struct rev_info revs;
@@ -2320,6 +2419,9 @@ static void get_object_list(int ac, const char **av)
 			die("bad revision '%s'", line);
 	}
 
+	if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
+		return;
+
 	if (prepare_revision_walk(&revs))
 		die("revision walk setup failed");
 	mark_edges_uninteresting(&revs, show_edge);
@@ -2449,6 +2551,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
 			    N_("pack compression level")),
 		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
 			    N_("do not hide commits by grafts"), 0),
+		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
+			 N_("use a bitmap index if available to speed up counting objects")),
 		OPT_END(),
 	};
 
@@ -2515,6 +2619,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
 	if (keep_unreachable && unpack_unreachable)
 		die("--keep-unreachable and --unpack-unreachable are incompatible.");
 
+	if (!use_internal_rev_list || !pack_to_stdout || is_repository_shallow())
+		use_bitmap_index = 0;
+
 	if (progress && all_progress_implied)
 		progress = 2;
 
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 13/23] rev-list: add bitmap mode to speed up object lists
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (11 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 12/23] pack-objects: use bitmaps when packing objects Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 14/23] pack-objects: implement bitmap writing Jeff King
                   ` (12 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

The bitmap reachability index used to speed up the counting objects
phase during `pack-objects` can also be used to optimize a normal
rev-list if the only things required are the SHA1s of the listed
objects (i.e., not the path names at which trees and blobs were found).

Calling `git rev-list --objects --use-bitmap-index [committish]` will
perform an object iteration based on a bitmap result instead of actually
walking the object graph.

These are some example timings for `torvalds/linux` (warm cache,
best-of-five):

    $ time git rev-list --objects master > /dev/null

    real    0m34.191s
    user    0m33.904s
    sys     0m0.268s

    $ time git rev-list --objects --use-bitmap-index master > /dev/null

    real    0m1.041s
    user    0m0.976s
    sys     0m0.064s

Likewise, using `git rev-list --count --use-bitmap-index` will speed up
the counting operation by building the resulting bitmap and performing a
fast popcount (number of bits set on the bitmap) on the result.

Here are some sample timings of different ways to count commits in
`torvalds/linux`:

    $ time git rev-list master | wc -l
        399882

        real    0m6.524s
        user    0m6.060s
        sys     0m3.284s

    $ time git rev-list --count master
        399882

        real    0m4.318s
        user    0m4.236s
        sys     0m0.076s

    $ time git rev-list --use-bitmap-index --count master
        399882

        real    0m0.217s
        user    0m0.176s
        sys     0m0.040s

This also respects negative refs, so you can use it to count
a slice of history:

        $ time git rev-list --count v3.0..master
        144843

        real    0m1.971s
        user    0m1.932s
        sys     0m0.036s

        $ time git rev-list --use-bitmap-index --count v3.0..master
        real    0m0.280s
        user    0m0.220s
        sys     0m0.056s

Though note that the closer the endpoints, the less it helps. In the
traversal case, we have fewer commits to cross, so we take less time.
But the bitmap time is dominated by generating the pack revindex, which
is constant with respect to the refs given.

Note that you cannot yet get a fast --left-right count of a symmetric
difference (e.g., "--count --left-right master...topic"). The slow part
of that walk actually happens during the merge-base determination when
we parse "master...topic". Even though a count does not actually need to
know the real merge base (it only needs to take the symmetric difference
of the bitmaps), the revision code would require some refactoring to
handle this case.

Additionally, a `--test-bitmap` flag has been added that will perform
the same rev-list manually (i.e. using a normal revwalk) and using
bitmaps, and verify that the results are the same. This can be used to
exercise the bitmap code, and also to verify that the contents of the
.bitmap file are sane.
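
For instance, a quick sanity check on a repository that already has a
bitmap could look like this (illustrative invocation; exactly one
commit must be given):

    $ git rev-list --test-bitmap HEAD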

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Documentation/git-rev-list.txt     |  1 +
 Documentation/rev-list-options.txt |  8 ++++++++
 builtin/rev-list.c                 | 39 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 48 insertions(+)

diff --git a/Documentation/git-rev-list.txt b/Documentation/git-rev-list.txt
index 045b37b..7a1585d 100644
--- a/Documentation/git-rev-list.txt
+++ b/Documentation/git-rev-list.txt
@@ -55,6 +55,7 @@ SYNOPSIS
 	     [ \--reverse ]
 	     [ \--walk-reflogs ]
 	     [ \--no-walk ] [ \--do-walk ]
+	     [ \--use-bitmap-index ]
 	     <commit>... [ \-- <paths>... ]
 
 DESCRIPTION
diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt
index 5bdfb42..c236b85 100644
--- a/Documentation/rev-list-options.txt
+++ b/Documentation/rev-list-options.txt
@@ -274,6 +274,14 @@ See also linkgit:git-reflog[1].
 	Output excluded boundary commits. Boundary commits are
 	prefixed with `-`.
 
+ifdef::git-rev-list[]
+--use-bitmap-index::
+
+	Try to speed up the traversal using the pack bitmap index (if
+	one is available). Note that when traversing with `--objects`,
+	trees and blobs will not have their associated path printed.
+endif::git-rev-list[]
+
 --
 
 History Simplification
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 4fc1616..5209255 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -3,6 +3,8 @@
 #include "diff.h"
 #include "revision.h"
 #include "list-objects.h"
+#include "pack.h"
+#include "pack-bitmap.h"
 #include "builtin.h"
 #include "log-tree.h"
 #include "graph.h"
@@ -257,6 +259,18 @@ static int show_bisect_vars(struct rev_list_info *info, int reaches, int all)
 	return 0;
 }
 
+static int show_object_fast(
+	const unsigned char *sha1,
+	enum object_type type,
+	int exclude,
+	uint32_t name_hash,
+	struct packed_git *found_pack,
+	off_t found_offset)
+{
+	fprintf(stdout, "%s\n", sha1_to_hex(sha1));
+	return 1;
+}
+
 int cmd_rev_list(int argc, const char **argv, const char *prefix)
 {
 	struct rev_info revs;
@@ -265,6 +279,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
 	int bisect_list = 0;
 	int bisect_show_vars = 0;
 	int bisect_find_all = 0;
+	int use_bitmap_index = 0;
 
 	git_config(git_default_config, NULL);
 	init_revisions(&revs, prefix);
@@ -306,6 +321,14 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
 			bisect_show_vars = 1;
 			continue;
 		}
+		if (!strcmp(arg, "--use-bitmap-index")) {
+			use_bitmap_index = 1;
+			continue;
+		}
+		if (!strcmp(arg, "--test-bitmap")) {
+			test_bitmap_walk(&revs);
+			return 0;
+		}
 		usage(rev_list_usage);
 
 	}
@@ -333,6 +356,22 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
 	if (bisect_list)
 		revs.limited = 1;
 
+	if (use_bitmap_index) {
+		if (revs.count && !revs.left_right && !revs.cherry_mark) {
+			uint32_t commit_count;
+			if (!prepare_bitmap_walk(&revs)) {
+				count_bitmap_commit_list(&commit_count, NULL, NULL, NULL);
+				printf("%d\n", commit_count);
+				return 0;
+			}
+		} else if (revs.tag_objects && revs.tree_objects && revs.blob_objects) {
+			if (!prepare_bitmap_walk(&revs)) {
+				traverse_bitmap_commit_list(&show_object_fast);
+				return 0;
+			}
+		}
+	}
+
 	if (prepare_revision_walk(&revs))
 		die("revision walk setup failed");
 	if (revs.tree_objects)
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 14/23] pack-objects: implement bitmap writing
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (12 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 13/23] rev-list: add bitmap mode to speed up object lists Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 15/23] repack: stop using magic number for ARRAY_SIZE(exts) Jeff King
                   ` (11 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

This commit further extends the functionality of `pack-objects` by
allowing it to write out a `.bitmap` index next to any written packs,
together with the `.idx` index that currently gets written.

If bitmap writing is enabled for a given repository (either by calling
`pack-objects` with the `--write-bitmap-index` flag or by having
`pack.writebitmaps` set to `true` in the config) and pack-objects is
writing a packfile that would normally be indexed (i.e. not piping to
stdout), we will attempt to write the corresponding bitmap index for the
packfile.

Bitmap index writing happens after the packfile and its index have been
successfully written to disk (`finish_tmp_packfile`). The process is
performed in several steps:

    1. `bitmap_writer_set_checksum`: this call stores the partial
       checksum for the packfile being written; the checksum will be
       written in the resulting bitmap index to verify its integrity

    2. `bitmap_writer_build_type_index`: this call uses the array of
       `struct object_entry` that has just been sorted when writing out
       the actual packfile index to disk to generate 4 type-index bitmaps
       (one for each object type).

       These bitmaps have their nth bit set if the given object is of
       the bitmap's type. E.g. the nth bit of the Commits bitmap will be
       1 if the nth object in the packfile index is a commit.

       This is a very cheap operation because the bitmap writing code has
       access to the metadata stored in the `struct object_entry` array,
       and hence the real type for each object in the packfile.

    3. `bitmap_writer_reuse_bitmaps`: if there exists an existing bitmap
       index for one of the packfiles we're trying to repack, this call
       will efficiently rebuild the existing bitmaps so they can be
       reused on the new index. All the existing bitmaps will be stored
       in a `reuse` hash table, and the commit selection phase will
       prioritize these when selecting, as they can be written directly
       to the new index without having to perform a revision walk to
       fill the bitmap. This can greatly speed up the repack of a
       repository that already has bitmaps.

    4. `bitmap_writer_select_commits`: if bitmap writing is enabled for
       a given `pack-objects` run, the sequence of commits generated
       during the Counting Objects phase will be stored in an array.

       We then use that array to build up the list of selected commits.
       Writing a bitmap in the index for each object in the repository
       would be cost-prohibitive, so we use a simple heuristic to pick
       the commits that will be indexed with bitmaps.

       The current heuristics are a simplified version of JGit's
       original implementation. We select a higher density of commits
       depending on their age: the 100 most recent commits are always
       selected; after that we pick one commit out of every 100, and the
       gap increases as the commits grow older. On top of that, we make
       sure that every single branch that has not been merged (all the
       tips that would be required from a clone) gets its own bitmap, and
       when selecting commits between a gap, we tend to prioritize the
       commit with the most parents.

       Do note that there is no right/wrong way to perform commit
       selection; different selection algorithms will result in
       different commits being selected, but there's no such thing as
       "missing a commit". The bitmap walker algorithm implemented in
       `prepare_bitmap_walk` is able to adapt to missing bitmaps by
       performing manual walks that complete the bitmap: the ideal
       selection algorithm, however, would select the commits that are
       more likely to be used as roots for a walk in the future (e.g.
       the tips of each branch, and so on) to ensure a bitmap for them
       is always available.

    5. `bitmap_writer_build`: this is the computationally expensive part
       of bitmap generation. Based on the list of commits that were
       selected in the previous step, we perform several incremental
       walks to generate the bitmap for each commit.

       The walks begin from the oldest commit, and are built up
       incrementally for each branch. E.g. consider this DAG, where A,
       B, C, D, E, F are the selected commits, and a, b, c, e are a
       chunk of simplified history that will not receive bitmaps.

            A---a---B--b--C--c--D
                     \
                      E--e--F

       We start by building the bitmap for A, using A as the root for a
       revision walk and marking all the objects that are reachable
       until the walk is over. Once this bitmap is stored, we reuse the
       bitmap walker to perform the walk for B, assuming that once we
       reach A again, the walk will be terminated because A has already
       been SEEN on the previous walk.

       This process is repeated for C and D, but when we try to
       generate the bitmap for E, we can reuse neither the current walk
       nor the bitmap we have generated so far.

       What we do now is reset the walk, clear the bitmap, and perform
       the walk from scratch using E as the origin. This new walk,
       however, does not need to be completed. Once we hit B, we can
       look up the bitmap we have already stored for that commit and OR
       it with the existing bitmap we've composed so far, allowing us
       to limit the walk early.

       After all the bitmaps have been generated, another iteration
       through the list of commits is performed to find the best XOR
       offsets for compression before writing them to disk. Because of
       the incremental nature of these bitmaps, XORing one of them with
       its predecessor results in a minimal "bitmap delta" most of the
       time. We can write this delta to the on-disk bitmap index, and
       then re-compose the original bitmaps by XORing them again when
       loaded (see the sketch after this list).

       This is a phase very similar to pack-object's `find_delta` (using
       bitmaps instead of objects, of course), except the heuristics
       have been greatly simplified: we only check the 10 bitmaps before
       any given one to find the best-compressing one. This gives good
       results in practice, because there is locality in the ordering
       of the objects (and therefore bitmaps) in the packfile.

    6. `bitmap_writer_finish`: the last step in the process is
       serializing to disk all the bitmap data that has been generated
       in the previous steps.

       The bitmap is written to a tmp file and then moved atomically to
       its final destination, using the same process as
       `pack-write.c:write_idx_file`.
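
As an illustration only (this is not part of the patch), the loading
side of the XOR scheme described in step 5 could look roughly like the
sketch below. `disk_ewah(i)` and `xor_offset(i)` are hypothetical
stand-ins for the data of entry `i` as read back from the `.bitmap`
file; `ewah_pool_new()` and `ewah_xor()` are the same EWAH calls the
writer uses:

    /*
     * Sketch only: recompose the original bitmap for on-disk entry i.
     * If the entry was stored as an XOR delta, its base is the entry
     * xor_offset positions earlier, itself recomposed the same way
     * (caching and freeing of intermediate results is omitted).
     */
    static struct ewah_bitmap *recompose_bitmap(uint32_t i)
    {
            struct ewah_bitmap *stored = disk_ewah(i);
            struct ewah_bitmap *base, *original;

            if (xor_offset(i) == 0)
                    return stored;  /* written verbatim */

            base = recompose_bitmap(i - xor_offset(i));

            original = ewah_pool_new();
            ewah_xor(stored, base, original);  /* delta ^ base == original */
            return original;
    }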

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Documentation/config.txt |   8 +
 Makefile                 |   1 +
 builtin/pack-objects.c   |  53 +++++
 pack-bitmap-write.c      | 535 +++++++++++++++++++++++++++++++++++++++++++++++
 pack-bitmap.c            |  92 ++++++++
 pack-bitmap.h            |  19 ++
 pack-objects.h           |   1 +
 pack-write.c             |   2 +
 8 files changed, 711 insertions(+)
 create mode 100644 pack-bitmap-write.c

diff --git a/Documentation/config.txt b/Documentation/config.txt
index a981369..4b0c368 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -1864,6 +1864,14 @@ pack.useBitmaps::
 	true. You should not generally need to turn this off unless
 	you are debugging pack bitmaps.
 
+pack.writebitmaps::
+	When true, git will write a bitmap index when packing all
+	objects to disk (e.g., when `git repack -a` is run).  This
+	index can speed up the "counting objects" phase of subsequent
+	packs created for clones and fetches, at the cost of some disk
+	space and extra time spent on the initial repack.  Defaults to
+	false.
+
 pager.<cmd>::
 	If the value is boolean, turns on or off pagination of the
 	output of a particular Git subcommand when writing to a tty.
diff --git a/Makefile b/Makefile
index b983d78..555d44c 100644
--- a/Makefile
+++ b/Makefile
@@ -839,6 +839,7 @@ LIB_OBJS += notes-merge.o
 LIB_OBJS += notes-utils.o
 LIB_OBJS += object.o
 LIB_OBJS += pack-bitmap.o
+LIB_OBJS += pack-bitmap-write.o
 LIB_OBJS += pack-check.o
 LIB_OBJS += pack-objects.o
 LIB_OBJS += pack-revindex.o
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 030d894..fd6ae01 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -63,6 +63,7 @@ static uint32_t reuse_packfile_objects;
 static off_t reuse_packfile_offset;
 
 static int use_bitmap_index = 1;
+static int write_bitmap_index;
 
 static unsigned long delta_cache_size = 0;
 static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
@@ -76,6 +77,24 @@ static unsigned long window_memory_limit = 0;
 static uint32_t written, written_delta;
 static uint32_t reused, reused_delta;
 
+/*
+ * Indexed commits
+ */
+static struct commit **indexed_commits;
+static unsigned int indexed_commits_nr;
+static unsigned int indexed_commits_alloc;
+
+static void index_commit_for_bitmap(struct commit *commit)
+{
+	if (indexed_commits_nr >= indexed_commits_alloc) {
+		indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
+		indexed_commits = xrealloc(indexed_commits,
+			indexed_commits_alloc * sizeof(struct commit *));
+	}
+
+	indexed_commits[indexed_commits_nr++] = commit;
+}
+
 static void *get_delta(struct object_entry *entry)
 {
 	unsigned long size, base_size, delta_size;
@@ -812,9 +831,30 @@ static void write_pack_file(void)
 			if (sizeof(tmpname) <= strlen(base_name) + 50)
 				die("pack base name '%s' too long", base_name);
 			snprintf(tmpname, sizeof(tmpname), "%s-", base_name);
+
+			if (write_bitmap_index) {
+				bitmap_writer_set_checksum(sha1);
+				bitmap_writer_build_type_index(written_list, nr_written);
+			}
+
 			finish_tmp_packfile(tmpname, pack_tmp_name,
 					    written_list, nr_written,
 					    &pack_idx_opts, sha1);
+
+			if (write_bitmap_index) {
+				char *end_of_name_prefix = strrchr(tmpname, 0);
+				sprintf(end_of_name_prefix, "%s.bitmap", sha1_to_hex(sha1));
+
+				stop_progress(&progress_state);
+
+				bitmap_writer_show_progress(progress);
+				bitmap_writer_reuse_bitmaps(&to_pack);
+				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
+				bitmap_writer_build(&to_pack);
+				bitmap_writer_finish(written_list, nr_written, tmpname);
+				write_bitmap_index = 0;
+			}
+
 			free(pack_tmp_name);
 			puts(sha1_to_hex(sha1));
 		}
@@ -2157,6 +2197,10 @@ static int git_pack_config(const char *k, const char *v, void *cb)
 		cache_max_small_delta_size = git_config_int(k, v);
 		return 0;
 	}
+	if (!strcmp(k, "pack.writebitmaps")) {
+		write_bitmap_index = git_config_bool(k, v);
+		return 0;
+	}
 	if (!strcmp(k, "pack.usebitmaps")) {
 		use_bitmap_index = git_config_bool(k, v);
 		return 0;
@@ -2219,6 +2263,9 @@ static void show_commit(struct commit *commit, void *data)
 {
 	add_object_entry(commit->object.sha1, OBJ_COMMIT, NULL, 0);
 	commit->object.flags |= OBJECT_ADDED;
+
+	if (write_bitmap_index)
+		index_commit_for_bitmap(commit);
 }
 
 static void show_object(struct object *obj,
@@ -2411,6 +2458,7 @@ static void get_object_list(int ac, const char **av)
 		if (*line == '-') {
 			if (!strcmp(line, "--not")) {
 				flags ^= UNINTERESTING;
+				write_bitmap_index = 0;
 				continue;
 			}
 			die("not a rev '%s'", line);
@@ -2553,6 +2601,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
 			    N_("do not hide commits by grafts"), 0),
 		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
 			 N_("use a bitmap index if available to speed up counting objects")),
+		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
+			 N_("write a bitmap index together with the pack index")),
 		OPT_END(),
 	};
 
@@ -2622,6 +2672,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
 	if (!use_internal_rev_list || !pack_to_stdout || is_repository_shallow())
 		use_bitmap_index = 0;
 
+	if (pack_to_stdout || !rev_list_all)
+		write_bitmap_index = 0;
+
 	if (progress && all_progress_implied)
 		progress = 2;
 
diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c
new file mode 100644
index 0000000..954a74d
--- /dev/null
+++ b/pack-bitmap-write.c
@@ -0,0 +1,535 @@
+#include "cache.h"
+#include "commit.h"
+#include "tag.h"
+#include "diff.h"
+#include "revision.h"
+#include "list-objects.h"
+#include "progress.h"
+#include "pack-revindex.h"
+#include "pack.h"
+#include "pack-bitmap.h"
+#include "sha1-lookup.h"
+#include "pack-objects.h"
+
+struct bitmapped_commit {
+	struct commit *commit;
+	struct ewah_bitmap *bitmap;
+	struct ewah_bitmap *write_as;
+	int flags;
+	int xor_offset;
+	uint32_t commit_pos;
+};
+
+struct bitmap_writer {
+	struct ewah_bitmap *commits;
+	struct ewah_bitmap *trees;
+	struct ewah_bitmap *blobs;
+	struct ewah_bitmap *tags;
+
+	khash_sha1 *bitmaps;
+	khash_sha1 *reused;
+	struct packing_data *to_pack;
+
+	struct bitmapped_commit *selected;
+	unsigned int selected_nr, selected_alloc;
+
+	struct progress *progress;
+	int show_progress;
+	unsigned char pack_checksum[20];
+};
+
+static struct bitmap_writer writer;
+
+void bitmap_writer_show_progress(int show)
+{
+	writer.show_progress = show;
+}
+
+/**
+ * Build the initial type index for the packfile
+ */
+void bitmap_writer_build_type_index(struct pack_idx_entry **index,
+				    uint32_t index_nr)
+{
+	uint32_t i;
+
+	writer.commits = ewah_new();
+	writer.trees = ewah_new();
+	writer.blobs = ewah_new();
+	writer.tags = ewah_new();
+
+	for (i = 0; i < index_nr; ++i) {
+		struct object_entry *entry = (struct object_entry *)index[i];
+		enum object_type real_type;
+
+		entry->in_pack_pos = i;
+
+		switch (entry->type) {
+		case OBJ_COMMIT:
+		case OBJ_TREE:
+		case OBJ_BLOB:
+		case OBJ_TAG:
+			real_type = entry->type;
+			break;
+
+		default:
+			real_type = sha1_object_info(entry->idx.sha1, NULL);
+			break;
+		}
+
+		switch (real_type) {
+		case OBJ_COMMIT:
+			ewah_set(writer.commits, i);
+			break;
+
+		case OBJ_TREE:
+			ewah_set(writer.trees, i);
+			break;
+
+		case OBJ_BLOB:
+			ewah_set(writer.blobs, i);
+			break;
+
+		case OBJ_TAG:
+			ewah_set(writer.tags, i);
+			break;
+
+		default:
+			die("Missing type information for %s (%d/%d)",
+			    sha1_to_hex(entry->idx.sha1), real_type, entry->type);
+		}
+	}
+}
+
+/**
+ * Compute the actual bitmaps
+ */
+static struct object **seen_objects;
+static unsigned int seen_objects_nr, seen_objects_alloc;
+
+static inline void push_bitmapped_commit(struct commit *commit, struct ewah_bitmap *reused)
+{
+	if (writer.selected_nr >= writer.selected_alloc) {
+		writer.selected_alloc = (writer.selected_alloc + 32) * 2;
+		writer.selected = xrealloc(writer.selected,
+					   writer.selected_alloc * sizeof(struct bitmapped_commit));
+	}
+
+	writer.selected[writer.selected_nr].commit = commit;
+	writer.selected[writer.selected_nr].bitmap = reused;
+	writer.selected[writer.selected_nr].flags = 0;
+
+	writer.selected_nr++;
+}
+
+static inline void mark_as_seen(struct object *object)
+{
+	ALLOC_GROW(seen_objects, seen_objects_nr + 1, seen_objects_alloc);
+	seen_objects[seen_objects_nr++] = object;
+}
+
+static inline void reset_all_seen(void)
+{
+	unsigned int i;
+	for (i = 0; i < seen_objects_nr; ++i) {
+		seen_objects[i]->flags &= ~(SEEN | ADDED | SHOWN);
+	}
+	seen_objects_nr = 0;
+}
+
+static uint32_t find_object_pos(const unsigned char *sha1)
+{
+	struct object_entry *entry = packlist_find(writer.to_pack, sha1, NULL);
+
+	if (!entry) {
+		die("Failed to write bitmap index. Packfile doesn't have full closure "
+			"(object %s is missing)", sha1_to_hex(sha1));
+	}
+
+	return entry->in_pack_pos;
+}
+
+static void show_object(struct object *object, const struct name_path *path,
+			const char *last, void *data)
+{
+	struct bitmap *base = data;
+	bitmap_set(base, find_object_pos(object->sha1));
+	mark_as_seen(object);
+}
+
+static void show_commit(struct commit *commit, void *data)
+{
+	mark_as_seen((struct object *)commit);
+}
+
+static int
+add_to_include_set(struct bitmap *base, struct commit *commit)
+{
+	khiter_t hash_pos;
+	uint32_t bitmap_pos = find_object_pos(commit->object.sha1);
+
+	if (bitmap_get(base, bitmap_pos))
+		return 0;
+
+	hash_pos = kh_get_sha1(writer.bitmaps, commit->object.sha1);
+	if (hash_pos < kh_end(writer.bitmaps)) {
+		struct bitmapped_commit *bc = kh_value(writer.bitmaps, hash_pos);
+		bitmap_or_ewah(base, bc->bitmap);
+		return 0;
+	}
+
+	bitmap_set(base, bitmap_pos);
+	return 1;
+}
+
+static int
+should_include(struct commit *commit, void *_data)
+{
+	struct bitmap *base = _data;
+
+	if (!add_to_include_set(base, commit)) {
+		struct commit_list *parent = commit->parents;
+
+		mark_as_seen((struct object *)commit);
+
+		while (parent) {
+			parent->item->object.flags |= SEEN;
+			mark_as_seen((struct object *)parent->item);
+			parent = parent->next;
+		}
+
+		return 0;
+	}
+
+	return 1;
+}
+
+static void compute_xor_offsets(void)
+{
+	static const int MAX_XOR_OFFSET_SEARCH = 10;
+
+	int i, next = 0;
+
+	while (next < writer.selected_nr) {
+		struct bitmapped_commit *stored = &writer.selected[next];
+
+		int best_offset = 0;
+		struct ewah_bitmap *best_bitmap = stored->bitmap;
+		struct ewah_bitmap *test_xor;
+
+		for (i = 1; i <= MAX_XOR_OFFSET_SEARCH; ++i) {
+			int curr = next - i;
+
+			if (curr < 0)
+				break;
+
+			test_xor = ewah_pool_new();
+			ewah_xor(writer.selected[curr].bitmap, stored->bitmap, test_xor);
+
+			if (test_xor->buffer_size < best_bitmap->buffer_size) {
+				if (best_bitmap != stored->bitmap)
+					ewah_pool_free(best_bitmap);
+
+				best_bitmap = test_xor;
+				best_offset = i;
+			} else {
+				ewah_pool_free(test_xor);
+			}
+		}
+
+		stored->xor_offset = best_offset;
+		stored->write_as = best_bitmap;
+
+		next++;
+	}
+}
+
+void bitmap_writer_build(struct packing_data *to_pack)
+{
+	static const double REUSE_BITMAP_THRESHOLD = 0.2;
+
+	int i, reuse_after, need_reset;
+	struct bitmap *base = bitmap_new();
+	struct rev_info revs;
+
+	writer.bitmaps = kh_init_sha1();
+	writer.to_pack = to_pack;
+
+	if (writer.show_progress)
+		writer.progress = start_progress("Building bitmaps", writer.selected_nr);
+
+	init_revisions(&revs, NULL);
+	revs.tag_objects = 1;
+	revs.tree_objects = 1;
+	revs.blob_objects = 1;
+	revs.no_walk = 0;
+
+	revs.include_check = should_include;
+	reset_revision_walk();
+
+	reuse_after = writer.selected_nr * REUSE_BITMAP_THRESHOLD;
+	need_reset = 0;
+
+	for (i = writer.selected_nr - 1; i >= 0; --i) {
+		struct bitmapped_commit *stored;
+		struct object *object;
+
+		khiter_t hash_pos;
+		int hash_ret;
+
+		stored = &writer.selected[i];
+		object = (struct object *)stored->commit;
+
+		if (stored->bitmap == NULL) {
+			if (i < writer.selected_nr - 1 &&
+			    (need_reset ||
+			     !in_merge_bases(writer.selected[i + 1].commit,
+					     stored->commit))) {
+			    bitmap_reset(base);
+			    reset_all_seen();
+			}
+
+			add_pending_object(&revs, object, "");
+			revs.include_check_data = base;
+
+			if (prepare_revision_walk(&revs))
+				die("revision walk setup failed");
+
+			traverse_commit_list(&revs, show_commit, show_object, base);
+
+			revs.pending.nr = 0;
+			revs.pending.alloc = 0;
+			revs.pending.objects = NULL;
+
+			stored->bitmap = bitmap_to_ewah(base);
+			need_reset = 0;
+		} else
+			need_reset = 1;
+
+		if (i >= reuse_after)
+			stored->flags |= BITMAP_FLAG_REUSE;
+
+		hash_pos = kh_put_sha1(writer.bitmaps, object->sha1, &hash_ret);
+		if (hash_ret == 0)
+			die("Duplicate entry when writing index: %s",
+			    sha1_to_hex(object->sha1));
+
+		kh_value(writer.bitmaps, hash_pos) = stored;
+		display_progress(writer.progress, writer.selected_nr - i);
+	}
+
+	bitmap_free(base);
+	stop_progress(&writer.progress);
+
+	compute_xor_offsets();
+}
+
+/**
+ * Select the commits that will be bitmapped
+ */
+static inline unsigned int next_commit_index(unsigned int idx)
+{
+	static const unsigned int MIN_COMMITS = 100;
+	static const unsigned int MAX_COMMITS = 5000;
+
+	static const unsigned int MUST_REGION = 100;
+	static const unsigned int MIN_REGION = 20000;
+
+	unsigned int offset, next;
+
+	if (idx <= MUST_REGION)
+		return 0;
+
+	if (idx <= MIN_REGION) {
+		offset = idx - MUST_REGION;
+		return (offset < MIN_COMMITS) ? offset : MIN_COMMITS;
+	}
+
+	offset = idx - MIN_REGION;
+	next = (offset < MAX_COMMITS) ? offset : MAX_COMMITS;
+
+	return (next > MIN_COMMITS) ? next : MIN_COMMITS;
+}
+
+static int date_compare(const void *_a, const void *_b)
+{
+	struct commit *a = *(struct commit **)_a;
+	struct commit *b = *(struct commit **)_b;
+	return (long)b->date - (long)a->date;
+}
+
+void bitmap_writer_reuse_bitmaps(struct packing_data *to_pack)
+{
+	if (prepare_bitmap_git() < 0)
+		return;
+
+	writer.reused = kh_init_sha1();
+	rebuild_existing_bitmaps(to_pack, writer.reused, writer.show_progress);
+}
+
+static struct ewah_bitmap *find_reused_bitmap(const unsigned char *sha1)
+{
+	khiter_t hash_pos;
+
+	if (!writer.reused)
+		return NULL;
+
+	hash_pos = kh_get_sha1(writer.reused, sha1);
+	if (hash_pos >= kh_end(writer.reused))
+		return NULL;
+
+	return kh_value(writer.reused, hash_pos);
+}
+
+void bitmap_writer_select_commits(struct commit **indexed_commits,
+				  unsigned int indexed_commits_nr,
+				  int max_bitmaps)
+{
+	unsigned int i = 0, j, next;
+
+	qsort(indexed_commits, indexed_commits_nr, sizeof(indexed_commits[0]),
+	      date_compare);
+
+	if (writer.show_progress)
+		writer.progress = start_progress("Selecting bitmap commits", 0);
+
+	if (indexed_commits_nr < 100) {
+		for (i = 0; i < indexed_commits_nr; ++i)
+			push_bitmapped_commit(indexed_commits[i], NULL);
+		return;
+	}
+
+	for (;;) {
+		struct ewah_bitmap *reused_bitmap = NULL;
+		struct commit *chosen = NULL;
+
+		next = next_commit_index(i);
+
+		if (i + next >= indexed_commits_nr)
+			break;
+
+		if (max_bitmaps > 0 && writer.selected_nr >= max_bitmaps) {
+			writer.selected_nr = max_bitmaps;
+			break;
+		}
+
+		if (next == 0) {
+			chosen = indexed_commits[i];
+			reused_bitmap = find_reused_bitmap(chosen->object.sha1);
+		} else {
+			chosen = indexed_commits[i + next];
+
+			for (j = 0; j <= next; ++j) {
+				struct commit *cm = indexed_commits[i + j];
+
+				reused_bitmap = find_reused_bitmap(cm->object.sha1);
+				if (reused_bitmap || (cm->object.flags & NEEDS_BITMAP) != 0) {
+					chosen = cm;
+					break;
+				}
+
+				if (cm->parents && cm->parents->next)
+					chosen = cm;
+			}
+		}
+
+		push_bitmapped_commit(chosen, reused_bitmap);
+
+		i += next + 1;
+		display_progress(writer.progress, i);
+	}
+
+	stop_progress(&writer.progress);
+}
+
+
+static int sha1write_ewah_helper(void *f, const void *buf, size_t len)
+{
+	/* sha1write will die on error */
+	sha1write(f, buf, len);
+	return len;
+}
+
+/**
+ * Write the bitmap index to disk
+ */
+static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap)
+{
+	if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0)
+		die("Failed to write bitmap index");
+}
+
+static const unsigned char *sha1_access(size_t pos, void *table)
+{
+	struct pack_idx_entry **index = table;
+	return index[pos]->sha1;
+}
+
+static void write_selected_commits_v1(struct sha1file *f,
+				      struct pack_idx_entry **index,
+				      uint32_t index_nr)
+{
+	int i;
+
+	for (i = 0; i < writer.selected_nr; ++i) {
+		struct bitmapped_commit *stored = &writer.selected[i];
+		struct bitmap_disk_entry on_disk;
+
+		int commit_pos =
+			sha1_pos(stored->commit->object.sha1, index, index_nr, sha1_access);
+
+		if (commit_pos < 0)
+			die("BUG: trying to write commit not in index");
+
+		on_disk.object_pos = htonl(commit_pos);
+		on_disk.xor_offset = stored->xor_offset;
+		on_disk.flags = stored->flags;
+
+		sha1write(f, &on_disk, sizeof(on_disk));
+		dump_bitmap(f, stored->write_as);
+	}
+}
+
+void bitmap_writer_set_checksum(unsigned char *sha1)
+{
+	hashcpy(writer.pack_checksum, sha1);
+}
+
+void bitmap_writer_finish(struct pack_idx_entry **index,
+			  uint32_t index_nr,
+			  const char *filename)
+{
+	static char tmp_file[PATH_MAX];
+	static uint16_t default_version = 1;
+	static uint16_t flags = BITMAP_OPT_FULL_DAG;
+	struct sha1file *f;
+
+	struct bitmap_disk_header header;
+
+	int fd = odb_mkstemp(tmp_file, sizeof(tmp_file), "pack/tmp_bitmap_XXXXXX");
+
+	if (fd < 0)
+		die_errno("unable to create '%s'", tmp_file);
+	f = sha1fd(fd, tmp_file);
+
+	memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE));
+	header.version = htons(default_version);
+	header.options = htons(flags);
+	header.entry_count = htonl(writer.selected_nr);
+	memcpy(header.checksum, writer.pack_checksum, 20);
+
+	sha1write(f, &header, sizeof(header));
+	dump_bitmap(f, writer.commits);
+	dump_bitmap(f, writer.trees);
+	dump_bitmap(f, writer.blobs);
+	dump_bitmap(f, writer.tags);
+	write_selected_commits_v1(f, index, index_nr);
+
+	sha1close(f, NULL, CSUM_FSYNC);
+
+	if (adjust_shared_perm(tmp_file))
+		die_errno("unable to make temporary bitmap file readable");
+
+	if (rename(tmp_file, filename))
+		die_errno("unable to rename temporary bitmap file to '%s'", filename);
+}
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 33e7482..82090a6 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -968,3 +968,95 @@ void test_bitmap_walk(struct rev_info *revs)
 	else
 		fprintf(stderr, "Mismatch!\n");
 }
+
+static int rebuild_bitmap(uint32_t *reposition,
+			  struct ewah_bitmap *source,
+			  struct bitmap *dest)
+{
+	uint32_t pos = 0;
+	struct ewah_iterator it;
+	eword_t word;
+
+	ewah_iterator_init(&it, source);
+
+	while (ewah_iterator_next(&word, &it)) {
+		uint32_t offset, bit_pos;
+
+		for (offset = 0; offset < BITS_IN_WORD; ++offset) {
+			if ((word >> offset) == 0)
+				break;
+
+			offset += ewah_bit_ctz64(word >> offset);
+
+			bit_pos = reposition[pos + offset];
+			if (bit_pos > 0)
+				bitmap_set(dest, bit_pos - 1);
+			else /* can't reuse, we don't have the object */
+				return -1;
+		}
+
+		pos += BITS_IN_WORD;
+	}
+	return 0;
+}
+
+int rebuild_existing_bitmaps(struct packing_data *mapping,
+			     khash_sha1 *reused_bitmaps,
+			     int show_progress)
+{
+	uint32_t i, num_objects;
+	uint32_t *reposition;
+	struct bitmap *rebuild;
+	struct stored_bitmap *stored;
+	struct progress *progress = NULL;
+
+	khiter_t hash_pos;
+	int hash_ret;
+
+	if (prepare_bitmap_git() < 0)
+		return -1;
+
+	num_objects = bitmap_git.pack->num_objects;
+	reposition = xcalloc(num_objects, sizeof(uint32_t));
+
+	for (i = 0; i < num_objects; ++i) {
+		const unsigned char *sha1;
+		struct revindex_entry *entry;
+		struct object_entry *oe;
+
+		entry = &bitmap_git.reverse_index->revindex[i];
+		sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr);
+		oe = packlist_find(mapping, sha1, NULL);
+
+		if (oe)
+			reposition[i] = oe->in_pack_pos + 1;
+	}
+
+	rebuild = bitmap_new();
+	i = 0;
+
+	if (show_progress)
+		progress = start_progress("Reusing bitmaps", 0);
+
+	kh_foreach_value(bitmap_git.bitmaps, stored, {
+		if (stored->flags & BITMAP_FLAG_REUSE) {
+			if (!rebuild_bitmap(reposition,
+					    lookup_stored_bitmap(stored),
+					    rebuild)) {
+				hash_pos = kh_put_sha1(reused_bitmaps,
+						       stored->sha1,
+						       &hash_ret);
+				kh_value(reused_bitmaps, hash_pos) =
+					bitmap_to_ewah(rebuild);
+			}
+			bitmap_reset(rebuild);
+			display_progress(progress, ++i);
+		}
+	});
+
+	stop_progress(&progress);
+
+	free(reposition);
+	bitmap_free(rebuild);
+	return 0;
+}
diff --git a/pack-bitmap.h b/pack-bitmap.h
index b4510d5..09acf02 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -3,6 +3,7 @@
 
 #include "ewah/ewok.h"
 #include "khash.h"
+#include "pack-objects.h"
 
 struct bitmap_disk_entry {
 	uint32_t object_pos;
@@ -20,10 +21,16 @@ struct bitmap_disk_header {
 
 static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};
 
+#define NEEDS_BITMAP (1u<<22)
+
 enum pack_bitmap_opts {
 	BITMAP_OPT_FULL_DAG = 1
 };
 
+enum pack_bitmap_flags {
+	BITMAP_FLAG_REUSE = 0x1
+};
+
 typedef int (*show_reachable_fn)(
 	const unsigned char *sha1,
 	enum object_type type,
@@ -39,5 +46,17 @@ void test_bitmap_walk(struct rev_info *revs);
 char *pack_bitmap_filename(struct packed_git *p);
 int prepare_bitmap_walk(struct rev_info *revs);
 int reuse_partial_packfile_from_bitmap(struct packed_git **packfile, uint32_t *entries, off_t *up_to);
+int rebuild_existing_bitmaps(struct packing_data *mapping, khash_sha1 *reused_bitmaps, int show_progress);
+
+void bitmap_writer_show_progress(int show);
+void bitmap_writer_set_checksum(unsigned char *sha1);
+void bitmap_writer_build_type_index(struct pack_idx_entry **index, uint32_t index_nr);
+void bitmap_writer_reuse_bitmaps(struct packing_data *to_pack);
+void bitmap_writer_select_commits(struct commit **indexed_commits,
+		unsigned int indexed_commits_nr, int max_bitmaps);
+void bitmap_writer_build(struct packing_data *to_pack);
+void bitmap_writer_finish(struct pack_idx_entry **index,
+			  uint32_t index_nr,
+			  const char *filename);
 
 #endif
diff --git a/pack-objects.h b/pack-objects.h
index 90ad0a8..d1b98b3 100644
--- a/pack-objects.h
+++ b/pack-objects.h
@@ -17,6 +17,7 @@ struct object_entry {
 	enum object_type type;
 	enum object_type in_pack_type;	/* could be delta */
 	uint32_t hash;			/* name hint hash */
+	unsigned int in_pack_pos;
 	unsigned char in_pack_header_size;
 	unsigned preferred_base:1; /*
 				    * we do not pack this, but is available
diff --git a/pack-write.c b/pack-write.c
index ca9e63b..6203d37 100644
--- a/pack-write.c
+++ b/pack-write.c
@@ -371,5 +371,7 @@ void finish_tmp_packfile(char *name_buffer,
 	if (rename(idx_tmp_name, name_buffer))
 		die_errno("unable to rename temporary index file");
 
+	*end_of_name_prefix = '\0';
+
 	free((void *)idx_tmp_name);
 }
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 15/23] repack: stop using magic number for ARRAY_SIZE(exts)
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (13 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 14/23] pack-objects: implement bitmap writing Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 16/23] repack: turn exts array into array-of-struct Jeff King
                   ` (10 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

We have a static array of extensions, but hardcode the size
of the array in our loops. Let's pull out this magic number,
which will make it easier to change.

Signed-off-by: Jeff King <peff@peff.net>
---
 builtin/repack.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/builtin/repack.c b/builtin/repack.c
index a0ff5c7..2e88975 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -115,7 +115,7 @@ static void remove_redundant_pack(const char *dir_name, const char *base_name)
 
 int cmd_repack(int argc, const char **argv, const char *prefix)
 {
-	const char *exts[2] = {".pack", ".idx"};
+	const char *exts[] = {".pack", ".idx"};
 	struct child_process cmd;
 	struct string_list_item *item;
 	struct argv_array cmd_args = ARGV_ARRAY_INIT;
@@ -258,7 +258,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 	 */
 	failed = 0;
 	for_each_string_list_item(item, &names) {
-		for (ext = 0; ext < 2; ext++) {
+		for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
 			char *fname, *fname_old;
 			fname = mkpathdup("%s/%s%s", packdir,
 						item->string, exts[ext]);
@@ -315,7 +315,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 
 	/* Now the ones with the same name are out of the way... */
 	for_each_string_list_item(item, &names) {
-		for (ext = 0; ext < 2; ext++) {
+		for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
 			char *fname, *fname_old;
 			struct stat statbuffer;
 			fname = mkpathdup("%s/pack-%s%s",
@@ -335,7 +335,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 
 	/* Remove the "old-" files */
 	for_each_string_list_item(item, &names) {
-		for (ext = 0; ext < 2; ext++) {
+		for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
 			char *fname;
 			fname = mkpath("%s/old-pack-%s%s",
 					packdir,
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 16/23] repack: turn exts array into array-of-struct
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (14 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 15/23] repack: stop using magic number for ARRAY_SIZE(exts) Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 17/23] repack: handle optional files created by pack-objects Jeff King
                   ` (9 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

This is slightly more verbose, but will let us annotate the
extensions with further options in future commits.

Signed-off-by: Jeff King <peff@peff.net>
---
 builtin/repack.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/builtin/repack.c b/builtin/repack.c
index 2e88975..a176de2 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -115,7 +115,12 @@ static void remove_redundant_pack(const char *dir_name, const char *base_name)
 
 int cmd_repack(int argc, const char **argv, const char *prefix)
 {
-	const char *exts[] = {".pack", ".idx"};
+	struct {
+		const char *name;
+	} exts[] = {
+		{".pack"},
+		{".idx"},
+	};
 	struct child_process cmd;
 	struct string_list_item *item;
 	struct argv_array cmd_args = ARGV_ARRAY_INIT;
@@ -261,14 +266,14 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 		for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
 			char *fname, *fname_old;
 			fname = mkpathdup("%s/%s%s", packdir,
-						item->string, exts[ext]);
+						item->string, exts[ext].name);
 			if (!file_exists(fname)) {
 				free(fname);
 				continue;
 			}
 
 			fname_old = mkpath("%s/old-%s%s", packdir,
-						item->string, exts[ext]);
+						item->string, exts[ext].name);
 			if (file_exists(fname_old))
 				if (unlink(fname_old))
 					failed = 1;
@@ -319,9 +324,9 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 			char *fname, *fname_old;
 			struct stat statbuffer;
 			fname = mkpathdup("%s/pack-%s%s",
-					packdir, item->string, exts[ext]);
+					packdir, item->string, exts[ext].name);
 			fname_old = mkpathdup("%s-%s%s",
-					packtmp, item->string, exts[ext]);
+					packtmp, item->string, exts[ext].name);
 			if (!stat(fname_old, &statbuffer)) {
 				statbuffer.st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
 				chmod(fname_old, statbuffer.st_mode);
@@ -340,7 +345,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 			fname = mkpath("%s/old-pack-%s%s",
 					packdir,
 					item->string,
-					exts[ext]);
+					exts[ext].name);
 			if (remove_path(fname))
 				warning(_("removing '%s' failed"), fname);
 		}
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 17/23] repack: handle optional files created by pack-objects
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (15 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 16/23] repack: turn exts array into array-of-struct Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 18/23] repack: consider bitmaps when performing repacks Jeff King
                   ` (8 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

We ask pack-objects to pack to a set of temporary files, and
then rename them into place. Some files that pack-objects
creates may be optional (like a .bitmap file), in which case
we would not want to call rename(). We already call stat()
and make the chmod optional if the file cannot be accessed.
We could simply skip the rename step in this case, but that
would be a minor regression in noticing problems with
non-optional files (like the .pack and .idx files).

Instead, we can now annotate extensions as optional, and
skip them if they don't exist (and otherwise rely on
rename() to barf).

Signed-off-by: Jeff King <peff@peff.net>
---
 builtin/repack.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/builtin/repack.c b/builtin/repack.c
index a176de2..8b7dfd0 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -117,6 +117,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 {
 	struct {
 		const char *name;
+		unsigned optional:1;
 	} exts[] = {
 		{".pack"},
 		{".idx"},
@@ -323,6 +324,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 		for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
 			char *fname, *fname_old;
 			struct stat statbuffer;
+			int exists = 0;
 			fname = mkpathdup("%s/pack-%s%s",
 					packdir, item->string, exts[ext].name);
 			fname_old = mkpathdup("%s-%s%s",
@@ -330,9 +332,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 			if (!stat(fname_old, &statbuffer)) {
 				statbuffer.st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
 				chmod(fname_old, statbuffer.st_mode);
+				exists = 1;
+			}
+			if (exists || !exts[ext].optional) {
+				if (rename(fname_old, fname))
+					die_errno(_("renaming '%s' failed"), fname_old);
 			}
-			if (rename(fname_old, fname))
-				die_errno(_("renaming '%s' failed"), fname_old);
 			free(fname);
 			free(fname_old);
 		}
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 18/23] repack: consider bitmaps when performing repacks
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (16 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 17/23] repack: handle optional files created by pack-objects Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 19/23] count-objects: recognize .bitmap in garbage-checking Jeff King
                   ` (7 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

Since `pack-objects` will write a `.bitmap` file next to the `.pack` and
`.idx` files, this commit teaches `git-repack` to consider the new
bitmap indexes (if they exist) when performing repack operations.

This implies moving old bitmap indexes out of the way if we are
repacking a repository that already has them, and moving the newly
generated bitmap indexes into the `objects/pack` directory, next to
their corresponding packfiles.

Since `git repack` is now capable of handling these `.bitmap` files,
a normal `git gc` run on a repository that has `pack.writebitmaps` set
to true in its config file will generate bitmap indexes as part of the
garbage collection process.

Alternatively, `git repack` can be called with the `-b` switch to
explicitly generate bitmap indexes if you are experimenting
and don't want them on all the time.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Documentation/git-repack.txt | 9 ++++++++-
 builtin/repack.c             | 9 ++++++++-
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/Documentation/git-repack.txt b/Documentation/git-repack.txt
index 4c1aff6..dad186c 100644
--- a/Documentation/git-repack.txt
+++ b/Documentation/git-repack.txt
@@ -9,7 +9,7 @@ git-repack - Pack unpacked objects in a repository
 SYNOPSIS
 --------
 [verse]
-'git repack' [-a] [-A] [-d] [-f] [-F] [-l] [-n] [-q] [--window=<n>] [--depth=<n>]
+'git repack' [-a] [-A] [-d] [-f] [-F] [-l] [-n] [-q] [-b] [--window=<n>] [--depth=<n>]
 
 DESCRIPTION
 -----------
@@ -110,6 +110,13 @@ other objects in that pack they already have locally.
 	The default is unlimited, unless the config variable
 	`pack.packSizeLimit` is set.
 
+-b::
+--write-bitmap-index::
+	Write a reachability bitmap index as part of the repack. This
+	only makes sense when used with `-a` or `-A`, as the bitmaps
+	must be able to refer to all reachable objects. This option
+	overrides the setting of `pack.writebitmaps`.
+
 
 Configuration
 -------------
diff --git a/builtin/repack.c b/builtin/repack.c
index 8b7dfd0..239f278 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -94,7 +94,7 @@ static void get_non_kept_pack_filenames(struct string_list *fname_list)
 
 static void remove_redundant_pack(const char *dir_name, const char *base_name)
 {
-	const char *exts[] = {".pack", ".idx", ".keep"};
+	const char *exts[] = {".pack", ".idx", ".keep", ".bitmap"};
 	int i;
 	struct strbuf buf = STRBUF_INIT;
 	size_t plen;
@@ -121,6 +121,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 	} exts[] = {
 		{".pack"},
 		{".idx"},
+		{".bitmap", 1},
 	};
 	struct child_process cmd;
 	struct string_list_item *item;
@@ -143,6 +144,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 	int no_update_server_info = 0;
 	int quiet = 0;
 	int local = 0;
+	int write_bitmap = -1;
 
 	struct option builtin_repack_options[] = {
 		OPT_BIT('a', NULL, &pack_everything,
@@ -161,6 +163,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 		OPT__QUIET(&quiet, N_("be quiet")),
 		OPT_BOOL('l', "local", &local,
 				N_("pass --local to git-pack-objects")),
+		OPT_BOOL('b', "write-bitmap-index", &write_bitmap,
+				N_("write bitmap index")),
 		OPT_STRING(0, "unpack-unreachable", &unpack_unreachable, N_("approxidate"),
 				N_("with -A, do not loosen objects older than this")),
 		OPT_INTEGER(0, "window", &window,
@@ -202,6 +206,9 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
 		argv_array_pushf(&cmd_args, "--no-reuse-delta");
 	if (no_reuse_object)
 		argv_array_pushf(&cmd_args, "--no-reuse-object");
+	if (write_bitmap >= 0)
+		argv_array_pushf(&cmd_args, "--%swrite-bitmap-index",
+				 write_bitmap ? "" : "no-");
 
 	if (pack_everything & ALL_INTO_ONE) {
 		get_non_kept_pack_filenames(&existing_packs);
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 19/23] count-objects: recognize .bitmap in garbage-checking
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (17 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 18/23] repack: consider bitmaps when performing repacks Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 20/23] t: add basic bitmap functionality tests Jeff King
                   ` (6 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

From: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>

Count-objects will report any "garbage" files in the packs
directory, including files whose extensions it does not
know (case 1), and files whose matching ".pack" file is
missing (case 2).  Without having learned about ".bitmap"
files, the current code reports all such files as garbage
(case 1), even if their pack exists. Instead, they should be
treated as case 2.

Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 sha1_file.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/sha1_file.c b/sha1_file.c
index 4714bd8..1294962 100644
--- a/sha1_file.c
+++ b/sha1_file.c
@@ -1194,6 +1194,7 @@ static void prepare_packed_git_one(char *objdir, int local)
 
 		if (has_extension(de->d_name, ".idx") ||
 		    has_extension(de->d_name, ".pack") ||
+		    has_extension(de->d_name, ".bitmap") ||
 		    has_extension(de->d_name, ".keep"))
 			string_list_append(&garbage, path);
 		else
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 20/23] t: add basic bitmap functionality tests
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (18 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 19/23] count-objects: recognize .bitmap in garbage-checking Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 21/23] t/perf: add tests for pack bitmaps Jeff King
                   ` (5 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

Now that we can read and write bitmaps, we can exercise them
with some basic functionality tests. These tests aren't
particularly useful for seeing the benefit, as the test
repo is too small for it to make a difference. However, we
can at least check that using bitmaps does not break anything.

Signed-off-by: Jeff King <peff@peff.net>
---
 t/t5310-pack-bitmaps.sh | 138 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 138 insertions(+)
 create mode 100755 t/t5310-pack-bitmaps.sh

diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
new file mode 100755
index 0000000..d2b0c45
--- /dev/null
+++ b/t/t5310-pack-bitmaps.sh
@@ -0,0 +1,138 @@
+#!/bin/sh
+
+test_description='exercise basic bitmap functionality'
+. ./test-lib.sh
+
+test_expect_success 'setup repo with moderate-sized history' '
+	for i in $(test_seq 1 10); do
+		test_commit $i
+	done &&
+	git checkout -b other HEAD~5 &&
+	for i in $(test_seq 1 10); do
+		test_commit side-$i
+	done &&
+	git checkout master &&
+	blob=$(echo tagged-blob | git hash-object -w --stdin) &&
+	git tag tagged-blob $blob &&
+	git config pack.writebitmaps true
+'
+
+test_expect_success 'full repack creates bitmaps' '
+	git repack -ad &&
+	ls .git/objects/pack/ | grep bitmap >output &&
+	test_line_count = 1 output
+'
+
+test_expect_success 'rev-list --test-bitmap verifies bitmaps' '
+	git rev-list --test-bitmap HEAD
+'
+
+rev_list_tests() {
+	state=$1
+
+	test_expect_success "counting commits via bitmap ($state)" '
+		git rev-list --count HEAD >expect &&
+		git rev-list --use-bitmap-index --count HEAD >actual &&
+		test_cmp expect actual
+	'
+
+	test_expect_success "counting partial commits via bitmap ($state)" '
+		git rev-list --count HEAD~5..HEAD >expect &&
+		git rev-list --use-bitmap-index --count HEAD~5..HEAD >actual &&
+		test_cmp expect actual
+	'
+
+	test_expect_success "counting non-linear history ($state)" '
+		git rev-list --count other...master >expect &&
+		git rev-list --use-bitmap-index --count other...master >actual &&
+		test_cmp expect actual
+	'
+
+	test_expect_success "enumerate --objects ($state)" '
+		git rev-list --objects --use-bitmap-index HEAD >tmp &&
+		cut -d" " -f1 <tmp >tmp2 &&
+		sort <tmp2 >actual &&
+		git rev-list --objects HEAD >tmp &&
+		cut -d" " -f1 <tmp >tmp2 &&
+		sort <tmp2 >expect &&
+		test_cmp expect actual
+	'
+
+	test_expect_success "bitmap --objects handles non-commit objects ($state)" '
+		git rev-list --objects --use-bitmap-index HEAD tagged-blob >actual &&
+		grep $blob actual
+	'
+}
+
+rev_list_tests 'full bitmap'
+
+test_expect_success 'clone from bitmapped repository' '
+	git clone --no-local --bare . clone.git &&
+	git rev-parse HEAD >expect &&
+	git --git-dir=clone.git rev-parse HEAD >actual &&
+	test_cmp expect actual
+'
+
+test_expect_success 'setup further non-bitmapped commits' '
+	for i in $(test_seq 1 10); do
+		test_commit further-$i
+	done
+'
+
+rev_list_tests 'partial bitmap'
+
+test_expect_success 'fetch (partial bitmap)' '
+	git --git-dir=clone.git fetch origin master:master &&
+	git rev-parse HEAD >expect &&
+	git --git-dir=clone.git rev-parse HEAD >actual &&
+	test_cmp expect actual
+'
+
+test_expect_success 'incremental repack cannot create bitmaps' '
+	test_commit more-1 &&
+	test_must_fail git repack -d
+'
+
+test_expect_success 'incremental repack can disable bitmaps' '
+	test_commit more-2 &&
+	git repack -d --no-write-bitmap-index
+'
+
+test_expect_success 'full repack, reusing previous bitmaps' '
+	git repack -ad &&
+	ls .git/objects/pack/ | grep bitmap >output &&
+	test_line_count = 1 output
+'
+
+test_expect_success 'fetch (full bitmap)' '
+	git --git-dir=clone.git fetch origin master:master &&
+	git rev-parse HEAD >expect &&
+	git --git-dir=clone.git rev-parse HEAD >actual &&
+	test_cmp expect actual
+'
+
+test_lazy_prereq JGIT '
+	type jgit
+'
+
+test_expect_success JGIT 'we can read jgit bitmaps' '
+	git clone . compat-jgit &&
+	(
+		cd compat-jgit &&
+		rm -f .git/objects/pack/*.bitmap &&
+		jgit gc &&
+		git rev-list --test-bitmap HEAD
+	)
+'
+
+test_expect_success JGIT 'jgit can read our bitmaps' '
+	git clone . compat-us &&
+	(
+		cd compat-us &&
+		git repack -adb &&
+		# jgit gc will barf if it does not like our bitmaps
+		jgit gc
+	)
+'
+
+test_done
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 21/23] t/perf: add tests for pack bitmaps
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (19 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 20/23] t: add basic bitmap functionality tests Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 22/23] pack-bitmap: implement optional name_hash cache Jeff King
                   ` (4 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

This adds a few basic perf tests for the pack bitmap code to
show off its improvements. The tests are:

  1. How long does it take to do a repack (it gets slower
     with bitmaps, since we have to do extra work)?

  2. How long does it take to do a clone (it gets faster
     with bitmaps)?

  3. How does a small fetch perform when we've just
     repacked?

  4. How does a clone perform when we haven't repacked since
     a week of pushes?

Here are results against linux.git:

Test                      origin/master       this tree
-----------------------------------------------------------------------
5310.2: repack to disk    33.64(32.64+2.04)   67.67(66.75+1.84) +101.2%
5310.3: simulated clone   30.49(29.47+2.05)   1.20(1.10+0.10) -96.1%
5310.4: simulated fetch   3.49(6.79+0.06)     5.57(22.35+0.07) +59.6%
5310.6: partial bitmap    36.70(43.87+1.81)   8.18(21.92+0.73) -77.7%

You can see that we do take longer to repack, but we do way
better for further clones. A small fetch performs a bit
worse, as we spend way more time on delta compression (note
the heavy user CPU time, as we have 8 threads) due to the
lack of name hashes for the bitmapped objects.

The final test shows how the bitmaps degrade over time
between packs. There's still a significant speedup over the
non-bitmap case, but we don't do quite as well (we have to
spend time accessing the "new" objects the old fashioned
way, including delta compression).

Signed-off-by: Jeff King <peff@peff.net>
---
 t/perf/p5310-pack-bitmaps.sh | 56 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100755 t/perf/p5310-pack-bitmaps.sh

diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh
new file mode 100755
index 0000000..8c6ae45
--- /dev/null
+++ b/t/perf/p5310-pack-bitmaps.sh
@@ -0,0 +1,56 @@
+#!/bin/sh
+
+test_description='Tests pack performance using bitmaps'
+. ./perf-lib.sh
+
+test_perf_large_repo
+
+# note that we do everything through config,
+# since we want to be able to compare bitmap-aware
+# git versus non-bitmap git
+test_expect_success 'setup bitmap config' '
+	git config pack.writebitmaps true
+'
+
+test_perf 'repack to disk' '
+	git repack -ad
+'
+
+test_perf 'simulated clone' '
+	git pack-objects --stdout --all </dev/null >/dev/null
+'
+
+test_perf 'simulated fetch' '
+	have=$(git rev-list HEAD~100 -1) &&
+	{
+		echo HEAD &&
+		echo ^$have
+	} | git pack-objects --revs --stdout >/dev/null
+'
+
+test_expect_success 'create partial bitmap state' '
+	# pick a commit to represent the repo tip in the past
+	cutoff=$(git rev-list HEAD~100 -1) &&
+	orig_tip=$(git rev-parse HEAD) &&
+
+	# now kill off all of the refs and pretend we had
+	# just the one tip
+	rm -rf .git/logs .git/refs/* .git/packed-refs
+	git update-ref HEAD $cutoff
+
+	# and then repack, which will leave us with a nice
+	# big bitmap pack of the "old" history, and all of
+	# the new history will be loose, as if it had been pushed
+	# up incrementally and exploded via unpack-objects
+	git repack -Ad
+
+	# and now restore our original tip, as if the pushes
+	# had happened
+	git update-ref HEAD $orig_tip
+'
+
+test_perf 'partial bitmap' '
+	git pack-objects --stdout --all </dev/null >/dev/null
+'
+
+test_done
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 22/23] pack-bitmap: implement optional name_hash cache
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (20 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 21/23] t/perf: add tests for pack bitmaps Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-21 14:00 ` [PATCH v4 23/23] compat/mingw.h: Fix the MinGW and msvc builds Jeff King
                   ` (3 subsequent siblings)
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

From: Vicent Marti <tanoku@gmail.com>

When we use pack bitmaps rather than walking the object
graph, we end up with the list of objects to include in the
packfile, but we do not know the path at which any tree or
blob objects would be found.

In a recently packed repository, this is fine. A fetch would
use the paths only as a heuristic in the delta compression
phase, and a fully packed repository should not need to do
much delta compression.

As time passes, though, we may acquire more objects on top
of our large bitmapped pack. If clients fetch frequently,
then they never even look at the bitmapped history, and all
works as usual. However, a client who has not fetched since
the last bitmap repack will have "have" tips in the
bitmapped history, but "want" newer objects.

The bitmaps themselves degrade gracefully in this
circumstance. We manually walk the more recent bits of
history, and then use bitmaps when we hit them.

But we would also like to perform delta compression between
the newer objects and the bitmapped objects (both to delta
against what we know the user already has, and between "new"
and "old" objects that the user is fetching). The lack of
pathnames makes our delta heuristics much less effective.

This patch adds an optional cache of the 32-bit name_hash
values to the end of the bitmap file. If present, a reader
can use it to match bitmapped and non-bitmapped names during
delta compression.
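
For reference only, here is a minimal sketch of how a reader locates
the cache (it mirrors the pack-bitmap.c hunk further down; `map`,
`map_size` and `num_objects` stand for the mmap'd `.bitmap` file and
the object count of its pack):

    /* the cache sits just before the trailing 20-byte checksum */
    const unsigned char *end = map + map_size - 20;
    const uint32_t *hashes = (const uint32_t *)end - num_objects;

    /* name-hash of the object at position i in the pack index */
    uint32_t hash = ntohl(hashes[i]);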

Here are perf results for p5310:

Test                      origin/master       HEAD^                      HEAD
-------------------------------------------------------------------------------------------------
5310.2: repack to disk    36.81(37.82+1.43)   47.70(48.74+1.41) +29.6%   47.75(48.70+1.51) +29.7%
5310.3: simulated clone   30.78(29.70+2.14)   1.08(0.97+0.10) -96.5%     1.07(0.94+0.12) -96.5%
5310.4: simulated fetch   3.16(6.10+0.08)     3.54(10.65+0.06) +12.0%    1.70(3.07+0.06) -46.2%
5310.6: partial bitmap    36.76(43.19+1.81)   6.71(11.25+0.76) -81.7%    4.08(6.26+0.46) -88.9%

You can see that the time spent on an incremental fetch goes
down, as our delta heuristics are able to do their work.
And we save time on the partial bitmap clone for the same
reason.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Documentation/config.txt                  | 11 +++++++++++
 Documentation/technical/bitmap-format.txt | 33 +++++++++++++++++++++++++++++++
 builtin/pack-objects.c                    | 10 +++++++++-
 pack-bitmap-write.c                       | 21 ++++++++++++++++++--
 pack-bitmap.c                             | 11 +++++++++++
 pack-bitmap.h                             |  6 ++++--
 t/perf/p5310-pack-bitmaps.sh              |  3 ++-
 t/t5310-pack-bitmaps.sh                   |  3 ++-
 8 files changed, 91 insertions(+), 7 deletions(-)

diff --git a/Documentation/config.txt b/Documentation/config.txt
index 4b0c368..499a3c4 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -1872,6 +1872,17 @@ pack.writebitmaps::
 	space and extra time spent on the initial repack.  Defaults to
 	false.
 
+pack.writeBitmapHashCache::
+	When true, git will include a "hash cache" section in the bitmap
+	index (if one is written). This cache can be used to feed git's
+	delta heuristics, potentially leading to better deltas between
+	bitmapped and non-bitmapped objects (e.g., when serving a fetch
+	between an older, bitmapped pack and objects that have been
+	pushed since the last gc). The downside is that it consumes 4
+	bytes per object of disk space, and that JGit's bitmap
+	implementation does not understand it, causing it to complain if
+	Git and JGit are used on the same repository. Defaults to false.
+
 pager.<cmd>::
 	If the value is boolean, turns on or off pagination of the
 	output of a particular Git subcommand when writing to a tty.
diff --git a/Documentation/technical/bitmap-format.txt b/Documentation/technical/bitmap-format.txt
index 7a86bd7..f8c18a0 100644
--- a/Documentation/technical/bitmap-format.txt
+++ b/Documentation/technical/bitmap-format.txt
@@ -21,6 +21,12 @@ GIT bitmap v1 format
 			requirement for the bitmap index format, also present in JGit,
 			that greatly reduces the complexity of the implementation.
 
+			- BITMAP_OPT_HASH_CACHE (0x4)
+			If present, the end of the bitmap file contains
+			`N` 32-bit name-hash values, one per object in the
+			pack. The format and meaning of the name-hash is
+			described below.
+
 		4-byte entry count (network byte order)
 
 			The total count of entries (bitmapped commits) in this bitmap index.
@@ -129,3 +135,30 @@ The bitstream represented by the above chunk is then:
 The next word after `L_M` (if any) must again be a RLW, for the next
 chunk.  For efficient appending to the bitstream, the EWAH stores a
 pointer to the last RLW in the stream.
+
+
+== Appendix B: Optional Bitmap Sections
+
+These sections may or may not be present in the `.bitmap` file; their
+presence is indicated by the header flags section described above.
+
+Name-hash cache
+---------------
+
+If the BITMAP_OPT_HASH_CACHE flag is set, the end of the bitmap contains
+a cache of 32-bit values, one per object in the pack. The value at
+position `i` is the hash of the pathname at which the `i`th object
+(counting in index order) in the pack can be found.  This can be fed
+into the delta heuristics to compare objects with similar pathnames.
+
+The hash algorithm used is:
+
+    hash = 0;
+    while ((c = *name++))
+	    if (!isspace(c))
+		    hash = (hash >> 2) + (c << 24);
+
+Note that this hashing scheme is tied to the BITMAP_OPT_HASH_CACHE flag.
+If implementations want to choose a different hashing scheme, they are
+free to do so, but MUST allocate a new header flag (because comparing
+hashes made under two different schemes would be pointless).
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index fd6ae01..fd74197 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -64,6 +64,7 @@ static off_t reuse_packfile_offset;
 
 static int use_bitmap_index = 1;
 static int write_bitmap_index;
+static uint16_t write_bitmap_options;
 
 static unsigned long delta_cache_size = 0;
 static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
@@ -851,7 +852,8 @@ static void write_pack_file(void)
 				bitmap_writer_reuse_bitmaps(&to_pack);
 				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
 				bitmap_writer_build(&to_pack);
-				bitmap_writer_finish(written_list, nr_written, tmpname);
+				bitmap_writer_finish(written_list, nr_written,
+						     tmpname, write_bitmap_options);
 				write_bitmap_index = 0;
 			}
 
@@ -2201,6 +2203,12 @@ static int git_pack_config(const char *k, const char *v, void *cb)
 		write_bitmap_index = git_config_bool(k, v);
 		return 0;
 	}
+	if (!strcmp(k, "pack.writebitmaphashcache")) {
+		if (git_config_bool(k, v))
+			write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
+		else
+			write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
+	}
 	if (!strcmp(k, "pack.usebitmaps")) {
 		use_bitmap_index = git_config_bool(k, v);
 		return 0;
diff --git a/pack-bitmap-write.c b/pack-bitmap-write.c
index 954a74d..1218bef 100644
--- a/pack-bitmap-write.c
+++ b/pack-bitmap-write.c
@@ -490,6 +490,19 @@ static void write_selected_commits_v1(struct sha1file *f,
 	}
 }
 
+static void write_hash_cache(struct sha1file *f,
+			     struct pack_idx_entry **index,
+			     uint32_t index_nr)
+{
+	uint32_t i;
+
+	for (i = 0; i < index_nr; ++i) {
+		struct object_entry *entry = (struct object_entry *)index[i];
+		uint32_t hash_value = htonl(entry->hash);
+		sha1write(f, &hash_value, sizeof(hash_value));
+	}
+}
+
 void bitmap_writer_set_checksum(unsigned char *sha1)
 {
 	hashcpy(writer.pack_checksum, sha1);
@@ -497,7 +510,8 @@ void bitmap_writer_set_checksum(unsigned char *sha1)
 
 void bitmap_writer_finish(struct pack_idx_entry **index,
 			  uint32_t index_nr,
-			  const char *filename)
+			  const char *filename,
+			  uint16_t options)
 {
 	static char tmp_file[PATH_MAX];
 	static uint16_t default_version = 1;
@@ -514,7 +528,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
 
 	memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE));
 	header.version = htons(default_version);
-	header.options = htons(flags);
+	header.options = htons(flags | options);
 	header.entry_count = htonl(writer.selected_nr);
 	memcpy(header.checksum, writer.pack_checksum, 20);
 
@@ -525,6 +539,9 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
 	dump_bitmap(f, writer.tags);
 	write_selected_commits_v1(f, index, index_nr);
 
+	if (options & BITMAP_OPT_HASH_CACHE)
+		write_hash_cache(f, index, index_nr);
+
 	sha1close(f, NULL, CSUM_FSYNC);
 
 	if (adjust_shared_perm(tmp_file))
diff --git a/pack-bitmap.c b/pack-bitmap.c
index 82090a6..ae0b57b 100644
--- a/pack-bitmap.c
+++ b/pack-bitmap.c
@@ -66,6 +66,9 @@ static struct bitmap_index {
 	/* Number of bitmapped commits */
 	uint32_t entry_count;
 
+	/* Name-hash cache (or NULL if not present). */
+	uint32_t *hashes;
+
 	/*
 	 * Extended index.
 	 *
@@ -152,6 +155,11 @@ static int load_bitmap_header(struct bitmap_index *index)
 		if ((flags & BITMAP_OPT_FULL_DAG) == 0)
 			return error("Unsupported options for bitmap index file "
 				"(Git requires BITMAP_OPT_FULL_DAG)");
+
+		if (flags & BITMAP_OPT_HASH_CACHE) {
+			unsigned char *end = index->map + index->map_size - 20;
+			index->hashes = ((uint32_t *)end) - index->pack->num_objects;
+		}
 	}
 
 	index->entry_count = ntohl(header->entry_count);
@@ -626,6 +634,9 @@ static void show_objects_for_type(
 			entry = &bitmap_git.reverse_index->revindex[pos + offset];
 			sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr);
 
+			if (bitmap_git.hashes)
+				hash = ntohl(bitmap_git.hashes[entry->nr]);
+
 			show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset);
 		}
 
diff --git a/pack-bitmap.h b/pack-bitmap.h
index 09acf02..8b7f4e9 100644
--- a/pack-bitmap.h
+++ b/pack-bitmap.h
@@ -24,7 +24,8 @@ static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};
 #define NEEDS_BITMAP (1u<<22)
 
 enum pack_bitmap_opts {
-	BITMAP_OPT_FULL_DAG = 1
+	BITMAP_OPT_FULL_DAG = 1,
+	BITMAP_OPT_HASH_CACHE = 4,
 };
 
 enum pack_bitmap_flags {
@@ -57,6 +58,7 @@ void bitmap_writer_select_commits(struct commit **indexed_commits,
 void bitmap_writer_build(struct packing_data *to_pack);
 void bitmap_writer_finish(struct pack_idx_entry **index,
 			  uint32_t index_nr,
-			  const char *filename);
+			  const char *filename,
+			  uint16_t options);
 
 #endif
diff --git a/t/perf/p5310-pack-bitmaps.sh b/t/perf/p5310-pack-bitmaps.sh
index 8c6ae45..685d46f 100755
--- a/t/perf/p5310-pack-bitmaps.sh
+++ b/t/perf/p5310-pack-bitmaps.sh
@@ -9,7 +9,8 @@ test_perf_large_repo
 # since we want to be able to compare bitmap-aware
 # git versus non-bitmap git
 test_expect_success 'setup bitmap config' '
-	git config pack.writebitmaps true
+	git config pack.writebitmaps true &&
+	git config pack.writebitmaphashcache true
 '
 
 test_perf 'repack to disk' '
diff --git a/t/t5310-pack-bitmaps.sh b/t/t5310-pack-bitmaps.sh
index d2b0c45..d3a3afa 100755
--- a/t/t5310-pack-bitmaps.sh
+++ b/t/t5310-pack-bitmaps.sh
@@ -14,7 +14,8 @@ test_expect_success 'setup repo with moderate-sized history' '
 	git checkout master &&
 	blob=$(echo tagged-blob | git hash-object -w --stdin) &&
 	git tag tagged-blob $blob &&
-	git config pack.writebitmaps true
+	git config pack.writebitmaps true &&
+	git config pack.writebitmaphashcache true
 '
 
 test_expect_success 'full repack creates bitmaps' '
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH v4 23/23] compat/mingw.h: Fix the MinGW and msvc builds
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (21 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 22/23] pack-bitmap: implement optional name_hash cache Jeff King
@ 2013-12-21 14:00 ` Jeff King
  2013-12-25 22:08   ` Erik Faye-Lund
  2013-12-21 14:03 ` [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (2 subsequent siblings)
  25 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:00 UTC (permalink / raw)
  To: git

From: Ramsay Jones <ramsay@ramsay1.demon.co.uk>

Signed-off-by: Ramsay Jones <ramsay@ramsay1.demon.co.uk>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 compat/mingw.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/compat/mingw.h b/compat/mingw.h
index 92cd728..8828ede 100644
--- a/compat/mingw.h
+++ b/compat/mingw.h
@@ -345,6 +345,7 @@ static inline char *mingw_find_last_dir_sep(const char *path)
 #define PATH_SEP ';'
 #define PRIuMAX "I64u"
 #define PRId64 "I64d"
+#define PRIx64 "I64x"
 
 void mingw_open_html(const char *path);
 #define open_html mingw_open_html
-- 
1.8.5.1.399.g900e7cd

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 0/22] pack bitmaps
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (22 preceding siblings ...)
  2013-12-21 14:00 ` [PATCH v4 23/23] compat/mingw.h: Fix the MinGW and msvc builds Jeff King
@ 2013-12-21 14:03 ` Jeff King
  2013-12-21 14:05 ` Jeff King
  2013-12-21 18:34 ` Thomas Rast
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:03 UTC (permalink / raw)
  To: git

On Sat, Dec 21, 2013 at 08:56:51AM -0500, Jeff King wrote:

> Interdiff is below.
> 
>   [01/23]: sha1write: make buffer const-correct
>   [02/23]: revindex: Export new APIs
>   [03/23]: pack-objects: Refactor the packing list
>   [04/23]: pack-objects: factor out name_hash
>   [05/23]: revision: allow setting custom limiter function
>   [06/23]: sha1_file: export `git_open_noatime`
>   [07/23]: compat: add endianness helpers
>   [08/23]: ewah: compressed bitmap implementation
>   [09/23]: documentation: add documentation for the bitmap format

By the way, the patches are identical up through 09/23. I think the
first one is already merged into another topic, too, so it may be worth
building on that instead of re-applying.

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 0/22] pack bitmaps
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (23 preceding siblings ...)
  2013-12-21 14:03 ` [PATCH v4 0/22] pack bitmaps Jeff King
@ 2013-12-21 14:05 ` Jeff King
  2013-12-21 18:34 ` Thomas Rast
  25 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2013-12-21 14:05 UTC (permalink / raw)
  To: git

On Sat, Dec 21, 2013 at 08:56:51AM -0500, Jeff King wrote:

> The changes from v3 are:
> 
>  - reworked add_object_entry refactoring (see patch 11, which is new,
>    and patch 12 which builds on it in a more natural way)
> 
>  - better error/die reporting from write_reused_pack
> 
>  - added Ramsay's PRIx64 compat fix
> 
>  - fixed a use-after-free in the warning message of open_pack_bitmap_1
> 
>  - minor typo/thinko fixes from Thomas in docs and tests

One thing explicitly _not_ here is ripping out khash in favor of
Karsten's hash system. That is still on the table, but I'd much rather
do it on top if we are going to.

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 0/22] pack bitmaps
  2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
                   ` (24 preceding siblings ...)
  2013-12-21 14:05 ` Jeff King
@ 2013-12-21 18:34 ` Thomas Rast
  25 siblings, 0 replies; 68+ messages in thread
From: Thomas Rast @ 2013-12-21 18:34 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King <peff@peff.net> writes:

> Here's the v4 re-roll of the pack bitmap series.
>
> The changes from v3 are:
>
>  - reworked add_object_entry refactoring (see patch 11, which is new,
>    and patch 12 which builds on it in a more natural way)

This now looks like this (pasting because it is hard to see in the diffs):

  static int add_object_entry(const unsigned char *sha1, enum object_type type,
                              const char *name, int exclude)
  {
          struct packed_git *found_pack;
          off_t found_offset;
          uint32_t index_pos;

          if (have_duplicate_entry(sha1, exclude, &index_pos))
                  return 0;

          if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset))
                  return 0;

          create_object_entry(sha1, type, pack_name_hash(name),
                              exclude, name && no_try_delta(name),
                              index_pos, found_pack, found_offset);

          display_progress(progress_state, to_pack.nr_objects);
          return 1;
  }

  static int add_object_entry_from_bitmap(const unsigned char *sha1,
                                          enum object_type type,
                                          int flags, uint32_t name_hash,
                                          struct packed_git *pack, off_t offset)
  {
          uint32_t index_pos;

          if (have_duplicate_entry(sha1, 0, &index_pos))
                  return 0;

          create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);

          display_progress(progress_state, to_pack.nr_objects);
          return 1;
  }


Much nicer.  Thanks for going the extra mile!

-- 
Thomas Rast
tr@thomasrast.ch

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 01/23] sha1write: make buffer const-correct
  2013-12-21 13:59 ` [PATCH v4 01/23] sha1write: make buffer const-correct Jeff King
@ 2013-12-22  9:06   ` Christian Couder
  0 siblings, 0 replies; 68+ messages in thread
From: Christian Couder @ 2013-12-22  9:06 UTC (permalink / raw)
  To: Jeff King; +Cc: git

On Sat, Dec 21, 2013 at 2:59 PM, Jeff King <peff@peff.net> wrote:
> We are passed a "void *" and write it out without ever

s/are passed/pass/

Cheers,
Christian.

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 23/23] compat/mingw.h: Fix the MinGW and msvc builds
  2013-12-21 14:00 ` [PATCH v4 23/23] compat/mingw.h: Fix the MinGW and msvc builds Jeff King
@ 2013-12-25 22:08   ` Erik Faye-Lund
  2013-12-28 10:00     ` Jeff King
  0 siblings, 1 reply; 68+ messages in thread
From: Erik Faye-Lund @ 2013-12-25 22:08 UTC (permalink / raw)
  To: Jeff King; +Cc: GIT Mailing-list

On Sat, Dec 21, 2013 at 3:00 PM, Jeff King <peff@peff.net> wrote:
> From: Ramsay Jones <ramsay@ramsay1.demon.co.uk>
>
> Signed-off-by: Ramsay Jones <ramsay@ramsay1.demon.co.uk>
> Signed-off-by: Junio C Hamano <gitster@pobox.com>
> Signed-off-by: Jeff King <peff@peff.net>
> ---
>  compat/mingw.h | 1 +
>  1 file changed, 1 insertion(+)
>
> diff --git a/compat/mingw.h b/compat/mingw.h
> index 92cd728..8828ede 100644
> --- a/compat/mingw.h
> +++ b/compat/mingw.h
> @@ -345,6 +345,7 @@ static inline char *mingw_find_last_dir_sep(const char *path)
>  #define PATH_SEP ';'
>  #define PRIuMAX "I64u"
>  #define PRId64 "I64d"
> +#define PRIx64 "I64x"
>

Please, move this before patch #8, and adjust the commit message.

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 23/23] compat/mingw.h: Fix the MinGW and msvc builds
  2013-12-25 22:08   ` Erik Faye-Lund
@ 2013-12-28 10:00     ` Jeff King
  2013-12-28 10:06       ` Vicent Martí
  2013-12-28 15:58       ` Ramsay Jones
  0 siblings, 2 replies; 68+ messages in thread
From: Jeff King @ 2013-12-28 10:00 UTC (permalink / raw)
  To: Erik Faye-Lund; +Cc: Junio C Hamano, GIT Mailing-list

On Wed, Dec 25, 2013 at 11:08:57PM +0100, Erik Faye-Lund wrote:

> On Sat, Dec 21, 2013 at 3:00 PM, Jeff King <peff@peff.net> wrote:
> > From: Ramsay Jones <ramsay@ramsay1.demon.co.uk>
> >
> > Signed-off-by: Ramsay Jones <ramsay@ramsay1.demon.co.uk>
> > Signed-off-by: Junio C Hamano <gitster@pobox.com>
> > Signed-off-by: Jeff King <peff@peff.net>
> > ---
> >  compat/mingw.h | 1 +
> >  1 file changed, 1 insertion(+)
> >
> > diff --git a/compat/mingw.h b/compat/mingw.h
> > index 92cd728..8828ede 100644
> > --- a/compat/mingw.h
> > +++ b/compat/mingw.h
> > @@ -345,6 +345,7 @@ static inline char *mingw_find_last_dir_sep(const char *path)
> >  #define PATH_SEP ';'
> >  #define PRIuMAX "I64u"
> >  #define PRId64 "I64d"
> > +#define PRIx64 "I64x"
> >
> 
> Please, move this before patch #8, and adjust the commit message.

Yeah, that makes sense. Though I think we can do one better and simply
remove the need for it entirely. The only use of PRIx64 is in a
debugging function that does not get called.

How about squashing the patch below into patch 8 ("ewah: compressed
bitmap implementation"):

diff --git a/ewah/ewah_bitmap.c b/ewah/ewah_bitmap.c
index f104b87..9ced2da 100644
--- a/ewah/ewah_bitmap.c
+++ b/ewah/ewah_bitmap.c
@@ -381,18 +381,6 @@ void ewah_iterator_init(struct ewah_iterator *it, struct ewah_bitmap *parent)
 		read_new_rlw(it);
 }
 
-void ewah_dump(struct ewah_bitmap *self)
-{
-	size_t i;
-	fprintf(stderr, "%"PRIuMAX" bits | %"PRIuMAX" words | ",
-		(uintmax_t)self->bit_size, (uintmax_t)self->buffer_size);
-
-	for (i = 0; i < self->buffer_size; ++i)
-		fprintf(stderr, "%016"PRIx64" ", (uint64_t)self->buffer[i]);
-
-	fprintf(stderr, "\n");
-}
-
 void ewah_not(struct ewah_bitmap *self)
 {
 	size_t pointer = 0;
diff --git a/ewah/ewok.h b/ewah/ewok.h
index 619afaa..43adeb5 100644
--- a/ewah/ewok.h
+++ b/ewah/ewok.h
@@ -193,8 +193,6 @@ void ewah_and(
 	struct ewah_bitmap *ewah_j,
 	struct ewah_bitmap *out);
 
-void ewah_dump(struct ewah_bitmap *self);
-
 /**
  * Direct word access
  */

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 23/23] compat/mingw.h: Fix the MinGW and msvc builds
  2013-12-28 10:00     ` Jeff King
@ 2013-12-28 10:06       ` Vicent Martí
  2013-12-28 15:58       ` Ramsay Jones
  1 sibling, 0 replies; 68+ messages in thread
From: Vicent Martí @ 2013-12-28 10:06 UTC (permalink / raw)
  To: Jeff King; +Cc: Erik Faye-Lund, Junio C Hamano, GIT Mailing-list

Sounds good. We don't really need the dump anyway.

On Sat, Dec 28, 2013 at 11:00 AM, Jeff King <peff@peff.net> wrote:
> On Wed, Dec 25, 2013 at 11:08:57PM +0100, Erik Faye-Lund wrote:
>
>> On Sat, Dec 21, 2013 at 3:00 PM, Jeff King <peff@peff.net> wrote:
>> > From: Ramsay Jones <ramsay@ramsay1.demon.co.uk>
>> >
>> > Signed-off-by: Ramsay Jones <ramsay@ramsay1.demon.co.uk>
>> > Signed-off-by: Junio C Hamano <gitster@pobox.com>
>> > Signed-off-by: Jeff King <peff@peff.net>
>> > ---
>> >  compat/mingw.h | 1 +
>> >  1 file changed, 1 insertion(+)
>> >
>> > diff --git a/compat/mingw.h b/compat/mingw.h
>> > index 92cd728..8828ede 100644
>> > --- a/compat/mingw.h
>> > +++ b/compat/mingw.h
>> > @@ -345,6 +345,7 @@ static inline char *mingw_find_last_dir_sep(const char *path)
>> >  #define PATH_SEP ';'
>> >  #define PRIuMAX "I64u"
>> >  #define PRId64 "I64d"
>> > +#define PRIx64 "I64x"
>> >
>>
>> Please, move this before patch #8, and adjust the commit message.
>
> Yeah, that makes sense. Though I think we can do one better and simply
> remove the need for it entirely. The only use of PRIx64 is in a
> debugging function that does not get called.
>
> How about squashing the patch below into patch 8 ("ewah: compressed
> bitmap implementation"):
>
> diff --git a/ewah/ewah_bitmap.c b/ewah/ewah_bitmap.c
> index f104b87..9ced2da 100644
> --- a/ewah/ewah_bitmap.c
> +++ b/ewah/ewah_bitmap.c
> @@ -381,18 +381,6 @@ void ewah_iterator_init(struct ewah_iterator *it, struct ewah_bitmap *parent)
>                 read_new_rlw(it);
>  }
>
> -void ewah_dump(struct ewah_bitmap *self)
> -{
> -       size_t i;
> -       fprintf(stderr, "%"PRIuMAX" bits | %"PRIuMAX" words | ",
> -               (uintmax_t)self->bit_size, (uintmax_t)self->buffer_size);
> -
> -       for (i = 0; i < self->buffer_size; ++i)
> -               fprintf(stderr, "%016"PRIx64" ", (uint64_t)self->buffer[i]);
> -
> -       fprintf(stderr, "\n");
> -}
> -
>  void ewah_not(struct ewah_bitmap *self)
>  {
>         size_t pointer = 0;
> diff --git a/ewah/ewok.h b/ewah/ewok.h
> index 619afaa..43adeb5 100644
> --- a/ewah/ewok.h
> +++ b/ewah/ewok.h
> @@ -193,8 +193,6 @@ void ewah_and(
>         struct ewah_bitmap *ewah_j,
>         struct ewah_bitmap *out);
>
> -void ewah_dump(struct ewah_bitmap *self);
> -
>  /**
>   * Direct word access
>   */

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 23/23] compat/mingw.h: Fix the MinGW and msvc builds
  2013-12-28 10:00     ` Jeff King
  2013-12-28 10:06       ` Vicent Martí
@ 2013-12-28 15:58       ` Ramsay Jones
  1 sibling, 0 replies; 68+ messages in thread
From: Ramsay Jones @ 2013-12-28 15:58 UTC (permalink / raw)
  To: Jeff King, Erik Faye-Lund; +Cc: Junio C Hamano, GIT Mailing-list

On 28/12/13 10:00, Jeff King wrote:
> On Wed, Dec 25, 2013 at 11:08:57PM +0100, Erik Faye-Lund wrote:
> 
>> On Sat, Dec 21, 2013 at 3:00 PM, Jeff King <peff@peff.net> wrote:
>>> From: Ramsay Jones <ramsay@ramsay1.demon.co.uk>
>>>
>>> Signed-off-by: Ramsay Jones <ramsay@ramsay1.demon.co.uk>
>>> Signed-off-by: Junio C Hamano <gitster@pobox.com>
>>> Signed-off-by: Jeff King <peff@peff.net>
>>> ---
>>>  compat/mingw.h | 1 +
>>>  1 file changed, 1 insertion(+)
>>>
>>> diff --git a/compat/mingw.h b/compat/mingw.h
>>> index 92cd728..8828ede 100644
>>> --- a/compat/mingw.h
>>> +++ b/compat/mingw.h
>>> @@ -345,6 +345,7 @@ static inline char *mingw_find_last_dir_sep(const char *path)
>>>  #define PATH_SEP ';'
>>>  #define PRIuMAX "I64u"
>>>  #define PRId64 "I64d"
>>> +#define PRIx64 "I64x"
>>>
>>
>> Please, move this before patch #8, and adjust the commit message.
> 
> Yeah, that makes sense. Though I think we can do one better and simply
> remove the need for it entirely. The only use of PRIx64 is in a
> debugging function that does not get called.
> 
> How about squashing the patch below into patch 8 ("ewah: compressed
> bitmap implementation"):
> 
> diff --git a/ewah/ewah_bitmap.c b/ewah/ewah_bitmap.c
> index f104b87..9ced2da 100644
> --- a/ewah/ewah_bitmap.c
> +++ b/ewah/ewah_bitmap.c
> @@ -381,18 +381,6 @@ void ewah_iterator_init(struct ewah_iterator *it, struct ewah_bitmap *parent)
>  		read_new_rlw(it);
>  }
>  
> -void ewah_dump(struct ewah_bitmap *self)
> -{
> -	size_t i;
> -	fprintf(stderr, "%"PRIuMAX" bits | %"PRIuMAX" words | ",
> -		(uintmax_t)self->bit_size, (uintmax_t)self->buffer_size);
> -
> -	for (i = 0; i < self->buffer_size; ++i)
> -		fprintf(stderr, "%016"PRIx64" ", (uint64_t)self->buffer[i]);
> -
> -	fprintf(stderr, "\n");
> -}
> -
>  void ewah_not(struct ewah_bitmap *self)
>  {
>  	size_t pointer = 0;
> diff --git a/ewah/ewok.h b/ewah/ewok.h
> index 619afaa..43adeb5 100644
> --- a/ewah/ewok.h
> +++ b/ewah/ewok.h
> @@ -193,8 +193,6 @@ void ewah_and(
>  	struct ewah_bitmap *ewah_j,
>  	struct ewah_bitmap *out);
>  
> -void ewah_dump(struct ewah_bitmap *self);
> -
>  /**
>   * Direct word access
>   */

I'm always in favour of removing unused (or unwanted) code! :-D

ATB,
Ramsay Jones

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2013-12-21 13:59 ` [PATCH v4 08/23] ewah: compressed bitmap implementation Jeff King
@ 2014-01-23  2:05   ` Jonathan Nieder
  2014-01-23 18:33     ` Jeff King
  0 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23  2:05 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Hi,

Jeff King wrote:

> EWAH is a word-aligned compressed variant of a bitset (i.e. a data
> structure that acts as a 0-indexed boolean array for many entries).

I suspect that for some callers it's not word-aligned.

Without the following squashed in, commits 212f2ffb and later fail t5310
on some machines[1].

On ARMv5:

	expecting success: 
		git rev-list --test-bitmap HEAD

	*** Error in `/«PKGBUILDDIR»/git': realloc(): invalid pointer: 0x008728b0 ***
	Aborted
	not ok 3 - rev-list --test-bitmap verifies bitmaps

On sparc:

	expecting success: 
		git rev-list --test-bitmap HEAD

	Bus error
	not ok 3 - rev-list --test-bitmap verifies bitmaps

Hopefully it's possible to get the alignment right in the caller
and tweak the signature to require that instead of using unaligned
reads like this.  There's still something wrong after this patch ---
the new result is a NULL pointer dereference in t5310.7 "enumerate
--objects (full bitmap)".

  (gdb) run
  Starting program: /home/jrnieder/src/git/git rev-list --objects --use-bitmap-index HEAD
  [Thread debugging using libthread_db enabled]
  Using host libthread_db library "/lib/sparc-linux-gnu/libthread_db.so.1".
  537ea4d3eb79c95f602873b1167c480006d2ac2d
[...]
  ec635144f60048986bc560c5576355344005e6e7

  Program received signal SIGSEGV, Segmentation fault.
  0x001321c0 in sha1_to_hex (sha1=0x0) at hex.c:68
  68                      unsigned int val = *sha1++;
  (gdb) bt
  #0  0x001321c0 in sha1_to_hex (sha1=0x0) at hex.c:68
  #1  0x000b839c in show_object_fast (sha1=0x0, type=OBJ_TREE, exclude=0, name_hash=0, found_pack=0x2b8480, found_offset=4338) at builtin/rev-list.c:270
  #2  0x00158abc in show_objects_for_type (objects=0x2b2498, type_filter=0x2b0fb0, object_type=OBJ_TREE, show_reach=0xb834c <show_object_fast>) at pack-bitmap.c:640
  #3  0x001592d0 in traverse_bitmap_commit_list (show_reachable=0xb834c <show_object_fast>) at pack-bitmap.c:818
  #4  0x000b894c in cmd_rev_list (argc=2, argv=0xffffd688, prefix=0x0) at builtin/rev-list.c:369
  #5  0x00014024 in run_builtin (p=0x256e38 <commands+1020>, argc=4, argv=0xffffd688) at git.c:314
  #6  0x00014330 in handle_builtin (argc=4, argv=0xffffd688) at git.c:487
  #7  0x000144a8 in run_argv (argcp=0xffffd5ec, argv=0xffffd5a0) at git.c:533
  #8  0x000146fc in main (argc=4, av=0xffffd684) at git.c:616
  (gdb) frame 2
  #2  0x00158abc in show_objects_for_type (objects=0x2b2498, type_filter=0x2b0fb0, object_type=OBJ_TREE, show_reach=0xb834c <show_object_fast>) at pack-bitmap.c:640
  640                             show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset);
  (gdb) p entry->nr
  $1 = 4294967295

Line numbers are in the context of 8e6341d9.  Ideas?

[1] ARMv5 and sparc:
https://buildd.debian.org/status/logs.php?pkg=git&suite=experimental

diff --git a/ewah/ewah_io.c b/ewah/ewah_io.c
index aed0da6..696a8ec 100644
--- a/ewah/ewah_io.c
+++ b/ewah/ewah_io.c
@@ -110,25 +110,38 @@ int ewah_serialize(struct ewah_bitmap *self, int fd)
 	return ewah_serialize_to(self, write_helper, (void *)(intptr_t)fd);
 }
 
+#define get_be32(p) ( \
+	(*((unsigned char *)(p) + 0) << 24) | \
+	(*((unsigned char *)(p) + 1) << 16) | \
+	(*((unsigned char *)(p) + 2) <<  8) | \
+	(*((unsigned char *)(p) + 3) <<  0) )
+
+#define get_be64(p) ( \
+	((uint64_t) get_be32(p) << 32) | \
+	get_be32((unsigned char *)(p) + 4) )
+
 int ewah_read_mmap(struct ewah_bitmap *self, void *map, size_t len)
 {
-	uint32_t *read32 = map;
-	eword_t *read64;
+	unsigned char *p = map;
 	size_t i;
 
-	self->bit_size = ntohl(*read32++);
-	self->buffer_size = self->alloc_size = ntohl(*read32++);
+	self->bit_size = get_be32(p);
+	p += 4;
+	self->buffer_size = self->alloc_size = get_be32(p);
+	p += 4;
 	self->buffer = ewah_realloc(self->buffer,
 		self->alloc_size * sizeof(eword_t));
 
 	if (!self->buffer)
 		return -1;
 
-	for (i = 0, read64 = (void *)read32; i < self->buffer_size; ++i)
-		self->buffer[i] = ntohll(*read64++);
+	for (i = 0; i < self->buffer_size; ++i) {
+		self->buffer[i] = get_be64(p);
+		p += 8;
+	}
 
-	read32 = (void *)read64;
-	self->rlw = self->buffer + ntohl(*read32++);
+	self->rlw = self->buffer + get_be32(p);
+	p += 4;
 
 	return (3 * 4) + (self->buffer_size * 8);
 }

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23  2:05   ` Jonathan Nieder
@ 2014-01-23 18:33     ` Jeff King
  2014-01-23 18:35       ` [PATCH 1/2] compat: move unaligned helpers to bswap.h Jeff King
                         ` (5 more replies)
  0 siblings, 6 replies; 68+ messages in thread
From: Jeff King @ 2014-01-23 18:33 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Wed, Jan 22, 2014 at 06:05:36PM -0800, Jonathan Nieder wrote:

> Jeff King wrote:
> 
> > EWAH is a word-aligned compressed variant of a bitset (i.e. a data
> > structure that acts as a 0-indexed boolean array for many entries).
> 
> I suspect that for some callers it's not word-aligned.

Yes, the mmap'd buffers aren't necessarily word-aligned. I don't think
we can fix that easily without changing the on-disk format (which comes
from JGit anyway). However, since we are memcpying the bulk of the data
into a newly allocated buffer (which must be aligned), we can do that
first, and then fix the endian-ness in place.

The only SPARC machine I have access to is running Solaris, but after
some slight wrestling with the BYTE_ORDER macros, I managed to get it to
compile and reproduced the bus error.

Here's a patch series (on top of jk/pack-bitmap, naturally) that lets
t5310 pass there. I assume the ARM problem is the same, though seeing
the failure in realloc() is unexpected. Can you try it on both your
platforms with these patches?

  [1/2]: compat: move unaligned helpers to bswap.h
  [2/2]: ewah: support platforms that require aligned reads

> Hopefully it's possible to get the alignment right in the caller
> and tweak the signature to require that instead of using unaligned
> reads like this.  There's still something wrong after this patch ---
> the new result is a NULL pointer dereference in t5310.7 "enumerate
> --objects (full bitmap)".

After my patches, t5310 runs fine for me. I didn't try your patch, but
mine are similar. Let me know if you still see the problem (there may
simply be a bug in yours, but I didn't see it).

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* [PATCH 1/2] compat: move unaligned helpers to bswap.h
  2014-01-23 18:33     ` Jeff King
@ 2014-01-23 18:35       ` Jeff King
  2014-01-23 19:41         ` Jonathan Nieder
  2014-01-23 18:35       ` [PATCH 2/2] ewah: support platforms that require aligned reads Jeff King
                         ` (4 subsequent siblings)
  5 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2014-01-23 18:35 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

From: Vicent Marti <tanoku@gmail.com>

Commit d60c49c (read-cache.c: allow unaligned mapping of the
index file, 2012-04-03) introduced helpers to access
unaligned data. Let's factor them out to make them more
widely available.

While we're at it, we'll give the helpers more readable
names, add a helper for the "ntohll" form, and add the
appropriate Makefile knob.

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 Makefile       |  7 +++++++
 compat/bswap.h | 28 ++++++++++++++++++++++++++++
 read-cache.c   | 44 ++++++++++++--------------------------------
 3 files changed, 47 insertions(+), 32 deletions(-)

diff --git a/Makefile b/Makefile
index 4136c4f..5711c0e 100644
--- a/Makefile
+++ b/Makefile
@@ -342,6 +342,9 @@ all::
 # Define DEFAULT_HELP_FORMAT to "man", "info" or "html"
 # (defaults to "man") if you want to have a different default when
 # "git help" is called without a parameter specifying the format.
+#
+# Define NEEDS_ALIGNED_ACCESS if your platform cannot handle unaligned
+# access to integers in mmap'd files.
 
 GIT-VERSION-FILE: FORCE
 	@$(SHELL_PATH) ./GIT-VERSION-GEN
@@ -1505,6 +1508,10 @@ ifneq (,$(XDL_FAST_HASH))
 	BASIC_CFLAGS += -DXDL_FAST_HASH
 endif
 
+ifdef NEEDS_ALIGNED_ACCESS
+	BASIC_CFLAGS += -DNEEDS_ALIGNED_ACCESS
+endif
+
 ifeq ($(TCLTK_PATH),)
 NO_TCLTK = NoThanks
 endif
diff --git a/compat/bswap.h b/compat/bswap.h
index c18a78e..80abc54 100644
--- a/compat/bswap.h
+++ b/compat/bswap.h
@@ -122,3 +122,31 @@ static inline uint64_t git_bswap64(uint64_t x)
 #endif
 
 #endif
+
+#ifndef NEEDS_ALIGNED_ACCESS
+#define align_ntohs(var) ntohs(var)
+#define align_ntohl(var) ntohl(var)
+#define align_ntohll(var) ntohll(var)
+#else
+static inline uint16_t ntohs_force_align(void *p)
+{
+	uint16_t x;
+	memcpy(&x, p, sizeof(x));
+	return ntohs(x);
+}
+static inline uint32_t ntohl_force_align(void *p)
+{
+	uint32_t x;
+	memcpy(&x, p, sizeof(x));
+	return ntohl(x);
+}
+static inline uint64_t ntohll_force_align(void *p)
+{
+	uint64_t x;
+	memcpy(&x, p, sizeof(x));
+	return ntohll(x);
+}
+#define align_ntohs(var) ntohs_force_align(&(var))
+#define align_ntohl(var) ntohl_force_align(&(var))
+#define align_ntohll(var) ntohll_force_align(&(var))
+#endif
diff --git a/read-cache.c b/read-cache.c
index 33dd676..fa53504 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1313,26 +1313,6 @@ int read_index(struct index_state *istate)
 	return read_index_from(istate, get_index_file());
 }
 
-#ifndef NEEDS_ALIGNED_ACCESS
-#define ntoh_s(var) ntohs(var)
-#define ntoh_l(var) ntohl(var)
-#else
-static inline uint16_t ntoh_s_force_align(void *p)
-{
-	uint16_t x;
-	memcpy(&x, p, sizeof(x));
-	return ntohs(x);
-}
-static inline uint32_t ntoh_l_force_align(void *p)
-{
-	uint32_t x;
-	memcpy(&x, p, sizeof(x));
-	return ntohl(x);
-}
-#define ntoh_s(var) ntoh_s_force_align(&(var))
-#define ntoh_l(var) ntoh_l_force_align(&(var))
-#endif
-
 static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
 						   unsigned int flags,
 						   const char *name,
@@ -1340,16 +1320,16 @@ static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *on
 {
 	struct cache_entry *ce = xmalloc(cache_entry_size(len));
 
-	ce->ce_stat_data.sd_ctime.sec = ntoh_l(ondisk->ctime.sec);
-	ce->ce_stat_data.sd_mtime.sec = ntoh_l(ondisk->mtime.sec);
-	ce->ce_stat_data.sd_ctime.nsec = ntoh_l(ondisk->ctime.nsec);
-	ce->ce_stat_data.sd_mtime.nsec = ntoh_l(ondisk->mtime.nsec);
-	ce->ce_stat_data.sd_dev   = ntoh_l(ondisk->dev);
-	ce->ce_stat_data.sd_ino   = ntoh_l(ondisk->ino);
-	ce->ce_mode  = ntoh_l(ondisk->mode);
-	ce->ce_stat_data.sd_uid   = ntoh_l(ondisk->uid);
-	ce->ce_stat_data.sd_gid   = ntoh_l(ondisk->gid);
-	ce->ce_stat_data.sd_size  = ntoh_l(ondisk->size);
+	ce->ce_stat_data.sd_ctime.sec = align_ntohl(ondisk->ctime.sec);
+	ce->ce_stat_data.sd_mtime.sec = align_ntohl(ondisk->mtime.sec);
+	ce->ce_stat_data.sd_ctime.nsec = align_ntohl(ondisk->ctime.nsec);
+	ce->ce_stat_data.sd_mtime.nsec = align_ntohl(ondisk->mtime.nsec);
+	ce->ce_stat_data.sd_dev   = align_ntohl(ondisk->dev);
+	ce->ce_stat_data.sd_ino   = align_ntohl(ondisk->ino);
+	ce->ce_mode  = align_ntohl(ondisk->mode);
+	ce->ce_stat_data.sd_uid   = align_ntohl(ondisk->uid);
+	ce->ce_stat_data.sd_gid   = align_ntohl(ondisk->gid);
+	ce->ce_stat_data.sd_size  = align_ntohl(ondisk->size);
 	ce->ce_flags = flags & ~CE_NAMEMASK;
 	ce->ce_namelen = len;
 	hashcpy(ce->sha1, ondisk->sha1);
@@ -1389,14 +1369,14 @@ static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk,
 	unsigned int flags;
 
 	/* On-disk flags are just 16 bits */
-	flags = ntoh_s(ondisk->flags);
+	flags = align_ntohs(ondisk->flags);
 	len = flags & CE_NAMEMASK;
 
 	if (flags & CE_EXTENDED) {
 		struct ondisk_cache_entry_extended *ondisk2;
 		int extended_flags;
 		ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
-		extended_flags = ntoh_s(ondisk2->flags2) << 16;
+		extended_flags = align_ntohs(ondisk2->flags2) << 16;
 		/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
 		if (extended_flags & ~CE_EXTENDED_FLAGS)
 			die("Unknown index entry format %08x", extended_flags);
-- 
1.8.5.2.500.g8060133

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH 2/2] ewah: support platforms that require aligned reads
  2014-01-23 18:33     ` Jeff King
  2014-01-23 18:35       ` [PATCH 1/2] compat: move unaligned helpers to bswap.h Jeff King
@ 2014-01-23 18:35       ` Jeff King
  2014-01-23 19:52       ` [PATCH v4 08/23] ewah: compressed bitmap implementation Jonathan Nieder
                         ` (3 subsequent siblings)
  5 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2014-01-23 18:35 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

From: Vicent Marti <tanoku@gmail.com>

The caller may hand us an unaligned buffer (e.g., because it
is an mmap of a file with many ewah bitmaps). On some
platforms (like SPARC) this can cause a bus error. We can
fix it with a combination of force-align macros and moving
the data into an aligned buffer (which we would do anyway,
but we can move it before fixing the endianness).

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
 ewah/ewah_io.c | 33 ++++++++++++++++++++++++---------
 1 file changed, 24 insertions(+), 9 deletions(-)

diff --git a/ewah/ewah_io.c b/ewah/ewah_io.c
index aed0da6..1948ba5 100644
--- a/ewah/ewah_io.c
+++ b/ewah/ewah_io.c
@@ -112,23 +112,38 @@ int ewah_serialize(struct ewah_bitmap *self, int fd)
 
 int ewah_read_mmap(struct ewah_bitmap *self, void *map, size_t len)
 {
-	uint32_t *read32 = map;
-	eword_t *read64;
-	size_t i;
+	uint8_t *ptr = map;
+
+	self->bit_size = align_ntohl(*(uint32_t *)ptr);
+	ptr += sizeof(uint32_t);
+
+	self->buffer_size = self->alloc_size = align_ntohl(*(uint32_t *)ptr);
+	ptr += sizeof(uint32_t);
 
-	self->bit_size = ntohl(*read32++);
-	self->buffer_size = self->alloc_size = ntohl(*read32++);
 	self->buffer = ewah_realloc(self->buffer,
 		self->alloc_size * sizeof(eword_t));
 
 	if (!self->buffer)
 		return -1;
 
-	for (i = 0, read64 = (void *)read32; i < self->buffer_size; ++i)
-		self->buffer[i] = ntohll(*read64++);
+	/*
+	 * Copy the raw data for the bitmap as a whole chunk;
+	 * if we're in a little-endian platform, we'll perform
+	 * the endianness conversion in a separate pass to ensure
+	 * we're loading 8-byte aligned words.
+	 */
+	memcpy(self->buffer, ptr, self->buffer_size * sizeof(uint64_t));
+	ptr += self->buffer_size * sizeof(uint64_t);
+
+#if __BYTE_ORDER != __BIG_ENDIAN
+	{
+		size_t i;
+		for (i = 0; i < self->buffer_size; ++i)
+			self->buffer[i] = ntohll(self->buffer[i]);
+	}
+#endif
 
-	read32 = (void *)read64;
-	self->rlw = self->buffer + ntohl(*read32++);
+	self->rlw = self->buffer + align_ntohl(*(uint32_t *)ptr);
 
 	return (3 * 4) + (self->buffer_size * 8);
 }
-- 
1.8.5.2.500.g8060133

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* Re: [PATCH 1/2] compat: move unaligned helpers to bswap.h
  2014-01-23 18:35       ` [PATCH 1/2] compat: move unaligned helpers to bswap.h Jeff King
@ 2014-01-23 19:41         ` Jonathan Nieder
  2014-01-23 19:44           ` Jeff King
  0 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 19:41 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:

> Commit d60c49c (read-cache.c: allow unaligned mapping of the
> index file, 2012-04-03) introduced helpers to access
> unaligned data. Let's factor them out to make them more
> widely available.
>
> While we're at it, we'll give the helpers more readable
> names, add a helper for the "ntohll" form, and add the
> appropriate Makefile knob.

Weird.  Why wasn't git broken on the relevant platforms before (given
that no one has been setting NEEDS_ALIGNED_ACCESS for them)?

Puzzled,
Jonathan

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 1/2] compat: move unaligned helpers to bswap.h
  2014-01-23 19:41         ` Jonathan Nieder
@ 2014-01-23 19:44           ` Jeff King
  2014-01-23 19:56             ` Jonathan Nieder
  0 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2014-01-23 19:44 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Thu, Jan 23, 2014 at 11:41:18AM -0800, Jonathan Nieder wrote:

> Jeff King wrote:
> 
> > Commit d60c49c (read-cache.c: allow unaligned mapping of the
> > index file, 2012-04-03) introduced helpers to access
> > unaligned data. Let's factor them out to make them more
> > widely available.
> >
> > While we're at it, we'll give the helpers more readable
> > names, add a helper for the "ntohll" form, and add the
> > appropriate Makefile knob.
> 
> Weird.  Why wasn't git broken on the relevant platforms before (given
> that no one has been setting NEEDS_ALIGNED_ACCESS for them)?

Because most of our data structures support aligned access. Thomas
mentioned this as a potential issue earlier, and I said in a re-roll
cover letter:

  I did not include the NEEDS_ALIGNED_ACCESS patch. I note that we do
  not even have a Makefile knob for this, and the code in read-cache.c
  has probably never actually been used. Are there real systems that
  have a problem? The read-cache code was in support of the index v4
  experiment, which did away with the 8-byte padding. So it could be
  that we simply don't see it, because everything is currently aligned.

I think it was a bug waiting to surface if index v4 ever got wide use.

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 18:33     ` Jeff King
  2014-01-23 18:35       ` [PATCH 1/2] compat: move unaligned helpers to bswap.h Jeff King
  2014-01-23 18:35       ` [PATCH 2/2] ewah: support platforms that require aligned reads Jeff King
@ 2014-01-23 19:52       ` Jonathan Nieder
  2014-01-23 20:03         ` Jeff King
  2014-01-23 20:14       ` Shawn Pearce
                         ` (2 subsequent siblings)
  5 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 19:52 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:

> Here's a patch series (on top of jk/pack-bitmap, naturally) that lets
> t5310 pass there. I assume the ARM problem is the same, though seeing
> the failure in realloc() is unexpected. Can you try it on both your
> platforms with these patches?

Thanks.  Trying it out now.

[...]
>> Hopefully it's possible to get the alignment right in the caller
>> and tweak the signature to require that instead of using unaligned
>> reads like this.  There's still something wrong after this patch ---
>> the new result is a NULL pointer dereference in t5310.7 "enumerate
>> --objects (full bitmap)".
>
> After my patches, t5310 runs fine for me. I didn't try your patch, but
> mine are similar. Let me know if you still see the problem (there may
> simply be a bug in yours, but I didn't see it).

I had left out a cast to unsigned, producing an overflow.

My main worry about the patches is that they will probably run into
an analogous problem to the one that v1.7.12-rc0~1^2~2 (block-sha1:
avoid pointer conversion that violates alignment constraints,
2012-07-22) solved.  By casting the pointer to (uint32_t *) we are
telling the compiler it is 32-bit aligned (C99 section 6.3.2.3).
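
For illustration (a sketch only, not code from the series; the helper
names are made up), the two access patterns at issue look roughly like:

  #include <stdint.h>
  #include <string.h>
  #include <arpa/inet.h>  /* ntohl, standing in for git's own helper */

  /*
   * Casting an arbitrary mmap offset to uint32_t * promises 4-byte
   * alignment to the compiler (C99 6.3.2.3), so it may emit a plain
   * word load that faults on strict-alignment platforms.
   */
  static uint32_t read_be32_cast(const void *p)
  {
          return ntohl(*(const uint32_t *)p);
  }

  /*
   * memcpy into an aligned local first; where unaligned loads are
   * cheap, compilers typically collapse this into a single load.
   */
  static uint32_t read_be32_copy(const void *p)
  {
          uint32_t x;
          memcpy(&x, p, sizeof(x));
          return ntohl(x);
  }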

Thanks,
Jonathan

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 1/2] compat: move unaligned helpers to bswap.h
  2014-01-23 19:44           ` Jeff King
@ 2014-01-23 19:56             ` Jonathan Nieder
  2014-01-23 20:04               ` Jeff King
  0 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 19:56 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:

> I think it was a bug waiting to surface if index v4 ever got wide use.

Ah, ok.

In that case I think git-compat-util.h should include something like
what block-sha1/sha1.c has:

	#if !defined(__i386__) && !defined(__x86_64__) && \
	    !defined(_M_IX86) && !defined(_M_X64) && \
	    !defined(__ppc__) && !defined(__ppc64__) && \
	    !defined(__powerpc__) && !defined(__powerpc64__) && \
	    !defined(__s390__) && !defined(__s390x__)
	#define NEEDS_ALIGNED_ACCESS
	#endif

Otherwise we are relying on the person building to know their own
architecture intimately, which shouldn't be necessary.

Meanwhile, as mentioned in the other message, I suspect the
NEEDS_ALIGNED_ACCESS code path is broken for aggressive compilers
anyway.  Looking more.

Thanks,
Jonathan

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 19:52       ` [PATCH v4 08/23] ewah: compressed bitmap implementation Jonathan Nieder
@ 2014-01-23 20:03         ` Jeff King
  2014-01-23 20:12           ` Jonathan Nieder
                             ` (2 more replies)
  0 siblings, 3 replies; 68+ messages in thread
From: Jeff King @ 2014-01-23 20:03 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Thu, Jan 23, 2014 at 11:52:06AM -0800, Jonathan Nieder wrote:

> > After my patches, t5310 runs fine for me. I didn't try your patch, but
> > mine are similar. Let me know if you still see the problem (there may
> > simply be a bug in yours, but I didn't see it).
> 
> I had left out a cast to unsigned, producing an overflow.
> 
> My main worry about the patches is that they will probably run into
> an analogous problem to the one that v1.7.12-rc0~1^2~2 (block-sha1:
> avoid pointer conversion that violates alignment constraints,
> 2012-07-22) solved.  By casting the pointer to (uint32_t *) we are
> telling the compiler it is 32-bit aligned (C99 section 6.3.2.3).

Yeah, maybe. We go via memcpy, which takes a "void *", so that part is
good. However, the new code looks like:

  foo = align_ntohl(*(uint32_t *)ptr);

I think this probably works in practice because align_ntohl is inlined,
and any sane compiler will never actually load the variable. If we
change the signature of align_ntohl, we can do this:

  uint32_t align_ntohl(void *ptr)
  {
          uint32_t x;
          memcpy(&x, ptr, sizeof(x));
          return ntohl(x);
  }

  ...

  foo = align_ntohl(ptr);

The memcpy solution is taken from read-cache.c, but as we noted, it
probably hasn't been used a lot. The blk_sha1 get_be may be faster, as
it converts as it reads. However, the bulk of the data is copied via
a single memcpy and then modified in place. I don't know if that would
be faster or not (for a big-endian system it probably is, since we can
omit the modification loop entirely).
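
To spell the two strategies out for the bulk of the bitmap data (a rough
sketch only; get_be64 and ntohll here stand in for the helpers discussed
in this thread):

  #include <stdint.h>
  #include <string.h>

  /* stand-ins for the helpers discussed above */
  extern uint64_t get_be64(const unsigned char *p); /* byte loads + shifts */
  extern uint64_t ntohll(uint64_t x);               /* 64-bit byte swap */

  /* (a) convert while reading: one byte-wise load per word */
  static void load_be64_bytewise(uint64_t *dst, const unsigned char *src,
                                 size_t n)
  {
          size_t i;
          for (i = 0; i < n; i++, src += 8)
                  dst[i] = get_be64(src);
  }

  /*
   * (b) one bulk memcpy into an aligned buffer, then fix the
   * endianness in place; on a big-endian host the fixup loop can be
   * skipped entirely.
   */
  static void load_be64_memcpy(uint64_t *dst, const unsigned char *src,
                               size_t n)
  {
          size_t i;
          memcpy(dst, src, n * sizeof(uint64_t));
          for (i = 0; i < n; i++)
                  dst[i] = ntohll(dst[i]);
  }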

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 1/2] compat: move unaligned helpers to bswap.h
  2014-01-23 19:56             ` Jonathan Nieder
@ 2014-01-23 20:04               ` Jeff King
  2014-01-23 20:08                 ` Jonathan Nieder
  0 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2014-01-23 20:04 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Thu, Jan 23, 2014 at 11:56:43AM -0800, Jonathan Nieder wrote:

> Jeff King wrote:
> 
> > I think it was a bug waiting to surface if index v4 ever got wide use.
> 
> Ah, ok.
> 
> In that case I think git-compat-util.h should include something like
> what block-sha1/sha1.c has:
> 
> 	#if !defined(__i386__) && !defined(__x86_64__) && \
> 	    !defined(_M_IX86) && !defined(_M_X64) && \
> 	    !defined(__ppc__) && !defined(__ppc64__) && \
> 	    !defined(__powerpc__) && !defined(__powerpc64__) && \
> 	    !defined(__s390__) && !defined(__s390x__)
> 	#define NEEDS_ALIGNED_ACCESS
> 	#endif
> 
> Otherwise we are relying on the person building to know their own
> architecture intimately, which shouldn't be necessary.

Yeah, I agree it would be nice to autodetect. I just didn't know what
the right set of platforms was, and assumed people would tweak the
Makefile knob as appropriate (though it is probably much easier to do so
within the compiler, where we have the right architecture variables
set).

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 1/2] compat: move unaligned helpers to bswap.h
  2014-01-23 20:04               ` Jeff King
@ 2014-01-23 20:08                 ` Jonathan Nieder
  2014-01-23 20:09                   ` Jeff King
  0 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 20:08 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:
> On Thu, Jan 23, 2014 at 11:56:43AM -0800, Jonathan Nieder wrote:

>> In that case I think git-compat-util.h should include something like
>> what block-sha1/sha1.c has:
>> 
>> 	#if !defined(__i386__) && !defined(__x86_64__) && \
>> 	    !defined(_M_IX86) && !defined(_M_X64) && \
>> 	    !defined(__ppc__) && !defined(__ppc64__) && \
>> 	    !defined(__powerpc__) && !defined(__powerpc64__) && \
>> 	    !defined(__s390__) && !defined(__s390x__)
>> 	#define NEEDS_ALIGNED_ACCESS
>> 	#endif
>>
>> Otherwise we are relying on the person building to know their own
>> architecture intimately, which shouldn't be necessary.
>
> Yeah, I agree it would be nice to autodetect.

The nice thing is that false positives are harmless, modulo slowing
down git a little if the compiler doesn't figure out how to optimize
the NEEDS_ALIGNED_ACCESS codepath when on an unlisted platform that
doesn't, in fact, need aligned access.

In other words, it would work out of the box for everybody.

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 1/2] compat: move unaligned helpers to bswap.h
  2014-01-23 20:08                 ` Jonathan Nieder
@ 2014-01-23 20:09                   ` Jeff King
  0 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2014-01-23 20:09 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Thu, Jan 23, 2014 at 12:08:04PM -0800, Jonathan Nieder wrote:

> Jeff King wrote:
> > On Thu, Jan 23, 2014 at 11:56:43AM -0800, Jonathan Nieder wrote:
> 
> >> In that case I think git-compat-util.h should include something like
> >> what block-sha1/sha1.c has:
> >> 
> >> 	#if !defined(__i386__) && !defined(__x86_64__) && \
> >> 	    !defined(_M_IX86) && !defined(_M_X64) && \
> >> 	    !defined(__ppc__) && !defined(__ppc64__) && \
> >> 	    !defined(__powerpc__) && !defined(__powerpc64__) && \
> >> 	    !defined(__s390__) && !defined(__s390x__)
> >> 	#define NEEDS_ALIGNED_ACCESS
> >> 	#endif
> >>
> >> Otherwise we are relying on the person building to know their own
> >> architecture intimately, which shouldn't be necessary.
> >
> > Yeah, I agree it would be nice to autodetect.
> 
> The nice thing is that false positives are harmless, modulo slowing
> down git a little if the compiler doesn't figure out how to optimize
> the NEEDS_ALIGNED_ACCESS codepath when on an unlisted platform that
> doesn't, in fact, need aligned access.

OK, I'll refactor the knob.

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 20:03         ` Jeff King
@ 2014-01-23 20:12           ` Jonathan Nieder
  2014-01-23 20:13             ` Jeff King
  2014-01-23 20:23           ` Jonathan Nieder
  2014-01-23 20:38           ` Jeff King
  2 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 20:12 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:
> On Thu, Jan 23, 2014 at 11:52:06AM -0800, Jonathan Nieder wrote:

>> My main worry about the patches is that they will probably run into
>> an analogous problem to the one that v1.7.12-rc0~1^2~2
[...]
> I think this probably works in practice because align_ntohl is inlined,
> and any sane compiler will never actually load the variable.

I don't think that's safe to rely on.  The example named above didn't
pose any problems except on one platform.  All the relevant functions
were static and easy to inline.  GCC just followed the standard
literally and chose to break by reading one word at a time, just like
in this case it could break e.g. by copying one word at a time in
__builtin_memcpy (which seems perfectly reasonable to me ---
optimization involves a lot of constraint solving, and if you can't
trust your constraints then there's not much you can do).

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 20:12           ` Jonathan Nieder
@ 2014-01-23 20:13             ` Jeff King
  0 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2014-01-23 20:13 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Thu, Jan 23, 2014 at 12:12:23PM -0800, Jonathan Nieder wrote:

> Jeff King wrote:
> > On Thu, Jan 23, 2014 at 11:52:06AM -0800, Jonathan Nieder wrote:
> 
> >> My main worry about the patches is that they will probably run into
> >> an analogous problem to the one that v1.7.12-rc0~1^2~2
> [...]
> > I think this probably works in practice because align_ntohl is inlined,
> > and any sane compiler will never actually load the variable.
> 
> I don't think that's safe to rely on.  The example named above didn't
> pose any problems except on one platform.  All the relevant functions
> were static and easy to inline.  GCC just followed the standard
> literally and chose to break by reading one word at a time, just like
> in this case it could break e.g. by copying one word at a time in
> __builtin_memcpy (which seems perfectly reasonable to me ---
> optimization involves a lot of constraint solving, and if you can't
> trust your constraints then there's not much you can do).

I wasn't disagreeing with you. I was guessing at why it did not fail out
of the box when I tested it.  What do you think of the alternative I
posted?

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 18:33     ` Jeff King
                         ` (2 preceding siblings ...)
  2014-01-23 19:52       ` [PATCH v4 08/23] ewah: compressed bitmap implementation Jonathan Nieder
@ 2014-01-23 20:14       ` Shawn Pearce
  2014-01-23 20:26         ` Jeff King
  2014-01-23 20:18       ` Jonathan Nieder
  2014-01-23 21:20       ` [PATCH v2 0/3] unaligned reads from .bitmap files Jeff King
  5 siblings, 1 reply; 68+ messages in thread
From: Shawn Pearce @ 2014-01-23 20:14 UTC (permalink / raw)
  To: Jeff King; +Cc: Jonathan Nieder, git

On Thu, Jan 23, 2014 at 10:33 AM, Jeff King <peff@peff.net> wrote:
> On Wed, Jan 22, 2014 at 06:05:36PM -0800, Jonathan Nieder wrote:
>
>> Jeff King wrote:
>>
>> > EWAH is a word-aligned compressed variant of a bitset (i.e. a data
>> > structure that acts as a 0-indexed boolean array for many entries).
>>
>> I suspect that for some callers it's not word-aligned.
>
> Yes, the mmap'd buffers aren't necessarily word-aligned. I don't think
> we can fix that easily without changing the on-disk format (which comes
> from JGit anyway).

Ouch, sorry about that. JGit doesn't mmap the file so we didn't think
about the impact of words not being aligned. I should have caught
that, but I didn't.

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 18:33     ` Jeff King
                         ` (3 preceding siblings ...)
  2014-01-23 20:14       ` Shawn Pearce
@ 2014-01-23 20:18       ` Jonathan Nieder
  2014-01-23 21:20       ` [PATCH v2 0/3] unaligned reads from .bitmap files Jeff King
  5 siblings, 0 replies; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 20:18 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:

>   [1/2]: compat: move unaligned helpers to bswap.h
>   [2/2]: ewah: support platforms that require aligned reads

After setting NEEDS_ALIGNED_ACCESS,
Tested-by: Jonathan Nieder <jrnieder@gmail.com> # ARMv5

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 20:03         ` Jeff King
  2014-01-23 20:12           ` Jonathan Nieder
@ 2014-01-23 20:23           ` Jonathan Nieder
  2014-01-23 20:29             ` Jeff King
  2014-01-23 20:38           ` Jeff King
  2 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 20:23 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:

>                                                              If we
> change the signature of align_ntohl, we can do this:
>
>   uint32_t align_ntohl(void *ptr)
>   {
>           uint32_t x;
>           memcpy(&x, ptr, sizeof(x));
>           return ntohl(x);
>   }
>
>   ...
>
>   foo = align_ntohl(ptr);
>
> The memcpy solution is taken from read-cache.c, but as we noted, it
> probably hasn't been used a lot. The blk_sha1 get_be may be faster, as
> it converts as it reads.

I doubt there's much difference either way, especially after an
optimizer gets its hands on it.  According to [1] ARM has no fast
byte swap instruction so with -O0 the byte-at-a-time implementation is
probably faster there.  I can try a performance test if you like.

Jonathan

[1] http://thread.gmane.org/gmane.comp.version-control.git/125737

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 20:14       ` Shawn Pearce
@ 2014-01-23 20:26         ` Jeff King
  2014-01-23 21:53           ` brian m. carlson
  0 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2014-01-23 20:26 UTC (permalink / raw)
  To: Shawn Pearce; +Cc: Jonathan Nieder, git

On Thu, Jan 23, 2014 at 12:14:03PM -0800, Shawn Pearce wrote:

> > Yes, the mmap'd buffers aren't necessarily word-aligned. I don't think
> > we can fix that easily without changing the on-disk format (which comes
> > from JGit anyway).
> 
> Ouch, sorry about that. JGit doesn't mmap the file so we didn't think
> about the impact of words not being aligned. I should have caught
> that, but I didn't.

Looking over the format, I think the only thing preventing 4-byte
alignment is the 1-byte XOR-offset and 1-byte flags field for each
bitmap. If we ever have a v2, we could pad the sum of those out to 4
bytes. Is 4-byte alignment enough? We do treat the actual data as 64-bit
integers. I wonder if that would have problems on Sparc64, for example.

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 20:23           ` Jonathan Nieder
@ 2014-01-23 20:29             ` Jeff King
  0 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2014-01-23 20:29 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Thu, Jan 23, 2014 at 12:23:42PM -0800, Jonathan Nieder wrote:

> > The memcpy solution is taken from read-cache.c, but as we noted, it
> > probably hasn't been used a lot. The blk_sha1 get_be may be faster, as
> > it converts as it reads.
> 
> I doubt there's much difference either way, especially after an
> optimizer gets its hands on it.  According to [1] ARM has no fast
> byte swap instruction so with -O0 the byte-at-a-time implementation is
> probably faster there.  I can try a performance test if you like.

If you're curious and have time, go ahead and benchmark what I posted
against what you posted (with your fix). But you'll probably need a big
repo like the kernel to notice anything.

But I don't mind that much if we just use the memcpy trick for now. It's
nice and obvious, and we can always change it later if somebody has
numbers (I doubt it will be all that noticeable anyway; this isn't
nearly as tight a loop as the BLK_SHA1 code).

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 20:03         ` Jeff King
  2014-01-23 20:12           ` Jonathan Nieder
  2014-01-23 20:23           ` Jonathan Nieder
@ 2014-01-23 20:38           ` Jeff King
  2 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2014-01-23 20:38 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Thu, Jan 23, 2014 at 03:03:11PM -0500, Jeff King wrote:

> > My main worry about the patches is that they will probably run into
> > an analogous problem to the one that v1.7.12-rc0~1^2~2 (block-sha1:
> > avoid pointer conversion that violates alignment constraints,
> > 2012-07-22) solved.  By casting the pointer to (uint32_t *) we are
> > telling the compiler it is 32-bit aligned (C99 section 6.3.2.3).
> 
> Yeah, maybe. We go via memcpy, which takes a "void *", so that part is
> good. However, the new code looks like:
> 
>   foo = align_ntohl(*(uint32_t *)ptr);
> 
> I think this probably works in practice because align_ntohl is inlined,
> and any sane compiler will never actually load the variable. If we
> change the signature of align_ntohl, we can do this:

Actually, it is a little trickier than that. We actually take the
address in the macro. So even without inlining, we end up casting to
void *. I still think this:

>   uint32_t align_ntohl(void *ptr)
>   {
>           uint32_t x;
>           memcpy(&x, ptr, sizeof(x));
>           return ntohl(x);
>   }

is a little more obvious, though. It does mean that everybody has to
pass a pointer, though, and on platforms where non-aligned reads are OK,
we do the cast ourselves. That means that:

  foo = align_ntohl(&bar);

will not be able to do any type-checking for "bar" (say, when we are
pulling "bar" straight out of a packed struct). I don't know how much
we care.
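
To make the type-checking point concrete, here is a hypothetical example
(the struct and its fields are invented for illustration): with a
`uint32_t *` parameter the compiler would at least warn about the bad
call below, but with `void *` it compiles silently:

  #include <arpa/inet.h>  /* ntohl(); assumes a POSIX-ish platform */
  #include <stdint.h>
  #include <string.h>

  static uint32_t align_ntohl(void *ptr)
  {
          uint32_t x;
          memcpy(&x, ptr, sizeof(x));
          return ntohl(x);
  }

  struct entry {
          uint16_t flags;         /* note: only 16 bits wide */
          uint32_t size;
  };

  uint32_t read_size(struct entry *e)
  {
          /* oops: should have been &e->size, but nothing complains */
          return align_ntohl(&e->flags);
  }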

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* [PATCH v2 0/3] unaligned reads from .bitmap files
  2014-01-23 18:33     ` Jeff King
                         ` (4 preceding siblings ...)
  2014-01-23 20:18       ` Jonathan Nieder
@ 2014-01-23 21:20       ` Jeff King
  2014-01-23 21:23         ` [PATCH 1/3] block-sha1: factor out get_be and put_be wrappers Jeff King
                           ` (3 more replies)
  5 siblings, 4 replies; 68+ messages in thread
From: Jeff King @ 2014-01-23 21:20 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Thu, Jan 23, 2014 at 01:33:20PM -0500, Jeff King wrote:

> Here's a patch series (on top of jk/pack-bitmap, naturally) that lets
> t5310 pass there. I assume the ARM problem is the same, though seeing
> the failure in realloc() is unexpected. Can you try it on both your
> platforms with these patches?
> 
>   [1/2]: compat: move unaligned helpers to bswap.h
>   [2/2]: ewah: support platforms that require aligned reads

Here it is again, fixing the issues we've discussed.

Instead of building on the code in read-cache, it pulls the much more
battle-tested code from block-sha1, and refactors read-cache to use that
instead. So the fix now kicks in automatically, and in theory it is a
slight bit faster (though I still doubt it would even be measurable in
this case).

  [1/3]: block-sha1: factor out get_be and put_be wrappers
  [2/3]: read-cache: use get_be32 instead of hand-rolled ntoh_l
  [3/3]: ewah: support platforms that require aligned reads

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* [PATCH 1/3] block-sha1: factor out get_be and put_be wrappers
  2014-01-23 21:20       ` [PATCH v2 0/3] unaligned reads from .bitmap files Jeff King
@ 2014-01-23 21:23         ` Jeff King
  2014-01-23 23:19           ` Jonathan Nieder
  2014-01-23 21:26         ` [PATCH 2/3] read-cache: use get_be32 instead of hand-rolled ntoh_l Jeff King
                           ` (2 subsequent siblings)
  3 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2014-01-23 21:23 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

The BLK_SHA1 code has optimized wrappers for doing endian
conversions on memory that may not be aligned. Let's pull
them out so that we can use them elsewhere, especially the
time-tested list of platforms that prefer each strategy.

Signed-off-by: Jeff King <peff@peff.net>
---
These short names might not be descriptive enough now that they are
globals. However, they make sense to me. I'm open to suggestions if
somebody disagrees.

 block-sha1/sha1.c | 32 --------------------------------
 compat/bswap.h    | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/block-sha1/sha1.c b/block-sha1/sha1.c
index e1a1eb6..22b125c 100644
--- a/block-sha1/sha1.c
+++ b/block-sha1/sha1.c
@@ -62,38 +62,6 @@
   #define setW(x, val) (W(x) = (val))
 #endif
 
-/*
- * Performance might be improved if the CPU architecture is OK with
- * unaligned 32-bit loads and a fast ntohl() is available.
- * Otherwise fall back to byte loads and shifts which is portable,
- * and is faster on architectures with memory alignment issues.
- */
-
-#if defined(__i386__) || defined(__x86_64__) || \
-    defined(_M_IX86) || defined(_M_X64) || \
-    defined(__ppc__) || defined(__ppc64__) || \
-    defined(__powerpc__) || defined(__powerpc64__) || \
-    defined(__s390__) || defined(__s390x__)
-
-#define get_be32(p)	ntohl(*(unsigned int *)(p))
-#define put_be32(p, v)	do { *(unsigned int *)(p) = htonl(v); } while (0)
-
-#else
-
-#define get_be32(p)	( \
-	(*((unsigned char *)(p) + 0) << 24) | \
-	(*((unsigned char *)(p) + 1) << 16) | \
-	(*((unsigned char *)(p) + 2) <<  8) | \
-	(*((unsigned char *)(p) + 3) <<  0) )
-#define put_be32(p, v)	do { \
-	unsigned int __v = (v); \
-	*((unsigned char *)(p) + 0) = __v >> 24; \
-	*((unsigned char *)(p) + 1) = __v >> 16; \
-	*((unsigned char *)(p) + 2) = __v >>  8; \
-	*((unsigned char *)(p) + 3) = __v >>  0; } while (0)
-
-#endif
-
 /* This "rolls" over the 512-bit array */
 #define W(x) (array[(x)&15])
 
diff --git a/compat/bswap.h b/compat/bswap.h
index c18a78e..7d17953 100644
--- a/compat/bswap.h
+++ b/compat/bswap.h
@@ -122,3 +122,35 @@ static inline uint64_t git_bswap64(uint64_t x)
 #endif
 
 #endif
+
+/*
+ * Performance might be improved if the CPU architecture is OK with
+ * unaligned 32-bit loads and a fast ntohl() is available.
+ * Otherwise fall back to byte loads and shifts which is portable,
+ * and is faster on architectures with memory alignment issues.
+ */
+
+#if defined(__i386__) || defined(__x86_64__) || \
+    defined(_M_IX86) || defined(_M_X64) || \
+    defined(__ppc__) || defined(__ppc64__) || \
+    defined(__powerpc__) || defined(__powerpc64__) || \
+    defined(__s390__) || defined(__s390x__)
+
+#define get_be32(p)	ntohl(*(unsigned int *)(p))
+#define put_be32(p, v)	do { *(unsigned int *)(p) = htonl(v); } while (0)
+
+#else
+
+#define get_be32(p)	( \
+	(*((unsigned char *)(p) + 0) << 24) | \
+	(*((unsigned char *)(p) + 1) << 16) | \
+	(*((unsigned char *)(p) + 2) <<  8) | \
+	(*((unsigned char *)(p) + 3) <<  0) )
+#define put_be32(p, v)	do { \
+	unsigned int __v = (v); \
+	*((unsigned char *)(p) + 0) = __v >> 24; \
+	*((unsigned char *)(p) + 1) = __v >> 16; \
+	*((unsigned char *)(p) + 2) = __v >>  8; \
+	*((unsigned char *)(p) + 3) = __v >>  0; } while (0)
+
+#endif
-- 
1.8.5.2.500.g8060133

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH 2/3] read-cache: use get_be32 instead of hand-rolled ntoh_l
  2014-01-23 21:20       ` [PATCH v2 0/3] unaligned reads from .bitmap files Jeff King
  2014-01-23 21:23         ` [PATCH 1/3] block-sha1: factor out get_be and put_be wrappers Jeff King
@ 2014-01-23 21:26         ` Jeff King
  2014-01-23 23:34           ` Jonathan Nieder
  2014-01-23 21:27         ` [PATCH 3/3] ewah: support platforms that require aligned reads Jeff King
  2014-01-23 23:17         ` [PATCH v2 0/3] unaligned reads from .bitmap files Jonathan Nieder
  3 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2014-01-23 21:26 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

Commit d60c49c (read-cache.c: allow unaligned mapping of the
index file, 2012-04-03) introduced helpers to access
unaligned data. However, we already have get_be32, which has
a few advantages:

  1. It's already written, so we avoid duplication.

  2. It's probably faster, since it does the endian
     conversion and the alignment fix at the same time.

  3. The get_be32 code is well-tested, having been in
     block-sha1 for a long time. By contrast, our custom
     helpers were probably almost never used, since the user
     needed to manually define a macro to enable them.

We have to add a get_be16 implementation to the existing
get_be32, but that is very simple to do.

Signed-off-by: Jeff King <peff@peff.net>
---
This _might_ still suffer from the issue fixed in 5f6a112 (block-sha1:
avoid pointer conversion that violates alignment constraints,
2012-07-22), as we are taking the pointer of a uint32 in a struct. But
if that is the case, then the original did, as well. It's not clear to
me if the casting get_be32 does is sufficient, or if a sufficiently
clever compiler might make assumptions based on the original pointer
type.

I'm inclined to leave it for now, as we haven't made anything worse, and
nobody has reported a problem.

 compat/bswap.h |  4 ++++
 read-cache.c   | 44 ++++++++++++--------------------------------
 2 files changed, 16 insertions(+), 32 deletions(-)

diff --git a/compat/bswap.h b/compat/bswap.h
index 7d17953..120c6c1 100644
--- a/compat/bswap.h
+++ b/compat/bswap.h
@@ -136,11 +136,15 @@ static inline uint64_t git_bswap64(uint64_t x)
     defined(__powerpc__) || defined(__powerpc64__) || \
     defined(__s390__) || defined(__s390x__)
 
+#define get_be16(p)	ntohs(*(unsigned short *)(p))
 #define get_be32(p)	ntohl(*(unsigned int *)(p))
 #define put_be32(p, v)	do { *(unsigned int *)(p) = htonl(v); } while (0)
 
 #else
 
+#define get_be16(p)	( \
+	(*((unsigned char *)(p) + 0) << 8) | \
+	(*((unsigned char *)(p) + 1) << 0) )
 #define get_be32(p)	( \
 	(*((unsigned char *)(p) + 0) << 24) | \
 	(*((unsigned char *)(p) + 1) << 16) | \
diff --git a/read-cache.c b/read-cache.c
index 33dd676..4221872 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1313,26 +1313,6 @@ int read_index(struct index_state *istate)
 	return read_index_from(istate, get_index_file());
 }
 
-#ifndef NEEDS_ALIGNED_ACCESS
-#define ntoh_s(var) ntohs(var)
-#define ntoh_l(var) ntohl(var)
-#else
-static inline uint16_t ntoh_s_force_align(void *p)
-{
-	uint16_t x;
-	memcpy(&x, p, sizeof(x));
-	return ntohs(x);
-}
-static inline uint32_t ntoh_l_force_align(void *p)
-{
-	uint32_t x;
-	memcpy(&x, p, sizeof(x));
-	return ntohl(x);
-}
-#define ntoh_s(var) ntoh_s_force_align(&(var))
-#define ntoh_l(var) ntoh_l_force_align(&(var))
-#endif
-
 static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
 						   unsigned int flags,
 						   const char *name,
@@ -1340,16 +1320,16 @@ static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *on
 {
 	struct cache_entry *ce = xmalloc(cache_entry_size(len));
 
-	ce->ce_stat_data.sd_ctime.sec = ntoh_l(ondisk->ctime.sec);
-	ce->ce_stat_data.sd_mtime.sec = ntoh_l(ondisk->mtime.sec);
-	ce->ce_stat_data.sd_ctime.nsec = ntoh_l(ondisk->ctime.nsec);
-	ce->ce_stat_data.sd_mtime.nsec = ntoh_l(ondisk->mtime.nsec);
-	ce->ce_stat_data.sd_dev   = ntoh_l(ondisk->dev);
-	ce->ce_stat_data.sd_ino   = ntoh_l(ondisk->ino);
-	ce->ce_mode  = ntoh_l(ondisk->mode);
-	ce->ce_stat_data.sd_uid   = ntoh_l(ondisk->uid);
-	ce->ce_stat_data.sd_gid   = ntoh_l(ondisk->gid);
-	ce->ce_stat_data.sd_size  = ntoh_l(ondisk->size);
+	ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
+	ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
+	ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
+	ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
+	ce->ce_stat_data.sd_dev   = get_be32(&ondisk->dev);
+	ce->ce_stat_data.sd_ino   = get_be32(&ondisk->ino);
+	ce->ce_mode  = get_be32(&ondisk->mode);
+	ce->ce_stat_data.sd_uid   = get_be32(&ondisk->uid);
+	ce->ce_stat_data.sd_gid   = get_be32(&ondisk->gid);
+	ce->ce_stat_data.sd_size  = get_be32(&ondisk->size);
 	ce->ce_flags = flags & ~CE_NAMEMASK;
 	ce->ce_namelen = len;
 	hashcpy(ce->sha1, ondisk->sha1);
@@ -1389,14 +1369,14 @@ static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk,
 	unsigned int flags;
 
 	/* On-disk flags are just 16 bits */
-	flags = ntoh_s(ondisk->flags);
+	flags = get_be16(&ondisk->flags);
 	len = flags & CE_NAMEMASK;
 
 	if (flags & CE_EXTENDED) {
 		struct ondisk_cache_entry_extended *ondisk2;
 		int extended_flags;
 		ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
-		extended_flags = ntoh_s(ondisk2->flags2) << 16;
+		extended_flags = get_be16(&ondisk2->flags2) << 16;
 		/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
 		if (extended_flags & ~CE_EXTENDED_FLAGS)
 			die("Unknown index entry format %08x", extended_flags);
-- 
1.8.5.2.500.g8060133

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* [PATCH 3/3] ewah: support platforms that require aligned reads
  2014-01-23 21:20       ` [PATCH v2 0/3] unaligned reads from .bitmap files Jeff King
  2014-01-23 21:23         ` [PATCH 1/3] block-sha1: factor out get_be and put_be wrappers Jeff King
  2014-01-23 21:26         ` [PATCH 2/3] read-cache: use get_be32 instead of hand-rolled ntoh_l Jeff King
@ 2014-01-23 21:27         ` Jeff King
  2014-01-23 23:44           ` Jonathan Nieder
  2014-01-23 23:17         ` [PATCH v2 0/3] unaligned reads from .bitmap files Jonathan Nieder
  3 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2014-01-23 21:27 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

From: Vicent Marti <tanoku@gmail.com>

The caller may hand us an unaligned buffer (e.g., because it
is an mmap of a file with many ewah bitmaps). On some
platforms (like SPARC) this can cause a bus error. We can
fix it with a combination of get_be32 and moving the data
into an aligned buffer (which we would do anyway, but we can
move it before fixing the endianness).

Signed-off-by: Vicent Marti <tanoku@gmail.com>
Signed-off-by: Jeff King <peff@peff.net>
---
Tested on the SPARC I have access to. Please double-check that it also
works fine on ARM.

 ewah/ewah_io.c | 33 ++++++++++++++++++++++++---------
 1 file changed, 24 insertions(+), 9 deletions(-)

diff --git a/ewah/ewah_io.c b/ewah/ewah_io.c
index aed0da6..4a7fae6 100644
--- a/ewah/ewah_io.c
+++ b/ewah/ewah_io.c
@@ -112,23 +112,38 @@ int ewah_serialize(struct ewah_bitmap *self, int fd)
 
 int ewah_read_mmap(struct ewah_bitmap *self, void *map, size_t len)
 {
-	uint32_t *read32 = map;
-	eword_t *read64;
-	size_t i;
+	uint8_t *ptr = map;
+
+	self->bit_size = get_be32(ptr);
+	ptr += sizeof(uint32_t);
+
+	self->buffer_size = self->alloc_size = get_be32(ptr);
+	ptr += sizeof(uint32_t);
 
-	self->bit_size = ntohl(*read32++);
-	self->buffer_size = self->alloc_size = ntohl(*read32++);
 	self->buffer = ewah_realloc(self->buffer,
 		self->alloc_size * sizeof(eword_t));
 
 	if (!self->buffer)
 		return -1;
 
-	for (i = 0, read64 = (void *)read32; i < self->buffer_size; ++i)
-		self->buffer[i] = ntohll(*read64++);
+	/*
+	 * Copy the raw data for the bitmap as a whole chunk;
+	 * if we're in a little-endian platform, we'll perform
+	 * the endianness conversion in a separate pass to ensure
+	 * we're loading 8-byte aligned words.
+	 */
+	memcpy(self->buffer, ptr, self->buffer_size * sizeof(uint64_t));
+	ptr += self->buffer_size * sizeof(uint64_t);
+
+#if __BYTE_ORDER != __BIG_ENDIAN
+	{
+		size_t i;
+		for (i = 0; i < self->buffer_size; ++i)
+			self->buffer[i] = ntohll(self->buffer[i]);
+	}
+#endif
 
-	read32 = (void *)read64;
-	self->rlw = self->buffer + ntohl(*read32++);
+	self->rlw = self->buffer + get_be32(ptr);
 
 	return (3 * 4) + (self->buffer_size * 8);
 }
-- 
1.8.5.2.500.g8060133

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 20:26         ` Jeff King
@ 2014-01-23 21:53           ` brian m. carlson
  2014-01-23 22:07             ` Jeff King
  0 siblings, 1 reply; 68+ messages in thread
From: brian m. carlson @ 2014-01-23 21:53 UTC (permalink / raw)
  To: Jeff King; +Cc: Shawn Pearce, Jonathan Nieder, git

On Thu, Jan 23, 2014 at 03:26:45PM -0500, Jeff King wrote:
> Looking over the format, I think the only thing preventing 4-byte
> alignment is the 1-byte XOR-offset and 1-byte flags field for each
> bitmap. If we ever have a v2, we could pad the sum of those out to 4
> bytes. Is 4-byte alignment enough? We do treat the actual data as 64-bit
> integers. I wonder if that would have problems on Sparc64, for example.

Yes, it will.  SPARC requires all loads be naturally aligned (4-byte to
an address that's a multiple of 4, 8-byte to a multiple of 8, and so
on).  In general, architectures that do not support unaligned access
require natural alignment for all quantities.

Also, even on architectures where the kernel can fix these alignment
issues up, the cost of doing so is two context switches (in and out of
the kernel), servicing the trap, two loads, some shifts and rotates, and
a kernel message, so many people disable alignment fixups.  I know it
made things extremely slow on Alpha.  ARM is even more fun: if you don't
take the trap, the load still happens, but the data comes back rotated,
so you silently get the wrong value.
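
As a minimal illustration of the two behaviours (a sketch, not code from
this series):

  #include <stdint.h>
  #include <string.h>

  uint64_t load_direct(const unsigned char *p)
  {
          /*
           * Undefined if p is not 8-byte aligned: a SIGBUS on SPARC,
           * or on older ARM without the fixup, a silently rotated
           * (i.e. wrong) value.
           */
          return *(const uint64_t *)p;
  }

  uint64_t load_safe(const unsigned char *p)
  {
          uint64_t x;
          memcpy(&x, p, sizeof(x));  /* compiler emits alignment-safe loads */
          return x;
  }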

-- 
brian m. carlson / brian with sandals: Houston, Texas, US
+1 832 623 2791 | http://www.crustytoothpaste.net/~bmc | My opinion only
OpenPGP: RSA v4 4096b: 88AC E9B2 9196 305B A994 7552 F1BA 225C 0223 B187

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 21:53           ` brian m. carlson
@ 2014-01-23 22:07             ` Jeff King
  2014-01-23 22:17               ` Jonathan Nieder
  0 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2014-01-23 22:07 UTC (permalink / raw)
  To: git; +Cc: Shawn Pearce, Jonathan Nieder

On Thu, Jan 23, 2014 at 09:53:26PM +0000, brian m. carlson wrote:

> On Thu, Jan 23, 2014 at 03:26:45PM -0500, Jeff King wrote:
> > Looking over the format, I think the only thing preventing 4-byte
> > alignment is the 1-byte XOR-offset and 1-byte flags field for each
> > bitmap. If we ever have a v2, we could pad the sum of those out to 4
> > bytes. Is 4-byte alignment enough? We do treat the actual data as 64-bit
> > integers. I wonder if that would have problems on Sparc64, for example.
> 
> Yes, it will.  SPARC requires all loads be naturally aligned (4-byte to
> an address that's a multiple of 4, 8-byte to a multiple of 8, and so
> on).  In general, architectures that do not support unaligned access
> require natural alignment for all quantities.

In that case, I think we cannot even blame Shawn. The ewah serialization
format itself (which JGit inherited from the javaewah library) has 8
bytes of header and 4 bytes of trailer. So packed serialized ewahs
wouldn't be 8-byte aligned (though of course he could have added his own
padding to each when we have a sequence of them).

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 22:07             ` Jeff King
@ 2014-01-23 22:17               ` Jonathan Nieder
  2014-01-23 22:26                 ` Jeff King
  0 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 22:17 UTC (permalink / raw)
  To: Jeff King; +Cc: git, Shawn Pearce

Jeff King wrote:
> On Thu, Jan 23, 2014 at 09:53:26PM +0000, brian m. carlson wrote:

>> Yes, it will.  SPARC requires all loads be naturally aligned (4-byte to
>> an address that's a multiple of 4, 8-byte to a multiple of 8, and so
>> on).  In general, architectures that do not support unaligned access
>> require natural alignment for all quantities.
>
> In that case, I think we cannot even blame Shawn. The ewah serialization
> format itself (which JGit inherited from the javaewah library) has 8
> bytes of header and 4 bytes of trailer. So packed serialized ewahs
> wouldn't be 8-byte aligned

I don't think that's a big issue.  A pair of 4-byte reads would not be
too slow.

Even on x86, aligned reads are supposed to be faster than unaligned
reads (though I haven't looked at benchmarks recently).

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 22:17               ` Jonathan Nieder
@ 2014-01-23 22:26                 ` Jeff King
  2014-01-23 22:33                   ` Jonathan Nieder
  0 siblings, 1 reply; 68+ messages in thread
From: Jeff King @ 2014-01-23 22:26 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git, Shawn Pearce

On Thu, Jan 23, 2014 at 02:17:55PM -0800, Jonathan Nieder wrote:

> Jeff King wrote:
> > On Thu, Jan 23, 2014 at 09:53:26PM +0000, brian m. carlson wrote:
> 
> >> Yes, it will.  SPARC requires all loads be naturally aligned (4-byte to
> >> an address that's a multiple of 4, 8-byte to a multiple of 8, and so
> >> on).  In general, architectures that do not support unaligned access
> >> require natural alignment for all quantities.
> >
> > In that case, I think we cannot even blame Shawn. The ewah serialization
> > format itself (which JGit inherited from the javaewah library) has 8
> > bytes of header and 4 bytes of trailer. So packed serialized ewahs
> > wouldn't be 8-byte aligned
> 
> I don't think that's a big issue.  A pair of 4-byte reads would not be
> too slow.

The header is actually two separate 4-byte values, so that's fine. But
between the header and trailer are a series of 8-byte data values, and
that is what we need the 8-byte alignment for. So the _first_ ewah's
data is 8-byte aligned, but then it offsets the alignment with a single
4-byte trailer. So the next ewah, if they are packed in a sequence,
will have its data misaligned.

You could solve it by putting an empty 4-byte pad at the end of each
ewah (and of course making sure the first one is 8-byte aligned).
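
Spelling that out (following the sizes described above): a serialized
ewah is 8 bytes of header, N 8-byte words, and a 4-byte trailer, i.e.
12 + 8N bytes in total. Packed back to back:

  ewah 0: header at offset 0,       data at offset 8        -> aligned
  ewah 1: header at offset 12 + 8N, data at offset 20 + 8N  -> off by 4

With a 4-byte pad after each trailer, every ewah occupies 16 + 8N bytes,
so the data of the next one lands at 24 + 8N, which is 8-byte aligned
again (assuming the first ewah itself starts on an 8-byte boundary).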

Anyway, this is all academic until we are designing bitmap v2, which I
do not plan on doing anytime soon.

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v4 08/23] ewah: compressed bitmap implementation
  2014-01-23 22:26                 ` Jeff King
@ 2014-01-23 22:33                   ` Jonathan Nieder
  0 siblings, 0 replies; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 22:33 UTC (permalink / raw)
  To: Jeff King; +Cc: git, Shawn Pearce

Jeff King wrote:
> On Thu, Jan 23, 2014 at 02:17:55PM -0800, Jonathan Nieder wrote:

>> I don't think that's a big issue.  A pair of 4-byte reads would not be
>> too slow.
>
> The header is actually two separate 4-byte values, so that's fine. But
> between the header and trailer are a series of 8-byte data values, and
> that is what we need the 8-byte alignment for.

Sorry for the lack of clarity.  What I meant is that a 4-byte aligned
8-byte value can be read using a pair of 4-byte reads, which is less
of a performance issue than a completely unaligned value.

[...]
> Anyway, this is all academic until we are designing bitmap v2, which I
> do not plan on doing anytime soon.

Sure, fair enough. :)

Jonathan

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH v2 0/3] unaligned reads from .bitmap files
  2014-01-23 21:20       ` [PATCH v2 0/3] unaligned reads from .bitmap files Jeff King
                           ` (2 preceding siblings ...)
  2014-01-23 21:27         ` [PATCH 3/3] ewah: support platforms that require aligned reads Jeff King
@ 2014-01-23 23:17         ` Jonathan Nieder
  3 siblings, 0 replies; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 23:17 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:

> Here it is again, fixing the issues we've discussed.

Thanks!  Passes all tests.

Tested-by: Jonathan Nieder <jrnieder@gmail.com> # ARMv5

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 1/3] block-sha1: factor out get_be and put_be wrappers
  2014-01-23 21:23         ` [PATCH 1/3] block-sha1: factor out get_be and put_be wrappers Jeff King
@ 2014-01-23 23:19           ` Jonathan Nieder
  0 siblings, 0 replies; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 23:19 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:

> These short names might not be descriptive enough now that they are
> globals. However, they make sense to me.

Yeah, I think they're clear.  And they match the Linux kernel's
get_unaligned_be32() / put_unaligned_be32().

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 2/3] read-cache: use get_be32 instead of hand-rolled ntoh_l
  2014-01-23 21:26         ` [PATCH 2/3] read-cache: use get_be32 instead of hand-rolled ntoh_l Jeff King
@ 2014-01-23 23:34           ` Jonathan Nieder
  2014-01-24  2:22             ` Jeff King
  0 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 23:34 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:

> This _might_ still suffer from the issue fixed in 5f6a112 (block-sha1:
> avoid pointer conversion that violates alignment constraints,
> 2012-07-22), as we are taking the pointer of a uint32 in a struct.

No conversion, so no issue there.

Line 1484 looks more problematic:

		disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);

In v4 indexes, src_offset doesn't have any particular alignment so
this conversion has undefined behavior.

Do you know if any tests exercise this code with paths that don't
have convenient length?

[...]
> I'm inclined to leave it for now, as we haven't made anything worse, and
> nobody has reported a problem.

Yeah, agreed.

Probably the simplest fix would be to take a char *, memcpy into a
new (aligned) buffer and then byteswap in place, but that's
orthogonal to this series.
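
A rough sketch of that idea, in terms of the code quoted above (it shows
only one field, and glosses over the variable-length name that follows
the fixed part of the on-disk entry):

  struct ondisk_cache_entry copy;

  /*
   * Copy the possibly-unaligned on-disk record into an aligned local,
   * then do the byte-order conversion from the copy.
   */
  memcpy(&copy, (char *)mmap + src_offset, sizeof(copy));
  ce->ce_mode = get_be32(&copy.mode);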

Thanks,
Jonathan

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 3/3] ewah: support platforms that require aligned reads
  2014-01-23 21:27         ` [PATCH 3/3] ewah: support platforms that require aligned reads Jeff King
@ 2014-01-23 23:44           ` Jonathan Nieder
  2014-01-23 23:49             ` Vicent Martí
  0 siblings, 1 reply; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-23 23:44 UTC (permalink / raw)
  To: Jeff King; +Cc: git

Jeff King wrote:

> --- a/ewah/ewah_io.c
> +++ b/ewah/ewah_io.c
> @@ -112,23 +112,38 @@ int ewah_serialize(struct ewah_bitmap *self, int fd)
[...]
> +#if __BYTE_ORDER != __BIG_ENDIAN

Is this portable?

On a platform without __BYTE_ORDER or __BIG_ENDIAN defined,
it is interpreted as

	#if 0 != 0

which means that such platforms are assumed to be big endian.
Does Mingw define __BYTE_ORDER, for example?


> +	{
> +		size_t i;
> +		for (i = 0; i < self->buffer_size; ++i)
> +			self->buffer[i] = ntohll(self->buffer[i]);
> +	}
> +#endif

It's tempting to guard with something like

	if (ntohl(1) != 1) {
		...
	}

The optimizer can tell if this is true or false at compile time, so
it shouldn't slow anything down.

With that change,
Reviewed-by: Jonathan Nieder <jrnieder@gmail.com>

Thanks for the quick fix.

diff --git i/ewah/ewah_io.c w/ewah/ewah_io.c
index 4a7fae6..5a527a4 100644
--- i/ewah/ewah_io.c
+++ w/ewah/ewah_io.c
@@ -135,13 +135,11 @@ int ewah_read_mmap(struct ewah_bitmap *self, void *map, size_t len)
 	memcpy(self->buffer, ptr, self->buffer_size * sizeof(uint64_t));
 	ptr += self->buffer_size * sizeof(uint64_t);
 
-#if __BYTE_ORDER != __BIG_ENDIAN
-	{
+	if (ntohl(1) != 1) {
 		size_t i;
 		for (i = 0; i < self->buffer_size; ++i)
 			self->buffer[i] = ntohll(self->buffer[i]);
 	}
-#endif
 
 	self->rlw = self->buffer + get_be32(ptr);
 

^ permalink raw reply related	[flat|nested] 68+ messages in thread

* Re: [PATCH 3/3] ewah: support platforms that require aligned reads
  2014-01-23 23:44           ` Jonathan Nieder
@ 2014-01-23 23:49             ` Vicent Martí
  2014-01-24  0:15               ` Jonathan Nieder
  0 siblings, 1 reply; 68+ messages in thread
From: Vicent Martí @ 2014-01-23 23:49 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: Jeff King, git

On Fri, Jan 24, 2014 at 12:44 AM, Jonathan Nieder <jrnieder@gmail.com> wrote:
>> --- a/ewah/ewah_io.c
>> +++ b/ewah/ewah_io.c
>> @@ -112,23 +112,38 @@ int ewah_serialize(struct ewah_bitmap *self, int fd)
> [...]
>> +#if __BYTE_ORDER != __BIG_ENDIAN
>
> Is this portable?

We explicitly set the __BYTE_ORDER macros in `compat/bswap.h`. In
fact, this preprocessor conditional is the same one that we use when
choosing what version of the `ntohl` macro to define, so that's why I
decided to use it here.

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 3/3] ewah: support platforms that require aligned reads
  2014-01-23 23:49             ` Vicent Martí
@ 2014-01-24  0:15               ` Jonathan Nieder
  0 siblings, 0 replies; 68+ messages in thread
From: Jonathan Nieder @ 2014-01-24  0:15 UTC (permalink / raw)
  To: Vicent Martí; +Cc: Jeff King, git

Vicent Martí wrote:
> On Fri, Jan 24, 2014 at 12:44 AM, Jonathan Nieder <jrnieder@gmail.com> wrote:

>>> +#if __BYTE_ORDER != __BIG_ENDIAN
>>
>> Is this portable?
>
> We explicitly set the __BYTE_ORDER macros in `compat/bswap.h`. In
> fact, this preprocessor conditional is the same one that we use when
> choosing what version of the `ntohl` macro to define, so that's why I
> decided to use it here.

Ah, thanks.  Sorry I missed that.  So feel free to add my reviewed-by
to the patch without my tweak, too.

^ permalink raw reply	[flat|nested] 68+ messages in thread

* Re: [PATCH 2/3] read-cache: use get_be32 instead of hand-rolled ntoh_l
  2014-01-23 23:34           ` Jonathan Nieder
@ 2014-01-24  2:22             ` Jeff King
  0 siblings, 0 replies; 68+ messages in thread
From: Jeff King @ 2014-01-24  2:22 UTC (permalink / raw)
  To: Jonathan Nieder; +Cc: git

On Thu, Jan 23, 2014 at 03:34:16PM -0800, Jonathan Nieder wrote:

> Line 1484 looks more problematic:
> 
> 		disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
> 
> In v4 indexes, src_offset doesn't have any particular alignment so
> this conversion has undefined behavior.
> 
> Do you know if any tests exercise this code with paths that don't
> have convenient length?

My impression was that we are not testing v4 index at all (and grepping
for `--index-version`, which I think is the only way to write it,
supports that).

-Peff

^ permalink raw reply	[flat|nested] 68+ messages in thread

end of thread, other threads:[~2014-01-24  2:22 UTC | newest]

Thread overview: 68+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-12-21 13:56 [PATCH v4 0/22] pack bitmaps Jeff King
2013-12-21 13:59 ` [PATCH v4 01/23] sha1write: make buffer const-correct Jeff King
2013-12-22  9:06   ` Christian Couder
2013-12-21 13:59 ` [PATCH v4 02/23] revindex: Export new APIs Jeff King
2013-12-21 13:59 ` [PATCH v4 03/23] pack-objects: Refactor the packing list Jeff King
2013-12-21 13:59 ` [PATCH v4 04/23] pack-objects: factor out name_hash Jeff King
2013-12-21 13:59 ` [PATCH v4 05/23] revision: allow setting custom limiter function Jeff King
2013-12-21 13:59 ` [PATCH v4 06/23] sha1_file: export `git_open_noatime` Jeff King
2013-12-21 13:59 ` [PATCH v4 07/23] compat: add endianness helpers Jeff King
2013-12-21 13:59 ` [PATCH v4 08/23] ewah: compressed bitmap implementation Jeff King
2014-01-23  2:05   ` Jonathan Nieder
2014-01-23 18:33     ` Jeff King
2014-01-23 18:35       ` [PATCH 1/2] compat: move unaligned helpers to bswap.h Jeff King
2014-01-23 19:41         ` Jonathan Nieder
2014-01-23 19:44           ` Jeff King
2014-01-23 19:56             ` Jonathan Nieder
2014-01-23 20:04               ` Jeff King
2014-01-23 20:08                 ` Jonathan Nieder
2014-01-23 20:09                   ` Jeff King
2014-01-23 18:35       ` [PATCH 2/2] ewah: support platforms that require aligned reads Jeff King
2014-01-23 19:52       ` [PATCH v4 08/23] ewah: compressed bitmap implementation Jonathan Nieder
2014-01-23 20:03         ` Jeff King
2014-01-23 20:12           ` Jonathan Nieder
2014-01-23 20:13             ` Jeff King
2014-01-23 20:23           ` Jonathan Nieder
2014-01-23 20:29             ` Jeff King
2014-01-23 20:38           ` Jeff King
2014-01-23 20:14       ` Shawn Pearce
2014-01-23 20:26         ` Jeff King
2014-01-23 21:53           ` brian m. carlson
2014-01-23 22:07             ` Jeff King
2014-01-23 22:17               ` Jonathan Nieder
2014-01-23 22:26                 ` Jeff King
2014-01-23 22:33                   ` Jonathan Nieder
2014-01-23 20:18       ` Jonathan Nieder
2014-01-23 21:20       ` [PATCH v2 0/3] unaligned reads from .bitmap files Jeff King
2014-01-23 21:23         ` [PATCH 1/3] block-sha1: factor out get_be and put_be wrappers Jeff King
2014-01-23 23:19           ` Jonathan Nieder
2014-01-23 21:26         ` [PATCH 2/3] read-cache: use get_be32 instead of hand-rolled ntoh_l Jeff King
2014-01-23 23:34           ` Jonathan Nieder
2014-01-24  2:22             ` Jeff King
2014-01-23 21:27         ` [PATCH 3/3] ewah: support platforms that require aligned reads Jeff King
2014-01-23 23:44           ` Jonathan Nieder
2014-01-23 23:49             ` Vicent Martí
2014-01-24  0:15               ` Jonathan Nieder
2014-01-23 23:17         ` [PATCH v2 0/3] unaligned reads from .bitmap files Jonathan Nieder
2013-12-21 13:59 ` [PATCH v4 09/23] documentation: add documentation for the bitmap format Jeff King
2013-12-21 14:00 ` [PATCH v4 10/23] pack-bitmap: add support for bitmap indexes Jeff King
2013-12-21 14:00 ` [PATCH v4 11/23] pack-objects: split add_object_entry Jeff King
2013-12-21 14:00 ` [PATCH v4 12/23] pack-objects: use bitmaps when packing objects Jeff King
2013-12-21 14:00 ` [PATCH v4 13/23] rev-list: add bitmap mode to speed up object lists Jeff King
2013-12-21 14:00 ` [PATCH v4 14/23] pack-objects: implement bitmap writing Jeff King
2013-12-21 14:00 ` [PATCH v4 15/23] repack: stop using magic number for ARRAY_SIZE(exts) Jeff King
2013-12-21 14:00 ` [PATCH v4 16/23] repack: turn exts array into array-of-struct Jeff King
2013-12-21 14:00 ` [PATCH v4 17/23] repack: handle optional files created by pack-objects Jeff King
2013-12-21 14:00 ` [PATCH v4 18/23] repack: consider bitmaps when performing repacks Jeff King
2013-12-21 14:00 ` [PATCH v4 19/23] count-objects: recognize .bitmap in garbage-checking Jeff King
2013-12-21 14:00 ` [PATCH v4 20/23] t: add basic bitmap functionality tests Jeff King
2013-12-21 14:00 ` [PATCH v4 21/23] t/perf: add tests for pack bitmaps Jeff King
2013-12-21 14:00 ` [PATCH v4 22/23] pack-bitmap: implement optional name_hash cache Jeff King
2013-12-21 14:00 ` [PATCH v4 23/23] compat/mingw.h: Fix the MinGW and msvc builds Jeff King
2013-12-25 22:08   ` Erik Faye-Lund
2013-12-28 10:00     ` Jeff King
2013-12-28 10:06       ` Vicent Martí
2013-12-28 15:58       ` Ramsay Jones
2013-12-21 14:03 ` [PATCH v4 0/22] pack bitmaps Jeff King
2013-12-21 14:05 ` Jeff King
2013-12-21 18:34 ` Thomas Rast
