* [PATCH 01/10] zero-initialize object_info structs
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
@ 2013-07-10 11:35 ` Jeff King
2013-07-10 11:35 ` [PATCH 02/10] teach sha1_object_info_extended a "disk_size" query Jeff King
` (8 subsequent siblings)
9 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:35 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
The sha1_object_info_extended function expects the caller to
provide a "struct object_info" which contains pointers to
"query" items that will be filled in. The purpose of
providing pointers rather than storing the response directly
in the struct is so that callers can choose not to incur the
expense in finding particular fields that they do not care
about.
Right now the only query item is "sizep", and all callers
set it explicitly to choose whether or not to query it; they
can then leave the rest of the struct uninitialized.
However, as we add new query items, each caller will have to
be updated to explicitly turn off the new ones (by setting
them to NULL). Instead, let's teach each caller to
zero-initialize the struct, so that they do not have to
learn about each new query item added.
Signed-off-by: Jeff King <peff@peff.net>
---
sha1_file.c | 2 +-
streaming.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/sha1_file.c b/sha1_file.c
index 0af19c0..de06a97 100644
--- a/sha1_file.c
+++ b/sha1_file.c
@@ -2428,7 +2428,7 @@ int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
{
- struct object_info oi;
+ struct object_info oi = {0};
oi.sizep = sizep;
return sha1_object_info_extended(sha1, &oi);
diff --git a/streaming.c b/streaming.c
index cabcd9d..cac282f 100644
--- a/streaming.c
+++ b/streaming.c
@@ -135,7 +135,7 @@ struct git_istream *open_istream(const unsigned char *sha1,
struct stream_filter *filter)
{
struct git_istream *st;
- struct object_info oi;
+ struct object_info oi = {0};
const unsigned char *real = lookup_replace_object(sha1);
enum input_source src = istream_source(real, type, &oi);
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* [PATCH 02/10] teach sha1_object_info_extended a "disk_size" query
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
2013-07-10 11:35 ` [PATCH 01/10] zero-initialize object_info structs Jeff King
@ 2013-07-10 11:35 ` Jeff King
2013-07-10 11:36 ` [PATCH 03/10] t1006: modernize output comparisons Jeff King
` (7 subsequent siblings)
9 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:35 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
Using sha1_object_info_extended, a caller can find out the
type of an object, its size, and information about where it
is stored. In addition to the object's "true" size, it can
also be useful to know the size that the object takes on
disk (e.g., to generate statistics about which refs consume
space).
This patch adds a "disk_sizep" field to "struct object_info",
and fills it in during sha1_object_info_extended if it is
non-NULL.
Signed-off-by: Jeff King <peff@peff.net>
---
cache.h | 1 +
sha1_file.c | 20 ++++++++++++++++----
2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/cache.h b/cache.h
index dd0fb33..2d06169 100644
--- a/cache.h
+++ b/cache.h
@@ -1130,6 +1130,7 @@ struct object_info {
struct object_info {
/* Request */
unsigned long *sizep;
+ unsigned long *disk_sizep;
/* Response */
enum {
diff --git a/sha1_file.c b/sha1_file.c
index de06a97..4c2365f 100644
--- a/sha1_file.c
+++ b/sha1_file.c
@@ -1697,7 +1697,8 @@ static int packed_object_info(struct packed_git *p, off_t obj_offset,
#define POI_STACK_PREALLOC 64
static int packed_object_info(struct packed_git *p, off_t obj_offset,
- unsigned long *sizep, int *rtype)
+ unsigned long *sizep, int *rtype,
+ unsigned long *disk_sizep)
{
struct pack_window *w_curs = NULL;
unsigned long size;
@@ -1731,6 +1732,11 @@ static int packed_object_info(struct packed_git *p, off_t obj_offset,
}
}
+ if (disk_sizep) {
+ struct revindex_entry *revidx = find_pack_revindex(p, obj_offset);
+ *disk_sizep = revidx[1].offset - obj_offset;
+ }
+
while (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
off_t base_offset;
/* Push the object we're going to leave behind */
@@ -2357,7 +2363,8 @@ struct packed_git *find_sha1_pack(const unsigned char *sha1,
}
-static int sha1_loose_object_info(const unsigned char *sha1, unsigned long *sizep)
+static int sha1_loose_object_info(const unsigned char *sha1, unsigned long *sizep,
+ unsigned long *disk_sizep)
{
int status;
unsigned long mapsize, size;
@@ -2368,6 +2375,8 @@ static int sha1_loose_object_info(const unsigned char *sha1, unsigned long *size
map = map_sha1_file(sha1, &mapsize);
if (!map)
return -1;
+ if (disk_sizep)
+ *disk_sizep = mapsize;
if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
status = error("unable to unpack %s header",
sha1_to_hex(sha1));
@@ -2391,13 +2400,15 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi)
if (co) {
if (oi->sizep)
*(oi->sizep) = co->size;
+ if (oi->disk_sizep)
+ *(oi->disk_sizep) = 0;
oi->whence = OI_CACHED;
return co->type;
}
if (!find_pack_entry(sha1, &e)) {
/* Most likely it's a loose object. */
- status = sha1_loose_object_info(sha1, oi->sizep);
+ status = sha1_loose_object_info(sha1, oi->sizep, oi->disk_sizep);
if (status >= 0) {
oi->whence = OI_LOOSE;
return status;
@@ -2409,7 +2420,8 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi)
return status;
}
- status = packed_object_info(e.p, e.offset, oi->sizep, &rtype);
+ status = packed_object_info(e.p, e.offset, oi->sizep, &rtype,
+ oi->disk_sizep);
if (status < 0) {
mark_bad_packed_object(e.p, sha1);
status = sha1_object_info_extended(sha1, oi);
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* [PATCH 03/10] t1006: modernize output comparisons
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
2013-07-10 11:35 ` [PATCH 01/10] zero-initialize object_info structs Jeff King
2013-07-10 11:35 ` [PATCH 02/10] teach sha1_object_info_extended a "disk_size" query Jeff King
@ 2013-07-10 11:36 ` Jeff King
2013-07-10 11:38 ` [PATCH 04/10] cat-file: teach --batch to stream blob objects Jeff King
` (6 subsequent siblings)
9 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:36 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
In modern tests, we typically put output into a file and
compare it with test_cmp. This is nicer than just comparing
via "test", and much shorter than comparing via "test" and
printing a custom message.
Signed-off-by: Jeff King <peff@peff.net>
---
I didn't do the whole file, just the ones of a particular style close to
what I was touching.
t/t1006-cat-file.sh | 61 ++++++++++++++++-------------------------------------
1 file changed, 18 insertions(+), 43 deletions(-)
diff --git a/t/t1006-cat-file.sh b/t/t1006-cat-file.sh
index 9cc5c6b..c2f2503 100755
--- a/t/t1006-cat-file.sh
+++ b/t/t1006-cat-file.sh
@@ -36,66 +36,41 @@ $content"
'
test_expect_success "Type of $type is correct" '
- test $type = "$(git cat-file -t $sha1)"
+ echo $type >expect &&
+ git cat-file -t $sha1 >actual &&
+ test_cmp expect actual
'
test_expect_success "Size of $type is correct" '
- test $size = "$(git cat-file -s $sha1)"
+ echo $size >expect &&
+ git cat-file -s $sha1 >actual &&
+ test_cmp expect actual
'
test -z "$content" ||
test_expect_success "Content of $type is correct" '
- expect="$(maybe_remove_timestamp "$content" $no_ts)"
- actual="$(maybe_remove_timestamp "$(git cat-file $type $sha1)" $no_ts)"
-
- if test "z$expect" = "z$actual"
- then
- : happy
- else
- echo "Oops: expected $expect"
- echo "but got $actual"
- false
- fi
+ maybe_remove_timestamp "$content" $no_ts >expect &&
+ maybe_remove_timestamp "$(git cat-file $type $sha1)" $no_ts >actual &&
+ test_cmp expect actual
'
test_expect_success "Pretty content of $type is correct" '
- expect="$(maybe_remove_timestamp "$pretty_content" $no_ts)"
- actual="$(maybe_remove_timestamp "$(git cat-file -p $sha1)" $no_ts)"
- if test "z$expect" = "z$actual"
- then
- : happy
- else
- echo "Oops: expected $expect"
- echo "but got $actual"
- false
- fi
+ maybe_remove_timestamp "$pretty_content" $no_ts >expect &&
+ maybe_remove_timestamp "$(git cat-file -p $sha1)" $no_ts >actual &&
+ test_cmp expect actual
'
test -z "$content" ||
test_expect_success "--batch output of $type is correct" '
- expect="$(maybe_remove_timestamp "$batch_output" $no_ts)"
- actual="$(maybe_remove_timestamp "$(echo $sha1 | git cat-file --batch)" $no_ts)"
- if test "z$expect" = "z$actual"
- then
- : happy
- else
- echo "Oops: expected $expect"
- echo "but got $actual"
- false
- fi
+ maybe_remove_timestamp "$batch_output" $no_ts >expect &&
+ maybe_remove_timestamp "$(echo $sha1 | git cat-file --batch)" $no_ts >actual &&
+ test_cmp expect actual
'
test_expect_success "--batch-check output of $type is correct" '
- expect="$sha1 $type $size"
- actual="$(echo_without_newline $sha1 | git cat-file --batch-check)"
- if test "z$expect" = "z$actual"
- then
- : happy
- else
- echo "Oops: expected $expect"
- echo "but got $actual"
- false
- fi
+ echo "$sha1 $type $size" >expect &&
+ echo_without_newline $sha1 | git cat-file --batch-check >actual &&
+ test_cmp expect actual
'
}
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* [PATCH 04/10] cat-file: teach --batch to stream blob objects
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
` (2 preceding siblings ...)
2013-07-10 11:36 ` [PATCH 03/10] t1006: modernize output comparisons Jeff King
@ 2013-07-10 11:38 ` Jeff King
2013-07-10 11:38 ` [PATCH 05/10] cat-file: refactor --batch option parsing Jeff King
` (5 subsequent siblings)
9 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:38 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
The regular "git cat-file -p" and "git cat-file blob" code
paths already learned to stream large blobs. Let's do the
same here.
Note that this means we look up the type and size before
making a decision of whether to load the object into memory
or stream (just like the "-p" code path does). That can lead
to extra work, but it should be dwarfed by the cost of
actually accessing the object itself. In my measurements,
there was a 1-2% slowdown when using "--batch" on a large
number of objects.
Signed-off-by: Jeff King <peff@peff.net>
---
builtin/cat-file.c | 41 ++++++++++++++++++++++++++++-------------
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 045cee7..70dd8c8 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -117,12 +117,36 @@ static int batch_one_object(const char *obj_name, int print_contents)
return 0;
}
+static void print_object_or_die(int fd, const unsigned char *sha1,
+ enum object_type type, unsigned long size)
+{
+ if (type == OBJ_BLOB) {
+ if (stream_blob_to_fd(fd, sha1, NULL, 0) < 0)
+ die("unable to stream %s to stdout", sha1_to_hex(sha1));
+ }
+ else {
+ enum object_type rtype;
+ unsigned long rsize;
+ void *contents;
+
+ contents = read_sha1_file(sha1, &rtype, &rsize);
+ if (!contents)
+ die("object %s disappeared", sha1_to_hex(sha1));
+ if (rtype != type)
+ die("object %s changed type!?", sha1_to_hex(sha1));
+ if (rsize != size)
+ die("object %s changed size!?", sha1_to_hex(sha1));
+
+ write_or_die(fd, contents, size);
+ free(contents);
+ }
+}
+
static int batch_one_object(const char *obj_name, int print_contents)
{
unsigned char sha1[20];
enum object_type type = 0;
unsigned long size;
- void *contents = NULL;
if (!obj_name)
return 1;
@@ -133,16 +157,10 @@ static int batch_one_object(const char *obj_name, int print_contents)
return 0;
}
- if (print_contents == BATCH)
- contents = read_sha1_file(sha1, &type, &size);
- else
- type = sha1_object_info(sha1, &size);
-
+ type = sha1_object_info(sha1, &size);
if (type <= 0) {
printf("%s missing\n", obj_name);
fflush(stdout);
- if (print_contents == BATCH)
- free(contents);
return 0;
}
@@ -150,12 +168,9 @@ static int batch_one_object(const char *obj_name, int print_contents)
fflush(stdout);
if (print_contents == BATCH) {
- write_or_die(1, contents, size);
- printf("\n");
- fflush(stdout);
- free(contents);
+ print_object_or_die(1, sha1, type, size);
+ write_or_die(1, "\n", 1);
}
-
return 0;
}
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* [PATCH 05/10] cat-file: refactor --batch option parsing
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
` (3 preceding siblings ...)
2013-07-10 11:38 ` [PATCH 04/10] cat-file: teach --batch to stream blob objects Jeff King
@ 2013-07-10 11:38 ` Jeff King
2013-07-10 11:45 ` [PATCH 06/10] cat-file: add --batch-check=<format> Jeff King
` (4 subsequent siblings)
9 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:38 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
We currently use an int to tell us whether --batch parsing
is on, and if so, whether we should print the full object
contents. Let's instead factor this into a struct, filled in
by callback, which will make further batch-related options
easy to add.
Signed-off-by: Jeff King <peff@peff.net>
---
builtin/cat-file.c | 56 ++++++++++++++++++++++++++++++++++++------------------
1 file changed, 38 insertions(+), 18 deletions(-)
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 70dd8c8..5254fe8 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -13,9 +13,6 @@
#include "userdiff.h"
#include "streaming.h"
-#define BATCH 1
-#define BATCH_CHECK 2
-
static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
{
unsigned char sha1[20];
@@ -142,7 +139,12 @@ static void print_object_or_die(int fd, const unsigned char *sha1,
}
}
-static int batch_one_object(const char *obj_name, int print_contents)
+struct batch_options {
+ int enabled;
+ int print_contents;
+};
+
+static int batch_one_object(const char *obj_name, struct batch_options *opt)
{
unsigned char sha1[20];
enum object_type type = 0;
@@ -167,19 +169,19 @@ static int batch_objects(int print_contents)
printf("%s %s %lu\n", sha1_to_hex(sha1), typename(type), size);
fflush(stdout);
- if (print_contents == BATCH) {
+ if (opt->print_contents) {
print_object_or_die(1, sha1, type, size);
write_or_die(1, "\n", 1);
}
return 0;
}
-static int batch_objects(int print_contents)
+static int batch_objects(struct batch_options *opt)
{
struct strbuf buf = STRBUF_INIT;
while (strbuf_getline(&buf, stdin, '\n') != EOF) {
- int error = batch_one_object(buf.buf, print_contents);
+ int error = batch_one_object(buf.buf, opt);
if (error)
return error;
}
@@ -201,10 +203,28 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
return git_default_config(var, value, cb);
}
+static int batch_option_callback(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ struct batch_options *bo = opt->value;
+
+ if (unset) {
+ memset(bo, 0, sizeof(*bo));
+ return 0;
+ }
+
+ bo->enabled = 1;
+ bo->print_contents = !strcmp(opt->long_name, "batch");
+
+ return 0;
+}
+
int cmd_cat_file(int argc, const char **argv, const char *prefix)
{
- int opt = 0, batch = 0;
+ int opt = 0;
const char *exp_type = NULL, *obj_name = NULL;
+ struct batch_options batch = {0};
const struct option options[] = {
OPT_GROUP(N_("<type> can be one of: blob, tree, commit, tag")),
@@ -215,12 +235,12 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
OPT_SET_INT('p', NULL, &opt, N_("pretty-print object's content"), 'p'),
OPT_SET_INT(0, "textconv", &opt,
N_("for blob objects, run textconv on object's content"), 'c'),
- OPT_SET_INT(0, "batch", &batch,
- N_("show info and content of objects fed from the standard input"),
- BATCH),
- OPT_SET_INT(0, "batch-check", &batch,
- N_("show info about objects fed from the standard input"),
- BATCH_CHECK),
+ { OPTION_CALLBACK, 0, "batch", &batch, NULL,
+ N_("show info and content of objects fed from the standard input"),
+ PARSE_OPT_NOARG, batch_option_callback },
+ { OPTION_CALLBACK, 0, "batch-check", &batch, NULL,
+ N_("show info about objects fed from the standard input"),
+ PARSE_OPT_NOARG, batch_option_callback },
OPT_END()
};
@@ -237,19 +257,19 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
else
usage_with_options(cat_file_usage, options);
}
- if (!opt && !batch) {
+ if (!opt && !batch.enabled) {
if (argc == 2) {
exp_type = argv[0];
obj_name = argv[1];
} else
usage_with_options(cat_file_usage, options);
}
- if (batch && (opt || argc)) {
+ if (batch.enabled && (opt || argc)) {
usage_with_options(cat_file_usage, options);
}
- if (batch)
- return batch_objects(batch);
+ if (batch.enabled)
+ return batch_objects(&batch);
return cat_one_file(opt, exp_type, obj_name);
}
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* [PATCH 06/10] cat-file: add --batch-check=<format>
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
` (4 preceding siblings ...)
2013-07-10 11:38 ` [PATCH 05/10] cat-file: refactor --batch option parsing Jeff King
@ 2013-07-10 11:45 ` Jeff King
2013-07-10 11:57 ` Eric Sunshine
2013-07-10 14:51 ` Ramkumar Ramachandra
2013-07-10 11:46 ` [PATCH 07/10] cat-file: add %(objectsize:disk) format atom Jeff King
` (3 subsequent siblings)
9 siblings, 2 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:45 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
The `cat-file --batch-check` command can be used to quickly
get information about a large number of objects. However, it
provides a fixed set of information.
This patch adds an optional <format> option to --batch-check
to allow a caller to specify which items they are interested
in, and in which order to output them. This is not very
exciting for now, since we provide the same limited set that
you could already get. However, it opens the door to adding
new format items in the future without breaking backwards
compatibility (or forcing callers to pay the cost to
calculate uninteresting items).
Since the --batch option shares code with --batch-check, it
receives the same feature, though it is less likely to be of
interest there.
The format atom names are chosen to match their counterparts
in for-each-ref. Though we do not (yet) share any code with
for-each-ref's formatter, this keeps the interface as
consistent as possible, and may help later on if the
implementations are unified.
Signed-off-by: Jeff King <peff@peff.net>
---
If the 1% slowdown in the streaming blob patch is too much, the simplest
thing would be to have this formatting apply only to --batch-check, and
let --batch follow its own simpler code path. I doubt anybody really
cares about having custom formats for --batch, as it is less about
analysis and more about getting enough information to recreate the
objects.
Also note that there is no %(contents) atom that one could use to
emulate --batch via --batch-check. I don't see much point, and it would
mean we would not want to build the formatting on strbuf_expand (because
we don't want to copy a large blob into the strbuf). We can add it later
if somebody finds an actual use.
Documentation/git-cat-file.txt | 55 ++++++++++++++++-----
builtin/cat-file.c | 107 +++++++++++++++++++++++++++++++++++------
t/t1006-cat-file.sh | 6 +++
3 files changed, 142 insertions(+), 26 deletions(-)
diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt
index 30d585a..dd5d6e4 100644
--- a/Documentation/git-cat-file.txt
+++ b/Documentation/git-cat-file.txt
@@ -58,12 +58,16 @@ OPTIONS
to apply the filter to the content recorded in the index at <path>.
--batch::
- Print the SHA-1, type, size, and contents of each object provided on
- stdin. May not be combined with any other options or arguments.
+--batch=<format>::
+ Print object information and contents for each object provided
+ on stdin. May not be combined with any other options or arguments.
+ See the section `BATCH OUTPUT` below for details.
--batch-check::
- Print the SHA-1, type, and size of each object provided on stdin. May not
- be combined with any other options or arguments.
+--batch-check=<format>::
+ Print object information for each object provided on stdin. May
+ not be combined with any other options or arguments. See the
+ section `BATCH OUTPUT` below for details.
OUTPUT
------
@@ -78,23 +82,52 @@ each object specified on stdin:
If <type> is specified, the raw (though uncompressed) contents of the <object>
will be returned.
-If '--batch' is specified, output of the following form is printed for each
-object specified on stdin:
+BATCH OUTPUT
+------------
+
+If `--batch` or `--batch-check` is given, `cat-file` will read objects
+from stdin, one per line, and print information about them.
+
+Each line is considered as a whole object name, and is parsed as if
+given to linkgit:git-rev-parse[1].
+
+You can specify the information shown for each object by using a custom
+`<format>`. The `<format>` is copied literally to stdout for each
+object, with placeholders of the form `%(atom)` expanded, followed by a
+newline. The available atoms are:
+
+`objectname`::
+ The sha1 hash of the object.
+
+`objecttype`::
+ The type of the object (the same as `cat-file -t` reports).
+
+`objectsize`::
+ The size, in bytes, of the object (the same as `cat-file -s`
+ reports).
+
+If no format is specified, the default format is `%(objectname)
+%(objecttype) %(objectsize)`.
+
+If `--batch` is specified, the object information is followed by the
+object contents (consisting of `%(objectsize)` bytes), followed by a
+newline.
+
+For example, `--batch` without a custom format would produce:
------------
<sha1> SP <type> SP <size> LF
<contents> LF
------------
-If '--batch-check' is specified, output of the following form is printed for
-each object specified on stdin:
+Whereas `--batch-check='%(objectname) %(objecttype)'` would produce:
------------
-<sha1> SP <type> SP <size> LF
+<sha1> SP <type> LF
------------
-For both '--batch' and '--batch-check', output of the following form is printed
-for each object specified on stdin that does not exist in the repository:
+If a name is specified on stdin that cannot be resolved to an object in
+the repository, then `cat-file` will ignore any custom format and print:
------------
<object> SP missing LF
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 5254fe8..b43a0c5 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -114,6 +114,66 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name)
return 0;
}
+struct expand_data {
+ unsigned char sha1[20];
+ enum object_type type;
+ unsigned long size;
+
+ /*
+ * If mark_query is true, we do not expand anything, but rather
+ * just mark the object_info with items we wish to query.
+ */
+ int mark_query;
+
+ /*
+ * After a mark_query run, this object_info is set up to be
+ * passed to sha1_object_info_extended. It will point to the data
+ * elements above, so you can retrieve the response from there.
+ */
+ struct object_info info;
+};
+
+static int is_atom(const char *atom, const char *s, int slen)
+{
+ int alen = strlen(atom);
+ return alen == slen && !memcmp(atom, s, alen);
+}
+
+static void expand_atom(struct strbuf *sb, const char *atom, int len,
+ void *vdata)
+{
+ struct expand_data *data = vdata;
+
+ if (is_atom("objectname", atom, len)) {
+ if (!data->mark_query)
+ strbuf_addstr(sb, sha1_to_hex(data->sha1));
+ } else if (is_atom("objecttype", atom, len)) {
+ if (!data->mark_query)
+ strbuf_addstr(sb, typename(data->type));
+ } else if (is_atom("objectsize", atom, len)) {
+ if (data->mark_query)
+ data->info.sizep = &data->size;
+ else
+ strbuf_addf(sb, "%lu", data->size);
+ } else
+ die("unknown format element: %.*s", len, atom);
+}
+
+static size_t expand_format(struct strbuf *sb, const char *start, void *data)
+{
+ const char *end;
+
+ if (*start != '(')
+ return 0;
+ end = strchr(start + 1, ')');
+ if (!end)
+ die("format element '%s' does not end in ')'", start);
+
+ expand_atom(sb, start + 1, end - start - 1, data);
+
+ return end - start + 1;
+}
+
static void print_object_or_die(int fd, const unsigned char *sha1,
enum object_type type, unsigned long size)
{
@@ -142,35 +202,37 @@ static int batch_one_object(const char *obj_name, struct batch_options *opt)
struct batch_options {
int enabled;
int print_contents;
+ const char *format;
};
-static int batch_one_object(const char *obj_name, struct batch_options *opt)
+static int batch_one_object(const char *obj_name, struct batch_options *opt,
+ struct expand_data *data)
{
- unsigned char sha1[20];
- enum object_type type = 0;
- unsigned long size;
+ struct strbuf buf = STRBUF_INIT;
if (!obj_name)
return 1;
- if (get_sha1(obj_name, sha1)) {
+ if (get_sha1(obj_name, data->sha1)) {
printf("%s missing\n", obj_name);
fflush(stdout);
return 0;
}
- type = sha1_object_info(sha1, &size);
- if (type <= 0) {
+ data->type = sha1_object_info_extended(data->sha1, &data->info);
+ if (data->type <= 0) {
printf("%s missing\n", obj_name);
fflush(stdout);
return 0;
}
- printf("%s %s %lu\n", sha1_to_hex(sha1), typename(type), size);
- fflush(stdout);
+ strbuf_expand(&buf, opt->format, expand_format, data);
+ strbuf_addch(&buf, '\n');
+ write_or_die(1, buf.buf, buf.len);
+ strbuf_release(&buf);
if (opt->print_contents) {
- print_object_or_die(1, sha1, type, size);
+ print_object_or_die(1, data->sha1, data->type, data->size);
write_or_die(1, "\n", 1);
}
return 0;
@@ -179,9 +241,23 @@ static int batch_objects(struct batch_options *opt)
static int batch_objects(struct batch_options *opt)
{
struct strbuf buf = STRBUF_INIT;
+ struct expand_data data;
+
+ if (!opt->format)
+ opt->format = "%(objectname) %(objecttype) %(objectsize)";
+
+ /*
+ * Expand once with our special mark_query flag, which will prime the
+ * object_info to be handed to sha1_object_info_extended for each
+ * object.
+ */
+ memset(&data, 0, sizeof(data));
+ data.mark_query = 1;
+ strbuf_expand(&buf, opt->format, expand_format, &data);
+ data.mark_query = 0;
while (strbuf_getline(&buf, stdin, '\n') != EOF) {
- int error = batch_one_object(buf.buf, opt);
+ int error = batch_one_object(buf.buf, opt, &data);
if (error)
return error;
}
@@ -216,6 +292,7 @@ static int batch_option_callback(const struct option *opt,
bo->enabled = 1;
bo->print_contents = !strcmp(opt->long_name, "batch");
+ bo->format = arg;
return 0;
}
@@ -235,12 +312,12 @@ int cmd_cat_file(int argc, const char **argv, const char *prefix)
OPT_SET_INT('p', NULL, &opt, N_("pretty-print object's content"), 'p'),
OPT_SET_INT(0, "textconv", &opt,
N_("for blob objects, run textconv on object's content"), 'c'),
- { OPTION_CALLBACK, 0, "batch", &batch, NULL,
+ { OPTION_CALLBACK, 0, "batch", &batch, "format",
N_("show info and content of objects fed from the standard input"),
- PARSE_OPT_NOARG, batch_option_callback },
- { OPTION_CALLBACK, 0, "batch-check", &batch, NULL,
+ PARSE_OPT_OPTARG, batch_option_callback },
+ { OPTION_CALLBACK, 0, "batch-check", &batch, "format",
N_("show info about objects fed from the standard input"),
- PARSE_OPT_NOARG, batch_option_callback },
+ PARSE_OPT_OPTARG, batch_option_callback },
OPT_END()
};
diff --git a/t/t1006-cat-file.sh b/t/t1006-cat-file.sh
index c2f2503..4e911fb 100755
--- a/t/t1006-cat-file.sh
+++ b/t/t1006-cat-file.sh
@@ -72,6 +72,12 @@ $content"
echo_without_newline $sha1 | git cat-file --batch-check >actual &&
test_cmp expect actual
'
+
+ test_expect_success "custom --batch-check format" '
+ echo "$type $sha1" >expect &&
+ echo $sha1 | git cat-file --batch-check="%(objecttype) %(objectname)" >actual &&
+ test_cmp expect actual
+ '
}
hello_content="Hello World"
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* Re: [PATCH 06/10] cat-file: add --batch-check=<format>
2013-07-10 11:45 ` [PATCH 06/10] cat-file: add --batch-check=<format> Jeff King
@ 2013-07-10 11:57 ` Eric Sunshine
2013-07-10 14:51 ` Ramkumar Ramachandra
1 sibling, 0 replies; 52+ messages in thread
From: Eric Sunshine @ 2013-07-10 11:57 UTC (permalink / raw)
To: Jeff King
Cc: Git List, Ramkumar Ramachandra, Duy Nguyen, Brandon Casey,
Junio C Hamano
On Wed, Jul 10, 2013 at 7:45 AM, Jeff King <peff@peff.net> wrote:
> The `cat-file --batch-check` command can be used to quickly
> get information about a large number of objects. However, it
> provides a fixed set of information.
>
> This patch adds an optional <format> option to --batch-check
> to allow a caller to specify which items they are interested
> in, and in which order to output them. This is not very
> exciting for now, since we provide the same limited set that
> you could already get. However, it opens the door to adding
> new format items in the future without breaking backwards
> compatibility (or forcing callers to pay the cost to
> calculate uninteresting items).
>
> The format atom names are chosen to match their counterparts
> in for-each-ref. Though we do not (yet) share any code with
> for-each-ref's formatter, this keeps the interface as
> consistent as possible, and may help later on if the
> implementations are unified.
>
> Signed-off-by: Jeff King <peff@peff.net>
> diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt
> index 30d585a..dd5d6e4 100644
> --- a/Documentation/git-cat-file.txt
> +++ b/Documentation/git-cat-file.txt
> @@ -78,23 +82,52 @@ each object specified on stdin:
> If <type> is specified, the raw (though uncompressed) contents of the <object>
> will be returned.
>
> -If '--batch' is specified, output of the following form is printed for each
> -object specified on stdin:
> +BATCH OUTPUT
> +------------
> +
> +You can specify the information shown for each object by using a custom
> +`<format>`. The `<format>` is copied literally to stdout for each
> +object, with placeholders of the form `%(atom)` expanded, followed by a
> +newline. The available atoms are:
> +
> +`objectname`::
> + The sha1 hash of the object.
For consistency with (d5fa1f1; The name of the hash function is
"SHA-1", not "SHA1"):
s/sha1/SHA-1/
> +
> +`objecttype`::
> + The type of the object (the same as `cat-file -t` reports).
> +
> +`objectsize`::
> + The size, in bytes, of the object (the same as `cat-file -s`
> + reports).
> +
> +If no format is specified, the default format is `%(objectname)
> +%(objecttype) %(objectsize)`.
> +
> +If `--batch` is specified, the object information is followed by the
> +object contents (consisting of `%(objectsize)` bytes), followed by a
> +newline.
^ permalink raw reply [flat|nested] 52+ messages in thread
* Re: [PATCH 06/10] cat-file: add --batch-check=<format>
2013-07-10 11:45 ` [PATCH 06/10] cat-file: add --batch-check=<format> Jeff King
2013-07-10 11:57 ` Eric Sunshine
@ 2013-07-10 14:51 ` Ramkumar Ramachandra
2013-07-11 11:24 ` Jeff King
1 sibling, 1 reply; 52+ messages in thread
From: Ramkumar Ramachandra @ 2013-07-10 14:51 UTC (permalink / raw)
To: Jeff King; +Cc: git, Duy Nguyen, Brandon Casey, Junio C Hamano
Jeff King wrote:
> +If `--batch` or `--batch-check` is given, `cat-file` will read objects
> +from stdin, one per line, and print information about them.
> +
> +You can specify the information shown for each object by using a custom
> +`<format>`. The `<format>` is copied literally to stdout for each
> +object, with placeholders of the form `%(atom)` expanded, followed by a
> +newline. The available atoms are:
> +
> +If no format is specified, the default format is `%(objectname)
> +%(objecttype) %(objectsize)`.
> +
> +If `--batch` is specified, the object information is followed by the
> +object contents (consisting of `%(objectsize)` bytes), followed by a
> +newline.
I find this slightly hideous, and would have expected an
%(objectcontents) or similar.
Perhaps --batch-check should become a non-configurable alias for
--batch="%(objectname) %(objecttype) %(objectsize)", and let --batch
default to the format "%(objectname) %(objecttype) %(objectsize)
%(objectcontents)".
I'm frankly okay with not having --pretty, and making the output in
the non-batch mode non-configurable (does anyone care?).
^ permalink raw reply [flat|nested] 52+ messages in thread
* Re: [PATCH 06/10] cat-file: add --batch-check=<format>
2013-07-10 14:51 ` Ramkumar Ramachandra
@ 2013-07-11 11:24 ` Jeff King
0 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-11 11:24 UTC (permalink / raw)
To: Ramkumar Ramachandra; +Cc: git, Duy Nguyen, Brandon Casey, Junio C Hamano
On Wed, Jul 10, 2013 at 08:21:15PM +0530, Ramkumar Ramachandra wrote:
> Jeff King wrote:
> > +If `--batch` or `--batch-check` is given, `cat-file` will read objects
> > +from stdin, one per line, and print information about them.
> > +
> > +You can specify the information shown for each object by using a custom
> > +`<format>`. The `<format>` is copied literally to stdout for each
> > +object, with placeholders of the form `%(atom)` expanded, followed by a
> > +newline. The available atoms are:
> > +
> > +If no format is specified, the default format is `%(objectname)
> > +%(objecttype) %(objectsize)`.
> > +
> > +If `--batch` is specified, the object information is followed by the
> > +object contents (consisting of `%(objectsize)` bytes), followed by a
> > +newline.
>
> I find this slightly hideous, and would have expected an
> %(objectcontents) or similar.
I looked into doing that, but it makes the code significantly more
complicated, assuming you do not want to copy the full object contents
in memory. You cannot use strbuf_expand, and you need to worry about
buffering/flushing more (you do not want to write() each individual
item, but if you are using printf(), you need to flush before using the
unbuffered streaming interface).
My thinking was to leave it until somebody actually wants it, at which
point they can do the necessary refactoring (and hopefully this would be
part of unifying it with other format-parsers).
If we were designing from scratch and this was the difference between
having "--batch-check" and "--batch", or having a single "--batch", I'd
care more about doing %(objectcontents) right away. But because we must
support the historical --batch/--batch-check distinction anyway, I don't
think this is any worse.
-Peff
^ permalink raw reply [flat|nested] 52+ messages in thread
* [PATCH 07/10] cat-file: add %(objectsize:disk) format atom
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
` (5 preceding siblings ...)
2013-07-10 11:45 ` [PATCH 06/10] cat-file: add --batch-check=<format> Jeff King
@ 2013-07-10 11:46 ` Jeff King
2013-07-10 11:48 ` [PATCH 08/10] cat-file: split --batch input lines on whitespace Jeff King
` (2 subsequent siblings)
9 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:46 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
This atom is just like %(objectsize), except that it shows
the on-disk size of the object rather than the object's true
size. In other words, it makes the "disk_size" query of
sha1_object_info_extended available via the command-line.
This can be used for rough attribution of disk usage to
particular refs, though see the caveats in the
documentation.
This patch does not include any tests, as the exact numbers
returned are volatile and subject to zlib and packing
decisions. We cannot even reliably guarantee that the
on-disk size is smaller than the object content (though in
general this should be the case for non-trivial objects).
Signed-off-by: Jeff King <peff@peff.net>
---
Documentation/git-cat-file.txt | 18 ++++++++++++++++++
builtin/cat-file.c | 6 ++++++
2 files changed, 24 insertions(+)
diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt
index dd5d6e4..06bdc43 100644
--- a/Documentation/git-cat-file.txt
+++ b/Documentation/git-cat-file.txt
@@ -106,6 +106,10 @@ newline. The available atoms are:
The size, in bytes, of the object (the same as `cat-file -s`
reports).
+`objectsize:disk`::
+ The size, in bytes, that the object takes up on disk. See the
+ note about on-disk sizes in the `CAVEATS` section below.
+
If no format is specified, the default format is `%(objectname)
%(objecttype) %(objectsize)`.
@@ -133,6 +137,20 @@ the repository, then `cat-file` will ignore any custom format and print:
<object> SP missing LF
------------
+
+CAVEATS
+-------
+
+Note that the sizes of objects on disk are reported accurately, but care
+should be taken in drawing conclusions about which refs or objects are
+responsible for disk usage. The size of a packed non-delta object may be
+much larger than the size of objects which delta against it, but the
+choice of which object is the base and which is the delta is arbitrary
+and is subject to change during a repack. Note also that multiple copies
+of an object may be present in the object database; in this case, it is
+undefined which copy's size will be reported.
+
+
GIT
---
Part of the linkgit:git[1] suite
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index b43a0c5..11fa8c0 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -118,6 +118,7 @@ struct expand_data {
unsigned char sha1[20];
enum object_type type;
unsigned long size;
+ unsigned long disk_size;
/*
* If mark_query is true, we do not expand anything, but rather
@@ -155,6 +156,11 @@ static void expand_atom(struct strbuf *sb, const char *atom, int len,
data->info.sizep = &data->size;
else
strbuf_addf(sb, "%lu", data->size);
+ } else if (is_atom("objectsize:disk", atom, len)) {
+ if (data->mark_query)
+ data->info.disk_sizep = &data->disk_size;
+ else
+ strbuf_addf(sb, "%lu", data->disk_size);
} else
die("unknown format element: %.*s", len, atom);
}
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* [PATCH 08/10] cat-file: split --batch input lines on whitespace
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
` (6 preceding siblings ...)
2013-07-10 11:46 ` [PATCH 07/10] cat-file: add %(objectsize:disk) format atom Jeff King
@ 2013-07-10 11:48 ` Jeff King
2013-07-10 15:29 ` Ramkumar Ramachandra
2013-07-10 11:50 ` [PATCH 09/10] pack-revindex: use unsigned to store number of objects Jeff King
2013-07-10 11:55 ` [PATCH 10/10] pack-revindex: radix-sort the revindex Jeff King
9 siblings, 1 reply; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:48 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
If we get an input line to --batch or --batch-check that
looks like "HEAD foo bar", we will currently feed the whole
thing to get_sha1(). This means that to use --batch-check
with `rev-list --objects`, one must pre-process the input,
like:
git rev-list --objects HEAD |
cut -d' ' -f1 |
git cat-file --batch-check
Besides being more typing and slightly less efficient to
invoke `cut`, the result loses information: we no longer
know which path each object was found at.
This patch teaches cat-file to split input lines at the
first whitespace. Everything to the left of the whitespace
is considered an object name, and everything to the right is
made available as the %(text) atom. So you can now do:
git rev-list --objects HEAD |
git cat-file --batch-check='%(objectsize) %(text)'
to collect object sizes at particular paths.
Even if %(text) is not used, we always do the whitespace
split (which means you can simply eliminate the `cut`
command from the first example above).
This whitespace split is backwards compatible for any
reasonable input. Object names cannot contain spaces, so any
input with spaces would have resulted in a "missing" line.
The only input hurt is if somebody really expected input of
the form "HEAD is a fine-looking ref!" to fail; it will now
parse HEAD, and make "is a fine-looking ref!" available as
%(text).
Signed-off-by: Jeff King <peff@peff.net>
---
I have often found myself cross-referencing object sha1s with rev-list
--objects output in order to find out which paths are bloating
repository size. You can do it manually, or by post-processing the
output of cat-file with "join", but it is way more efficient to simply
not lose the information in the first place.
Documentation/git-cat-file.txt | 10 ++++++++--
builtin/cat-file.c | 20 +++++++++++++++++++-
t/t1006-cat-file.sh | 7 +++++++
3 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt
index 06bdc43..6b0b2de 100644
--- a/Documentation/git-cat-file.txt
+++ b/Documentation/git-cat-file.txt
@@ -88,8 +88,10 @@ from stdin, one per line, and print information about them.
If `--batch` or `--batch-check` is given, `cat-file` will read objects
from stdin, one per line, and print information about them.
-Each line is considered as a whole object name, and is parsed as if
-given to linkgit:git-rev-parse[1].
+Each line is split at the first whitespace boundary. All characters
+before that whitespace are considered as a whole object name, and are
+parsed as if given to linkgit:git-rev-parse[1]. Characters after that
+whitespace can be accessed using the `%(text)` atom (see below).
You can specify the information shown for each object by using a custom
`<format>`. The `<format>` is copied literally to stdout for each
@@ -110,6 +112,10 @@ newline. The available atoms are:
The size, in bytes, that the object takes up on disk. See the
note about on-disk sizes in the `CAVEATS` section below.
+`text`::
+ The text (if any) found after the first run of whitespace on the
+ input line.
+
If no format is specified, the default format is `%(objectname)
%(objecttype) %(objectsize)`.
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 11fa8c0..36f8159 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -119,6 +119,7 @@ struct expand_data {
enum object_type type;
unsigned long size;
unsigned long disk_size;
+ const char *text;
/*
* If mark_query is true, we do not expand anything, but rather
@@ -161,6 +162,9 @@ static void expand_atom(struct strbuf *sb, const char *atom, int len,
data->info.disk_sizep = &data->disk_size;
else
strbuf_addf(sb, "%lu", data->disk_size);
+ } else if (is_atom("text", atom, len)) {
+ if (!data->mark_query && data->text)
+ strbuf_addstr(sb, data->text);
} else
die("unknown format element: %.*s", len, atom);
}
@@ -263,7 +267,21 @@ static int batch_objects(struct batch_options *opt)
data.mark_query = 0;
while (strbuf_getline(&buf, stdin, '\n') != EOF) {
- int error = batch_one_object(buf.buf, opt, &data);
+ char *p;
+ int error;
+
+ /*
+ * Split at first whitespace, tying off the beginning of the
+ * string and saving the remainder (or NULL) in data.text.
+ */
+ p = strpbrk(buf.buf, " \t");
+ if (p) {
+ while (*p && strchr(" \t", *p))
+ *p++ = '\0';
+ }
+ data.text = p;
+
+ error = batch_one_object(buf.buf, opt, &data);
if (error)
return error;
}
diff --git a/t/t1006-cat-file.sh b/t/t1006-cat-file.sh
index 4e911fb..315da6f 100755
--- a/t/t1006-cat-file.sh
+++ b/t/t1006-cat-file.sh
@@ -78,6 +78,13 @@ $content"
echo $sha1 | git cat-file --batch-check="%(objecttype) %(objectname)" >actual &&
test_cmp expect actual
'
+
+ test_expect_success '--batch-check with %(text)' '
+ echo "$type this is some extra content" >expect &&
+ echo "$sha1 this is some extra content" |
+ git cat-file --batch-check="%(objecttype) %(text)" >actual &&
+ test_cmp expect actual
+ '
}
hello_content="Hello World"
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* Re: [PATCH 08/10] cat-file: split --batch input lines on whitespace
2013-07-10 11:48 ` [PATCH 08/10] cat-file: split --batch input lines on whitespace Jeff King
@ 2013-07-10 15:29 ` Ramkumar Ramachandra
2013-07-11 11:36 ` Jeff King
0 siblings, 1 reply; 52+ messages in thread
From: Ramkumar Ramachandra @ 2013-07-10 15:29 UTC (permalink / raw)
To: Jeff King; +Cc: git, Duy Nguyen, Brandon Casey, Junio C Hamano
Jeff King wrote:
> git rev-list --objects HEAD |
> git cat-file --batch-check='%(objectsize) %(text)'
If anything, I would have expected %(rest), not %(text). This atom is
specific to commands that accept input via stdin (i.e. not log, f-e-r,
branch, or anything else I can think of).
Also, this makes me wonder if %(field:0), %(field:1), and probably
%(field:@) are good ideas. Even if we go down that road, I don't
think %(rest) is a problem per-se.
^ permalink raw reply [flat|nested] 52+ messages in thread
* Re: [PATCH 08/10] cat-file: split --batch input lines on whitespace
2013-07-10 15:29 ` Ramkumar Ramachandra
@ 2013-07-11 11:36 ` Jeff King
2013-07-11 17:42 ` Junio C Hamano
2013-07-11 20:45 ` [PATCHv3 " Jeff King
0 siblings, 2 replies; 52+ messages in thread
From: Jeff King @ 2013-07-11 11:36 UTC (permalink / raw)
To: Ramkumar Ramachandra; +Cc: git, Duy Nguyen, Brandon Casey, Junio C Hamano
On Wed, Jul 10, 2013 at 08:59:51PM +0530, Ramkumar Ramachandra wrote:
> Jeff King wrote:
> > git rev-list --objects HEAD |
> > git cat-file --batch-check='%(objectsize) %(text)'
>
> If anything, I would have expected %(rest), not %(text). This atom is
> specific to commands that accept input via stdin (i.e. not log, f-e-r,
> branch, or anything else I can think of).
I considered %(rest) as well. I don't have a strong preference.
> Also, this makes me wonder if %(field:0), %(field:1), and probably
> %(field:@) are good ideas. Even if we go down that road, I don't
> think %(rest) is a problem per-se.
I don't have a use for them, and even if we want to add them later, you
would still want to support %(rest) for when the user wants to take the
rest of the line verbatim without caring about field-splitting.
To be honest, I do not see %(field) as all that useful. If you want to
go about rearranging or selecting fields, that is what "cut" (or "awk")
is for. Having fields means you need to specify field separators, and
how runs of separators are treated. Other tools already do this.
So it would (at best) save you from an extra cut invocation, whereas
%(rest) gets you out of doing something much more difficult. Without it,
information is lost from your pipeline (so you have to have tee to a
separate pipeline, and then reassemble the pieces).
-Peff
^ permalink raw reply [flat|nested] 52+ messages in thread
* Re: [PATCH 08/10] cat-file: split --batch input lines on whitespace
2013-07-11 11:36 ` Jeff King
@ 2013-07-11 17:42 ` Junio C Hamano
2013-07-11 20:45 ` [PATCHv3 " Jeff King
1 sibling, 0 replies; 52+ messages in thread
From: Junio C Hamano @ 2013-07-11 17:42 UTC (permalink / raw)
To: Jeff King; +Cc: Ramkumar Ramachandra, git, Duy Nguyen, Brandon Casey
Jeff King <peff@peff.net> writes:
> On Wed, Jul 10, 2013 at 08:59:51PM +0530, Ramkumar Ramachandra wrote:
>
>> Jeff King wrote:
>> > git rev-list --objects HEAD |
>> > git cat-file --batch-check='%(objectsize) %(text)'
>>
>> If anything, I would have expected %(rest), not %(text). This atom is
>> specific to commands that accept input via stdin (i.e. not log, f-e-r,
>> branch, or anything else I can think of).
>
> I considered %(rest) as well. I don't have a strong preference.
>
>> Also, this makes me wonder if %(field:0), %(field:1), and probably
>> %(field:@) are good ideas. Even if we go down that road, I don't
>> think %(rest) is a problem per-se.
>
> I don't have a use for them, and even if we want to add them later, you
> would still want to support %(rest) for when the user wants to take the
> rest of the line verbatim without caring about field-splitting.
>
> To be honest, I do not see %(field) as all that useful. If you want to
> go about rearranging or selecting fields, that is what "cut" (or "awk")
> is for. Having fields means you need to specify field separators, and
> how runs of separators are treated. Other tools already do this.
Very true, and more importantly, you cannot still say "my input
object name is at field N, not at the beginning", so that makes it
doubly dubious how %(field:$n) would be any useful.
> So it would (at best) save you from an extra cut invocation, whereas
> %(rest) gets you out of doing something much more difficult. Without it,
> information is lost from your pipeline (so you have to have tee to a
> separate pipeline, and then reassemble the pieces).
^ permalink raw reply [flat|nested] 52+ messages in thread
* [PATCHv3 08/10] cat-file: split --batch input lines on whitespace
2013-07-11 11:36 ` Jeff King
2013-07-11 17:42 ` Junio C Hamano
@ 2013-07-11 20:45 ` Jeff King
1 sibling, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-11 20:45 UTC (permalink / raw)
To: Ramkumar Ramachandra; +Cc: git, Duy Nguyen, Brandon Casey, Junio C Hamano
On Thu, Jul 11, 2013 at 07:36:53AM -0400, Jeff King wrote:
> On Wed, Jul 10, 2013 at 08:59:51PM +0530, Ramkumar Ramachandra wrote:
>
> > Jeff King wrote:
> > > git rev-list --objects HEAD |
> > > git cat-file --batch-check='%(objectsize) %(text)'
> >
> > If anything, I would have expected %(rest), not %(text). This atom is
> > specific to commands that accept input via stdin (i.e. not log, f-e-r,
> > branch, or anything else I can think of).
>
> I considered %(rest) as well. I don't have a strong preference.
Here is the patch re-rolled with s/text/rest/.
Junio, that makes this and 10/10 the only "v3" updates. Let me know if
it would be simpler to just resend the whole series.
-- >8 --
Subject: [PATCH] cat-file: split --batch input lines on whitespace
If we get an input line to --batch or --batch-check that
looks like "HEAD foo bar", we will currently feed the whole
thing to get_sha1(). This means that to use --batch-check
with `rev-list --objects`, one must pre-process the input,
like:
git rev-list --objects HEAD |
cut -d' ' -f1 |
git cat-file --batch-check
Besides being more typing and slightly less efficient to
invoke `cut`, the result loses information: we no longer
know which path each object was found at.
This patch teaches cat-file to split input lines at the
first whitespace. Everything to the left of the whitespace
is considered an object name, and everything to the right is
made available as the %(rest) atom. So you can now do:
git rev-list --objects HEAD |
git cat-file --batch-check='%(objectsize) %(rest)'
to collect object sizes at particular paths.
Even if %(rest) is not used, we always do the whitespace
split (which means you can simply eliminate the `cut`
command from the first example above).
This whitespace split is backwards compatible for any
reasonable input. Object names cannot contain spaces, so any
input with spaces would have resulted in a "missing" line.
The only input hurt is if somebody really expected input of
the form "HEAD is a fine-looking ref!" to fail; it will now
parse HEAD, and make "is a fine-looking ref!" available as
%(rest).
Signed-off-by: Jeff King <peff@peff.net>
---
Documentation/git-cat-file.txt | 10 ++++++++--
builtin/cat-file.c | 20 +++++++++++++++++++-
t/t1006-cat-file.sh | 7 +++++++
3 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/Documentation/git-cat-file.txt b/Documentation/git-cat-file.txt
index 06bdc43..68691d4 100644
--- a/Documentation/git-cat-file.txt
+++ b/Documentation/git-cat-file.txt
@@ -88,8 +88,10 @@ from stdin, one per line, and print information about them.
If `--batch` or `--batch-check` is given, `cat-file` will read objects
from stdin, one per line, and print information about them.
-Each line is considered as a whole object name, and is parsed as if
-given to linkgit:git-rev-parse[1].
+Each line is split at the first whitespace boundary. All characters
+before that whitespace are considered as a whole object name, and are
+parsed as if given to linkgit:git-rev-parse[1]. Characters after that
+whitespace can be accessed using the `%(rest)` atom (see below).
You can specify the information shown for each object by using a custom
`<format>`. The `<format>` is copied literally to stdout for each
@@ -110,6 +112,10 @@ newline. The available atoms are:
The size, in bytes, that the object takes up on disk. See the
note about on-disk sizes in the `CAVEATS` section below.
+`rest`::
+ The text (if any) found after the first run of whitespace on the
+ input line (i.e., the "rest" of the line).
+
If no format is specified, the default format is `%(objectname)
%(objecttype) %(objectsize)`.
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 11fa8c0..0e64b41 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -119,6 +119,7 @@ struct expand_data {
enum object_type type;
unsigned long size;
unsigned long disk_size;
+ const char *rest;
/*
* If mark_query is true, we do not expand anything, but rather
@@ -161,6 +162,9 @@ static void expand_atom(struct strbuf *sb, const char *atom, int len,
data->info.disk_sizep = &data->disk_size;
else
strbuf_addf(sb, "%lu", data->disk_size);
+ } else if (is_atom("rest", atom, len)) {
+ if (!data->mark_query && data->rest)
+ strbuf_addstr(sb, data->rest);
} else
die("unknown format element: %.*s", len, atom);
}
@@ -263,7 +267,21 @@ static int batch_objects(struct batch_options *opt)
data.mark_query = 0;
while (strbuf_getline(&buf, stdin, '\n') != EOF) {
- int error = batch_one_object(buf.buf, opt, &data);
+ char *p;
+ int error;
+
+ /*
+ * Split at first whitespace, tying off the beginning of the
+ * string and saving the remainder (or NULL) in data.rest.
+ */
+ p = strpbrk(buf.buf, " \t");
+ if (p) {
+ while (*p && strchr(" \t", *p))
+ *p++ = '\0';
+ }
+ data.rest = p;
+
+ error = batch_one_object(buf.buf, opt, &data);
if (error)
return error;
}
diff --git a/t/t1006-cat-file.sh b/t/t1006-cat-file.sh
index 4e911fb..d499d02 100755
--- a/t/t1006-cat-file.sh
+++ b/t/t1006-cat-file.sh
@@ -78,6 +78,13 @@ $content"
echo $sha1 | git cat-file --batch-check="%(objecttype) %(objectname)" >actual &&
test_cmp expect actual
'
+
+ test_expect_success '--batch-check with %(rest)' '
+ echo "$type this is some extra content" >expect &&
+ echo "$sha1 this is some extra content" |
+ git cat-file --batch-check="%(objecttype) %(rest)" >actual &&
+ test_cmp expect actual
+ '
}
hello_content="Hello World"
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* [PATCH 09/10] pack-revindex: use unsigned to store number of objects
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
` (7 preceding siblings ...)
2013-07-10 11:48 ` [PATCH 08/10] cat-file: split --batch input lines on whitespace Jeff King
@ 2013-07-10 11:50 ` Jeff King
2013-07-10 11:55 ` [PATCH 10/10] pack-revindex: radix-sort the revindex Jeff King
9 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:50 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
A packfile may have up to 2^32-1 objects in it, so the
"right" data type to use is uint32_t. We currently use a
signed int, which means that we may behave incorrectly for
packfiles with more than 2^31-1 objects on 32-bit systems.
Nobody has noticed because having 2^31 objects is pretty
insane. The linux.git repo has on the order of 2^22 objects,
which is hundreds of times smaller than necessary to trigger
the bug.
Let's bump this up to an "unsigned". On 32-bit systems, this
gives us the correct data-type, and on 64-bit systems, it is
probably more efficient to use the native "unsigned" than a
true uint32_t.
While we're at it, we can fix the binary search not to
overflow in such a case if our unsigned is 32 bits.
Signed-off-by: Jeff King <peff@peff.net>
---
I didn't look farther in the pack code to see if we have other
problematic instances. So there may be others lurking, but these ones
were close to the area I was working in.
pack-revindex.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/pack-revindex.c b/pack-revindex.c
index 77a0465..1aa9754 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -72,8 +72,8 @@ static void create_pack_revindex(struct pack_revindex *rix)
static void create_pack_revindex(struct pack_revindex *rix)
{
struct packed_git *p = rix->p;
- int num_ent = p->num_objects;
- int i;
+ unsigned num_ent = p->num_objects;
+ unsigned i;
const char *index = p->index_data;
rix->revindex = xmalloc(sizeof(*rix->revindex) * (num_ent + 1));
@@ -114,7 +114,7 @@ struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
{
int num;
- int lo, hi;
+ unsigned lo, hi;
struct pack_revindex *rix;
struct revindex_entry *revindex;
@@ -132,7 +132,7 @@ struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
lo = 0;
hi = p->num_objects + 1;
do {
- int mi = (lo + hi) / 2;
+ unsigned mi = lo + (hi - lo) / 2;
if (revindex[mi].offset == ofs) {
return revindex + mi;
} else if (ofs < revindex[mi].offset)
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* [PATCH 10/10] pack-revindex: radix-sort the revindex
2013-07-10 11:34 ` [PATCHv2 00/10] cat-file formats/on-disk sizes Jeff King
` (8 preceding siblings ...)
2013-07-10 11:50 ` [PATCH 09/10] pack-revindex: use unsigned to store number of objects Jeff King
@ 2013-07-10 11:55 ` Jeff King
2013-07-10 12:00 ` Jeff King
` (3 more replies)
9 siblings, 4 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 11:55 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
The pack revindex stores the offsets of the objects in the
pack in sorted order, allowing us to easily find the on-disk
size of each object. To compute it, we populate an array
with the offsets from the sha1-sorted idx file, and then use
qsort to order it by offsets.
That does O(n log n) offset comparisons, and profiling shows
that we spend most of our time in cmp_offset. However, since
we are sorting on a simple off_t, we can use numeric sorts
that perform better. A radix sort can run in O(k*n), where k
is the number of "digits" in our number. For a 64-bit off_t,
using 16-bit "digits" gives us k=4.
On the linux.git repo, with about 3M objects to sort, this
yields a 400% speedup. Here are the best-of-five numbers for
running "echo HEAD | git cat-file --batch-disk-size", which
is dominated by time spent building the pack revindex:
before after
real 0m0.834s 0m0.204s
user 0m0.788s 0m0.164s
sys 0m0.040s 0m0.036s
On a smaller repo, the radix sort would not be
as impressive (and could even be worse), as we are trading
the log(n) factor for the k=4 of the radix sort. However,
even on git.git, with 173K objects, it shows some
improvement:
before after
real 0m0.046s 0m0.017s
user 0m0.036s 0m0.012s
sys 0m0.008s 0m0.000s
Signed-off-by: Jeff King <peff@peff.net>
---
I changed a few things from the original, including:
1. We take an "unsigned" number of objects to match the fix in the
last patch.
2. The 16-bit "digit" size is factored out to a single place, which
avoids magic numbers and repeating ourselves.
3. The "digits" variable is renamed to "bits", which is more accurate.
4. The outer loop condition uses the simpler "while (max >> bits)".
5. We use memcpy instead of an open-coded loop to copy the whole array
at the end. The individual bucket-assignment is still done by
struct assignment. I haven't timed if memcpy would make a
difference there.
6. The 64K*sizeof(int) "pos" array is now heap-allocated, in case
there are platforms with a small stack.
I re-ran my timings to make sure none of the above impacted them; it
turned out the same.
pack-revindex.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 79 insertions(+), 5 deletions(-)
diff --git a/pack-revindex.c b/pack-revindex.c
index 1aa9754..9365bc2 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -59,11 +59,85 @@ static int cmp_offset(const void *a_, const void *b_)
/* revindex elements are lazily initialized */
}
-static int cmp_offset(const void *a_, const void *b_)
+/*
+ * This is a least-significant-digit radix sort.
+ */
+static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
{
- const struct revindex_entry *a = a_;
- const struct revindex_entry *b = b_;
- return (a->offset < b->offset) ? -1 : (a->offset > b->offset) ? 1 : 0;
+ /*
+ * We use a "digit" size of 16 bits. That keeps our memory
+ * usage reasonable, and we can generally (for a 4G or smaller
+ * packfile) quit after two rounds of radix-sorting.
+ */
+#define DIGIT_SIZE (16)
+#define BUCKETS (1 << DIGIT_SIZE)
+ /*
+ * We want to know the bucket that a[i] will go into when we are using
+ * the digit that is N bits from the (least significant) end.
+ */
+#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))
+
+ /*
+ * We need O(n) temporary storage, so we sort back and forth between
+ * the real array and our tmp storage. To keep them straight, we always
+ * sort from "a" into buckets in "b".
+ */
+ struct revindex_entry *tmp = xcalloc(n, sizeof(*tmp));
+ struct revindex_entry *a = entries, *b = tmp;
+ int bits = 0;
+ unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
+
+ while (max >> bits) {
+ struct revindex_entry *swap;
+ int i;
+
+ memset(pos, 0, BUCKETS * sizeof(*pos));
+
+ /*
+ * We want pos[i] to store the index of the last element that
+ * will go in bucket "i" (actually one past the last element).
+ * To do this, we first count the items that will go in each
+ * bucket, which gives us a relative offset from the last
+ * bucket. We can then cumulatively add the index from the
+ * previous bucket to get the true index.
+ */
+ for (i = 0; i < n; i++)
+ pos[BUCKET_FOR(a, i, bits)]++;
+ for (i = 1; i < BUCKETS; i++)
+ pos[i] += pos[i-1];
+
+ /*
+ * Now we can drop the elements into their correct buckets (in
+ * our temporary array). We iterate the pos counter backwards
+ * to avoid using an extra index to count up. And since we are
+ * going backwards there, we must also go backwards through the
+ * array itself, to keep the sort stable.
+ */
+ for (i = n - 1; i >= 0; i--)
+ b[--pos[BUCKET_FOR(a, i, bits)]] = a[i];
+
+ /*
+ * Now "b" contains the most sorted list, so we swap "a" and
+ * "b" for the next iteration.
+ */
+ swap = a;
+ a = b;
+ b = swap;
+
+ /* And bump our bits for the next round. */
+ bits += DIGIT_SIZE;
+ }
+
+ /*
+ * If we ended with our data in the original array, great. If not,
+ * we have to move it back from the temporary storage.
+ */
+ if (a != entries)
+ memcpy(entries, tmp, n * sizeof(*entries));
+ free(tmp);
+ free(pos);
+
+#undef BUCKET_FOR
}
/*
@@ -108,7 +182,7 @@ static void create_pack_revindex(struct pack_revindex *rix)
*/
rix->revindex[num_ent].offset = p->pack_size - 20;
rix->revindex[num_ent].nr = -1;
- qsort(rix->revindex, num_ent, sizeof(*rix->revindex), cmp_offset);
+ sort_revindex(rix->revindex, num_ent, p->pack_size);
}
struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* Re: [PATCH 10/10] pack-revindex: radix-sort the revindex
2013-07-10 11:55 ` [PATCH 10/10] pack-revindex: radix-sort the revindex Jeff King
@ 2013-07-10 12:00 ` Jeff King
2013-07-10 13:17 ` Ramkumar Ramachandra
` (2 subsequent siblings)
3 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-10 12:00 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
On Wed, Jul 10, 2013 at 07:55:57AM -0400, Jeff King wrote:
> 5. We use memcpy instead of an open-coded loop to copy the whole array
> at the end. The individual bucket-assignment is still done by
> struct assignment. I haven't timed if memcpy would make a
> difference there.
I just timed this, and I can't measure any difference. I think the
struct assignment is the more readable option, and I do not think any
compilers should have trouble with it. But if they do, we can switch it
for a memcpy.
-Peff
^ permalink raw reply [flat|nested] 52+ messages in thread
* Re: [PATCH 10/10] pack-revindex: radix-sort the revindex
2013-07-10 11:55 ` [PATCH 10/10] pack-revindex: radix-sort the revindex Jeff King
2013-07-10 12:00 ` Jeff King
@ 2013-07-10 13:17 ` Ramkumar Ramachandra
2013-07-11 11:03 ` Jeff King
2013-07-10 17:10 ` Brandon Casey
2013-07-11 12:16 ` [PATCHv3 " Jeff King
3 siblings, 1 reply; 52+ messages in thread
From: Ramkumar Ramachandra @ 2013-07-10 13:17 UTC (permalink / raw)
To: Jeff King; +Cc: git, Duy Nguyen, Brandon Casey, Junio C Hamano
Jeff King wrote:
> That does O(n log n) offset comparisons, and profiling shows
> that we spend most of our time in cmp_offset. However, since
> we are sorting on a simple off_t, we can use numeric sorts
> that perform better. A radix sort can run in O(k*n), where k
> is the number of "digits" in our number. For a 64-bit off_t,
> using 16-bit "digits" gives us k=4.
Wait, isn't off_t a signed data type? Did you account for that in
your algorithm?
> On the linux.git repo, with about 3M objects to sort, this
> yields a 400% speedup. Here are the best-of-five numbers for
> running "echo HEAD | git cat-file --batch-disk-size", which
> is dominated by time spent building the pack revindex:
Okay.
> diff --git a/pack-revindex.c b/pack-revindex.c
> index 1aa9754..9365bc2 100644
> --- a/pack-revindex.c
> +++ b/pack-revindex.c
> @@ -59,11 +59,85 @@ static int cmp_offset(const void *a_, const void *b_)
> /* revindex elements are lazily initialized */
> }
>
> -static int cmp_offset(const void *a_, const void *b_)
> +/*
> + * This is a least-significant-digit radix sort.
> + */
Any particular reason for choosing LSD, and not MSD?
> +#define DIGIT_SIZE (16)
> +#define BUCKETS (1 << DIGIT_SIZE)
Okay, NUMBER_OF_BUCKETS = 2^RADIX, and you choose a hex radix. Is
off_t guaranteed to be fixed-length though? I thought only the ones
in stdint.h were guaranteed to be fixed-length?
> + /*
> + * We want to know the bucket that a[i] will go into when we are using
> + * the digit that is N bits from the (least significant) end.
> + */
> +#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))
Ouch! This is unreadable. Just write an inline function instead? A
% would've been easier on the eyes, but you chose base-16.
> + /*
> + * We need O(n) temporary storage, so we sort back and forth between
> + * the real array and our tmp storage. To keep them straight, we always
> + * sort from "a" into buckets in "b".
> + */
> + struct revindex_entry *tmp = xcalloc(n, sizeof(*tmp));
Shouldn't this be sizeof (struct revindex_entry), since tmp hasn't
been declared yet? Also, s/n/revindex_nr/, and something more
appropriate for tmp?
> + struct revindex_entry *a = entries, *b = tmp;
It's starting to look like you have something against descriptive names ;)
> + int bits = 0;
> + unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
sizeof(unsigned int), for clarity, if not anything else. You picked
malloc over calloc here, because you didn't want to incur the extra
cost of zero-initializing the memory? Also, pos is the actual buckets
array, I presume (hence unsigned, because there can't be a negative
number of keys in any bucket)?
> + while (max >> bits) {
No clue what max is. Looked at the caller and figured out that it's
the pack-size, although I'm still clueless about why it's appearing
here.
> + struct revindex_entry *swap;
> + int i;
> +
> + memset(pos, 0, BUCKETS * sizeof(*pos));
Ah, so that's why you used malloc there. Wait, shouldn't this be
memset(pos, 0, sizeof(*pos))?
> + for (i = 0; i < n; i++)
> + pos[BUCKET_FOR(a, i, bits)]++;
Okay, so you know how many numbers are in each bucket.
> + for (i = 1; i < BUCKETS; i++)
> + pos[i] += pos[i-1];
Cumulative sums; right.
> + for (i = n - 1; i >= 0; i--)
> + b[--pos[BUCKET_FOR(a, i, bits)]] = a[i];
Classical queue. You could've gone for something more complex, but I
don't think it would have been worth the extra complexity.
> + swap = a;
> + a = b;
> + b = swap;
Wait a minute: why don't you just throw away b? You're going to
rebuild the queue in the next iteration anyway, no? a is what is
being sorted.
> + /* And bump our bits for the next round. */
> + bits += DIGIT_SIZE;
I'd have gone for a nice for-loop.
> + /*
> + * If we ended with our data in the original array, great. If not,
> + * we have to move it back from the temporary storage.
> + */
> + if (a != entries)
> + memcpy(entries, tmp, n * sizeof(*entries));
How could a be different from entries? It has no memory allocated for
itself, no? Why did you even create a, and not directly operate on
entries?
> + free(tmp);
> + free(pos);
Overall, I found it quite confusing :(
> +#undef BUCKET_FOR
Why not DIGIT_SIZE and BUCKETS too, while at it?
^ permalink raw reply [flat|nested] 52+ messages in thread
* Re: [PATCH 10/10] pack-revindex: radix-sort the revindex
2013-07-10 13:17 ` Ramkumar Ramachandra
@ 2013-07-11 11:03 ` Jeff King
0 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-11 11:03 UTC (permalink / raw)
To: Ramkumar Ramachandra; +Cc: git, Duy Nguyen, Brandon Casey, Junio C Hamano
On Wed, Jul 10, 2013 at 06:47:49PM +0530, Ramkumar Ramachandra wrote:
> > For a 64-bit off_t, using 16-bit "digits" gives us k=4.
>
> Wait, isn't off_t a signed data type? Did you account for that in
> your algorithm?
It is signed, but the values we are storing in the revindex are all
positive file offsets. Right-shifting a positive signed type is
explicitly allowed in C.
> > -static int cmp_offset(const void *a_, const void *b_)
> > +/*
> > + * This is a least-significant-digit radix sort.
> > + */
>
> Any particular reason for choosing LSD, and not MSD?
Simplicity. An MSD implementation should have the same algorithmic
complexity and in theory, one can do MSD in-place. I'm happy enough with
the speedup here, but if you want to take a stab at beating my times
with MSD, please feel free.
The other "usual" downside of MSD is that it is typically not stable,
but we don't care about that here. We know that our sort keys are
unique.
> > +#define DIGIT_SIZE (16)
> > +#define BUCKETS (1 << DIGIT_SIZE)
>
> Okay, NUMBER_OF_BUCKETS = 2^RADIX, and you choose a hex radix. Is
> off_t guaranteed to be fixed-length though? I thought only the ones
> in stdint.h were guaranteed to be fixed-length?
I'm not sure what you mean by fixed-length. If you mean does it have the
same size on every platform, then no. It will typically be 32-bit on
platforms without largefile support, and 64-bit elsewhere. But it
shouldn't matter. We'll first sort the entries by the lower 16 bits, and
then if we have more bits, by the next 16 bits, and so on. We quit when
the maximum value to sort (which we know ahead of time from the size of
the packfile) is smaller than the 16-bits we are on. So we don't need to
know the exact size of off_t, only the maximum value in our list (which
must, by definition, be smaller than what can be represented by off_t).
> > + /*
> > + * We want to know the bucket that a[i] will go into when we are using
> > + * the digit that is N bits from the (least significant) end.
> > + */
> > +#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))
>
> Ouch! This is unreadable. Just write an inline function instead? A
> % would've been easier on the eyes, but you chose base-16.
I specifically avoided an inline function because they are subject to
compiler settings. This isn't just "it would be a bit faster if this got
inlined, and OK otherwise" but "this would be horribly slow if not
inlined".
I'm also not sure that
static inline unsigned bucket_for(const struct revindex *a,
unsigned i,
unsigned bits)
{
return a[i].offset >> bits & (BUCKETS-1);
}
is actually any more readable.
I'm not sure what you mean by base-16. No matter the radix digit size,
as long as it is an integral number of bits, we can mask it off, which
is more efficient than modulo. A good compiler should see that it
is a constant and convert it to a bit-mask, but I'm not sure I agree
that modular arithmetic is more readable. This is fundamentally a
bit-twiddling operation, as we are shifting and masking.
I tried to explain it in the comment; suggestions to improve that are
welcome.
> > + /*
> > + * We need O(n) temporary storage, so we sort back and forth between
> > + * the real array and our tmp storage. To keep them straight, we always
> > + * sort from "a" into buckets in "b".
> > + */
> > + struct revindex_entry *tmp = xcalloc(n, sizeof(*tmp));
>
> Shouldn't this be sizeof (struct revindex_entry), since tmp hasn't
> been declared yet?
No, the variable is declared (but uninitialized) in its initializer.
Despite its syntax, sizeof() is not a function and does not care about
the state of the variable, only its type.
> Also, s/n/revindex_nr/, and something more appropriate for tmp?
What name would you suggest would be more appropriate for tmp?
> > + int bits = 0;
> > + unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
>
> sizeof(unsigned int), for clarity, if not anything else.
I disagree; in general, I prefer using sizeof(*var) rather than
sizeof(type), because it avoids repeating ourselves, and there is no
compile-time check that you have gotten it right.
In the initializer it is less important, because the type is right
there. But when you are later doing:
memset(pos, 0, BUCKETS * sizeof(*pos));
this is much more robust. If somebody changes the type of pos, the
memset line does not need to be changed. If you used sizeof(unsigned), then
the code is now buggy (and the compiler cannot notice).
> You picked malloc over calloc here, because you didn't want to incur
> the extra cost of zero-initializing the memory?
Yes. We have to zero-initialize in each loop, so there is no point
spending the extra effort on calloc.
We could also xcalloc inside each loop iteration, but since we need the
same-size allocation each time, I hoisted the malloc out of the loop.
> Also, pos is the actual buckets array, I presume (hence unsigned,
> because there can't be a negative number of keys in any bucket)?
Exactly. I called it "pos" rather than "buckets" because the goal is to
get the start-position of each bucket (as explained in the comment in
the loop).
> > + while (max >> bits) {
>
> No clue what max is. Looked at the caller and figured out that it's
> the pack-size, although I'm still clueless about why it's appearing
> here.
It's larger than the largest sort key in the array. On an LSD radix
sort, we can stop sorting when we are looking at a radix digit whose
value is larger than the max, because we know all of the entries will
simply have "0" in that digit.
So even if off_t is 64-bit, we can quit after the 32nd bit (i.e., k=2)
if the packfile is smaller than 4G.
An MSD radix sort could do the same trick, but would obviously skip the
zero digits at the beginning rather than the end.
> > + struct revindex_entry *swap;
> > + int i;
> > +
> > + memset(pos, 0, BUCKETS * sizeof(*pos));
>
> Ah, so that's why you used malloc there. Wait, shouldn't this be
> memset(pos, 0, sizeof(*pos))?
No, that would zero only the first entry of the array. We allocated
BUCKETS * sizeof(*pos) bytes, and we want to zero them all.
> > + swap = a;
> > + a = b;
> > + b = swap;
>
> Wait a minute: why don't you just throw away b? You're going to
> rebuild the queue in the next iteration anyway, no? a is what is
> being sorted.
For each iteration, we need to sort into temporary storage. So you can
do it like:
1. sort entries into tmp
2. copy tmp back into entries
3. bump radix digit and goto 1
But you can eliminate the copy in step 2 if you instead go back and
forth, like:
1. sort entries into tmp
2. bump radix digit
3. sort tmp into entries
...etc
To do that in a loop, we need an alias for "the thing we are sorting
from" and "the thing we are sorting to". Hence the "a" and "b" pointers.
Perhaps these comments make more sense now:
+ /*
+ * We need O(n) temporary storage, so we sort back and forth between
+ * the real array and our tmp storage. To keep them straight, we always
+ * sort from "a" into buckets in "b".
+ */
+ struct revindex_entry *tmp = xcalloc(n, sizeof(*tmp));
+ struct revindex_entry *a = entries, *b = tmp;
[...]
+ /*
+ * Now "b" contains the most sorted list, so we swap "a" and
+ * "b" for the next iteration.
+ */
> > + /* And bump our bits for the next round. */
> > + bits += DIGIT_SIZE;
>
> I'd have gone for a nice for-loop.
Yeah, that would look like:
for (bits = 0; max >> bits; bits += DIGIT_SIZE) {
...
}
I don't really find one more readable than the other.
> > + /*
> > + * If we ended with our data in the original array, great. If not,
> > + * we have to move it back from the temporary storage.
> > + */
> > + if (a != entries)
> > + memcpy(entries, tmp, n * sizeof(*entries));
>
> How could a be different from entries? It has no memory allocated for
> itself, no? Why did you even create a, and not directly operate on
> entries?
See the back-and-forth explanation above.
> > + free(tmp);
> > + free(pos);
>
> Overall, I found it quite confusing :(
Clearly. It was confusing to write (especially because there are a
number of optimizations, and because radix sort is not well known, at
least to me), which is why I tried to comment profusely. It seems quite
a few of them didn't help, as the answers to your questions were there.
If you have suggestions for improvement to the comments, I'm all ears.
> > +#undef BUCKET_FOR
>
> Why not DIGIT_SIZE and BUCKETS too, while at it?
I forgot. I added them later (they were originally magic numbers in the
code). Will add.
-Peff
^ permalink raw reply [flat|nested] 52+ messages in thread
* Re: [PATCH 10/10] pack-revindex: radix-sort the revindex
2013-07-10 11:55 ` [PATCH 10/10] pack-revindex: radix-sort the revindex Jeff King
2013-07-10 12:00 ` Jeff King
2013-07-10 13:17 ` Ramkumar Ramachandra
@ 2013-07-10 17:10 ` Brandon Casey
2013-07-11 11:17 ` Jeff King
2013-07-11 12:16 ` [PATCHv3 " Jeff King
3 siblings, 1 reply; 52+ messages in thread
From: Brandon Casey @ 2013-07-10 17:10 UTC (permalink / raw)
To: Jeff King; +Cc: git, Ramkumar Ramachandra, Duy Nguyen, Junio C Hamano
On Wed, Jul 10, 2013 at 4:55 AM, Jeff King <peff@peff.net> wrote:
> The pack revindex stores the offsets of the objects in the
> pack in sorted order, allowing us to easily find the on-disk
> size of each object. To compute it, we populate an array
> with the offsets from the sha1-sorted idx file, and then use
> qsort to order it by offsets.
>
> That does O(n log n) offset comparisons, and profiling shows
> that we spend most of our time in cmp_offset. However, since
> we are sorting on a simple off_t, we can use numeric sorts
> that perform better. A radix sort can run in O(k*n), where k
> is the number of "digits" in our number. For a 64-bit off_t,
> using 16-bit "digits" gives us k=4.
>
> On the linux.git repo, with about 3M objects to sort, this
> yields a 400% speedup. Here are the best-of-five numbers for
> running "echo HEAD | git cat-file --batch-disk-size", which
> is dominated by time spent building the pack revindex:
>
> before after
> real 0m0.834s 0m0.204s
> user 0m0.788s 0m0.164s
> sys 0m0.040s 0m0.036s
>
> On a smaller repo, the radix sort would not be
> as impressive (and could even be worse), as we are trading
> the log(n) factor for the k=4 of the radix sort. However,
> even on git.git, with 173K objects, it shows some
> improvement:
>
> before after
> real 0m0.046s 0m0.017s
> user 0m0.036s 0m0.012s
> sys 0m0.008s 0m0.000s
k should only be 2 for git.git. I haven't packed in a while, but I
think it should all fit within 4G. :) The pathological case would be
a pack file with very few very very large objects, large enough to
push the pack size over the 2^48 threshold so we'd have to do all four
radixes.
It's probably worth mentioning here and/or in the code that k is
dependent on the pack file size and that we can jump out early for
small pack files. That's my favorite part of this code by the way. :)
> Signed-off-by: Jeff King <peff@peff.net>
> ---
> I changed a few things from the original, including:
>
> 1. We take an "unsigned" number of objects to match the fix in the
> last patch.
>
> 2. The 16-bit "digit" size is factored out to a single place, which
> avoids magic numbers and repeating ourselves.
>
> 3. The "digits" variable is renamed to "bits", which is more accurate.
>
> 4. The outer loop condition uses the simpler "while (max >> bits)".
>
> 5. We use memcpy instead of an open-coded loop to copy the whole array
> at the end. The individual bucket-assignment is still done by
> struct assignment. I haven't timed if memcpy would make a
> difference there.
>
> 6. The 64K*sizeof(int) "pos" array is now heap-allocated, in case
> there are platforms with a small stack.
>
> I re-ran my timings to make sure none of the above impacted them; it
> turned out the same.
>
> pack-revindex.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++++++++----
> 1 file changed, 79 insertions(+), 5 deletions(-)
>
> diff --git a/pack-revindex.c b/pack-revindex.c
> index 1aa9754..9365bc2 100644
> --- a/pack-revindex.c
> +++ b/pack-revindex.c
> @@ -59,11 +59,85 @@ static int cmp_offset(const void *a_, const void *b_)
> /* revindex elements are lazily initialized */
> }
>
> -static int cmp_offset(const void *a_, const void *b_)
> +/*
> + * This is a least-significant-digit radix sort.
> + */
> +static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
> {
> - const struct revindex_entry *a = a_;
> - const struct revindex_entry *b = b_;
> - return (a->offset < b->offset) ? -1 : (a->offset > b->offset) ? 1 : 0;
> + /*
> + * We use a "digit" size of 16 bits. That keeps our memory
> + * usage reasonable, and we can generally (for a 4G or smaller
> + * packfile) quit after two rounds of radix-sorting.
> + */
> +#define DIGIT_SIZE (16)
> +#define BUCKETS (1 << DIGIT_SIZE)
> + /*
> + * We want to know the bucket that a[i] will go into when we are using
> + * the digit that is N bits from the (least significant) end.
> + */
> +#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))
> +
> + /*
> + * We need O(n) temporary storage, so we sort back and forth between
> + * the real array and our tmp storage. To keep them straight, we always
> + * sort from "a" into buckets in "b".
> + */
> + struct revindex_entry *tmp = xcalloc(n, sizeof(*tmp));
Didn't notice it the first time I read this, but do we really need
calloc here? Or will malloc do?
> + struct revindex_entry *a = entries, *b = tmp;
> + int bits = 0;
> + unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
> +
> + while (max >> bits) {
> + struct revindex_entry *swap;
> + int i;
You forgot to make i unsigned. See below too...
> +
> + memset(pos, 0, BUCKETS * sizeof(*pos));
> +
> + /*
> + * We want pos[i] to store the index of the last element that
> + * will go in bucket "i" (actually one past the last element).
> + * To do this, we first count the items that will go in each
> + * bucket, which gives us a relative offset from the last
> + * bucket. We can then cumulatively add the index from the
> + * previous bucket to get the true index.
> + */
> + for (i = 0; i < n; i++)
> + pos[BUCKET_FOR(a, i, bits)]++;
> + for (i = 1; i < BUCKETS; i++)
> + pos[i] += pos[i-1];
> +
> + /*
> + * Now we can drop the elements into their correct buckets (in
> + * our temporary array). We iterate the pos counter backwards
> + * to avoid using an extra index to count up. And since we are
> + * going backwards there, we must also go backwards through the
> + * array itself, to keep the sort stable.
> + */
> + for (i = n - 1; i >= 0; i--)
> + b[--pos[BUCKET_FOR(a, i, bits)]] = a[i];
...which is why the above loop still works.
> +
> + /*
> + * Now "b" contains the most sorted list, so we swap "a" and
> + * "b" for the next iteration.
> + */
> + swap = a;
> + a = b;
> + b = swap;
> +
> + /* And bump our bits for the next round. */
> + bits += DIGIT_SIZE;
> + }
> +
> + /*
> + * If we ended with our data in the original array, great. If not,
> + * we have to move it back from the temporary storage.
> + */
> + if (a != entries)
> + memcpy(entries, tmp, n * sizeof(*entries));
> + free(tmp);
> + free(pos);
> +
> +#undef BUCKET_FOR
> }
>
> /*
> @@ -108,7 +182,7 @@ static void create_pack_revindex(struct pack_revindex *rix)
> */
> rix->revindex[num_ent].offset = p->pack_size - 20;
> rix->revindex[num_ent].nr = -1;
> - qsort(rix->revindex, num_ent, sizeof(*rix->revindex), cmp_offset);
> + sort_revindex(rix->revindex, num_ent, p->pack_size);
> }
>
> struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
> --
> 1.8.3.rc3.24.gec82cb9
^ permalink raw reply [flat|nested] 52+ messages in thread
* Re: [PATCH 10/10] pack-revindex: radix-sort the revindex
2013-07-10 17:10 ` Brandon Casey
@ 2013-07-11 11:17 ` Jeff King
0 siblings, 0 replies; 52+ messages in thread
From: Jeff King @ 2013-07-11 11:17 UTC (permalink / raw)
To: Brandon Casey; +Cc: git, Ramkumar Ramachandra, Duy Nguyen, Junio C Hamano
On Wed, Jul 10, 2013 at 10:10:16AM -0700, Brandon Casey wrote:
> > On the linux.git repo, with about 3M objects to sort, this
> > yields a 400% speedup. Here are the best-of-five numbers for
> > running "echo HEAD | git cat-file --batch-disk-size", which
> > is dominated by time spent building the pack revindex:
> >
> > before after
> > real 0m0.834s 0m0.204s
> > user 0m0.788s 0m0.164s
> > sys 0m0.040s 0m0.036s
> >
> > On a smaller repo, the radix sort would not be
> > as impressive (and could even be worse), as we are trading
> > the log(n) factor for the k=4 of the radix sort. However,
> > even on git.git, with 173K objects, it shows some
> > improvement:
> >
> > before after
> > real 0m0.046s 0m0.017s
> > user 0m0.036s 0m0.012s
> > sys 0m0.008s 0m0.000s
>
> k should only be 2 for git.git. I haven't packed in a while, but I
> think it should all fit within 4G. :) The pathological case would be
> a pack file with very few very very large objects, large enough to
> push the pack size over the 2^48 threshold so we'd have to do all four
> radixes.
Yeah, even linux.git fits into k=2. And that does more or less explain
the numbers in both cases.
For git.git, With 173K objects, log(n) is ~18, so regular sort is 18n.
With a radix sort of k=2, which has a constant factor of 2 (you can see
by looking at the code that we go through the list twice per radix), we
have 4n. So there should be a 4.5x speedup. We don't quite get that,
which is probably due to the extra bookkeeping on the buckets.
For linux.git, with 3M objects, log(n) is ~22, so the speedup we hope
for is 5.5x. We end up with 4x.
> It's probably worth mentioning here and/or in the code that k is
> dependent on the pack file size and that we can jump out early for
> small pack files. That's my favorite part of this code by the way. :)
Yeah, I agree it is probably worth mentioning along with the numbers; it
is where half of our speedup is coming from. I think the "max >> bits"
loop condition deserves to be commented, too. I'll add that.
Also note that my commit message still refers to "--batch-disk-size"
which does not exist anymore. :) I didn't update the timings in the
commit message for my re-roll, but I did confirm that they are the same.
> > + /*
> > + * We need O(n) temporary storage, so we sort back and forth between
> > + * the real array and our tmp storage. To keep them straight, we always
> > + * sort from "a" into buckets in "b".
> > + */
> > + struct revindex_entry *tmp = xcalloc(n, sizeof(*tmp));
>
> Didn't notice it the first time I read this, but do we really need
> calloc here? Or will malloc do?
No, a malloc should be fine. I doubt it matters much, but there's no
reason not to go the cheap route.
> > + struct revindex_entry *a = entries, *b = tmp;
> > + int bits = 0;
> > + unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
> > +
> > + while (max >> bits) {
> > + struct revindex_entry *swap;
> > + int i;
>
> You forgot to make i unsigned. See below too...
Oops. Thanks for catching.
> > + /*
> > + * Now we can drop the elements into their correct buckets (in
> > + * our temporary array). We iterate the pos counter backwards
> > + * to avoid using an extra index to count up. And since we are
> > + * going backwards there, we must also go backwards through the
> > + * array itself, to keep the sort stable.
> > + */
> > + for (i = n - 1; i >= 0; i--)
> > + b[--pos[BUCKET_FOR(a, i, bits)]] = a[i];
>
> ...which is why the above loop still works.
Since we are iterating by ones, I guess I can just compare to UINT_MAX.
-Peff
^ permalink raw reply [flat|nested] 52+ messages in thread
* [PATCHv3 10/10] pack-revindex: radix-sort the revindex
2013-07-10 11:55 ` [PATCH 10/10] pack-revindex: radix-sort the revindex Jeff King
` (2 preceding siblings ...)
2013-07-10 17:10 ` Brandon Casey
@ 2013-07-11 12:16 ` Jeff King
2013-07-11 21:12 ` Brandon Casey
3 siblings, 1 reply; 52+ messages in thread
From: Jeff King @ 2013-07-11 12:16 UTC (permalink / raw)
To: git; +Cc: Ramkumar Ramachandra, Duy Nguyen, Brandon Casey, Junio C Hamano
Here's an update of the radix-sort patch. It fixes the "unsigned" issue
Brandon pointed out, along with a few other comment/naming/style fixes.
I also updated the commit message with more explanation of the
timings.
The interdiff is:
diff --git a/pack-revindex.c b/pack-revindex.c
index 9365bc2..b4d2b35 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -61,6 +61,10 @@ static void init_pack_revindex(void)
/*
* This is a least-significant-digit radix sort.
+ *
+ * It sorts each of the "n" items in "entries" by its offset field. The "max"
+ * parameter must be at least as large as the largest offset in the array,
+ * and lets us quit the sort early.
*/
static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
{
@@ -78,18 +82,25 @@ static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))
/*
- * We need O(n) temporary storage, so we sort back and forth between
- * the real array and our tmp storage. To keep them straight, we always
- * sort from "a" into buckets in "b".
+ * We need O(n) temporary storage. Rather than do an extra copy of the
+ * partial results into "entries", we sort back and forth between the
+ * real array and temporary storage. In each iteration of the loop, we
+ * keep track of them with alias pointers, always sorting from "from"
+ * to "to".
*/
- struct revindex_entry *tmp = xcalloc(n, sizeof(*tmp));
- struct revindex_entry *a = entries, *b = tmp;
- int bits = 0;
+ struct revindex_entry *tmp = xmalloc(n * sizeof(*tmp));
+ struct revindex_entry *from = entries, *to = tmp;
+ int bits;
unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
- while (max >> bits) {
+ /*
+ * If (max >> bits) is zero, then we know that the radix digit we are
+ * on (and any higher) will be zero for all entries, and our loop will
+ * be a no-op, as everybody lands in the same zero-th bucket.
+ */
+ for (bits = 0; max >> bits; bits += DIGIT_SIZE) {
struct revindex_entry *swap;
- int i;
+ unsigned i;
memset(pos, 0, BUCKETS * sizeof(*pos));
@@ -102,7 +113,7 @@ static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
* previous bucket to get the true index.
*/
for (i = 0; i < n; i++)
- pos[BUCKET_FOR(a, i, bits)]++;
+ pos[BUCKET_FOR(from, i, bits)]++;
for (i = 1; i < BUCKETS; i++)
pos[i] += pos[i-1];
@@ -112,32 +123,37 @@ static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
* to avoid using an extra index to count up. And since we are
* going backwards there, we must also go backwards through the
* array itself, to keep the sort stable.
+ *
+ * Note that we use an unsigned iterator to make sure we can
+ * handle 2^32-1 objects, even on a 32-bit system. But this
+ * means we cannot use the more obvious "i >= 0" loop condition
+ * for counting backwards, and must instead check for
+ * wrap-around with UINT_MAX.
*/
- for (i = n - 1; i >= 0; i--)
- b[--pos[BUCKET_FOR(a, i, bits)]] = a[i];
+ for (i = n - 1; i != UINT_MAX; i--)
+ to[--pos[BUCKET_FOR(from, i, bits)]] = from[i];
/*
- * Now "b" contains the most sorted list, so we swap "a" and
- * "b" for the next iteration.
+ * Now "to" contains the most sorted list, so we swap "from" and
+ * "to" for the next iteration.
*/
- swap = a;
- a = b;
- b = swap;
-
- /* And bump our bits for the next round. */
- bits += DIGIT_SIZE;
+ swap = from;
+ from = to;
+ to = swap;
}
/*
* If we ended with our data in the original array, great. If not,
* we have to move it back from the temporary storage.
*/
- if (a != entries)
+ if (from != entries)
memcpy(entries, tmp, n * sizeof(*entries));
free(tmp);
free(pos);
#undef BUCKET_FOR
+#undef BUCKETS
+#undef DIGIT_SIZE
}
/*
-- >8 --
Subject: [PATCH] pack-revindex: radix-sort the revindex
The pack revindex stores the offsets of the objects in the
pack in sorted order, allowing us to easily find the on-disk
size of each object. To compute it, we populate an array
with the offsets from the sha1-sorted idx file, and then use
qsort to order it by offsets.
That does O(n log n) offset comparisons, and profiling shows
that we spend most of our time in cmp_offset. However, since
we are sorting on a simple off_t, we can use numeric sorts
that perform better. A radix sort can run in O(k*n), where k
is the number of "digits" in our number. For a 64-bit off_t,
using 16-bit "digits" gives us k=4.
On the linux.git repo, with about 3M objects to sort, this
yields a 400% speedup. Here are the best-of-five numbers for
running
echo HEAD | git cat-file --batch-check="%(objectsize:disk)
on a fully packed repository, which is dominated by time
spent building the pack revindex:
before after
real 0m0.834s 0m0.204s
user 0m0.788s 0m0.164s
sys 0m0.040s 0m0.036s
This matches our algorithmic expectations. log(3M) is ~21.5,
so a traditional sort is ~21.5n. Our radix sort runs in k*n,
where k is the number of radix digits. In the worst case,
this is k=4 for a 64-bit off_t, but we can quit early when
the largest value to be sorted is smaller. For any
repository under 4G, k=2. Our algorithm makes two passes
over the list per radix digit, so we end up with 4n. That
should yield ~5.3x speedup. We see 4x here; the difference
is probably due to the extra bucket book-keeping the radix
sort has to do.
On a smaller repo, the difference is less impressive, as
log(n) is smaller. For git.git, with 173K objects (but still
k=2), we see a 2.7x improvement:
before after
real 0m0.046s 0m0.017s
user 0m0.036s 0m0.012s
sys 0m0.008s 0m0.000s
On even tinier repos (e.g., a few hundred objects), the
speedup goes away entirely, as the small advantage of the
radix sort gets erased by the book-keeping costs (and at
those sizes, the cost to generate the rev-index gets
lost in the noise anyway).
Signed-off-by: Jeff King <peff@peff.net>
---
pack-revindex.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 95 insertions(+), 5 deletions(-)
diff --git a/pack-revindex.c b/pack-revindex.c
index 1aa9754..b4d2b35 100644
--- a/pack-revindex.c
+++ b/pack-revindex.c
@@ -59,11 +59,101 @@ static int cmp_offset(const void *a_, const void *b_)
/* revindex elements are lazily initialized */
}
-static int cmp_offset(const void *a_, const void *b_)
+/*
+ * This is a least-significant-digit radix sort.
+ *
+ * It sorts each of the "n" items in "entries" by its offset field. The "max"
+ * parameter must be at least as large as the largest offset in the array,
+ * and lets us quit the sort early.
+ */
+static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
{
- const struct revindex_entry *a = a_;
- const struct revindex_entry *b = b_;
- return (a->offset < b->offset) ? -1 : (a->offset > b->offset) ? 1 : 0;
+ /*
+ * We use a "digit" size of 16 bits. That keeps our memory
+ * usage reasonable, and we can generally (for a 4G or smaller
+ * packfile) quit after two rounds of radix-sorting.
+ */
+#define DIGIT_SIZE (16)
+#define BUCKETS (1 << DIGIT_SIZE)
+ /*
+ * We want to know the bucket that a[i] will go into when we are using
+ * the digit that is N bits from the (least significant) end.
+ */
+#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))
+
+ /*
+ * We need O(n) temporary storage. Rather than do an extra copy of the
+ * partial results into "entries", we sort back and forth between the
+ * real array and temporary storage. In each iteration of the loop, we
+ * keep track of them with alias pointers, always sorting from "from"
+ * to "to".
+ */
+ struct revindex_entry *tmp = xmalloc(n * sizeof(*tmp));
+ struct revindex_entry *from = entries, *to = tmp;
+ int bits;
+ unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
+
+ /*
+ * If (max >> bits) is zero, then we know that the radix digit we are
+ * on (and any higher) will be zero for all entries, and our loop will
+ * be a no-op, as everybody lands in the same zero-th bucket.
+ */
+ for (bits = 0; max >> bits; bits += DIGIT_SIZE) {
+ struct revindex_entry *swap;
+ unsigned i;
+
+ memset(pos, 0, BUCKETS * sizeof(*pos));
+
+ /*
+ * We want pos[i] to store the index of the last element that
+ * will go in bucket "i" (actually one past the last element).
+ * To do this, we first count the items that will go in each
+ * bucket, which gives us a relative offset from the last
+ * bucket. We can then cumulatively add the index from the
+ * previous bucket to get the true index.
+ */
+ for (i = 0; i < n; i++)
+ pos[BUCKET_FOR(from, i, bits)]++;
+ for (i = 1; i < BUCKETS; i++)
+ pos[i] += pos[i-1];
+
+ /*
+ * Now we can drop the elements into their correct buckets (in
+ * our temporary array). We iterate the pos counter backwards
+ * to avoid using an extra index to count up. And since we are
+ * going backwards there, we must also go backwards through the
+ * array itself, to keep the sort stable.
+ *
+ * Note that we use an unsigned iterator to make sure we can
+ * handle 2^32-1 objects, even on a 32-bit system. But this
+ * means we cannot use the more obvious "i >= 0" loop condition
+ * for counting backwards, and must instead check for
+ * wrap-around with UINT_MAX.
+ */
+ for (i = n - 1; i != UINT_MAX; i--)
+ to[--pos[BUCKET_FOR(from, i, bits)]] = from[i];
+
+ /*
+ * Now "to" contains the most sorted list, so we swap "from" and
+ * "to" for the next iteration.
+ */
+ swap = from;
+ from = to;
+ to = swap;
+ }
+
+ /*
+ * If we ended with our data in the original array, great. If not,
+ * we have to move it back from the temporary storage.
+ */
+ if (from != entries)
+ memcpy(entries, tmp, n * sizeof(*entries));
+ free(tmp);
+ free(pos);
+
+#undef BUCKET_FOR
+#undef BUCKETS
+#undef DIGIT_SIZE
}
/*
@@ -108,7 +198,7 @@ static void create_pack_revindex(struct pack_revindex *rix)
*/
rix->revindex[num_ent].offset = p->pack_size - 20;
rix->revindex[num_ent].nr = -1;
- qsort(rix->revindex, num_ent, sizeof(*rix->revindex), cmp_offset);
+ sort_revindex(rix->revindex, num_ent, p->pack_size);
}
struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
--
1.8.3.rc3.24.gec82cb9
^ permalink raw reply related [flat|nested] 52+ messages in thread
* Re: [PATCHv3 10/10] pack-revindex: radix-sort the revindex
2013-07-11 12:16 ` [PATCHv3 " Jeff King
@ 2013-07-11 21:12 ` Brandon Casey
0 siblings, 0 replies; 52+ messages in thread
From: Brandon Casey @ 2013-07-11 21:12 UTC (permalink / raw)
To: Jeff King; +Cc: git, Ramkumar Ramachandra, Duy Nguyen, Junio C Hamano
On Thu, Jul 11, 2013 at 5:16 AM, Jeff King <peff@peff.net> wrote:
> Here's an update of the radix-sort patch. It fixes the "unsigned" issue
> Brandon pointed out, along with a few other comment/naming/style fixes.
> I also updated the commit message with more explanation of the
> timings.
Very nice.
For what it's worth:
Reviewed-by: Brandon Casey <drafnel@gmail.com>
<remainder retained for reference (or whatever Jonathan usually says)>
> The interdiff is:
>
> diff --git a/pack-revindex.c b/pack-revindex.c
> index 9365bc2..b4d2b35 100644
> --- a/pack-revindex.c
> +++ b/pack-revindex.c
> @@ -61,6 +61,10 @@ static void init_pack_revindex(void)
>
> /*
> * This is a least-significant-digit radix sort.
> + *
> + * It sorts each of the "n" items in "entries" by its offset field. The "max"
> + * parameter must be at least as large as the largest offset in the array,
> + * and lets us quit the sort early.
> */
> static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
> {
> @@ -78,18 +82,25 @@ static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
> #define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))
>
> /*
> - * We need O(n) temporary storage, so we sort back and forth between
> - * the real array and our tmp storage. To keep them straight, we always
> - * sort from "a" into buckets in "b".
> + * We need O(n) temporary storage. Rather than do an extra copy of the
> + * partial results into "entries", we sort back and forth between the
> + * real array and temporary storage. In each iteration of the loop, we
> + * keep track of them with alias pointers, always sorting from "from"
> + * to "to".
> */
> - struct revindex_entry *tmp = xcalloc(n, sizeof(*tmp));
> - struct revindex_entry *a = entries, *b = tmp;
> - int bits = 0;
> + struct revindex_entry *tmp = xmalloc(n * sizeof(*tmp));
> + struct revindex_entry *from = entries, *to = tmp;
> + int bits;
> unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
>
> - while (max >> bits) {
> + /*
> + * If (max >> bits) is zero, then we know that the radix digit we are
> + * on (and any higher) will be zero for all entries, and our loop will
> + * be a no-op, as everybody lands in the same zero-th bucket.
> + */
> + for (bits = 0; max >> bits; bits += DIGIT_SIZE) {
> struct revindex_entry *swap;
> - int i;
> + unsigned i;
>
> memset(pos, 0, BUCKETS * sizeof(*pos));
>
> @@ -102,7 +113,7 @@ static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
> * previous bucket to get the true index.
> */
> for (i = 0; i < n; i++)
> - pos[BUCKET_FOR(a, i, bits)]++;
> + pos[BUCKET_FOR(from, i, bits)]++;
> for (i = 1; i < BUCKETS; i++)
> pos[i] += pos[i-1];
>
> @@ -112,32 +123,37 @@ static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
> * to avoid using an extra index to count up. And since we are
> * going backwards there, we must also go backwards through the
> * array itself, to keep the sort stable.
> + *
> + * Note that we use an unsigned iterator to make sure we can
> + * handle 2^32-1 objects, even on a 32-bit system. But this
> + * means we cannot use the more obvious "i >= 0" loop condition
> + * for counting backwards, and must instead check for
> + * wrap-around with UINT_MAX.
> */
> - for (i = n - 1; i >= 0; i--)
> - b[--pos[BUCKET_FOR(a, i, bits)]] = a[i];
> + for (i = n - 1; i != UINT_MAX; i--)
> + to[--pos[BUCKET_FOR(from, i, bits)]] = from[i];
>
> /*
> - * Now "b" contains the most sorted list, so we swap "a" and
> - * "b" for the next iteration.
> + * Now "to" contains the most sorted list, so we swap "from" and
> + * "to" for the next iteration.
> */
> - swap = a;
> - a = b;
> - b = swap;
> -
> - /* And bump our bits for the next round. */
> - bits += DIGIT_SIZE;
> + swap = from;
> + from = to;
> + to = swap;
> }
>
> /*
> * If we ended with our data in the original array, great. If not,
> * we have to move it back from the temporary storage.
> */
> - if (a != entries)
> + if (from != entries)
> memcpy(entries, tmp, n * sizeof(*entries));
> free(tmp);
> free(pos);
>
> #undef BUCKET_FOR
> +#undef BUCKETS
> +#undef DIGIT_SIZE
> }
>
> /*
>
> -- >8 --
> Subject: [PATCH] pack-revindex: radix-sort the revindex
>
> The pack revindex stores the offsets of the objects in the
> pack in sorted order, allowing us to easily find the on-disk
> size of each object. To compute it, we populate an array
> with the offsets from the sha1-sorted idx file, and then use
> qsort to order it by offsets.
>
> That does O(n log n) offset comparisons, and profiling shows
> that we spend most of our time in cmp_offset. However, since
> we are sorting on a simple off_t, we can use numeric sorts
> that perform better. A radix sort can run in O(k*n), where k
> is the number of "digits" in our number. For a 64-bit off_t,
> using 16-bit "digits" gives us k=4.
>
> On the linux.git repo, with about 3M objects to sort, this
> yields a 400% speedup. Here are the best-of-five numbers for
> running
>
> echo HEAD | git cat-file --batch-check="%(objectsize:disk)"
>
> on a fully packed repository, which is dominated by time
> spent building the pack revindex:
>
> before after
> real 0m0.834s 0m0.204s
> user 0m0.788s 0m0.164s
> sys 0m0.040s 0m0.036s
>
> This matches our algorithmic expectations. log(3M) is ~21.5,
> so a traditional sort is ~21.5n. Our radix sort runs in k*n,
> where k is the number of radix digits. In the worst case,
> this is k=4 for a 64-bit off_t, but we can quit early when
> the largest value to be sorted is smaller. For any
> repository under 4G, k=2. Our algorithm makes two passes
> over the list per radix digit, so we end up with 4n. That
> should yield ~5.3x speedup. We see 4x here; the difference
> is probably due to the extra bucket book-keeping the radix
> sort has to do.
>
> On a smaller repo, the difference is less impressive, as
> log(n) is smaller. For git.git, with 173K objects (but still
> k=2), we see a 2.7x improvement:
>
> before after
> real 0m0.046s 0m0.017s
> user 0m0.036s 0m0.012s
> sys 0m0.008s 0m0.000s
>
> On even tinier repos (e.g., a few hundred objects), the
> speedup goes away entirely, as the small advantage of the
> radix sort gets erased by the book-keeping costs (and at
> those sizes, the cost to generate the rev-index gets
> lost in the noise anyway).
>
> Signed-off-by: Jeff King <peff@peff.net>
> ---
> pack-revindex.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++++---
> 1 file changed, 95 insertions(+), 5 deletions(-)
>
> diff --git a/pack-revindex.c b/pack-revindex.c
> index 1aa9754..b4d2b35 100644
> --- a/pack-revindex.c
> +++ b/pack-revindex.c
> @@ -59,11 +59,101 @@ static int cmp_offset(const void *a_, const void *b_)
> /* revindex elements are lazily initialized */
> }
>
> -static int cmp_offset(const void *a_, const void *b_)
> +/*
> + * This is a least-significant-digit radix sort.
> + *
> + * It sorts each of the "n" items in "entries" by its offset field. The "max"
> + * parameter must be at least as large as the largest offset in the array,
> + * and lets us quit the sort early.
> + */
> +static void sort_revindex(struct revindex_entry *entries, unsigned n, off_t max)
> {
> - const struct revindex_entry *a = a_;
> - const struct revindex_entry *b = b_;
> - return (a->offset < b->offset) ? -1 : (a->offset > b->offset) ? 1 : 0;
> + /*
> + * We use a "digit" size of 16 bits. That keeps our memory
> + * usage reasonable, and we can generally (for a 4G or smaller
> + * packfile) quit after two rounds of radix-sorting.
> + */
> +#define DIGIT_SIZE (16)
> +#define BUCKETS (1 << DIGIT_SIZE)
> + /*
> + * We want to know the bucket that a[i] will go into when we are using
> + * the digit that is N bits from the (least significant) end.
> + */
> +#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1))
> +
> + /*
> + * We need O(n) temporary storage. Rather than do an extra copy of the
> + * partial results into "entries", we sort back and forth between the
> + * real array and temporary storage. In each iteration of the loop, we
> + * keep track of them with alias pointers, always sorting from "from"
> + * to "to".
> + */
> + struct revindex_entry *tmp = xmalloc(n * sizeof(*tmp));
> + struct revindex_entry *from = entries, *to = tmp;
> + int bits;
> + unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
> +
> + /*
> + * If (max >> bits) is zero, then we know that the radix digit we are
> + * on (and any higher) will be zero for all entries, and our loop will
> + * be a no-op, as everybody lands in the same zero-th bucket.
> + */
> + for (bits = 0; max >> bits; bits += DIGIT_SIZE) {
> + struct revindex_entry *swap;
> + unsigned i;
> +
> + memset(pos, 0, BUCKETS * sizeof(*pos));
> +
> + /*
> + * We want pos[i] to store the index of the last element that
> + * will go in bucket "i" (actually one past the last element).
> + * To do this, we first count the items that will go in each
> + * bucket, which gives us a relative offset from the last
> + * bucket. We can then cumulatively add the index from the
> + * previous bucket to get the true index.
> + */
> + for (i = 0; i < n; i++)
> + pos[BUCKET_FOR(from, i, bits)]++;
> + for (i = 1; i < BUCKETS; i++)
> + pos[i] += pos[i-1];
> +
> + /*
> + * Now we can drop the elements into their correct buckets (in
> + * our temporary array). We iterate the pos counter backwards
> + * to avoid using an extra index to count up. And since we are
> + * going backwards there, we must also go backwards through the
> + * array itself, to keep the sort stable.
> + *
> + * Note that we use an unsigned iterator to make sure we can
> + * handle 2^32-1 objects, even on a 32-bit system. But this
> + * means we cannot use the more obvious "i >= 0" loop condition
> + * for counting backwards, and must instead check for
> + * wrap-around with UINT_MAX.
> + */
> + for (i = n - 1; i != UINT_MAX; i--)
> + to[--pos[BUCKET_FOR(from, i, bits)]] = from[i];
> +
> + /*
> + * Now "to" contains the most sorted list, so we swap "from" and
> + * "to" for the next iteration.
> + */
> + swap = from;
> + from = to;
> + to = swap;
> + }
> +
> + /*
> + * If we ended with our data in the original array, great. If not,
> + * we have to move it back from the temporary storage.
> + */
> + if (from != entries)
> + memcpy(entries, tmp, n * sizeof(*entries));
> + free(tmp);
> + free(pos);
> +
> +#undef BUCKET_FOR
> +#undef BUCKETS
> +#undef DIGIT_SIZE
> }
>
> /*
> @@ -108,7 +198,7 @@ static void create_pack_revindex(struct pack_revindex *rix)
> */
> rix->revindex[num_ent].offset = p->pack_size - 20;
> rix->revindex[num_ent].nr = -1;
> - qsort(rix->revindex, num_ent, sizeof(*rix->revindex), cmp_offset);
> + sort_revindex(rix->revindex, num_ent, p->pack_size);
> }
>
> struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
> --
> 1.8.3.rc3.24.gec82cb9
>
^ permalink raw reply [flat|nested] 52+ messages in thread