提交 074b2eea 编写于 作者: M Martin Koegler 提交者: Junio C Hamano

git-pack-objects: cache small deltas between big objects

Creating deltas between big blobs is a CPU and memory intensive task.
In the writing phase, all (not reused) deltas are redone.

This patch adds support for caching deltas from the deltifying phase, so
that the writing phase is faster.

The caching is limited to small deltas to avoid increasing memory usage very much.
The implemented limit is (memory needed to create the delta)/1024.
Signed-off-by: Martin Koegler <mkoegler@auto.tuwien.ac.at>
Signed-off-by: Junio C Hamano <junkio@cox.net>
上级 a588d88a
...@@ -567,6 +567,11 @@ pack.compression:: ...@@ -567,6 +567,11 @@ pack.compression::
slowest. If not set, defaults to core.compression. If that is slowest. If not set, defaults to core.compression. If that is
not set, defaults to -1. not set, defaults to -1.
pack.deltaCacheSize::
The maximum memory in bytes used for caching deltas in
gitlink:git-pack-objects[1].
A value of 0 means no limit. Defaults to 0.
pull.octopus:: pull.octopus::
The default merge strategy to use when pulling multiple branches The default merge strategy to use when pulling multiple branches
at once. at once.
......
...@@ -36,6 +36,7 @@ struct object_entry { ...@@ -36,6 +36,7 @@ struct object_entry {
struct object_entry *delta_sibling; /* other deltified objects who struct object_entry *delta_sibling; /* other deltified objects who
* uses the same base as me * uses the same base as me
*/ */
void *delta_data; /* cached delta (uncompressed) */
unsigned long delta_size; /* delta data size (uncompressed) */ unsigned long delta_size; /* delta data size (uncompressed) */
enum object_type type; enum object_type type;
enum object_type in_pack_type; /* could be delta */ enum object_type in_pack_type; /* could be delta */
...@@ -76,6 +77,9 @@ static struct progress progress_state; ...@@ -76,6 +77,9 @@ static struct progress progress_state;
static int pack_compression_level = Z_DEFAULT_COMPRESSION; static int pack_compression_level = Z_DEFAULT_COMPRESSION;
static int pack_compression_seen; static int pack_compression_seen;
static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 0;
/* /*
* The object names in objects array are hashed with this hashtable, * The object names in objects array are hashed with this hashtable,
* to help looking up the entry by object name. * to help looking up the entry by object name.
...@@ -405,24 +409,31 @@ static unsigned long write_object(struct sha1file *f, ...@@ -405,24 +409,31 @@ static unsigned long write_object(struct sha1file *f,
z_stream stream; z_stream stream;
unsigned long maxsize; unsigned long maxsize;
void *out; void *out;
buf = read_sha1_file(entry->sha1, &type, &size); if (entry->delta_data && usable_delta) {
if (!buf) buf = entry->delta_data;
die("unable to read %s", sha1_to_hex(entry->sha1));
if (size != entry->size)
die("object %s size inconsistency (%lu vs %lu)",
sha1_to_hex(entry->sha1), size, entry->size);
if (usable_delta) {
buf = delta_against(buf, size, entry);
size = entry->delta_size; size = entry->delta_size;
obj_type = (allow_ofs_delta && entry->delta->offset) ? obj_type = (allow_ofs_delta && entry->delta->offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA; OBJ_OFS_DELTA : OBJ_REF_DELTA;
} else { } else {
/* buf = read_sha1_file(entry->sha1, &type, &size);
* recover real object type in case if (!buf)
* check_object() wanted to re-use a delta, die("unable to read %s", sha1_to_hex(entry->sha1));
* but we couldn't since base was in previous split pack if (size != entry->size)
*/ die("object %s size inconsistency (%lu vs %lu)",
obj_type = type; sha1_to_hex(entry->sha1), size, entry->size);
if (usable_delta) {
buf = delta_against(buf, size, entry);
size = entry->delta_size;
obj_type = (allow_ofs_delta && entry->delta->offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
} else {
/*
* recover real object type in case
* check_object() wanted to re-use a delta,
* but we couldn't since base was in previous split pack
*/
obj_type = type;
}
} }
/* compress the data to store and put compressed length in datalen */ /* compress the data to store and put compressed length in datalen */
memset(&stream, 0, sizeof(stream)); memset(&stream, 0, sizeof(stream));
...@@ -1385,6 +1396,20 @@ struct unpacked { ...@@ -1385,6 +1396,20 @@ struct unpacked {
struct delta_index *index; struct delta_index *index;
}; };
/*
 * Decide whether a freshly computed delta should be kept in the
 * in-memory delta cache so the writing phase can reuse it instead
 * of recomputing it.
 *
 * Returns 1 if the delta should be cached, 0 otherwise.  A delta is
 * cached only when (a) adding it would not push the cache past the
 * configured pack.deltaCacheSize limit (0 == unlimited), and (b) the
 * delta is small relative to its source/target objects, so the memory
 * cost of caching stays roughly bounded by (memory used to create the
 * delta) / 1024.
 */
static int delta_cacheable(struct unpacked *trg, struct unpacked *src,
                           unsigned long src_size, unsigned long trg_size,
                           unsigned long delta_size)
{
	unsigned long projected = delta_cache_size + delta_size;

	/* a zero limit means "no limit" */
	if (max_delta_cache_size && projected > max_delta_cache_size)
		return 0;

	/* keep only deltas that are tiny compared to the objects involved */
	return (src_size >> 20) + (trg_size >> 21) > (delta_size >> 10);
}
/* /*
* We search for deltas _backwards_ in a list sorted by type and * We search for deltas _backwards_ in a list sorted by type and
* by size, so that we see progressively smaller and smaller files. * by size, so that we see progressively smaller and smaller files.
...@@ -1466,10 +1491,20 @@ static int try_delta(struct unpacked *trg, struct unpacked *src, ...@@ -1466,10 +1491,20 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
if (!delta_buf) if (!delta_buf)
return 0; return 0;
if (trg_entry->delta_data) {
delta_cache_size -= trg_entry->delta_size;
free(trg_entry->delta_data);
}
trg_entry->delta_data = 0;
trg_entry->delta = src_entry; trg_entry->delta = src_entry;
trg_entry->delta_size = delta_size; trg_entry->delta_size = delta_size;
trg_entry->depth = src_entry->depth + 1; trg_entry->depth = src_entry->depth + 1;
free(delta_buf);
if (delta_cacheable(src, trg, src_size, trg_size, delta_size)) {
trg_entry->delta_data = xrealloc(delta_buf, delta_size);
delta_cache_size += trg_entry->delta_size;
} else
free(delta_buf);
return 1; return 1;
} }
...@@ -1615,6 +1650,10 @@ static int git_pack_config(const char *k, const char *v) ...@@ -1615,6 +1650,10 @@ static int git_pack_config(const char *k, const char *v)
pack_compression_seen = 1; pack_compression_seen = 1;
return 0; return 0;
} }
if (!strcmp(k, "pack.deltacachesize")) {
max_delta_cache_size = git_config_int(k, v);
return 0;
}
return git_default_config(k, v); return git_default_config(k, v);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册