#include "builtin.h"
#include "cache.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
#include "mru.h"

static const char *pack_usage[] = {
	N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
	N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
	NULL
};

/*
 * Objects we are going to pack are collected in the `to_pack` structure.
 * It contains an array (dynamically expanded) of the object data, and a map
 * that can resolve SHA1s to their position in the array.
 */
static struct packing_data to_pack;

static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static unsigned long unpack_unreachable_expiration;
static int pack_loose_unreachable;
static int local;
static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int num_preferred_base;
static struct progress *progress_state;

static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
static off_t reuse_packfile_offset;

static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
static int write_bitmap_index;
static uint16_t write_bitmap_options;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;

/*
 * Indexed commits
 */
static struct commit **indexed_commits;
static unsigned int indexed_commits_nr;
static unsigned int indexed_commits_alloc;

static void index_commit_for_bitmap(struct commit *commit)
{
	if (indexed_commits_nr >= indexed_commits_alloc) {
		indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
		REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
	}

	indexed_commits[indexed_commits_nr++] = commit;
}

static void *get_delta(struct object_entry *entry)
{
	unsigned long size, base_size, delta_size;
	void *buf, *base_buf, *delta_buf;
	enum object_type type;

	buf = read_sha1_file(entry->idx.sha1, &type, &size);
	if (!buf)
		die("unable to read %s", sha1_to_hex(entry->idx.sha1));
	base_buf = read_sha1_file(entry->delta->idx.sha1, &type, &base_size);
	if (!base_buf)
		die("unable to read %s", sha1_to_hex(entry->delta->idx.sha1));
	delta_buf = diff_delta(base_buf, base_size,
			       buf, size, &delta_size, 0);
	if (!delta_buf || delta_size != entry->delta_size)
		die("delta size changed");
	free(buf);
	free(base_buf);
	return delta_buf;
}

static unsigned long do_compress(void **pptr, unsigned long size)
{
	git_zstream stream;
	void *in, *out;
	unsigned long maxsize;

	git_deflate_init(&stream, pack_compression_level);
	maxsize = git_deflate_bound(&stream, size);

	in = *pptr;
	out = xmalloc(maxsize);
	*pptr = out;

	stream.next_in = in;
	stream.avail_in = size;
	stream.next_out = out;
	stream.avail_out = maxsize;
	while (git_deflate(&stream, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&stream);

	free(in);
	return stream.total_out;
}

static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
					   const unsigned char *sha1)
{
	git_zstream stream;
	unsigned char ibuf[1024 * 16];
	unsigned char obuf[1024 * 16];
	unsigned long olen = 0;

	git_deflate_init(&stream, pack_compression_level);

	for (;;) {
		ssize_t readlen;
		int zret = Z_OK;
		readlen = read_istream(st, ibuf, sizeof(ibuf));
		if (readlen == -1)
			die(_("unable to read %s"), sha1_to_hex(sha1));

		stream.next_in = ibuf;
		stream.avail_in = readlen;
		while ((stream.avail_in || readlen == 0) &&
		       (zret == Z_OK || zret == Z_BUF_ERROR)) {
			stream.next_out = obuf;
			stream.avail_out = sizeof(obuf);
			zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
			sha1write(f, obuf, stream.next_out - obuf);
			olen += stream.next_out - obuf;
		}
		if (stream.avail_in)
			die(_("deflate error (%d)"), zret);
		if (readlen == 0) {
			if (zret != Z_STREAM_END)
				die(_("deflate error (%d)"), zret);
			break;
		}
	}
	git_deflate_end(&stream);
	return olen;
}

/*
 * we are going to reuse the existing object data as is.  make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct sha1file *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		sha1write(f, in, avail);
		offset += avail;
		len -= avail;
	}
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{
	unsigned long size, datalen;
	unsigned char header[10], dheader[10];
	unsigned hdrlen;
	enum object_type type;
	void *buf;
	struct git_istream *st = NULL;

	if (!usable_delta) {
		if (entry->type == OBJ_BLOB &&
		    entry->size > big_file_threshold &&
		    (st = open_istream(entry->idx.sha1, &type, &size, NULL)) != NULL)
			buf = NULL;
		else {
			buf = read_sha1_file(entry->idx.sha1, &type, &size);
			if (!buf)
				die(_("unable to read %s"), sha1_to_hex(entry->idx.sha1));
		}
		/*
		 * make sure no cached delta data remains from a
		 * previous attempt before a pack split occurred.
		 */
		free(entry->delta_data);
		entry->delta_data = NULL;
		entry->z_delta_size = 0;
	} else if (entry->delta_data) {
		size = entry->delta_size;
		buf = entry->delta_data;
		entry->delta_data = NULL;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	} else {
		buf = get_delta(entry);
		size = entry->delta_size;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	}

	if (st)	/* large blob case, just assume we don't compress well */
		datalen = size;
	else if (entry->z_delta_size)
		datalen = entry->z_delta_size;
	else
		datalen = do_compress(&buf, size);

	/*
	 * The object header is a byte of 'type' followed by zero or
	 * more bytes of length.
	 */
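	/*
	 * (Added worked example:) the first header byte packs a
	 * continuation bit, the 3-bit type, and the low 4 bits of the
	 * size; each following byte carries 7 more size bits.  So a
	 * 300-byte blob (type 3) encodes as { 0xbc, 0x12 }.
	 */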
	hdrlen = encode_in_pack_object_header(type, size, header);

	if (type == OBJ_OFS_DELTA) {
		/*
		 * Deltas with relative base contain an additional
		 * encoding of the relative offset for the delta
		 * base from this object's position in the pack.
		 */
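		/*
		 * (Added worked example:) the offset is a big-endian
		 * base-128 varint with a bias on the high groups, so
		 * ofs = 0x1234 is written as { 0xa3, 0x34 }: 0x1234 >> 7
		 * = 0x24, minus the bias gives 0x23, OR 0x80 for the
		 * continuation bit = 0xa3, then the low seven bits 0x34.
		 */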
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
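		/*
		 * (Added note:) the "+ 20" in the limit checks below
		 * reserves room for the 20-byte SHA-1 trailer that
		 * closes every packfile.
		 */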
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
	} else if (type == OBJ_REF_DELTA) {
		/*
		 * Deltas with a base reference contain
		 * an additional 20 bytes for the base sha1.
		 */
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.sha1, 20);
		hdrlen += 20;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	if (st) {
		datalen = write_large_blob_data(st, f, entry->idx.sha1);
		close_istream(st);
	} else {
		sha1write(f, buf, datalen);
		free(buf);
	}

	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{
	struct packed_git *p = entry->in_pack;
	struct pack_window *w_curs = NULL;
	struct revindex_entry *revidx;
	off_t offset;
	enum object_type type = entry->type;
	off_t datalen;
	unsigned char header[10], dheader[10];
	unsigned hdrlen;

	if (entry->delta)
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	hdrlen = encode_in_pack_object_header(type, entry->size, header);

	offset = entry->in_pack_offset;
	revidx = find_pack_revindex(p, offset);
	datalen = revidx[1].offset - offset;
	if (!pack_to_stdout && p->index_version > 1 &&
	    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
		error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	offset += entry->in_pack_header_size;
	datalen -= entry->in_pack_header_size;

	if (!pack_to_stdout && p->index_version == 1 &&
	    check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
		error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	if (type == OBJ_OFS_DELTA) {
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
		reused_delta++;
	} else if (type == OBJ_REF_DELTA) {
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.sha1, 20);
		hdrlen += 20;
		reused_delta++;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	copy_pack_data(f, p, &w_curs, offset, datalen);
	unuse_pack(&w_curs);
	reused++;
	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct sha1file *f,
			  struct object_entry *entry,
			  off_t write_offset)
{
	unsigned long limit;
	off_t len;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!entry->delta)
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (entry->delta->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (entry->delta->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!entry->in_pack)
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
				/* check_object() decided it for us ... */
		to_reuse = usable_delta;
				/* ... but pack split may override that */
	else if (entry->type != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (entry->delta)
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse)
		len = write_no_reuse_object(f, entry, limit, usable_delta);
	else
		len = write_reuse_object(f, entry, limit, usable_delta);
	if (!len)
		return 0;

	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return len;
}

enum write_one_status {
	WRITE_ONE_SKIP = -1, /* already written */
	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
	WRITE_ONE_WRITTEN = 1, /* normal */
	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct sha1file *f,
				       struct object_entry *e,
				       off_t *offset)
{
	off_t size;
	int recursing;

	/*
	 * we set offset to 1 (which is an impossible value) to mark
	 * the fact that this object is involved in "write its base
	 * first before writing a deltified object" recursion.
	 */
	recursing = (e->idx.offset == 1);
	if (recursing) {
		warning("recursive delta detected for object %s",
			sha1_to_hex(e->idx.sha1));
		return WRITE_ONE_RECURSIVE;
	} else if (e->idx.offset || e->preferred_base) {
		/* offset is non zero if object is written already. */
		return WRITE_ONE_SKIP;
	}

	/* if we are deltified, write out base object first. */
	if (e->delta) {
		e->idx.offset = 1; /* now recurse */
		switch (write_one(f, e->delta, offset)) {
		case WRITE_ONE_RECURSIVE:
			/* we cannot depend on this one */
			e->delta = NULL;
			break;
		default:
			break;
		case WRITE_ONE_BREAK:
			e->idx.offset = recursing;
			return WRITE_ONE_BREAK;
		}
	}

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = recursing;
		return WRITE_ONE_BREAK;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die("pack too large for current definition of off_t");
	*offset += size;
	return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const struct object_id *oid, int flag,
		       void *cb_data)
{
	unsigned char peeled[20];
	struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, peeled)) {
		entry = packlist_find(&to_pack, peeled, NULL);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
			       unsigned int *endp,
			       struct object_entry *e)
{
	if (e->filled)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = e->delta_sibling; s; s = s->delta_sibling) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (e->delta_child) {
			add_to_order = 1;
			e = e->delta_child;
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (e->delta_sibling) {
				e = e->delta_sibling;
				continue;
			}
			/* go back to our parent node */
			e = e->delta;
			while (e && !e->delta_sibling) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = e->delta;
			}
			if (!e) {
				/* done- we hit our original root node */
				return;
			}
			/* pass it off to sibling at this level */
			e = e->delta_sibling;
		}
	};
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; root->delta; root = root->delta)
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}

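/*
 * (Added summary:) the write order produced below is: untagged objects
 * in recency order first, then objects at tagged tips, then remaining
 * commits and tags, then trees, and finally everything else grouped by
 * delta family so that bases precede their deltas on disk.
 */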
static struct object_entry **compute_write_order(void)
{
	unsigned int i, wo_end, last_untagged;

	struct object_entry **wo;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		objects[i].delta_child = NULL;
		objects[i].delta_sibling = NULL;
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = to_pack.nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!e->delta)
			continue;
		/* Mark me as the first child */
		e->delta_sibling = e->delta->delta_child;
		e->delta->delta_child = e;
	}

	/*
	 * Mark objects that are at the tip of tags.
	 */
	for_each_tag_ref(mark_tagged, NULL);

	/*
	 * Give the objects in the original recency order until
	 * we see a tagged tip.
	 */
	ALLOC_ARRAY(wo, to_pack.nr_objects);
	for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_COMMIT &&
		    objects[i].type != OBJ_TAG)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_TREE)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (!objects[i].filled)
			add_family_to_write_order(wo, &wo_end, &objects[i]);
	}

	if (wo_end != to_pack.nr_objects)
		die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

	return wo;
}

static off_t write_reused_pack(struct sha1file *f)
{
	unsigned char buffer[8192];
	off_t to_write, total;
	int fd;

	if (!is_pack_valid(reuse_packfile))
		die("packfile is invalid: %s", reuse_packfile->pack_name);

	fd = git_open(reuse_packfile->pack_name);
	if (fd < 0)
		die_errno("unable to open packfile for reuse: %s",
			  reuse_packfile->pack_name);

	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
		die_errno("unable to seek in reused packfile");

	if (reuse_packfile_offset < 0)
		reuse_packfile_offset = reuse_packfile->pack_size - 20;
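	/*
	 * (Added note:) the "- 20" above stops short of the trailing
	 * 20-byte SHA-1 checksum of the reused pack, which must not be
	 * copied into the new stream.
	 */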

	total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

	while (to_write) {
		int read_pack = xread(fd, buffer, sizeof(buffer));

		if (read_pack <= 0)
			die_errno("unable to read from reused packfile");

		if (read_pack > to_write)
			read_pack = to_write;

		sha1write(f, buffer, read_pack);
		to_write -= read_pack;

		/*
		 * We don't know the actual number of objects written,
		 * only how many bytes written, how many bytes total, and
		 * how many objects total. So we can fake it by pretending all
		 * objects we are writing are the same size. This gives us a
		 * smooth progress meter, and at the end it matches the true
		 * answer.
		 */
		written = reuse_packfile_objects *
				(((double)(total - to_write)) / total);
		display_progress(progress_state, written);
	}

	close(fd);
	written = reuse_packfile_objects;
	display_progress(progress_state, written);
	return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct sha1file *f;
	off_t offset;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress(_("Writing objects"), nr_result);
	ALLOC_ARRAY(written_list, to_pack.nr_objects);
	write_order = compute_write_order();

	do {
		unsigned char sha1[20];
		char *pack_tmp_name = NULL;

		if (pack_to_stdout)
			f = sha1fd_throughput(1, "<stdout>", progress_state);
		else
			f = create_tmp_packfile(&pack_tmp_name);

		offset = write_pack_header(f, nr_remaining);

		if (reuse_packfile) {
			off_t packfile_size;
			assert(pack_to_stdout);

			packfile_size = write_reused_pack(f);
			offset += packfile_size;
		}

		nr_written = 0;
		for (; i < to_pack.nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import
		 */
		if (pack_to_stdout) {
			sha1close(f, sha1, CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			sha1close(f, sha1, CSUM_FSYNC);
		} else {
			int fd = sha1close(f, sha1, 0);
			fixup_pack_header_footer(fd, sha1, pack_tmp_name,
						 nr_written, sha1, offset);
			close(fd);
			if (write_bitmap_index) {
				warning(_(no_split_warning));
				write_bitmap_index = 0;
			}
		}

		if (!pack_to_stdout) {
			struct stat st;
			struct strbuf tmpname = STRBUF_INIT;

			/*
			 * Packs are runtime accessed in their mtime
			 * order since newer packs are more likely to contain
			 * younger objects.  So if we are creating multiple
			 * packs then we should modify the mtime of later ones
			 * to preserve this property.
			 */
			if (stat(pack_tmp_name, &st) < 0) {
				warning_errno("failed to stat %s", pack_tmp_name);
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(pack_tmp_name, &utb) < 0)
					warning_errno("failed utime() on %s", pack_tmp_name);
			}

			strbuf_addf(&tmpname, "%s-", base_name);

			if (write_bitmap_index) {
				bitmap_writer_set_checksum(sha1);
				bitmap_writer_build_type_index(written_list, nr_written);
			}

			finish_tmp_packfile(&tmpname, pack_tmp_name,
					    written_list, nr_written,
					    &pack_idx_opts, sha1);

			if (write_bitmap_index) {
				strbuf_addf(&tmpname, "%s.bitmap", sha1_to_hex(sha1));

				stop_progress(&progress_state);

				bitmap_writer_show_progress(progress);
				bitmap_writer_reuse_bitmaps(&to_pack);
				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
				bitmap_writer_build(&to_pack);
				bitmap_writer_finish(written_list, nr_written,
						     tmpname.buf, write_bitmap_options);
				write_bitmap_index = 0;
			}

			strbuf_release(&tmpname);
			free(pack_tmp_name);
			puts(sha1_to_hex(sha1));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < to_pack.nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die("wrote %"PRIu32" objects while expecting %"PRIu32,
			written, nr_result);
}

static int no_try_delta(const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("delta", NULL);
	if (git_check_attr(path, check))
		return 0;
	if (ATTR_FALSE(check->items[0].value))
		return 1;
	return 0;
}
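
/*
 * (Added note:) this consults the "delta" gitattribute, so a line such
 * as "*.zip -delta" in .gitattributes keeps matching paths out of the
 * delta search entirely.
 */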

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const unsigned char *sha1,
				int exclude,
				uint32_t *index_pos)
{
	struct object_entry *entry;

	entry = packlist_find(&to_pack, sha1, index_pos);
	if (!entry)
		return 0;

	if (exclude) {
		if (!entry->preferred_base)
			nr_result--;
		entry->preferred_base = 1;
	}

	return 1;
}

static int want_found_object(int exclude, struct packed_git *p)
{
	if (exclude)
		return 1;
	if (incremental)
		return 0;

	/*
	 * When asked to do --local (do not include an object that appears in a
	 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
	 * an object that appears in a pack marked with .keep), finding a pack
	 * that matches the criteria is sufficient for us to decide to omit it.
	 * However, even if this pack does not satisfy the criteria, we need to
	 * make sure no copy of this object appears in _any_ pack that would
	 * make us omit the object, so we need to check all the packs.
	 *
	 * We can however first check whether these options can possibly matter;
	 * if they do not matter, we know we want the object in the generated pack.
	 * Otherwise, we signal "-1" at the end to tell the caller that we do
	 * not know either way, and it needs to check more packs.
	 */
	if (!ignore_packed_keep &&
	    (!local || !have_non_local_packs))
		return 1;

	if (local && !p->pack_local)
		return 0;
	if (ignore_packed_keep && p->pack_local && p->pack_keep)
		return 0;

	/* we don't know yet; keep looking for more packs */
	return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const unsigned char *sha1,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{
	struct mru_entry *entry;
	int want;

	if (!exclude && local && has_loose_object_nonlocal(sha1))
		return 0;

	/*
	 * If we already know the pack object lives in, start checks from that
	 * pack - in the usual case when neither --local was given nor .keep files
	 * are present we will determine the answer right now.
	 */
	if (*found_pack) {
		want = want_found_object(exclude, *found_pack);
		if (want != -1)
			return want;
	}

	for (entry = packed_git_mru->head; entry; entry = entry->next) {
		struct packed_git *p = entry->item;
		off_t offset;

		if (p == *found_pack)
			offset = *found_offset;
		else
			offset = find_pack_entry_one(sha1, p);

		if (offset) {
			if (!*found_pack) {
				if (!is_pack_valid(p))
					continue;
				*found_offset = offset;
				*found_pack = p;
			}
			want = want_found_object(exclude, p);
			if (!exclude && want > 0)
				mru_mark(packed_git_mru, entry);
			if (want != -1)
				return want;
		}
	}

	return 1;
}

static void create_object_entry(const unsigned char *sha1,
				enum object_type type,
				uint32_t hash,
				int exclude,
				int no_try_delta,
				uint32_t index_pos,
				struct packed_git *found_pack,
				off_t found_offset)
{
	struct object_entry *entry;

	entry = packlist_alloc(&to_pack, sha1, index_pos);
	entry->hash = hash;
	if (type)
		entry->type = type;
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		entry->in_pack = found_pack;
		entry->in_pack_offset = found_offset;
	}

	entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const unsigned char *sha1, enum object_type type,
			    const char *name, int exclude)
{
	struct packed_git *found_pack = NULL;
	off_t found_offset = 0;
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, exclude, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {
		/* The pack is missing an object, so it will not have closure */
		if (write_bitmap_index) {
			warning(_(no_closure_warning));
			write_bitmap_index = 0;
		}
		return 0;
	}

	create_object_entry(sha1, type, pack_name_hash(name),
			    exclude, name && no_try_delta(name),
			    index_pos, found_pack, found_offset);

	display_progress(progress_state, nr_result);
	return 1;
}

static int add_object_entry_from_bitmap(const unsigned char *sha1,
					enum object_type type,
					int flags, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, 0, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, 0, &pack, &offset))
		return 0;

	create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);

	display_progress(progress_state, nr_result);
	return 1;
}

struct pbase_tree_cache {
	unsigned char sha1[20];
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const unsigned char *sha1)
{
	return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}
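
/*
 * (Added note:) together these helpers make pbase_tree_cache behave
 * like a small open-addressing hash table: the first SHA-1 byte picks
 * one of 256 buckets and pbase_tree_get() probes at most 8 consecutive
 * slots, evicting an unreferenced entry when it can.
 */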

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it or find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(sha1);
	int available_ix = -1;

	/* pbase-tree-cache acts as a limited hashtable.
	 * your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && !hashcmp(ent->sha1, sha1)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_sha1_file(sha1, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	hashcpy(nent->sha1, sha1);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

	while (tree_entry(tree,&entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(&entry) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(entry.oid->hash,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name+cmplen+1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(entry.oid->hash);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
static int done_pbase_path_pos(unsigned hash)
{
	int lo = 0;
	int hi = done_pbase_paths_num;
	while (lo < hi) {
		int mi = (hi + lo) / 2;
		if (done_pbase_paths[mi] == hash)
			return mi;
		if (done_pbase_paths[mi] < hash)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}
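
/*
 * (Added note:) done_pbase_paths is kept sorted in *descending* hash
 * order (see the memmove in check_pbase_path below), and a miss is
 * reported as -insertion_pos - 1, mirroring the usual bsearch
 * convention.
 */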

static int check_pbase_path(unsigned hash)
{
	int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
	if (0 <= pos)
		return 1;
	pos = -pos - 1;
	ALLOC_GROW(done_pbase_paths,
		   done_pbase_paths_num + 1,
		   done_pbase_paths_alloc);
	done_pbase_paths_num++;
	if (pos < done_pbase_paths_num)
		memmove(done_pbase_paths + pos + 1,
			done_pbase_paths + pos,
			(done_pbase_paths_num - pos - 1) * sizeof(unsigned));
	done_pbase_paths[pos] = hash;
	return 0;
}

static void add_preferred_base_object(const char *name)
{
	struct pbase_tree *it;
	int cmplen;
	unsigned hash = pack_name_hash(name);

	if (!num_preferred_base || check_pbase_path(hash))
		return;

	cmplen = name_cmp_len(name);
	for (it = pbase_tree; it; it = it->next) {
		if (cmplen == 0) {
			add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
		}
		else {
			struct tree_desc tree;
			init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
			add_pbase_object(&tree, name, cmplen, name);
		}
	}
}

static void add_preferred_base(unsigned char *sha1)
{
	struct pbase_tree *it;
	void *data;
	unsigned long size;
	unsigned char tree_sha1[20];

	if (window <= num_preferred_base++)
		return;

	data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
	if (!data)
		return;

	for (it = pbase_tree; it; it = it->next) {
		if (!hashcmp(it->pcache.sha1, tree_sha1)) {
			free(data);
			return;
		}
	}

	it = xcalloc(1, sizeof(*it));
	it->next = pbase_tree;
	pbase_tree = it;

	hashcpy(it->pcache.sha1, tree_sha1);
	it->pcache.tree_data = data;
	it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
	struct pbase_tree *it;
	unsigned i;

	it = pbase_tree;
	pbase_tree = NULL;
	while (it) {
		struct pbase_tree *this = it;
		it = this->next;
		free(this->pcache.tree_data);
		free(this);
	}

	for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
		if (!pbase_tree_cache[i])
			continue;
		free(pbase_tree_cache[i]->tree_data);
		free(pbase_tree_cache[i]);
		pbase_tree_cache[i] = NULL;
	}

	free(done_pbase_paths);
	done_pbase_paths = NULL;
	done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

static void check_object(struct object_entry *entry)
{
	if (entry->in_pack) {
		struct packed_git *p = entry->in_pack;
		struct pack_window *w_curs = NULL;
		const unsigned char *base_ref = NULL;
		struct object_entry *base_entry;
		unsigned long used, used_0;
		unsigned long avail;
		off_t ofs;
		unsigned char *buf, c;

		buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

		/*
		 * We want in_pack_type even if we do not reuse delta
		 * since non-delta representations could still be reused.
		 */
		used = unpack_object_header_buffer(buf, avail,
						   &entry->in_pack_type,
						   &entry->size);
		if (used == 0)
			goto give_up;

		/*
		 * Determine if this is a delta and if so whether we can
		 * reuse it or not.  Otherwise let's find out as cheaply as
		 * possible what the actual type and size for this object is.
		 */
		switch (entry->in_pack_type) {
		default:
			/* Not a delta hence we've already got all we need. */
			entry->type = entry->in_pack_type;
			entry->in_pack_header_size = used;
			if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		case OBJ_REF_DELTA:
			if (reuse_delta && !entry->preferred_base)
				base_ref = use_pack(p, &w_curs,
						entry->in_pack_offset + used, NULL);
			entry->in_pack_header_size = used + 20;
			break;
		case OBJ_OFS_DELTA:
			buf = use_pack(p, &w_curs,
				       entry->in_pack_offset + used, NULL);
			used_0 = 0;
			c = buf[used_0++];
			ofs = c & 127;
			while (c & 128) {
				ofs += 1;
				if (!ofs || MSB(ofs, 7)) {
					error("delta base offset overflow in pack for %s",
					      sha1_to_hex(entry->idx.sha1));
					goto give_up;
				}
				c = buf[used_0++];
				ofs = (ofs << 7) + (c & 127);
			}
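			/*
			 * (Added note:) the loop above inverts the biased
			 * varint written by write_no_reuse_object(); e.g.
			 * bytes { 0xa3, 0x34 } decode back to ofs = 0x1234.
			 */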
			ofs = entry->in_pack_offset - ofs;
			if (ofs <= 0 || ofs >= entry->in_pack_offset) {
				error("delta base offset out of bound for %s",
				      sha1_to_hex(entry->idx.sha1));
				goto give_up;
			}
			if (reuse_delta && !entry->preferred_base) {
				struct revindex_entry *revidx;
				revidx = find_pack_revindex(p, ofs);
				if (!revidx)
					goto give_up;
				base_ref = nth_packed_object_sha1(p, revidx->nr);
			}
			entry->in_pack_header_size = used + used_0;
			break;
		}

		if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
			/*
			 * If base_ref was set above that means we wish to
			 * reuse delta data, and we even found that base
			 * in the list of objects we want to pack. Goodie!
			 *
			 * Depth value does not matter - find_deltas() will
			 * never consider reused delta as the base object to
			 * deltify other objects against, in order to avoid
			 * circular deltas.
			 */
			entry->type = entry->in_pack_type;
			entry->delta = base_entry;
			entry->delta_size = entry->size;
			entry->delta_sibling = base_entry->delta_child;
			base_entry->delta_child = entry;
			unuse_pack(&w_curs);
			return;
		}

		if (entry->type) {
			/*
			 * This must be a delta and we already know what the
			 * final object type is.  Let's extract the actual
			 * object size from the delta header.
			 */
			entry->size = get_size_from_delta(p, &w_curs,
					entry->in_pack_offset + entry->in_pack_header_size);
			if (entry->size == 0)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		}

		/*
		 * No choice but to fall back to the recursive delta walk
		 * with sha1_object_info() to find out about the object type
		 * at this point...
		 */
		give_up:
		unuse_pack(&w_curs);
	}

	entry->type = sha1_object_info(entry->idx.sha1, &entry->size);
	/*
	 * The error condition is checked in prepare_pack().  This is
	 * to permit a missing preferred base object to be ignored
	 * as a preferred base.  Doing so can result in a larger
	 * pack file, but the transfer will still take place.
	 */
}

static int pack_offset_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	/* avoid filesystem thrashing with loose objects */
	if (!a->in_pack && !b->in_pack)
		return hashcmp(a->idx.sha1, b->idx.sha1);

	if (a->in_pack < b->in_pack)
		return -1;
	if (a->in_pack > b->in_pack)
		return 1;
	return a->in_pack_offset < b->in_pack_offset ? -1 :
			(a->in_pack_offset > b->in_pack_offset);
}

/*
 * Drop an on-disk delta we were planning to reuse. Naively, this would
 * just involve blanking out the "delta" field, but we have to deal
 * with some extra book-keeping:
 *
 *   1. Removing ourselves from the delta_sibling linked list.
 *
 *   2. Updating our size/type to the non-delta representation. These were
 *      either not recorded initially (size) or overwritten with the delta type
 *      (type) when check_object() decided to reuse the delta.
 *
 *   3. Resetting our delta depth, as we are now a base object.
 */
static void drop_reused_delta(struct object_entry *entry)
{
	struct object_entry **p = &entry->delta->delta_child;
	struct object_info oi = OBJECT_INFO_INIT;

	while (*p) {
		if (*p == entry)
			*p = (*p)->delta_sibling;
		else
			p = &(*p)->delta_sibling;
	}
	entry->delta = NULL;
	entry->depth = 0;

	oi.sizep = &entry->size;
	oi.typep = &entry->type;
	if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
		/*
		 * We failed to get the info from this pack for some reason;
		 * fall back to sha1_object_info, which may find another copy.
		 * And if that fails, the error will be recorded in entry->type
		 * and dealt with in prepare_pack().
		 */
		entry->type = sha1_object_info(entry->idx.sha1, &entry->size);
	}
}

/*
 * Follow the chain of deltas from this entry onward, throwing away any links
 * that cause us to hit a cycle (as determined by the DFS state flags in
 * the entries).
 *
 * We also detect too-long reused chains that would violate our --depth
 * limit.
 */
static void break_delta_chains(struct object_entry *entry)
{
	/*
	 * The actual depth of each object we will write is stored as an int,
	 * as it cannot exceed our int "depth" limit. But before we break
	 * chains based on that limit, we may potentially go as deep as the
	 * number of objects, which is elsewhere bounded to a uint32_t.
	 */
	uint32_t total_depth;
	struct object_entry *cur, *next;

	for (cur = entry, total_depth = 0;
	     cur;
	     cur = cur->delta, total_depth++) {
		if (cur->dfs_state == DFS_DONE) {
			/*
			 * We've already seen this object and know it isn't
			 * part of a cycle. We do need to append its depth
			 * to our count.
			 */
			total_depth += cur->depth;
			break;
		}

		/*
		 * We break cycles before looping, so an ACTIVE state (or any
		 * other cruft which made its way into the state variable)
		 * is a bug.
		 */
		if (cur->dfs_state != DFS_NONE)
			die("BUG: confusing delta dfs state in first pass: %d",
			    cur->dfs_state);

		/*
		 * Now we know this is the first time we've seen the object. If
		 * it's not a delta, we're done traversing, but we'll mark it
		 * done to save time on future traversals.
		 */
		if (!cur->delta) {
			cur->dfs_state = DFS_DONE;
			break;
		}

		/*
		 * Mark ourselves as active and see if the next step causes
		 * us to cycle to another active object. It's important to do
		 * this _before_ we loop, because it impacts where we make the
		 * cut, and thus how our total_depth counter works.
		 * E.g., We may see a partial loop like:
		 *
		 *   A -> B -> C -> D -> B
		 *
		 * Cutting B->C breaks the cycle. But now the depth of A is
		 * only 1, and our total_depth counter is at 3. The size of the
		 * error is always one less than the size of the cycle we
		 * broke. Commits C and D were "lost" from A's chain.
		 *
		 * If we instead cut D->B, then the depth of A is correct at 3.
		 * We keep all commits in the chain that we examined.
		 */
		cur->dfs_state = DFS_ACTIVE;
		if (cur->delta->dfs_state == DFS_ACTIVE) {
			drop_reused_delta(cur);
			cur->dfs_state = DFS_DONE;
			break;
		}
	}

	/*
	 * And now that we've gone all the way to the bottom of the chain, we
	 * need to clear the active flags and set the depth fields as
	 * appropriate. Unlike the loop above, which can quit when it drops a
	 * delta, we need to keep going to look for more depth cuts. So we need
	 * an extra "next" pointer to keep going after we reset cur->delta.
	 */
	for (cur = entry; cur; cur = next) {
		next = cur->delta;

		/*
		 * We should have a chain of zero or more ACTIVE states down to
		 * a final DONE. We can quit after the DONE, because either it
		 * has no bases, or we've already handled them in a previous
		 * call.
		 */
		if (cur->dfs_state == DFS_DONE)
			break;
		else if (cur->dfs_state != DFS_ACTIVE)
			die("BUG: confusing delta dfs state in second pass: %d",
			    cur->dfs_state);

		/*
		 * If the total_depth is more than depth, then we need to snip
		 * the chain into two or more smaller chains that don't exceed
		 * the maximum depth. Most of the resulting chains will contain
		 * (depth + 1) entries (i.e., depth deltas plus one base), and
		 * the last chain (i.e., the one containing entry) will contain
		 * whatever entries are left over, namely
		 * (total_depth % (depth + 1)) of them.
		 *
		 * Since we are iterating towards decreasing depth, we need to
		 * decrement total_depth as we go, and we need to write to the
		 * entry what its final depth will be after all of the
		 * snipping. Since we're snipping into chains of length (depth
		 * + 1) entries, the final depth of an entry will be its
		 * original depth modulo (depth + 1). Any time we encounter an
		 * entry whose final depth is supposed to be zero, we snip it
		 * from its delta base, thereby making it so.
		 */
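		/*
		 * (Added worked example:) with depth = 3 and a reused
		 * chain of total_depth = 9, entries get depths 1, 0, 3,
		 * 2, 1, 0, 3, 2, 1 walking from the tip down; each entry
		 * that lands on depth 0 is snipped from its base, leaving
		 * chains no longer than 3 deltas plus a base.
		 */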
		cur->depth = (total_depth--) % (depth + 1);
		if (!cur->depth)
			drop_reused_delta(cur);

		cur->dfs_state = DFS_DONE;
	}
}

static void get_object_details(void)
{
	uint32_t i;
	struct object_entry **sorted_by_offset;

	sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
	for (i = 0; i < to_pack.nr_objects; i++)
		sorted_by_offset[i] = to_pack.objects + i;
	QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = sorted_by_offset[i];
		check_object(entry);
		if (big_file_threshold < entry->size)
			entry->no_try_delta = 1;
	}

	/*
	 * This must happen in a second pass, since we rely on the delta
	 * information for the whole list being completed.
	 */
	for (i = 0; i < to_pack.nr_objects; i++)
		break_delta_chains(&to_pack.objects[i]);

	free(sorted_by_offset);
}

/*
 * We search for deltas in a list sorted by type, by filename hash, and then
 * by size, so that we see progressively smaller and smaller files.
 * That's because we prefer deltas to be from the bigger file
 * to the smaller -- deletes are potentially cheaper, but perhaps
 * more importantly, the bigger file is likely the more recent
 * one.  The deepest deltas are therefore the oldest objects which are
 * less susceptible to be accessed often.
 */
static int type_size_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	if (a->type > b->type)
		return -1;
	if (a->type < b->type)
		return 1;
	if (a->hash > b->hash)
		return -1;
	if (a->hash < b->hash)
		return 1;
	if (a->preferred_base > b->preferred_base)
		return -1;
	if (a->preferred_base < b->preferred_base)
		return 1;
	if (a->size > b->size)
		return -1;
	if (a->size < b->size)
		return 1;
	return a < b ? -1 : (a > b);  /* newest first */
}

struct unpacked {
	struct object_entry *entry;
	void *data;
	struct delta_index *index;
	unsigned depth;
};

static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
			   unsigned long delta_size)
{
	if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
		return 0;

	if (delta_size < cache_max_small_delta_size)
		return 1;

	/* cache delta, if objects are large enough compared to delta size */
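	/* (added reading:) roughly src/1MB + trg/2MB > delta/1KB */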
	if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
		return 1;

	return 0;
}

#ifndef NO_PTHREADS

static pthread_mutex_t read_mutex;
#define read_lock()		pthread_mutex_lock(&read_mutex)
#define read_unlock()		pthread_mutex_unlock(&read_mutex)

static pthread_mutex_t cache_mutex;
#define cache_lock()		pthread_mutex_lock(&cache_mutex)
#define cache_unlock()		pthread_mutex_unlock(&cache_mutex)

static pthread_mutex_t progress_mutex;
#define progress_lock()		pthread_mutex_lock(&progress_mutex)
#define progress_unlock()	pthread_mutex_unlock(&progress_mutex)

#else

#define read_lock()		(void)0
#define read_unlock()		(void)0
#define cache_lock()		(void)0
#define cache_unlock()		(void)0
#define progress_lock()		(void)0
#define progress_unlock()	(void)0

#endif

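/*
 * try_delta() returns -1 to tell the caller to stop pairing the
 * current target with further candidates (incompatible types), 0 when
 * no usable delta was found, and 1 when a new best delta was recorded.
 * The max_size budget computed below shrinks with the candidate's
 * depth; e.g. (illustrative numbers) with max_depth 10, an undeltified
 * 1000-byte target allows (1000/2 - 20) * (10 - 4) / (10 - 1 + 1) =
 * 288 bytes of delta against a source already at depth 4, biasing the
 * search toward shallow bases.
 */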
static int try_delta(struct unpacked *trg, struct unpacked *src,
		     unsigned max_depth, unsigned long *mem_usage)
{
	struct object_entry *trg_entry = trg->entry;
	struct object_entry *src_entry = src->entry;
	unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
	unsigned ref_depth;
	enum object_type type;
	void *delta_buf;

	/* Don't bother doing diffs between different types */
	if (trg_entry->type != src_entry->type)
		return -1;

	/*
	 * We do not bother to try a delta that we discarded on an
	 * earlier try, but only when reusing delta data.  Note that
	 * src_entry that is marked as the preferred_base should always
	 * be considered, as even if we produce a suboptimal delta against
	 * it, we will still save the transfer cost, as we already know
	 * the other side has it and we won't send src_entry at all.
	 */
	if (reuse_delta && trg_entry->in_pack &&
	    trg_entry->in_pack == src_entry->in_pack &&
	    !src_entry->preferred_base &&
	    trg_entry->in_pack_type != OBJ_REF_DELTA &&
	    trg_entry->in_pack_type != OBJ_OFS_DELTA)
		return 0;

	/* Let's not bust the allowed depth. */
	if (src->depth >= max_depth)
		return 0;

	/* Now some size filtering heuristics. */
	trg_size = trg_entry->size;
	if (!trg_entry->delta) {
		max_size = trg_size/2 - 20;
		ref_depth = 1;
	} else {
		max_size = trg_entry->delta_size;
		ref_depth = trg->depth;
	}
	max_size = (uint64_t)max_size * (max_depth - src->depth) /
						(max_depth - ref_depth + 1);
	if (max_size == 0)
		return 0;
	src_size = src_entry->size;
	sizediff = src_size < trg_size ? trg_size - src_size : 0;
	if (sizediff >= max_size)
		return 0;
	if (trg_size < src_size / 32)
		return 0;

	/* Load data if not already done */
	if (!trg->data) {
		read_lock();
		trg->data = read_sha1_file(trg_entry->idx.sha1, &type, &sz);
		read_unlock();
		if (!trg->data)
			die("object %s cannot be read",
			    sha1_to_hex(trg_entry->idx.sha1));
		if (sz != trg_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);
		*mem_usage += sz;
	}
	if (!src->data) {
		read_lock();
		src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz);
		read_unlock();
		if (!src->data) {
			if (src_entry->preferred_base) {
				static int warned = 0;
				if (!warned++)
					warning("object %s cannot be read",
						sha1_to_hex(src_entry->idx.sha1));
				/*
				 * Those objects are not included in the
				 * resulting pack.  Be resilient and ignore
				 * them if they can't be read, in case the
				 * pack could be created nevertheless.
				 */
				return 0;
			}
			die("object %s cannot be read",
			    sha1_to_hex(src_entry->idx.sha1));
		}
		if (sz != src_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    sha1_to_hex(src_entry->idx.sha1), sz, src_size);
		*mem_usage += sz;
	}
	if (!src->index) {
		src->index = create_delta_index(src->data, src_size);
		if (!src->index) {
			static int warned = 0;
			if (!warned++)
				warning("suboptimal pack - out of memory");
			return 0;
		}
		*mem_usage += sizeof_delta_index(src->index);
	}

	delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
	if (!delta_buf)
		return 0;

	if (trg_entry->delta) {
		/* Prefer only shallower same-sized deltas. */
		if (delta_size == trg_entry->delta_size &&
		    src->depth + 1 >= trg->depth) {
			free(delta_buf);
			return 0;
		}
	}

	/*
	 * Handle memory allocation outside of the cache
	 * accounting lock.  Compiler will optimize the strangeness
	 * away when NO_PTHREADS is defined.
	 */
	free(trg_entry->delta_data);
	cache_lock();
	if (trg_entry->delta_data) {
		delta_cache_size -= trg_entry->delta_size;
		trg_entry->delta_data = NULL;
	}
	if (delta_cacheable(src_size, trg_size, delta_size)) {
		delta_cache_size += delta_size;
		cache_unlock();
		trg_entry->delta_data = xrealloc(delta_buf, delta_size);
	} else {
		cache_unlock();
		free(delta_buf);
	}

	trg_entry->delta = src_entry;
	trg_entry->delta_size = delta_size;
	trg->depth = src->depth + 1;

	return 1;
}

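/*
 * check_delta_limit() recursively walks the delta_child/delta_sibling
 * tree under "me" and returns the depth of the deepest chain hanging
 * off it.  find_deltas() subtracts that from the configured depth so
 * that re-deltifying an object which already serves as a base cannot
 * push its existing dependents past the --depth limit.
 */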
static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
	struct object_entry *child = me->delta_child;
	unsigned int m = n;
	while (child) {
		unsigned int c = check_delta_limit(child, n + 1);
		if (m < c)
			m = c;
		child = child->delta_sibling;
	}
	return m;
}

static unsigned long free_unpacked(struct unpacked *n)
{
	unsigned long freed_mem = sizeof_delta_index(n->index);
	free_delta_index(n->index);
	n->index = NULL;
	if (n->data) {
		freed_mem += n->entry->size;
		free(n->data);
		n->data = NULL;
	}
	n->entry = NULL;
	n->depth = 0;
	return freed_mem;
}

static void find_deltas(struct object_entry **list, unsigned *list_size,
			int window, int depth, unsigned *processed)
{
	uint32_t i, idx = 0, count = 0;
	struct unpacked *array;
	unsigned long mem_usage = 0;

	array = xcalloc(window, sizeof(struct unpacked));

	for (;;) {
		struct object_entry *entry;
		struct unpacked *n = array + idx;
		int j, max_depth, best_base = -1;

		progress_lock();
		if (!*list_size) {
			progress_unlock();
			break;
		}
		entry = *list++;
		(*list_size)--;
		if (!entry->preferred_base) {
			(*processed)++;
			display_progress(progress_state, *processed);
		}
		progress_unlock();

		mem_usage -= free_unpacked(n);
		n->entry = entry;

		while (window_memory_limit &&
		       mem_usage > window_memory_limit &&
		       count > 1) {
			uint32_t tail = (idx + window - count) % window;
			mem_usage -= free_unpacked(array + tail);
			count--;
		}

		/* We do not compute delta to *create* objects we are not
		 * going to pack.
		 */
		if (entry->preferred_base)
			goto next;

		/*
		 * If the current object is at the pack edge, take the depth
		 * of the objects that depend on the current object into
		 * account; otherwise they would become too deep.
		 */
		max_depth = depth;
		if (entry->delta_child) {
			max_depth -= check_delta_limit(entry, 0);
			if (max_depth <= 0)
				goto next;
		}

		j = window;
		while (--j > 0) {
			int ret;
			uint32_t other_idx = idx + j;
			struct unpacked *m;
			if (other_idx >= window)
				other_idx -= window;
			m = array + other_idx;
			if (!m->entry)
				break;
			ret = try_delta(n, m, max_depth, &mem_usage);
			if (ret < 0)
				break;
			else if (ret > 0)
				best_base = other_idx;
		}

		/*
		 * If we decided to cache the delta data, then it is best
		 * to compress it right away.  First because we have to do
		 * it anyway, and doing it here while we're threaded will
		 * save a lot of time in the non-threaded write phase,
		 * as well as allow for caching more deltas within
		 * the same cache size limit.
		 * ...
		 * But only if not writing to stdout, since in that case
		 * the network is most likely throttling writes anyway,
		 * and therefore it is best to go to the write phase ASAP
		 * instead, as we can afford spending more time compressing
		 * between writes at that moment.
		 */
		if (entry->delta_data && !pack_to_stdout) {
			entry->z_delta_size = do_compress(&entry->delta_data,
							  entry->delta_size);
			cache_lock();
			delta_cache_size -= entry->delta_size;
			delta_cache_size += entry->z_delta_size;
			cache_unlock();
		}

		/* if we made n a delta, and if n is already at max
		 * depth, leaving it in the window is pointless.  we
		 * should evict it first.
		 */
		if (entry->delta && max_depth <= n->depth)
			continue;

		/*
		 * Move the best delta base up in the window, after the
		 * currently deltified object, to keep it longer.  It will
		 * be the first base object to be attempted next.
		 */
		if (entry->delta) {
			struct unpacked swap = array[best_base];
			int dist = (window + idx - best_base) % window;
			int dst = best_base;
			while (dist--) {
				int src = (dst + 1) % window;
				array[dst] = array[src];
				dst = src;
			}
			array[dst] = swap;
		}

		next:
		idx++;
		if (count + 1 < window)
			count++;
		if (idx >= window)
			idx = 0;
	}

	for (i = 0; i < window; ++i) {
		free_delta_index(array[i].index);
		free(array[i].data);
	}
	free(array);
}
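
/*
 * A sketch of the window mechanics above (illustrative): with
 * --window=10 the array acts as a ring buffer holding the ten most
 * recently examined candidates.  Each new entry is compared against
 * up to nine earlier ones via try_delta(); the winning base is
 * rotated to sit just behind the current slot so it is tried first
 * for the next object, and slots are recycled as idx wraps (or freed
 * early when --window-memory is exceeded).
 */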

#ifndef NO_PTHREADS

static void try_to_free_from_threads(size_t size)
{
	read_lock();
	release_pack_memory(size);
	read_unlock();
}

static try_to_free_t old_try_to_free_routine;

/*
 * The main thread waits on the condition that (at least) one of the workers
 * has stopped working (which is indicated in the .working member of
 * struct thread_params).
 * When a work thread has completed its work, it sets .working to 0 and
 * signals the main thread and waits on the condition that .data_ready
 * becomes 1.
 */

struct thread_params {
	pthread_t thread;
	struct object_entry **list;
	unsigned list_size;
	unsigned remaining;
	int window;
	int depth;
	int working;
	int data_ready;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	unsigned *processed;
};

static pthread_cond_t progress_cond;

/*
 * Mutex and condition variable can't be statically-initialized on Windows.
 */
static void init_threaded_search(void)
{
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&cache_mutex, NULL);
	pthread_mutex_init(&progress_mutex, NULL);
	pthread_cond_init(&progress_cond, NULL);
	old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
}

static void cleanup_threaded_search(void)
{
	set_try_to_free_routine(old_try_to_free_routine);
	pthread_cond_destroy(&progress_cond);
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&cache_mutex);
	pthread_mutex_destroy(&progress_mutex);
}

static void *threaded_find_deltas(void *arg)
{
	struct thread_params *me = arg;

	while (me->remaining) {
		find_deltas(me->list, &me->remaining,
			    me->window, me->depth, me->processed);

		progress_lock();
		me->working = 0;
		pthread_cond_signal(&progress_cond);
		progress_unlock();

		/*
		 * We must not set ->data_ready before we wait on the
		 * condition because the main thread may have set it to 1
		 * before we get here. In order to be sure that new
		 * work is available if we see 1 in ->data_ready, it
		 * was initialized to 0 before this thread was spawned
		 * and we reset it to 0 right away.
		 */
		pthread_mutex_lock(&me->mutex);
		while (!me->data_ready)
			pthread_cond_wait(&me->cond, &me->mutex);
		me->data_ready = 0;
		pthread_mutex_unlock(&me->mutex);
	}
	/* leave ->working 1 so that this doesn't get more work assigned */
	return NULL;
}

static void ll_find_deltas(struct object_entry **list, unsigned list_size,
			   int window, int depth, unsigned *processed)
{
	struct thread_params *p;
	int i, ret, active_threads = 0;

	init_threaded_search();

	if (delta_search_threads <= 1) {
		find_deltas(list, &list_size, window, depth, processed);
		cleanup_threaded_search();
		return;
	}
	if (progress > pack_to_stdout)
		fprintf(stderr, "Delta compression using up to %d threads.\n",
				delta_search_threads);
	p = xcalloc(delta_search_threads, sizeof(*p));

	/* Partition the work amongst work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		unsigned sub_size = list_size / (delta_search_threads - i);

		/* don't use too small segments or no deltas will be found */
		if (sub_size < 2*window && i+1 < delta_search_threads)
			sub_size = 0;

		p[i].window = window;
		p[i].depth = depth;
		p[i].processed = processed;
		p[i].working = 1;
		p[i].data_ready = 0;

		/* try to split chunks on "path" boundaries */
		while (sub_size && sub_size < list_size &&
		       list[sub_size]->hash &&
		       list[sub_size]->hash == list[sub_size-1]->hash)
			sub_size++;

		p[i].list = list;
		p[i].list_size = sub_size;
		p[i].remaining = sub_size;

		list += sub_size;
		list_size -= sub_size;
	}

	/* Start work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		if (!p[i].list_size)
			continue;
		pthread_mutex_init(&p[i].mutex, NULL);
		pthread_cond_init(&p[i].cond, NULL);
		ret = pthread_create(&p[i].thread, NULL,
				     threaded_find_deltas, &p[i]);
		if (ret)
			die("unable to create thread: %s", strerror(ret));
		active_threads++;
	}

	/*
	 * Now let's wait for work completion.  Each time a thread is done
	 * with its work, we steal half of the remaining work from the
	 * thread with the largest number of unprocessed objects and give
	 * it to that newly idle thread.  This ensures good load balancing
	 * until the remaining object list segments are simply too short
	 * to be worth splitting anymore.
	 */
	while (active_threads) {
		struct thread_params *target = NULL;
		struct thread_params *victim = NULL;
		unsigned sub_size = 0;

		progress_lock();
		for (;;) {
			for (i = 0; !target && i < delta_search_threads; i++)
				if (!p[i].working)
					target = &p[i];
			if (target)
				break;
			pthread_cond_wait(&progress_cond, &progress_mutex);
		}

		for (i = 0; i < delta_search_threads; i++)
			if (p[i].remaining > 2*window &&
			    (!victim || victim->remaining < p[i].remaining))
				victim = &p[i];
		if (victim) {
			sub_size = victim->remaining / 2;
			list = victim->list + victim->list_size - sub_size;
			while (sub_size && list[0]->hash &&
			       list[0]->hash == list[-1]->hash) {
				list++;
				sub_size--;
			}
			if (!sub_size) {
				/*
				 * It is possible for some "paths" to have
				 * so many objects that no hash boundary
				 * might be found.  Let's just steal the
				 * exact half in that case.
				 */
				sub_size = victim->remaining / 2;
				list -= sub_size;
			}
			target->list = list;
			victim->list_size -= sub_size;
			victim->remaining -= sub_size;
		}
		target->list_size = sub_size;
		target->remaining = sub_size;
		target->working = 1;
		progress_unlock();

		pthread_mutex_lock(&target->mutex);
		target->data_ready = 1;
		pthread_cond_signal(&target->cond);
		pthread_mutex_unlock(&target->mutex);

		if (!sub_size) {
			pthread_join(target->thread, NULL);
			pthread_cond_destroy(&target->cond);
			pthread_mutex_destroy(&target->mutex);
			active_threads--;
		}
	}
	cleanup_threaded_search();
	free(p);
}

#else
#define ll_find_deltas(l, s, w, d, p)	find_deltas(l, &s, w, d, p)
#endif
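
/*
 * Partitioning example for ll_find_deltas() (illustrative numbers):
 * 1000 objects over 4 threads gives initial segments of 250 entries,
 * each nudged forward so that a run of identical name hashes is never
 * split.  A thread that finishes early steals half of the largest
 * remaining segment, so all threads stay busy until the leftovers
 * drop below 2*window entries.
 */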

static void add_tag_chain(const struct object_id *oid)
{
	struct tag *tag;

	/*
	 * We catch duplicates already in add_object_entry(), but we'd
	 * prefer to do this extra check to avoid having to parse the
	 * tag at all if we already know that it's being packed (e.g., if
	 * it was included via bitmaps, we would not have parsed it
	 * previously).
	 */
	if (packlist_find(&to_pack, oid->hash, NULL))
		return;

	tag = lookup_tag(oid->hash);
	while (1) {
		if (!tag || parse_tag(tag) || !tag->tagged)
			die("unable to pack objects reachable from tag %s",
			    oid_to_hex(oid));

		add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL, 0);

		if (tag->tagged->type != OBJ_TAG)
			return;

		tag = (struct tag *)tag->tagged;
	}
}

static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
	struct object_id peeled;

	if (starts_with(path, "refs/tags/") && /* is a tag? */
	    !peel_ref(path, peeled.hash)    && /* peelable? */
	    packlist_find(&to_pack, peeled.hash, NULL))      /* object packed? */
		add_tag_chain(oid);
	return 0;
}

static void prepare_pack(int window, int depth)
{
	struct object_entry **delta_list;
	uint32_t i, nr_deltas;
	unsigned n;

	get_object_details();

	/*
	 * If we're locally repacking then we need to be doubly careful
	 * from now on in order to make sure no stealth corruption gets
	 * propagated to the new pack.  Clients receiving streamed packs
	 * should validate everything they get anyway so no need to incur
	 * the additional cost here in that case.
	 */
	if (!pack_to_stdout)
		do_check_packed_object_crc = 1;

	if (!to_pack.nr_objects || !window || !depth)
		return;

	ALLOC_ARRAY(delta_list, to_pack.nr_objects);
	nr_deltas = n = 0;

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = to_pack.objects + i;

		if (entry->delta)
			/* This happens if we decided to reuse an existing
			 * delta from a pack.  "reuse_delta &&" is implied.
			 */
			continue;

		if (entry->size < 50)
			continue;

		if (entry->no_try_delta)
			continue;

		if (!entry->preferred_base) {
			nr_deltas++;
			if (entry->type < 0)
				die("unable to get type of object %s",
				    sha1_to_hex(entry->idx.sha1));
		} else {
			if (entry->type < 0) {
				/*
				 * This object is not found, but we
				 * don't have to include it anyway.
				 */
				continue;
			}
		}

		delta_list[n++] = entry;
	}

	if (nr_deltas && n > 1) {
		unsigned nr_done = 0;
		if (progress)
			progress_state = start_progress(_("Compressing objects"),
							nr_deltas);
		QSORT(delta_list, n, type_size_sort);
		ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
		stop_progress(&progress_state);
		if (nr_done != nr_deltas)
			die("inconsistency with delta count");
	}
	free(delta_list);
}

static int git_pack_config(const char *k, const char *v, void *cb)
{
	if (!strcmp(k, "pack.window")) {
		window = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.windowmemory")) {
		window_memory_limit = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.depth")) {
		depth = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachesize")) {
		max_delta_cache_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachelimit")) {
		cache_max_small_delta_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.writebitmaphashcache")) {
		if (git_config_bool(k, v))
			write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
		else
			write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
	}
	if (!strcmp(k, "pack.usebitmaps")) {
		use_bitmap_index_default = git_config_bool(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.threads")) {
		delta_search_threads = git_config_int(k, v);
		if (delta_search_threads < 0)
			die("invalid number of threads specified (%d)",
			    delta_search_threads);
#ifdef NO_PTHREADS
		if (delta_search_threads != 1)
			warning("no threads support, ignoring %s", k);
#endif
		return 0;
	}
	if (!strcmp(k, "pack.indexversion")) {
		pack_idx_opts.version = git_config_int(k, v);
		if (pack_idx_opts.version > 2)
			die("bad pack.indexversion=%"PRIu32,
			    pack_idx_opts.version);
		return 0;
	}
	return git_default_config(k, v, cb);
}
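
/*
 * For reference, the keys handled above correspond to a configuration
 * like the following (values are illustrative, not defaults):
 *
 *	[pack]
 *		window = 20
 *		windowMemory = 100m
 *		depth = 50
 *		deltaCacheSize = 256m
 *		deltaCacheLimit = 1000
 *		threads = 4
 *		indexVersion = 2
 *		useBitmaps = true
 *		writeBitmapHashCache = true
 */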

static void read_object_list_from_stdin(void)
{
	char line[40 + 1 + PATH_MAX + 2];
	unsigned char sha1[20];

	for (;;) {
		if (!fgets(line, sizeof(line), stdin)) {
			if (feof(stdin))
				break;
			if (!ferror(stdin))
				die("fgets returned NULL, not EOF, not error!");
			if (errno != EINTR)
				die_errno("fgets");
			clearerr(stdin);
			continue;
		}
		if (line[0] == '-') {
			if (get_sha1_hex(line+1, sha1))
				die("expected edge sha1, got garbage:\n %s",
				    line);
			add_preferred_base(sha1);
			continue;
		}
		if (get_sha1_hex(line, sha1))
			die("expected sha1, got garbage:\n %s", line);

		add_preferred_base_object(line+41);
		add_object_entry(sha1, 0, line+41, 0);
	}
}
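
/*
 * Example input for the reader above (object names abbreviated for
 * illustration; real input carries full 40-hex SHA-1s, optionally
 * followed by a space and a path):
 *
 *	<sha1-of-blob> Makefile
 *	<sha1-of-commit>
 *	-<sha1-of-edge-commit>
 *
 * Plain lines name objects to be packed; lines starting with '-' name
 * preferred-base edges the pack may delta against but not include.
 */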

#define OBJECT_ADDED (1u<<20)

static void show_commit(struct commit *commit, void *data)
{
	add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL, 0);
	commit->object.flags |= OBJECT_ADDED;

	if (write_bitmap_index)
		index_commit_for_bitmap(commit);
}

static void show_object(struct object *obj, const char *name, void *data)
{
	add_preferred_base_object(name);
	add_object_entry(obj->oid.hash, obj->type, name, 0);
	obj->flags |= OBJECT_ADDED;
}

static void show_edge(struct commit *commit)
{
	add_preferred_base(commit->object.oid.hash);
}

struct in_pack_object {
	off_t offset;
	struct object *object;
};

struct in_pack {
	int alloc;
	int nr;
	struct in_pack_object *array;
};

static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
	in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
	in_pack->array[in_pack->nr].object = object;
	in_pack->nr++;
}

/*
 * Compare the objects in offset order, to emulate the
 * "git rev-list --objects" output that produced the pack originally.
 */
static int ofscmp(const void *a_, const void *b_)
{
	struct in_pack_object *a = (struct in_pack_object *)a_;
	struct in_pack_object *b = (struct in_pack_object *)b_;

	if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return oidcmp(&a->object->oid, &b->object->oid);
}

static void add_objects_in_unpacked_packs(struct rev_info *revs)
{
	struct packed_git *p;
	struct in_pack in_pack;
	uint32_t i;

	memset(&in_pack, 0, sizeof(in_pack));

	for (p = packed_git; p; p = p->next) {
		const unsigned char *sha1;
		struct object *o;

		if (!p->pack_local || p->pack_keep)
			continue;
		if (open_pack_index(p))
			die("cannot open pack index");

		ALLOC_GROW(in_pack.array,
			   in_pack.nr + p->num_objects,
			   in_pack.alloc);

		for (i = 0; i < p->num_objects; i++) {
			sha1 = nth_packed_object_sha1(p, i);
			o = lookup_unknown_object(sha1);
			if (!(o->flags & OBJECT_ADDED))
				mark_in_pack_object(o, p, &in_pack);
			o->flags |= OBJECT_ADDED;
		}
	}

	if (in_pack.nr) {
		QSORT(in_pack.array, in_pack.nr, ofscmp);
		for (i = 0; i < in_pack.nr; i++) {
			struct object *o = in_pack.array[i].object;
			add_object_entry(o->oid.hash, o->type, "", 0);
		}
	}
	free(in_pack.array);
}

static int add_loose_object(const struct object_id *oid, const char *path,
			    void *data)
{
	enum object_type type = sha1_object_info(oid->hash, NULL);

	if (type < 0) {
		warning("loose object at %s could not be examined", path);
		return 0;
	}

	add_object_entry(oid->hash, type, "", 0);
	return 0;
}

/*
 * We actually don't even have to worry about reachability here.
 * add_object_entry will weed out duplicates, so we just add every
 * loose object we find.
 */
static void add_unreachable_loose_objects(void)
{
	for_each_loose_file_in_objdir(get_object_directory(),
				      add_loose_object,
				      NULL, NULL, NULL);
}

static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
{
	static struct packed_git *last_found = (void *)1;
	struct packed_git *p;

	p = (last_found != (void *)1) ? last_found : packed_git;

	while (p) {
		if ((!p->pack_local || p->pack_keep) &&
			find_pack_entry_one(sha1, p)) {
			last_found = p;
			return 1;
		}
		if (p == last_found)
			p = packed_git;
		else
			p = p->next;
		if (p == last_found)
			p = p->next;
	}
	return 0;
}
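
/*
 * Note on the helper above: "last_found" caches the pack that matched
 * most recently, with (void *)1 serving as the "nothing cached yet"
 * sentinel, so bursts of lookups that hit the same kept or non-local
 * pack probe that pack first before rescanning the packed_git list.
 */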

/*
 * Store a list of sha1s that should not be discarded
 * because they are either written too recently, or are
 * reachable from another object that was.
 *
 * This is filled by get_object_list.
 */
static struct sha1_array recent_objects;

static int loosened_object_can_be_discarded(const struct object_id *oid,
					    unsigned long mtime)
{
	if (!unpack_unreachable_expiration)
		return 0;
	if (mtime > unpack_unreachable_expiration)
		return 0;
	if (sha1_array_lookup(&recent_objects, oid) >= 0)
		return 0;
	return 1;
}

static void loosen_unused_packed_objects(struct rev_info *revs)
{
	struct packed_git *p;
	uint32_t i;
	struct object_id oid;

	for (p = packed_git; p; p = p->next) {
		if (!p->pack_local || p->pack_keep)
			continue;

		if (open_pack_index(p))
			die("cannot open pack index");

		for (i = 0; i < p->num_objects; i++) {
			nth_packed_object_oid(&oid, p, i);
			if (!packlist_find(&to_pack, oid.hash, NULL) &&
			    !has_sha1_pack_kept_or_nonlocal(oid.hash) &&
			    !loosened_object_can_be_discarded(&oid, p->mtime))
				if (force_object_loose(oid.hash, p->mtime))
					die("unable to force loose object");
		}
	}
}

/*
 * This tracks any options which pack-reuse code expects to be on, or which a
 * reader of the pack might not understand, and which would therefore prevent
 * blind reuse of what we have on disk.
 */
static int pack_options_allow_reuse(void)
{
	return pack_to_stdout && allow_ofs_delta;
}
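
/*
 * In other words, blind reuse is only attempted when streaming to
 * stdout with --delta-base-offset in effect, i.e. the reader has been
 * declared capable of OFS_DELTA; packs written to disk always take
 * the normal, fully validated object-writing codepath.
 */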

static int get_object_list_from_bitmap(struct rev_info *revs)
{
	if (prepare_bitmap_walk(revs) < 0)
		return -1;

	if (pack_options_allow_reuse() &&
	    !reuse_partial_packfile_from_bitmap(
			&reuse_packfile,
			&reuse_packfile_objects,
			&reuse_packfile_offset)) {
		assert(reuse_packfile_objects);
		nr_result += reuse_packfile_objects;
		display_progress(progress_state, nr_result);
	}

	traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
	return 0;
}

static void record_recent_object(struct object *obj,
				 const char *name,
				 void *data)
{
	sha1_array_append(&recent_objects, &obj->oid);
}

static void record_recent_commit(struct commit *commit, void *data)
{
	sha1_array_append(&recent_objects, &commit->object.oid);
}

static void get_object_list(int ac, const char **av)
{
	struct rev_info revs;
	char line[1000];
	int flags = 0;

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	/* make sure shallows are read */
	is_repository_shallow();

	while (fgets(line, sizeof(line), stdin) != NULL) {
		int len = strlen(line);
		if (len && line[len - 1] == '\n')
			line[--len] = 0;
		if (!len)
			break;
		if (*line == '-') {
			if (!strcmp(line, "--not")) {
				flags ^= UNINTERESTING;
				write_bitmap_index = 0;
				continue;
			}
			if (starts_with(line, "--shallow ")) {
				unsigned char sha1[20];
				if (get_sha1_hex(line + 10, sha1))
					die("not an SHA-1 '%s'", line + 10);
				register_shallow(sha1);
				use_bitmap_index = 0;
				continue;
			}
			die("not a rev '%s'", line);
		}
		if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
			die("bad revision '%s'", line);
	}

	if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
		return;

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	mark_edges_uninteresting(&revs, show_edge);
	traverse_commit_list(&revs, show_commit, show_object, NULL);

	if (unpack_unreachable_expiration) {
		revs.ignore_missing_links = 1;
		if (add_unseen_recent_objects_to_traversal(&revs,
				unpack_unreachable_expiration))
			die("unable to add recent objects");
		if (prepare_revision_walk(&revs))
			die("revision walk setup failed");
		traverse_commit_list(&revs, record_recent_commit,
				     record_recent_object, NULL);
	}

	if (keep_unreachable)
		add_objects_in_unpacked_packs(&revs);
	if (pack_loose_unreachable)
		add_unreachable_loose_objects();
	if (unpack_unreachable)
		loosen_unused_packed_objects(&revs);

	sha1_array_clear(&recent_objects);
}

static int option_parse_index_version(const struct option *opt,
				      const char *arg, int unset)
{
	char *c;
	const char *val = arg;
	pack_idx_opts.version = strtoul(val, &c, 10);
	if (pack_idx_opts.version > 2)
		die(_("unsupported index version %s"), val);
	if (*c == ',' && c[1])
		pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
	if (*c || pack_idx_opts.off32_limit & 0x80000000)
		die(_("bad index version '%s'"), val);
	return 0;
}
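
/*
 * Usage sketch for the parser above (illustrative values): an argument
 * such as "--index-version=2,0x40000000" selects .idx version 2 and
 * stores offsets at or beyond 1 GiB in the 64-bit offset table, which
 * is handy for exercising large-offset handling without building a
 * multi-gigabyte pack.
 */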

static int option_parse_unpack_unreachable(const struct option *opt,
					   const char *arg, int unset)
{
	if (unset) {
		unpack_unreachable = 0;
		unpack_unreachable_expiration = 0;
	}
	else {
		unpack_unreachable = 1;
		if (arg)
			unpack_unreachable_expiration = approxidate(arg);
	}
	return 0;
}

int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
	int use_internal_rev_list = 0;
	int thin = 0;
	int shallow = 0;
	int all_progress_implied = 0;
	struct argv_array rp = ARGV_ARRAY_INIT;
	int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
	int rev_list_index = 0;
	struct option pack_objects_options[] = {
		OPT_SET_INT('q', "quiet", &progress,
			    N_("do not show progress meter"), 0),
		OPT_SET_INT(0, "progress", &progress,
			    N_("show progress meter"), 1),
		OPT_SET_INT(0, "all-progress", &progress,
			    N_("show progress meter during object writing phase"), 2),
		OPT_BOOL(0, "all-progress-implied",
			 &all_progress_implied,
			 N_("similar to --all-progress when progress meter is shown")),
		{ OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
		  N_("write the pack index file in the specified idx format version"),
		  0, option_parse_index_version },
		OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
			      N_("maximum size of each output pack file")),
		OPT_BOOL(0, "local", &local,
			 N_("ignore borrowed objects from alternate object store")),
		OPT_BOOL(0, "incremental", &incremental,
			 N_("ignore packed objects")),
		OPT_INTEGER(0, "window", &window,
			    N_("limit pack window by objects")),
		OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
			      N_("limit pack window by memory in addition to object limit")),
		OPT_INTEGER(0, "depth", &depth,
			    N_("maximum length of delta chain allowed in the resulting pack")),
		OPT_BOOL(0, "reuse-delta", &reuse_delta,
			 N_("reuse existing deltas")),
		OPT_BOOL(0, "reuse-object", &reuse_object,
			 N_("reuse existing objects")),
		OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
			 N_("use OFS_DELTA objects")),
		OPT_INTEGER(0, "threads", &delta_search_threads,
			    N_("use threads when searching for best delta matches")),
		OPT_BOOL(0, "non-empty", &non_empty,
			 N_("do not create an empty pack output")),
		OPT_BOOL(0, "revs", &use_internal_rev_list,
			 N_("read revision arguments from standard input")),
		{ OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
		  N_("limit the objects to those that are not yet packed"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
		  N_("include objects reachable from any reference"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
		  N_("include objects referred by reflog entries"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
		  N_("include objects referred to by the index"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		OPT_BOOL(0, "stdout", &pack_to_stdout,
			 N_("output pack to stdout")),
		OPT_BOOL(0, "include-tag", &include_tag,
			 N_("include tag objects that refer to objects to be packed")),
		OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
			 N_("keep unreachable objects")),
		OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
			 N_("pack loose unreachable objects")),
		{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
		  N_("unpack unreachable objects newer than <time>"),
		  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
		OPT_BOOL(0, "thin", &thin,
			 N_("create thin packs")),
		OPT_BOOL(0, "shallow", &shallow,
			 N_("create packs suitable for shallow fetches")),
		OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
			 N_("ignore packs that have companion .keep file")),
		OPT_INTEGER(0, "compression", &pack_compression_level,
			    N_("pack compression level")),
		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
			    N_("do not hide commits by grafts"), 0),
		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
			 N_("use a bitmap index if available to speed up counting objects")),
		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
			 N_("write a bitmap index together with the pack index")),
		OPT_END(),
	};

	check_replace_refs = 0;

	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);

	progress = isatty(2);
	argc = parse_options(argc, argv, prefix, pack_objects_options,
			     pack_usage, 0);

	if (argc) {
		base_name = argv[0];
		argc--;
	}
	if (pack_to_stdout != !base_name || argc)
		usage_with_options(pack_usage, pack_objects_options);

	argv_array_push(&rp, "pack-objects");
	if (thin) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, shallow
				? "--objects-edge-aggressive"
				: "--objects-edge");
	} else
		argv_array_push(&rp, "--objects");

	if (rev_list_all) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--all");
	}
	if (rev_list_reflog) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--reflog");
	}
	if (rev_list_index) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--indexed-objects");
	}
	if (rev_list_unpacked) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--unpacked");
	}

	if (!reuse_object)
		reuse_delta = 0;
	if (pack_compression_level == -1)
		pack_compression_level = Z_DEFAULT_COMPRESSION;
	else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
		die("bad pack compression level %d", pack_compression_level);

	if (!delta_search_threads)	/* --threads=0 means autodetect */
		delta_search_threads = online_cpus();

#ifdef NO_PTHREADS
	if (delta_search_threads != 1)
		warning("no threads support, ignoring --threads");
#endif
	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die("--max-pack-size cannot be used to build a pack for transfer.");
	if (pack_size_limit && pack_size_limit < 1024*1024) {
		warning("minimum pack size limit is 1 MiB");
		pack_size_limit = 1024*1024;
	}

	if (!pack_to_stdout && thin)
		die("--thin cannot be used to build an indexable pack.");

	if (keep_unreachable && unpack_unreachable)
		die("--keep-unreachable and --unpack-unreachable are incompatible.");
	if (!rev_list_all || !rev_list_reflog || !rev_list_index)
		unpack_unreachable_expiration = 0;

	/*
	 * "soft" reasons not to use bitmaps - for on-disk repack by default we want
	 *
	 * - to produce a good pack (with a bitmap index, not-yet-packed objects
	 *   are packed in suboptimal order).
	 *
	 * - to use a more robust pack-generation codepath (avoiding possible
	 *   bugs in bitmap code and possible bitmap index corruption).
	 */
	if (!pack_to_stdout)
		use_bitmap_index_default = 0;

	if (use_bitmap_index < 0)
		use_bitmap_index = use_bitmap_index_default;

	/* "hard" reasons not to use bitmaps; these just won't work at all */
	if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
		use_bitmap_index = 0;

	if (pack_to_stdout || !rev_list_all)
		write_bitmap_index = 0;

	if (progress && all_progress_implied)
		progress = 2;

	prepare_packed_git();
	if (ignore_packed_keep) {
		struct packed_git *p;
		for (p = packed_git; p; p = p->next)
			if (p->pack_local && p->pack_keep)
				break;
		if (!p) /* no keep-able packs found */
			ignore_packed_keep = 0;
	}
	if (local) {
		/*
		 * unlike ignore_packed_keep above, we do not want to
		 * unset "local" based on looking at packs, as it
		 * also covers non-local objects
		 */
		struct packed_git *p;
		for (p = packed_git; p; p = p->next) {
			if (!p->pack_local) {
				have_non_local_packs = 1;
				break;
			}
		}
	}

	if (progress)
		progress_state = start_progress(_("Counting objects"), 0);
	if (!use_internal_rev_list)
		read_object_list_from_stdin();
	else {
		get_object_list(rp.argc, rp.argv);
		argv_array_clear(&rp);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result)
		prepare_pack(window, depth);
	write_pack_file();
	if (progress)
		fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
			" reused %"PRIu32" (delta %"PRIu32")\n",
			written, written_delta, reused, reused_delta);
	return 0;
}