/* unpack-trees.c */
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
11

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree.  Non-scripted Porcelain is not required to use these messages
 * and in fact is encouraged to reword them to better suit its particular
 * situation.  See how "git checkout" and "git merge" replace
 * them using setup_unpack_trees_porcelain(), for example.
 */
S
Stephen Boyd 已提交
19
/*
 * Default plumbing messages, indexed by enum unpack_trees_error_types.
 * Used by ERRORMSG() below when the caller has not installed its own
 * text via setup_unpack_trees_porcelain().
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'.  Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",
};

48 49 50 51
/*
 * Pick the caller-supplied message for error <type> when one was
 * installed in (o)->msgs, otherwise fall back to the plumbing default.
 */
#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)])      \
	  : (unpack_plumbing_errors[(type)]) )
52

53 54
void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
55
{
56
	int i;
57
	const char **msgs = opts->msgs;
58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97
	const char *msg;
	char *tmp;
	const char *cmd2 = strcmp(cmd, "checkout") ? cmd : "switch branches";
	if (advice_commit_before_merge)
		msg = "Your local changes to the following files would be overwritten by %s:\n%%s"
			"Please, commit your changes or stash them before you can %s.";
	else
		msg = "Your local changes to the following files would be overwritten by %s:\n%%s";
	tmp = xmalloc(strlen(msg) + strlen(cmd) + strlen(cmd2) - 2);
	sprintf(tmp, msg, cmd, cmd2);
	msgs[ERROR_WOULD_OVERWRITE] = tmp;
	msgs[ERROR_NOT_UPTODATE_FILE] = tmp;

	msgs[ERROR_NOT_UPTODATE_DIR] =
		"Updating the following directories would lose untracked files in it:\n%s";

	if (advice_commit_before_merge)
		msg = "The following untracked working tree files would be %s by %s:\n%%s"
			"Please move or remove them before you can %s.";
	else
		msg = "The following untracked working tree files would be %s by %s:\n%%s";
	tmp = xmalloc(strlen(msg) + strlen(cmd) + strlen("removed") + strlen(cmd2) - 4);
	sprintf(tmp, msg, "removed", cmd, cmd2);
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = tmp;
	tmp = xmalloc(strlen(msg) + strlen(cmd) + strlen("overwritten") + strlen(cmd2) - 4);
	sprintf(tmp, msg, "overwritten", cmd, cmd2);
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = tmp;

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = "Entry '%s' overlaps with '%s'.  Cannot bind.";

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		"Cannot update sparse checkout: the following entries are not up-to-date:\n%s";
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		"The following Working tree files would be overwritten by sparse checkout update:\n%s";
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		"The following Working tree files would be removed by sparse checkout update:\n%s";
98 99

	opts->show_all_errors = 1;
100 101 102
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
103 104
}

105 106
/*
 * Put "ce" (taking ownership of it) into the resulting index with the
 * requested flag bits flipped; replacing an existing entry at the same
 * path is allowed.
 */
static void do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			 unsigned int set, unsigned int clear)
{
	/* hash-table bookkeeping bits are always reset for the new index */
	clear |= CE_HASHED | CE_UNHASHED;

	/* removing from the index implies removal from the worktree too */
	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->next = NULL;
	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	add_index_entry(&o->result, ce,
			ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

119
/* Return a freshly allocated bitwise copy of "ce"; caller owns it. */
static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
	unsigned int sz = ce_size(ce);
	struct cache_entry *copy = xmalloc(sz);

	memcpy(copy, ce, sz);
	return copy;
}

/* Add a copy of "ce" to the resulting index; the original stays untouched. */
static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_entry(ce), set, clear);
}

135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150
/*
 * Register path <path> as rejected with error type <e>: either report
 * it immediately, or queue it so display_error_msgs() can show all
 * rejections in one batch.  Always returns -1 (error).
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (o->show_all_errors) {
		/* remembered for a later display_error_msgs() call */
		string_list_append(&o->unpack_rejects[e], path);
		return -1;
	}
	return error(ERRORMSG(o, e), path);
}

/*
 * display all the error messages stored in a nice way
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
160
	int e, i;
161 162
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
163 164
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
165 166
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
167 168
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
169 170 171
			error(ERRORMSG(o, e), path.buf);
			strbuf_release(&path);
		}
172
		string_list_clear(rejects, 0);
173 174
	}
	if (something_displayed)
175
		fprintf(stderr, "Aborting\n");
176 177
}

178 179 180
/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	/* bail out unless the leading path is safe to touch — see check_leading_path() */
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	/* if the removal itself failed, do not schedule the parents */
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

static struct checkout state;

/*
 * Carry out the worktree side of the merge result in &o->result:
 * first delete all entries flagged CE_WT_REMOVE (and the directories
 * they leave empty), then check out all entries flagged CE_UPDATE.
 * Progress is reported when o->update && o->verbose_update.
 * Returns non-zero if any checkout_entry() call failed.
 */
static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	int i;
	int errs = 0;

	if (o->update && o->verbose_update) {
		/* count the entries that will produce progress ticks */
		for (total = cnt = 0; cnt < index->cache_nr; cnt++) {
			const struct cache_entry *ce = index->cache[cnt];
			if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
				total++;
		}

		progress = start_progress_delay("Checking out files",
						total, 50, 1);
		cnt = 0;
	}

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT, &o->result);
	/* pass 1: removals (done before checkouts so D/F swaps work) */
	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
			continue;
		}
	}
	remove_marked_cache_entries(&o->result);
	remove_scheduled_dirs();

	/* pass 2: checkouts */
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	stop_progress(&progress);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	return errs != 0;
}

244 245 246 247 248
static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);

/*
 * Transfer the CE_NEW_SKIP_WORKTREE decision onto CE_SKIP_WORKTREE and
 * adjust CE_UPDATE/CE_WT_REMOVE so the worktree follows the new
 * checkout area.  Returns 0 on success, -1 when the transition would
 * lose local changes or untracked files.
 */
static int apply_sparse_checkout(struct cache_entry *ce, struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify worktree if they are already
	 * outside checkout area
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside worktree area, don't
		 * bother remove it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	/* entry leaves the checkout area: remove its worktree file */
	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must be called already
		 * also stat info may have lost after merged_entry() so calling
		 * verify_uptodate() again may fail
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
	}
	/* entry enters the checkout area: materialize its worktree file */
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

303 304
static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
305
{
306 307
	int ret = o->fn(src, o);
	if (ret > 0)
308 309 310 311
		ret = 0;
	return ret;
}

312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329
/*
 * Mark "ce" as consumed by the traversal.  If it sits exactly at
 * o->cache_bottom, advance the bottom past the contiguous run of
 * already-unpacked entries so future scans can start lower.
 */
static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

/* Reset the per-traversal bookkeeping flags on every entry of "index". */
static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

333
static int locate_in_src_index(const struct cache_entry *ce,
334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback.  Skip
 * the other unmerged entry with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	/* walk forward from ce's position over all same-named stages */
	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}

/*
 * Return the first source-index entry at or above cache_bottom that
 * has not been consumed yet, or NULL when everything is unpacked.
 */
static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos;

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		struct cache_entry *ce = index->cache[pos];

		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
	}
	return NULL;
}

379
/*
 * Copy into the result, and mark as used, every source-index entry
 * sharing ce's name (i.e. all its unmerged stages).  An exact-name
 * match at stage 0 would indicate caller misuse and die()s.
 */
static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	/* -pos - 1 is where the first same-named stage lives */
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}

/*
 * Feed a single index entry (no matching tree entries) to the merge
 * callback.  Unmerged entries are either carried over verbatim
 * (o->skip_unmerged) or passed through with their sibling stages
 * marked used so the callback only fires once per path.
 */
static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}

419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444
static int find_cache_pos(struct traverse_info *, const struct name_entry *);

/* Undo switch_cache_bottom(); no-op for "diff-index --cached". */
static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

/*
 * Before descending into a subtree, point o->cache_bottom at the
 * source-index position corresponding to that directory (or past the
 * end when no entries can match).  Returns the previous bottom so the
 * caller can restore_cache_bottom() afterwards.
 */
static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, &info->name);

	/* pos < -1 encodes "directory found at -2 - pos" (see find_cache_pos) */
	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}

J
Junio C Hamano 已提交
447 448 449 450
/*
 * Descend into the subdirectory entries selected by "dirmask" across
 * the n trees being walked, carrying accumulated D/F conflict bits in
 * df_conflicts, and traverse them with a child traverse_info.
 */
static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	int i, ret, bottom;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;

	/* find the first tree that actually has this directory */
	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = *p;
	newinfo.pathlen += tree_entry_len(p) + 1;
	newinfo.df_conflicts |= df_conflicts;

	/* load each selected subtree; a NULL sha1 yields an empty descriptor */
	for (i = 0; i < n; i++, dirmask >>= 1) {
		const unsigned char *sha1 = NULL;
		if (dirmask & 1)
			sha1 = names[i].sha1;
		buf[i] = fill_tree_descriptor(t+i, sha1);
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < n; i++)
		free(buf[i]);

	return ret;
}

/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	/* recurse to compare against the leading directory components first */
	if (info->prev) {
		int cmp = do_compare_entry(ce, info->prev, &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

/*
 * Full ordering of "ce" against the traverse path plus entry "n";
 * do_compare_entry() alone only checks the leading portion.
 */
static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}

532 533 534 535 536 537 538 539 540 541 542 543 544 545
/*
 * Does cache entry "ce" live inside the directory the traversal is
 * currently positioned at?
 */
static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev, &info->name))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

546 547 548 549 550 551
/*
 * Allocate a new cache entry for tree entry "n" at the traversal's
 * current path, with the given merge stage.  Caller owns the result.
 */
static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
{
	int len = traverse_path_len(info, n);
	struct cache_entry *ce = xcalloc(1, cache_entry_size(len));

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	hashcpy(ce->sha1, n->sha1);
	make_traverse_path(ce->name, info, n);

	return ce;
}

560 561 562 563 564
/*
 * Turn the non-directory entries of one traversal step into cache
 * entries and hand them to the merge callback (o->merge), or add them
 * straight to the result (plain read).  Entries that collide with a
 * directory in another tree are represented by o->df_conflict_entry.
 */
static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		/*
		 * Stage assignment: 0 for plain reads; otherwise 1 for
		 * the common ancestor side, 2 for "ours" (head_idx), 3
		 * for "theirs".
		 */
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;
		src[i + o->merge] = create_ce_entry(info, names + i, stage);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		/* the callback copied what it needed; free our temporaries */
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				free(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			do_add_entry(o, src[i], 0, 0);
	return 0;
}

615 616 617
static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
618
	if (!o->gently && !o->exiting_early) {
619 620 621 622 623 624 625
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649
/* NEEDSWORK: give this a better name and share with tree-walk.c */
static int name_compare(const char *a, int a_len,
			const char *b, int b_len)
{
	int min_len = (a_len < b_len) ? a_len : b_len;
	int cmp = memcmp(a, b, min_len);

	return cmp ? cmp : a_len - b_len;
}

/*
 * The tree traversal is looking at name p.  If we have a matching entry,
 * return it.  If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * the directory.
 *
 * Return values: >= 0 is an exact match at that position; -2 - pos
 * means p matches a directory whose entries start at pos; -1 means no
 * match.  As a side effect, o->cache_bottom may be advanced past
 * already-unpacked entries.
 */
static int find_cache_pos(struct traverse_info *info,
			  const struct name_entry *p)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;
	int p_len = tree_entry_len(p);

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info))
			continue;
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p->path, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g.  ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}

/*
 * Entry in the source index matching tree entry "p", or NULL when
 * there is none (including the delayed-directory case of
 * find_cache_pos()).
 */
static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	struct unpack_trees_options *o = info->data;
	int pos = find_cache_pos(info, p);

	return (0 <= pos) ? o->src_index->cache[pos] : NULL;
}

J
Junio C Hamano 已提交
710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741
/* Print the traversal's full path by walking the info chain root-first. */
static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name.path)
			putchar('/');
	}
	printf("%s", info->name.path);
}

/* Dump one tree entry (index, mode, path) from the traversal callback. */
static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

/* Trace helper for unpack_callback(); enabled via o->debug_unpack. */
static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}

742 743
/*
 * traverse_trees() callback: merge one step of the n-tree walk with
 * the matching source-index entries and recurse into directories.
 * Returns the mask of consumed tree entries, or a negative error.
 */
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				/* index entry sorts before the trees: flush it alone */
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy.  diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				o->cache_bottom += matches;
				return mask;
			}
		}

		if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}

832 833 834 835 836
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    char *prefix, int prefix_len,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval);

837 838 839 840 841
/* Whole directory matching */
static int clear_ce_flags_dir(struct cache_entry **cache, int nr,
			      char *prefix, int prefix_len,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int ret = is_excluded_from_list(prefix, prefix_len,
					basename, &dtype, el);

	prefix[prefix_len++] = '/';

	/* If undecided, use matching result of parent dir in defval */
	if (ret < 0)
		ret = defval;

	/* find the span of entries living under this directory prefix */
	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix, prefix_len))
			break;
	}

	/*
	 * TODO: check el, if there are no patterns that may conflict
	 * with ret (iow, we know in advance the incl/excl
	 * decision for the entire directory), clear flag here without
	 * calling clear_ce_flags_1(). That function will call
	 * the expensive is_excluded_from_list() on every entry.
	 */
	return clear_ce_flags_1(cache, cache_end - cache,
				prefix, prefix_len,
				select_mask, clear_mask,
				el, ret);
}

/*
 * Traverse the index, find every entry that matches according to
 * o->el. Do "ce_flags &= ~clear_mask" on those entries. Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix_len	: an offset to its path
 *
 * The current path ("prefix") including the trailing '/' is
 *   cache[0]->name[0..(prefix_len-1)]
 * Top level path has prefix_len zero.
 */
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    char *prefix, int prefix_len,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while(cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype, ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix_len && strncmp(ce->name, prefix, prefix_len))
			break;

		name = ce->name + prefix_len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			memcpy(prefix + prefix_len, name, len);

			/*
			 * terminate the string (no trailing slash),
			 * clear_c_f_dir needs it
			 */
			prefix[prefix_len + len] = '\0';
			processed = clear_ce_flags_dir(cache, cache_end - cache,
						       prefix, prefix_len + len,
						       prefix + prefix_len,
						       select_mask, clear_mask,
						       el, defval);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				continue;
			}

			/* otherwise recurse into the directory entry by entry */
			prefix[prefix_len + len++] = '/';
			cache += clear_ce_flags_1(cache, cache_end - cache,
						  prefix, prefix_len + len,
						  select_mask, clear_mask, el, defval);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = is_excluded_from_list(ce->name, ce_namelen(ce),
					    name, &dtype, el);
		if (ret < 0)
			ret = defval;
		if (ret > 0)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}

/*
 * Entry point for clear_ce_flags_1(): start at the top level with an
 * empty prefix; "prefix" is scratch space filled in during recursion.
 */
static int clear_ce_flags(struct cache_entry **cache, int nr,
			    int select_mask, int clear_mask,
			    struct exclude_list *el)
{
	char prefix[PATH_MAX];
	return clear_ce_flags_1(cache, nr,
				prefix, 0,
				select_mask, clear_mask,
				el, 0);
}

971 972 973 974 975 976 977 978 979
/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct exclude_list *el,
				   struct index_state *the_index,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < the_index->cache_nr; i++) {
		struct cache_entry *ce = the_index->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(the_index->cache, the_index->cache_nr,
		       select_flag, skip_wt_flag, el);
}

1004 1005 1006
static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
1007 1008 1009
/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 * as scratch flags and must not be set by the caller.
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int i, ret;
	static struct cache_entry *dfc;
	struct exclude_list el;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
	/*
	 * NOTE(review): "state" is presumably a file-scope
	 * "struct checkout" shared with check_updates(); it is not
	 * visible in this chunk -- confirm against the full file.
	 */
	memset(&state, 0, sizeof(state));
	state.base_dir = "";
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;

	/* Sparse checkout only makes sense when updating the worktree. */
	memset(&el, 0, sizeof(el));
	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		/* Missing/unreadable sparse file disables sparse checkout. */
		if (add_excludes_from_file_to_list(git_path("info/sparse-checkout"), "", 0, &el, 0) < 0)
			o->skip_sparse_checkout = 1;
		else
			o->el = &el;
	}

	/* The merged result is accumulated in o->result. */
	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);

	/* Shared singleton placeholder for directory/file conflicts. */
	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into.  Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		if (traverse_trees(len, t, &info) < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
		 * If the will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove it from worktree
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we have had
			 * correct CE_NEW_SKIP_WORKTREE
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	/* src_index is no longer valid past this point. */
	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	/* exiting_early means the caller asked us to stop quietly. */
	if (o->exiting_early)
		ret = 0;
	goto done;
}
1175 1176 1177

/* Here come the merge functions */

1178 1179
static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
1180
{
1181 1182
	return o->gently ? -1 :
		add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
1183 1184
}

1185
static int same(const struct cache_entry *a, const struct cache_entry *b)
1186 1187 1188 1189 1190
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
1191 1192
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
1193
	return a->ce_mode == b->ce_mode &&
1194
	       !hashcmp(a->sha1, b->sha1);
1195 1196 1197 1198 1199 1200 1201
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
1202 1203 1204
/*
 * Verify that the worktree file for "ce" carries no local modification
 * that a subsequent update would clobber.  Returns 0 when it is safe
 * to proceed, -1 (gently) or the add_rejected_path() result otherwise.
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again
	 * if this entry is truly up-to-date because this file may be
	 * overwritten.
	 */
	if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
		; /* keep checking */
	else if (o->reset || ce_uptodate(ce))
		return 0;

	if (!lstat(ce->name, &st)) {
		int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
		unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);
		if (!changed)
			return 0;
		/*
		 * NEEDSWORK: the current default policy is to allow
		 * submodule to be out of sync wrt the superproject
		 * index.  This needs to be tightened later for
		 * submodules that are marked to be automatically
		 * checked out.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;
		/*
		 * The file exists but is modified; clear errno so the
		 * ENOENT test below cannot trigger on stale state.
		 */
		errno = 0;
	}
	/* A missing file trivially cannot hold local modifications. */
	if (errno == ENOENT)
		return 0;
	return o->gently ? -1 :
		add_rejected_path(o, error_type, ce->name);
}

1243
static int verify_uptodate(const struct cache_entry *ce,
1244 1245
			   struct unpack_trees_options *o)
{
1246
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
1247
		return 0;
1248
	return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
1249 1250
}

1251
/*
 * Like verify_uptodate(), but reports failures with the
 * sparse-checkout-specific error message.
 */
static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
}

1257 1258
static void invalidate_ce_path(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
1259 1260
{
	if (ce)
1261
		cache_tree_invalidate_path(o->src_index->cache_tree, ce->name);
1262 1263
}

1264 1265 1266 1267 1268 1269 1270
/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(const struct cache_entry *ce,
				  enum unpack_trees_error_types error_type,
				  struct unpack_trees_options *o)
{
	/* Intentionally a no-op; see the comment above. */
	return 0;
}

1278 1279 1280
static int verify_clean_subdirectory(const struct cache_entry *ce,
				     enum unpack_trees_error_types error_type,
				     struct unpack_trees_options *o)
1281 1282
{
	/*
1283
	 * we are about to extract "ce->name"; we would not want to lose
1284 1285 1286
	 * anything in the existing directory there.
	 */
	int namelen;
1287
	int i;
1288 1289 1290
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;
1291 1292
	unsigned char sha1[20];

1293
	if (S_ISGITLINK(ce->ce_mode) &&
1294 1295 1296 1297 1298 1299
	    resolve_gitlink_ref(ce->name, "HEAD", sha1) == 0) {
		/* If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!hashcmp(sha1, ce->sha1))
			return 0;
1300
		return verify_clean_submodule(ce, error_type, o);
1301
	}
1302 1303 1304 1305 1306

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
1307
	namelen = ce_namelen(ce);
1308 1309 1310
	for (i = locate_in_src_index(ce, o);
	     i < o->src_index->cache_nr;
	     i++) {
1311 1312
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
1313
		if (len < namelen ||
1314 1315
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
1316 1317
			break;
		/*
1318 1319
		 * ce2->name is an entry in the subdirectory to be
		 * removed.
1320
		 */
1321 1322
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
1323
				return -1;
1324
			add_entry(o, ce2, CE_REMOVE, 0);
1325
			mark_ce_used(ce2, o);
1326 1327 1328 1329 1330 1331 1332 1333 1334
		}
		cnt++;
	}

	/*
	 * Then we need to make sure that we do not lose a locally
	 * present file that is not ignored.
	 */
	pathbuf = xmalloc(namelen + 2);
1335
	memcpy(pathbuf, ce->name, namelen);
1336 1337 1338 1339 1340
	strcpy(pathbuf+namelen, "/");

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
1341
	i = read_directory(&d, pathbuf, namelen+1, NULL);
1342
	if (i)
1343
		return o->gently ? -1 :
1344
			add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
1345 1346 1347 1348
	free(pathbuf);
	return cnt;
}

1349 1350 1351 1352 1353 1354 1355 1356
/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
1357
static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
1358
{
1359
	const struct cache_entry *src;
1360

1361
	src = index_file_exists(o->src_index, name, len, 1);
1362
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
1363 1364
}

1365
/*
 * Decide whether the on-disk path "name" (stat data in "st") may be
 * removed/overwritten.  Returns 0 when removal is OK, -1 (gently) or
 * the add_rejected_path() result when it would lose data.
 */
static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case..
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, error_type, o) < 0)
			return -1;
		return 0;
	}

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return o->gently ? -1 :
		add_rejected_path(o, error_type, name);
}

1417 1418
/*
 * We do not want to remove or overwrite a working tree file that
1419
 * is not tracked, unless it is ignored.
1420
 */
1421 1422 1423
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   struct unpack_trees_options *o)
1424
{
1425
	int len;
1426 1427 1428
	struct stat st;

	if (o->index_only || o->reset || !o->update)
1429
		return 0;
1430

1431 1432
	len = check_leading_path(ce->name, ce_namelen(ce));
	if (!len)
1433
		return 0;
1434 1435 1436 1437
	else if (len > 0) {
		char path[PATH_MAX + 1];
		memcpy(path, ce->name, len);
		path[len] = 0;
1438 1439 1440
		if (lstat(path, &st))
			return error("cannot stat '%s': %s", path,
					strerror(errno));
1441

1442 1443
		return check_ok_to_remove(path, len, DT_UNKNOWN, NULL, &st,
				error_type, o);
1444 1445 1446 1447 1448 1449
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error("cannot stat '%s': %s", ce->name,
				     strerror(errno));
		return 0;
	} else {
1450
		return check_ok_to_remove(ce->name, ce_namelen(ce),
1451 1452 1453
					  ce_to_dtype(ce), ce, &st,
					  error_type, o);
	}
1454
}
1455

1456
static int verify_absent(const struct cache_entry *ce,
1457
			 enum unpack_trees_error_types error_type,
1458 1459
			 struct unpack_trees_options *o)
{
1460
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
1461
		return 0;
1462
	return verify_absent_1(ce, error_type, o);
1463
}
1464

1465 1466 1467
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types error_type,
				struct unpack_trees_options *o)
1468
{
1469 1470 1471 1472 1473
	enum unpack_trees_error_types orphaned_error = error_type;
	if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
		orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;

	return verify_absent_1(ce, orphaned_error, o);
1474
}
1475

1476
/*
 * Record "ce" as the merge result, replacing "old" (the entry from the
 * original index, or NULL for a brand-new path).  Returns 1 on success,
 * -1 when the new file would clobber something in the worktree.
 */
static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_entry(ce);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() be called again, this time with
		 *    correct CE_NEW_SKIP_WORKTREE
		 *
		 * verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			free(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);
	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				free(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		invalidate_ce_path(old, o);
	}

	/* Replace any staged variants of this path with the merge result. */
	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}

1537 1538 1539
static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
1540
{
1541 1542
	/* Did it exist in the index? */
	if (!old) {
1543
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
1544
			return -1;
1545 1546
		return 0;
	}
1547
	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
1548 1549
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
1550
	invalidate_ce_path(ce, o);
1551 1552 1553
	return 1;
}

1554 1555
/* Carry "ce" over into the result index unchanged; always returns 1. */
static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}

#if DBRT_DEBUG
/* Debug helper: dump one merge stage entry (or "(missing)") to "o". */
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (ce)
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
	else
		fprintf(o, "%s (missing)\n", label);
}
#endif

1577 1578
/*
 * Three-way merge of index (stage 0), ancestors (stages 1..head_idx-1),
 * head (stage head_idx) and remote (stage head_idx+1).  The #N case
 * numbers refer to the trivial-merge table in the git-read-tree
 * documentation.  Returns the number of entries kept/merged, or a
 * negative value on rejection.
 */
int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	/* Survey the ancestor stages (1 .. head_idx-1). */
	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	/* The D/F placeholder counts as "absent but conflicting". */
	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		/* Pick any stage that names the path, for reporting. */
		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head   ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
1745
 * information across a "fast-forward", favoring a successful merge
1746 1747 1748 1749
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
1750 1751
/*
 * Two-way merge of index (src[0]), old tree (src[1]) and new tree
 * (src[2]).  Numbered comments refer to the two-tree merge cases in
 * the git-read-tree documentation.
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	/* The D/F placeholder behaves as "absent" here. */
	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			 (!oldtree && newtree &&
			  same(current, newtree)) || /* 6 and 7 */
			 (oldtree && newtree &&
			  same(oldtree, newtree)) || /* 14 and 15 */
			 (oldtree && newtree &&
			  !same(oldtree, newtree) && /* 18 and 19 */
			  same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
1814 1815
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
1816
{
1817 1818
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];
1819 1820

	if (o->merge_size != 1)
1821
		return error("Cannot do a bind merge of %d trees",
1822 1823
			     o->merge_size);
	if (a && old)
1824
		return o->gently ? -1 :
1825
			error(ERRORMSG(o, ERROR_BIND_OVERLAP), a->name, old->name);
1826
	if (!a)
1827
		return keep_entry(old, o);
1828 1829 1830 1831 1832 1833 1834 1835 1836 1837
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
1838 1839
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
1840
{
1841 1842
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];
1843 1844 1845 1846 1847

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

1848
	if (!a || a == o->df_conflict_entry)
1849
		return deleted_entry(old, old, o);
1850

1851
	if (old && same(old, a)) {
1852
		int update = 0;
1853
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
1854 1855
			struct stat st;
			if (lstat(old->name, &st) ||
1856
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
1857
				update |= CE_UPDATE;
1858
		}
1859 1860
		add_entry(o, old, update, 0);
		return 0;
1861 1862 1863
	}
	return merged_entry(a, old, o);
}