lstat_cache(): introduce has_symlink_or_noent_leading_path() function
[git/git.git] / unpack-trees.c
1 #define NO_THE_INDEX_COMPATIBILITY_MACROS
2 #include "cache.h"
3 #include "dir.h"
4 #include "tree.h"
5 #include "tree-walk.h"
6 #include "cache-tree.h"
7 #include "unpack-trees.h"
8 #include "progress.h"
9 #include "refs.h"
10
/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree. Non-scripted Porcelain is not required to use these messages
 * and in fact are encouraged to reword them to better suit their particular
 * situation better. See how "git checkout" replaces not_uptodate_file to
 * explain why it does not allow switching between branches when you have
 * local changes, for example.
 */
static struct unpack_trees_error_msgs unpack_plumbing_errors = {
	/* would_overwrite */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* not_uptodate_file */
	"Entry '%s' not uptodate. Cannot merge.",

	/* not_uptodate_dir */
	"Updating '%s' would lose untracked files in it",

	/* would_lose_untracked */
	"Untracked working tree file '%s' would be %s by merge.",

	/* bind_overlap */
	"Entry '%s' overlaps with '%s'. Cannot bind.",
};

/*
 * Pick the caller-supplied message for field "fld" when one was set in
 * o->msgs, falling back to the plumbing default above.
 */
#define ERRORMSG(o,fld) \
	( ((o) && (o)->msgs.fld) \
		? ((o)->msgs.fld) \
		: (unpack_plumbing_errors.fld) )
41 static void add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
42 unsigned int set, unsigned int clear)
43 {
44 unsigned int size = ce_size(ce);
45 struct cache_entry *new = xmalloc(size);
46
47 clear |= CE_HASHED | CE_UNHASHED;
48
49 memcpy(new, ce, size);
50 new->next = NULL;
51 new->ce_flags = (new->ce_flags & ~clear) | set;
52 add_index_entry(&o->result, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|ADD_CACHE_SKIP_DFCHECK);
53 }
54
/* Unlink the last component and attempt to remove leading
 * directories, in case this unlink is the removal of the
 * last entry in the directory -- empty directories are removed.
 */
static void unlink_entry(struct cache_entry *ce)
{
	char *cp, *prev;
	char *name = ce->name;

	/*
	 * If a leading path component is a symlink or is missing, the
	 * entry does not really live at ce->name in the working tree;
	 * do not touch the filesystem at all.
	 */
	if (has_symlink_or_noent_leading_path(ce_namelen(ce), ce->name))
		return;
	if (unlink(name))
		return;
	/*
	 * Walk upwards: repeatedly truncate "name" in place at its last
	 * '/' and try to rmdir() the parent; stop at the first parent
	 * that is not empty.  "prev" remembers the slash we NUL-ed out
	 * in the previous round so it can be restored.
	 */
	prev = NULL;
	while (1) {
		int status;
		cp = strrchr(name, '/');
		if (prev)
			*prev = '/';	/* restore last round's slash */
		if (!cp)
			break;

		*cp = 0;		/* truncate to the parent directory */
		status = rmdir(name);
		if (status) {
			/* parent not empty (or rmdir failed): restore and stop */
			*cp = '/';
			break;
		}
		prev = cp;
	}
}
86
87 static struct checkout state;
88 static int check_updates(struct unpack_trees_options *o)
89 {
90 unsigned cnt = 0, total = 0;
91 struct progress *progress = NULL;
92 struct index_state *index = &o->result;
93 int i;
94 int errs = 0;
95
96 if (o->update && o->verbose_update) {
97 for (total = cnt = 0; cnt < index->cache_nr; cnt++) {
98 struct cache_entry *ce = index->cache[cnt];
99 if (ce->ce_flags & (CE_UPDATE | CE_REMOVE))
100 total++;
101 }
102
103 progress = start_progress_delay("Checking out files",
104 total, 50, 1);
105 cnt = 0;
106 }
107
108 for (i = 0; i < index->cache_nr; i++) {
109 struct cache_entry *ce = index->cache[i];
110
111 if (ce->ce_flags & CE_REMOVE) {
112 display_progress(progress, ++cnt);
113 if (o->update)
114 unlink_entry(ce);
115 remove_index_entry_at(&o->result, i);
116 i--;
117 continue;
118 }
119 }
120
121 for (i = 0; i < index->cache_nr; i++) {
122 struct cache_entry *ce = index->cache[i];
123
124 if (ce->ce_flags & CE_UPDATE) {
125 display_progress(progress, ++cnt);
126 ce->ce_flags &= ~CE_UPDATE;
127 if (o->update) {
128 errs |= checkout_entry(ce, &state, NULL);
129 }
130 }
131 }
132 stop_progress(&progress);
133 return errs != 0;
134 }
135
136 static inline int call_unpack_fn(struct cache_entry **src, struct unpack_trees_options *o)
137 {
138 int ret = o->fn(src, o);
139 if (ret > 0)
140 ret = 0;
141 return ret;
142 }
143
144 static int unpack_index_entry(struct cache_entry *ce, struct unpack_trees_options *o)
145 {
146 struct cache_entry *src[5] = { ce, };
147
148 o->pos++;
149 if (ce_stage(ce)) {
150 if (o->skip_unmerged) {
151 add_entry(o, ce, 0, 0);
152 return 0;
153 }
154 }
155 return call_unpack_fn(src, o);
156 }
157
158 int traverse_trees_recursive(int n, unsigned long dirmask, unsigned long df_conflicts, struct name_entry *names, struct traverse_info *info)
159 {
160 int i;
161 struct tree_desc t[MAX_UNPACK_TREES];
162 struct traverse_info newinfo;
163 struct name_entry *p;
164
165 p = names;
166 while (!p->mode)
167 p++;
168
169 newinfo = *info;
170 newinfo.prev = info;
171 newinfo.name = *p;
172 newinfo.pathlen += tree_entry_len(p->path, p->sha1) + 1;
173 newinfo.conflicts |= df_conflicts;
174
175 for (i = 0; i < n; i++, dirmask >>= 1) {
176 const unsigned char *sha1 = NULL;
177 if (dirmask & 1)
178 sha1 = names[i].sha1;
179 fill_tree_descriptor(t+i, sha1);
180 }
181 return traverse_trees(n, t, &newinfo);
182 }
183
/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	/* First compare against the leading directories recorded in "info". */
	if (info->prev) {
		int cmp = do_compare_entry(ce, info->prev, &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	/* Compare only the tail of ce->name, past the leading path. */
	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n->path, n->sha1);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}
216
/*
 * Full comparison of a cache entry against a traverse path; see
 * do_compare_entry() for the prefix comparison this builds upon.
 */
static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int diff = do_compare_entry(ce, info, n);

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return diff ? diff : (ce_namelen(ce) > traverse_path_len(info, n));
}
229
230 static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
231 {
232 int len = traverse_path_len(info, n);
233 struct cache_entry *ce = xcalloc(1, cache_entry_size(len));
234
235 ce->ce_mode = create_ce_mode(n->mode);
236 ce->ce_flags = create_ce_flags(len, stage);
237 hashcpy(ce->sha1, n->sha1);
238 make_traverse_path(ce->name, info, n);
239
240 return ce;
241 }
242
/*
 * Create cache entries for the non-directory entries in "names" at the
 * appropriate merge stage, fill them into src[], and either hand the
 * whole set to the merge function (o->merge) or add them all to the
 * result index.
 *
 * "mask" has a bit set for every tree that has an entry at this name;
 * "dirmask" marks those that are directories.
 */
static int unpack_nondirectories(int n, unsigned long mask, unsigned long dirmask, struct cache_entry *src[5],
		const struct name_entry *names, const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * When merging, src[0] holds the index entry so tree i goes in
	 * src[i + 1]; the inherited conflict mask is shifted down to
	 * line up with the tree bits.  A directory at this name always
	 * conflicts with a non-directory entry (D/F conflict).
	 */
	conflicts = info->conflicts;
	if (o->merge)
		conflicts >>= 1;
	conflicts |= dirmask;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		/*
		 * Stage 0 for a plain read; otherwise stage 1 for trees
		 * before the head, 2 for the head itself, 3 after it.
		 */
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;
		src[i + o->merge] = create_ce_entry(info, names + i, stage);
	}

	if (o->merge)
		return call_unpack_fn(src, o);

	/* Not merging: keep everything that was filled into src[]. */
	n += o->merge;
	for (i = 0; i < n; i++)
		add_entry(o, src[i], 0, 0);
	return 0;
}
291
/*
 * Callback for traverse_trees(): invoked once per pathname present in
 * at least one of the trees.  When merging, index entries that sort at
 * or before this path are folded in first; then the non-directory
 * entries are unpacked and any subdirectories are recursed into.
 * Returns "mask" on success (tells the traversal which trees were
 * consumed) or -1 on error.
 */
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[5] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (o->pos < o->src_index->cache_nr) {
			struct cache_entry *ce = o->src_index->cache[o->pos];
			int cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				/* Index entry sorts before this path: it has
				 * no tree counterpart; process it on its own. */
				if (unpack_index_entry(ce, o) < 0)
					return -1;
				continue;
			}
			if (!cmp) {
				o->pos++;
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index entries, we'll skip this
					 * entry *and* the tree entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_entry(o, ce, 0, 0);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	/* Now handle any directories.. */
	if (dirmask) {
		/* Trees with a non-directory here conflict with the
		 * directories (potential D/F conflicts). */
		unsigned long conflicts = mask & ~dirmask;
		if (o->merge) {
			/* Shift to leave bit 0 for the index entry, if any. */
			conflicts <<= 1;
			if (src[0])
				conflicts |= 1;
		}
		if (traverse_trees_recursive(n, dirmask, conflicts,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}
349
350 static int unpack_failed(struct unpack_trees_options *o, const char *message)
351 {
352 discard_index(&o->result);
353 if (!o->gently) {
354 if (message)
355 return error("%s", message);
356 return -1;
357 }
358 return -1;
359 }
360
/*
 * N-way merge "len" trees. Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int ret;
	/* Shared dummy entry used to mark D/F conflict slots in src[];
	 * allocated once and reused across calls. */
	static struct cache_entry *dfc;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
	/* Set up the file-scope checkout state used by check_updates(). */
	memset(&state, 0, sizeof(state));
	state.base_dir = "";
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;

	/* The merge result is accumulated in o->result, not in-place. */
	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	if (o->src_index)
		o->result.timestamp = o->src_index->timestamp;
	o->merge_size = len;

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;

		if (traverse_trees(len, t, &info) < 0)
			return unpack_failed(o, NULL);
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (o->pos < o->src_index->cache_nr) {
			struct cache_entry *ce = o->src_index->cache[o->pos];
			if (unpack_index_entry(ce, o) < 0)
				return unpack_failed(o, NULL);
		}
	}

	if (o->trivial_merges_only && o->nontrivial_merge)
		return unpack_failed(o, "Merge requires file-level merging");

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index)
		*o->dst_index = o->result;
	return ret;
}
418
/* Here come the merge functions */

/* Report that "ce" blocks the merge; always returns -1 (via error()). */
static int reject_merge(struct cache_entry *ce, struct unpack_trees_options *o)
{
	return error(ERRORMSG(o, would_overwrite), ce->name);
}
425
426 static int same(struct cache_entry *a, struct cache_entry *b)
427 {
428 if (!!a != !!b)
429 return 0;
430 if (!a && !b)
431 return 1;
432 return a->ce_mode == b->ce_mode &&
433 !hashcmp(a->sha1, b->sha1);
434 }
435
436
/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate(struct cache_entry *ce,
		struct unpack_trees_options *o)
{
	struct stat st;

	if (o->index_only || o->reset)
		return 0;

	if (!lstat(ce->name, &st)) {
		unsigned changed = ie_match_stat(o->src_index, ce, &st, CE_MATCH_IGNORE_VALID);
		if (!changed)
			return 0;
		/*
		 * NEEDSWORK: the current default policy is to allow
		 * submodule to be out of sync wrt the supermodule
		 * index. This needs to be tightened later for
		 * submodules that are marked to be automatically
		 * checked out.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;
		/* lstat() succeeded, so make sure the ENOENT check below
		 * cannot fire on stale errno from an earlier call. */
		errno = 0;
	}
	/* A path missing from the working tree cannot be out of date. */
	if (errno == ENOENT)
		return 0;
	return o->gently ? -1 :
		error(ERRORMSG(o, not_uptodate_file), ce->name);
}
469
470 static void invalidate_ce_path(struct cache_entry *ce, struct unpack_trees_options *o)
471 {
472 if (ce)
473 cache_tree_invalidate_path(o->src_index->cache_tree, ce->name);
474 }
475
/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(struct cache_entry *ce, const char *action,
		struct unpack_trees_options *o)
{
	/* Always "clean" for now; see the comment above. */
	return 0;
}
488
489 static int verify_clean_subdirectory(struct cache_entry *ce, const char *action,
490 struct unpack_trees_options *o)
491 {
492 /*
493 * we are about to extract "ce->name"; we would not want to lose
494 * anything in the existing directory there.
495 */
496 int namelen;
497 int pos, i;
498 struct dir_struct d;
499 char *pathbuf;
500 int cnt = 0;
501 unsigned char sha1[20];
502
503 if (S_ISGITLINK(ce->ce_mode) &&
504 resolve_gitlink_ref(ce->name, "HEAD", sha1) == 0) {
505 /* If we are not going to update the submodule, then
506 * we don't care.
507 */
508 if (!hashcmp(sha1, ce->sha1))
509 return 0;
510 return verify_clean_submodule(ce, action, o);
511 }
512
513 /*
514 * First let's make sure we do not have a local modification
515 * in that directory.
516 */
517 namelen = strlen(ce->name);
518 pos = index_name_pos(o->src_index, ce->name, namelen);
519 if (0 <= pos)
520 return cnt; /* we have it as nondirectory */
521 pos = -pos - 1;
522 for (i = pos; i < o->src_index->cache_nr; i++) {
523 struct cache_entry *ce = o->src_index->cache[i];
524 int len = ce_namelen(ce);
525 if (len < namelen ||
526 strncmp(ce->name, ce->name, namelen) ||
527 ce->name[namelen] != '/')
528 break;
529 /*
530 * ce->name is an entry in the subdirectory.
531 */
532 if (!ce_stage(ce)) {
533 if (verify_uptodate(ce, o))
534 return -1;
535 add_entry(o, ce, CE_REMOVE, 0);
536 }
537 cnt++;
538 }
539
540 /*
541 * Then we need to make sure that we do not lose a locally
542 * present file that is not ignored.
543 */
544 pathbuf = xmalloc(namelen + 2);
545 memcpy(pathbuf, ce->name, namelen);
546 strcpy(pathbuf+namelen, "/");
547
548 memset(&d, 0, sizeof(d));
549 if (o->dir)
550 d.exclude_per_dir = o->dir->exclude_per_dir;
551 i = read_directory(&d, ce->name, pathbuf, namelen+1, NULL);
552 if (i)
553 return o->gently ? -1 :
554 error(ERRORMSG(o, not_uptodate_dir), ce->name);
555 free(pathbuf);
556 return cnt;
557 }
558
559 /*
560 * This gets called when there was no index entry for the tree entry 'dst',
561 * but we found a file in the working tree that 'lstat()' said was fine,
562 * and we're on a case-insensitive filesystem.
563 *
564 * See if we can find a case-insensitive match in the index that also
565 * matches the stat information, and assume it's that other file!
566 */
567 static int icase_exists(struct unpack_trees_options *o, struct cache_entry *dst, struct stat *st)
568 {
569 struct cache_entry *src;
570
571 src = index_name_exists(o->src_index, dst->name, ce_namelen(dst), 1);
572 return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID);
573 }
574
/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 *
 * "action" is only used in the error message ("overwritten"/"removed").
 * Returns 0 when overwriting is OK, -1 / error() otherwise.
 */
static int verify_absent(struct cache_entry *ce, const char *action,
		struct unpack_trees_options *o)
{
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	/* A symlink or missing leading directory means nothing can
	 * actually exist at ce->name; nothing to protect. */
	if (has_symlink_or_noent_leading_path(ce_namelen(ce), ce->name))
		return 0;

	if (!lstat(ce->name, &st)) {
		int cnt;
		int dtype = ce_to_dtype(ce);
		struct cache_entry *result;

		/*
		 * It may be that the 'lstat()' succeeded even though
		 * target 'ce' was absent, because there is an old
		 * entry that is different only in case..
		 *
		 * Ignore that lstat() if it matches.
		 */
		if (ignore_case && icase_exists(o, ce, &st))
			return 0;

		if (o->dir && excluded(o->dir, ce->name, &dtype))
			/*
			 * ce->name is explicitly excluded, so it is Ok to
			 * overwrite it.
			 */
			return 0;
		if (S_ISDIR(st.st_mode)) {
			/*
			 * We are checking out path "foo" and
			 * found "foo/." in the working tree.
			 * This is tricky -- if we have modified
			 * files that are in "foo/" we would lose
			 * it.
			 */
			cnt = verify_clean_subdirectory(ce, action, o);

			/*
			 * If this removed entries from the index,
			 * what that means is:
			 *
			 * (1) the caller unpack_trees_rec() saw path/foo
			 * in the index, and it has not removed it because
			 * it thinks it is handling 'path' as blob with
			 * D/F conflict;
			 * (2) we will return "ok, we placed a merged entry
			 * in the index" which would cause o->pos to be
			 * incremented by one;
			 * (3) however, original o->pos now has 'path/foo'
			 * marked with "to be removed".
			 *
			 * We need to increment it by the number of
			 * deleted entries here.
			 */
			o->pos += cnt;
			return 0;
		}

		/*
		 * The previous round may already have decided to
		 * delete this path, which is in a subdirectory that
		 * is being replaced with a blob.
		 */
		result = index_name_exists(&o->result, ce->name, ce_namelen(ce), 0);
		if (result) {
			if (result->ce_flags & CE_REMOVE)
				return 0;
		}

		return o->gently ? -1 :
			error(ERRORMSG(o, would_lose_untracked), ce->name, action);
	}
	return 0;
}
658
659 static int merged_entry(struct cache_entry *merge, struct cache_entry *old,
660 struct unpack_trees_options *o)
661 {
662 int update = CE_UPDATE;
663
664 if (old) {
665 /*
666 * See if we can re-use the old CE directly?
667 * That way we get the uptodate stat info.
668 *
669 * This also removes the UPDATE flag on a match; otherwise
670 * we will end up overwriting local changes in the work tree.
671 */
672 if (same(old, merge)) {
673 copy_cache_entry(merge, old);
674 update = 0;
675 } else {
676 if (verify_uptodate(old, o))
677 return -1;
678 invalidate_ce_path(old, o);
679 }
680 }
681 else {
682 if (verify_absent(merge, "overwritten", o))
683 return -1;
684 invalidate_ce_path(merge, o);
685 }
686
687 add_entry(o, merge, update, CE_STAGEMASK);
688 return 1;
689 }
690
691 static int deleted_entry(struct cache_entry *ce, struct cache_entry *old,
692 struct unpack_trees_options *o)
693 {
694 /* Did it exist in the index? */
695 if (!old) {
696 if (verify_absent(ce, "removed", o))
697 return -1;
698 return 0;
699 }
700 if (verify_uptodate(old, o))
701 return -1;
702 add_entry(o, ce, CE_REMOVE, 0);
703 invalidate_ce_path(ce, o);
704 return 1;
705 }
706
/* Carry "ce" into the result unchanged; reports one entry used. */
static int keep_entry(struct cache_entry *ce, struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}
712
#if DBRT_DEBUG
/* Debug helper: print one stage entry (mode, sha1, stage, name). */
static void show_stage_entry(FILE *o,
		const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
}
#endif
728
/*
 * Three-way merge of one path.  stages[0] is the current index entry,
 * stages[1..head_idx-1] the ancestor(s), stages[head_idx] the head and
 * stages[head_idx+1] the remote.  The "#NN" comments appear to refer
 * to the merge case table in the git-read-tree documentation.  Returns
 * the number of entries kept in the result, or a negative value on
 * error.
 */
int threeway_merge(struct cache_entry **stages, struct unpack_trees_options *o)
{
	struct cache_entry *index;
	struct cache_entry *head;
	struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	/* Scan the ancestor stages. */
	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	/* Treat the D/F conflict marker as "absent", but remember it. */
	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/* First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		/* Which ancestor stage (if any) matches head/remote? */
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/* We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return o->gently ? -1 : reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return o->gently ? -1 : reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/* Under the new "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head && !df_conflict_head;
		int remote_deleted = !remote && !df_conflict_remote;
		struct cache_entry *ce = NULL;

		/* Pick any entry that carries this path's name, for the
		 * verify_absent() check below. */
		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, "removed", o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		/* Keep one ancestor stage alongside head/remote below. */
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}
888
/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast forward", favoring a successful merge
 * over a merge failure when it makes sense. For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 * src[0] is the current index entry, src[1] the old tree, src[2] the
 * new tree; the numbered comments refer to the case table in that
 * document.
 */
int twoway_merge(struct cache_entry **src, struct unpack_trees_options *o)
{
	struct cache_entry *current = src[0];
	struct cache_entry *oldtree = src[1];
	struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	/* D/F conflict markers count as "absent" here. */
	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if ((!oldtree && !newtree) || /* 4 and 5 */
		    (!oldtree && newtree &&
		     same(current, newtree)) || /* 6 and 7 */
		    (oldtree && newtree &&
		     same(oldtree, newtree)) || /* 14 and 15 */
		    (oldtree && newtree &&
		     !same(oldtree, newtree) && /* 18 and 19 */
		     same(current, newtree))) {
			return keep_entry(current, o);
		}
		else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		}
		else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		}
		else {
			/* all other failures */
			if (oldtree)
				return o->gently ? -1 : reject_merge(oldtree, o);
			if (current)
				return o->gently ? -1 : reject_merge(current, o);
			if (newtree)
				return o->gently ? -1 : reject_merge(newtree, o);
			return -1;
		}
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}
957
958 /*
959 * Bind merge.
960 *
961 * Keep the index entries at stage0, collapse stage1 but make sure
962 * stage0 does not have anything there.
963 */
964 int bind_merge(struct cache_entry **src,
965 struct unpack_trees_options *o)
966 {
967 struct cache_entry *old = src[0];
968 struct cache_entry *a = src[1];
969
970 if (o->merge_size != 1)
971 return error("Cannot do a bind merge of %d trees\n",
972 o->merge_size);
973 if (a && old)
974 return o->gently ? -1 :
975 error(ERRORMSG(o, bind_overlap), a->name, old->name);
976 if (!a)
977 return keep_entry(old, o);
978 else
979 return merged_entry(a, NULL, o);
980 }
981
/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(struct cache_entry **src, struct unpack_trees_options *o)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	/* Gone from the tree: record the deletion. */
	if (!a)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset) {
			/*
			 * With reset, re-check the working tree file and
			 * schedule a checkout if it no longer matches the
			 * index entry.
			 */
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID))
				update |= CE_UPDATE;
		}
		/* Keep the old entry (with its stat info); maybe update. */
		add_entry(o, old, update, 0);
		return 0;
	}
	return merged_entry(a, old, o);
}