Remove last vestiges of generic tree_entry_list
[git/git.git] / builtin-read-tree.c
1 /*
2 * GIT - The information manager from hell
3 *
4 * Copyright (C) Linus Torvalds, 2005
5 */
6 #define DBRT_DEBUG 1
7
8 #include "cache.h"
9
10 #include "object.h"
11 #include "tree.h"
12 #include "tree-walk.h"
13 #include "cache-tree.h"
14 #include <sys/time.h>
15 #include <signal.h>
16 #include "builtin.h"
17
/* Command-line flags, set in cmd_read_tree() */
static int reset = 0;		/* --reset: drop unmerged entries, force updates */
static int merge = 0;		/* -m / --reset: perform a merge */
static int update = 0;		/* -u: update working tree files after the merge */
static int index_only = 0;	/* -i: merge into the index, never touch worktree */
static int nontrivial_merge = 0;	/* set when a merge fn hit a non-trivial case */
static int trivial_merges_only = 0;	/* --trivial */
static int aggressive = 0;	/* --aggressive */
static int verbose_update = 0;	/* -v: show checkout progress */
static volatile int progress_update = 0;	/* set from the SIGALRM handler */

static int head_idx = -1;	/* stage number (1-based) of the HEAD tree */
static int merge_size = 0;	/* number of trees participating in the merge */

static struct object_list *trees = NULL;	/* trees named on the command line */

/* Sentinel cache entry marking a slot blocked by a directory/file conflict. */
static struct cache_entry df_conflict_entry = {
};

/* One parsed tree entry; name/sha1 point into the owning tree's buffer. */
struct tree_entry_list {
	struct tree_entry_list *next;
	unsigned directory : 1;
	unsigned executable : 1;
	unsigned symlink : 1;
	unsigned int mode;
	const char *name;
	const unsigned char *sha1;
};

/* Sentinel list marking a directory/file conflict; note it links to itself. */
static struct tree_entry_list df_conflict_list = {
	.name = NULL,
	.next = &df_conflict_list
};

/* Per-path merge callback: src[] holds the entries for each stage. */
typedef int (*merge_fn_t)(struct cache_entry **src);
52
/*
 * Parse the already-read buffer of "tree" into a singly linked list of
 * tree_entry_list nodes, one per entry, preserving tree order.  The
 * name/sha1 pointers alias tree->buffer, so the tree's buffer must stay
 * alive as long as the list is used.  Caller owns the returned nodes.
 */
static struct tree_entry_list *create_tree_entry_list(struct tree *tree)
{
	struct tree_desc desc;
	struct tree_entry_list *ret = NULL;
	struct tree_entry_list **list_p = &ret;

	desc.buf = tree->buffer;
	desc.size = tree->size;

	while (desc.size) {
		unsigned mode;
		const char *path;
		const unsigned char *sha1;
		struct tree_entry_list *entry;

		sha1 = tree_entry_extract(&desc, &path, &mode);
		update_tree_entry(&desc);

		entry = xmalloc(sizeof(struct tree_entry_list));
		entry->name = path;
		entry->sha1 = sha1;
		entry->mode = mode;
		entry->directory = S_ISDIR(mode) != 0;
		entry->executable = (mode & S_IXUSR) != 0;
		entry->symlink = S_ISLNK(mode) != 0;
		entry->next = NULL;

		/* append at the tail to keep the tree's sort order */
		*list_p = entry;
		list_p = &entry->next;
	}
	return ret;
}
85
/*
 * Compare two names the way they sort inside a tree object, where a
 * directory name is treated as if it had a trailing '/'.  Returns a
 * value <0, ==0 or >0 in the usual strcmp() sense.
 */
static int entcmp(const char *name1, int dir1, const char *name2, int dir2)
{
	size_t len1 = strlen(name1);
	size_t len2 = strlen(name2);
	size_t common = (len1 < len2) ? len1 : len2;
	int cmp = memcmp(name1, name2, common);
	unsigned char c1, c2;

	if (cmp)
		return cmp;

	/* shared prefix: look at the first byte past it, with the
	 * virtual '/' appended to directory names */
	c1 = name1[common];
	c2 = name2[common];
	if (dir1 && !c1)
		c1 = '/';
	if (dir2 && !c2)
		c2 = '/';

	if (c1 != c2)
		return (c1 < c2) ? -1 : 1;
	if (c1 && c2)
		return (int)len1 - (int)len2;
	return 0;
}
106
107 static int unpack_trees_rec(struct tree_entry_list **posns, int len,
108 const char *base, merge_fn_t fn, int *indpos)
109 {
110 int baselen = strlen(base);
111 int src_size = len + 1;
112 do {
113 int i;
114 const char *first;
115 int firstdir = 0;
116 int pathlen;
117 unsigned ce_size;
118 struct tree_entry_list **subposns;
119 struct cache_entry **src;
120 int any_files = 0;
121 int any_dirs = 0;
122 char *cache_name;
123 int ce_stage;
124
125 /* Find the first name in the input. */
126
127 first = NULL;
128 cache_name = NULL;
129
130 /* Check the cache */
131 if (merge && *indpos < active_nr) {
132 /* This is a bit tricky: */
133 /* If the index has a subdirectory (with
134 * contents) as the first name, it'll get a
135 * filename like "foo/bar". But that's after
136 * "foo", so the entry in trees will get
137 * handled first, at which point we'll go into
138 * "foo", and deal with "bar" from the index,
139 * because the base will be "foo/". The only
140 * way we can actually have "foo/bar" first of
141 * all the things is if the trees don't
142 * contain "foo" at all, in which case we'll
143 * handle "foo/bar" without going into the
144 * directory, but that's fine (and will return
145 * an error anyway, with the added unknown
146 * file case.
147 */
148
149 cache_name = active_cache[*indpos]->name;
150 if (strlen(cache_name) > baselen &&
151 !memcmp(cache_name, base, baselen)) {
152 cache_name += baselen;
153 first = cache_name;
154 } else {
155 cache_name = NULL;
156 }
157 }
158
159 #if DBRT_DEBUG > 1
160 if (first)
161 printf("index %s\n", first);
162 #endif
163 for (i = 0; i < len; i++) {
164 if (!posns[i] || posns[i] == &df_conflict_list)
165 continue;
166 #if DBRT_DEBUG > 1
167 printf("%d %s\n", i + 1, posns[i]->name);
168 #endif
169 if (!first || entcmp(first, firstdir,
170 posns[i]->name,
171 posns[i]->directory) > 0) {
172 first = posns[i]->name;
173 firstdir = posns[i]->directory;
174 }
175 }
176 /* No name means we're done */
177 if (!first)
178 return 0;
179
180 pathlen = strlen(first);
181 ce_size = cache_entry_size(baselen + pathlen);
182
183 src = xcalloc(src_size, sizeof(struct cache_entry *));
184
185 subposns = xcalloc(len, sizeof(struct tree_list_entry *));
186
187 if (cache_name && !strcmp(cache_name, first)) {
188 any_files = 1;
189 src[0] = active_cache[*indpos];
190 remove_cache_entry_at(*indpos);
191 }
192
193 for (i = 0; i < len; i++) {
194 struct cache_entry *ce;
195
196 if (!posns[i] ||
197 (posns[i] != &df_conflict_list &&
198 strcmp(first, posns[i]->name))) {
199 continue;
200 }
201
202 if (posns[i] == &df_conflict_list) {
203 src[i + merge] = &df_conflict_entry;
204 continue;
205 }
206
207 if (posns[i]->directory) {
208 struct tree *tree = lookup_tree(posns[i]->sha1);
209 any_dirs = 1;
210 parse_tree(tree);
211 subposns[i] = create_tree_entry_list(tree);
212 posns[i] = posns[i]->next;
213 src[i + merge] = &df_conflict_entry;
214 continue;
215 }
216
217 if (!merge)
218 ce_stage = 0;
219 else if (i + 1 < head_idx)
220 ce_stage = 1;
221 else if (i + 1 > head_idx)
222 ce_stage = 3;
223 else
224 ce_stage = 2;
225
226 ce = xcalloc(1, ce_size);
227 ce->ce_mode = create_ce_mode(posns[i]->mode);
228 ce->ce_flags = create_ce_flags(baselen + pathlen,
229 ce_stage);
230 memcpy(ce->name, base, baselen);
231 memcpy(ce->name + baselen, first, pathlen + 1);
232
233 any_files = 1;
234
235 memcpy(ce->sha1, posns[i]->sha1, 20);
236 src[i + merge] = ce;
237 subposns[i] = &df_conflict_list;
238 posns[i] = posns[i]->next;
239 }
240 if (any_files) {
241 if (merge) {
242 int ret;
243
244 #if DBRT_DEBUG > 1
245 printf("%s:\n", first);
246 for (i = 0; i < src_size; i++) {
247 printf(" %d ", i);
248 if (src[i])
249 printf("%s\n", sha1_to_hex(src[i]->sha1));
250 else
251 printf("\n");
252 }
253 #endif
254 ret = fn(src);
255
256 #if DBRT_DEBUG > 1
257 printf("Added %d entries\n", ret);
258 #endif
259 *indpos += ret;
260 } else {
261 for (i = 0; i < src_size; i++) {
262 if (src[i]) {
263 add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
264 }
265 }
266 }
267 }
268 if (any_dirs) {
269 char *newbase = xmalloc(baselen + 2 + pathlen);
270 memcpy(newbase, base, baselen);
271 memcpy(newbase + baselen, first, pathlen);
272 newbase[baselen + pathlen] = '/';
273 newbase[baselen + pathlen + 1] = '\0';
274 if (unpack_trees_rec(subposns, len, newbase, fn,
275 indpos))
276 return -1;
277 free(newbase);
278 }
279 free(subposns);
280 free(src);
281 } while (1);
282 }
283
/* Abort the whole merge: "ce" would be clobbered by the merge result. */
static void reject_merge(struct cache_entry *ce)
{
	die("Entry '%s' would be overwritten by merge. Cannot merge.",
	    ce->name);
}
289
/* Unlink the last component and attempt to remove leading
 * directories, in case this unlink is the removal of the
 * last entry in the directory -- empty directories are removed.
 */
static void unlink_entry(char *name)
{
	char *cp, *prev;

	if (unlink(name))
		return;
	prev = NULL;
	while (1) {
		int status;
		cp = strrchr(name, '/');
		/* restore the '/' we truncated on the previous pass */
		if (prev)
			*prev = '/';
		if (!cp)
			break;

		/* temporarily truncate at this '/' and try to rmdir */
		*cp = 0;
		status = rmdir(name);
		if (status) {
			/* directory not empty (or other error): restore, stop */
			*cp = '/';
			break;
		}
		prev = cp;
	}
}
318
/* SIGALRM handler: ask the main loop to refresh the progress meter. */
static void progress_interval(int signum)
{
	progress_update = 1;
}
323
/*
 * Arrange for SIGALRM to fire once per second via an interval timer,
 * so check_updates() can refresh its progress display.
 */
static void setup_progress_signal(void)
{
	struct sigaction sa;
	struct itimerval v;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = progress_interval;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;	/* don't let the timer abort syscalls */
	sigaction(SIGALRM, &sa, NULL);

	v.it_interval.tv_sec = 1;
	v.it_interval.tv_usec = 0;
	v.it_value = v.it_interval;
	setitimer(ITIMER_REAL, &v, NULL);
}
340
/*
 * Walk the resulting index and bring the working tree in sync with it:
 * entries with ce_mode == 0 are deletions, entries flagged CE_UPDATE
 * get checked out.  With -u -v, prints a percentage meter driven by
 * the SIGALRM timer from setup_progress_signal().
 */
static void check_updates(struct cache_entry **src, int nr)
{
	static struct checkout state = {
		.base_dir = "",
		.force = 1,
		.quiet = 1,
		.refresh_cache = 1,
	};
	unsigned short mask = htons(CE_UPDATE);
	/* last_percent starts out of range so the first report always prints */
	unsigned last_percent = 200, cnt = 0, total = 0;

	if (update && verbose_update) {
		/* count how many entries actually need work */
		for (total = cnt = 0; cnt < nr; cnt++) {
			struct cache_entry *ce = src[cnt];
			if (!ce->ce_mode || ce->ce_flags & mask)
				total++;
		}

		/* Don't bother doing this for very small updates */
		if (total < 250)
			total = 0;

		if (total) {
			fprintf(stderr, "Checking files out...\n");
			setup_progress_signal();
			progress_update = 1;
		}
		cnt = 0;
	}

	while (nr--) {
		struct cache_entry *ce = *src++;

		if (total) {
			if (!ce->ce_mode || ce->ce_flags & mask) {
				unsigned percent;
				cnt++;
				percent = (cnt * 100) / total;
				if (percent != last_percent ||
				    progress_update) {
					fprintf(stderr, "%4u%% (%u/%u) done\r",
						percent, cnt, total);
					last_percent = percent;
				}
			}
		}
		if (!ce->ce_mode) {
			/* mode 0 marks an entry scheduled for deletion */
			if (update)
				unlink_entry(ce->name);
			continue;
		}
		if (ce->ce_flags & mask) {
			ce->ce_flags &= ~mask;
			if (update)
				checkout_entry(ce, &state, NULL);
		}
	}
	if (total) {
		/* stop the timer and terminate the meter line */
		signal(SIGALRM, SIG_IGN);
		fputc('\n', stderr);
	}
}
403
/*
 * Build per-tree entry lists for every tree on the global "trees" list,
 * run the recursive unpack/merge with "fn", then sync the working tree.
 * Returns 0 on success, -1 on failure.
 */
static int unpack_trees(merge_fn_t fn)
{
	int indpos = 0;
	unsigned len = object_list_length(trees);
	struct tree_entry_list **posns;
	int i;
	struct object_list *posn = trees;
	merge_size = len;

	if (len) {
		posns = xmalloc(len * sizeof(struct tree_entry_list *));
		for (i = 0; i < len; i++) {
			posns[i] = create_tree_entry_list((struct tree *) posn->item);
			posn = posn->next;
		}
		if (unpack_trees_rec(posns, len, "", fn, &indpos))
			return -1;
	}

	if (trivial_merges_only && nontrivial_merge)
		die("Merge requires file-level merging");

	check_updates(active_cache, active_nr);
	return 0;
}
429
/*
 * Resolve sha1 (peeling tags/commits as needed) to a tree and append
 * it to the global "trees" list.  Returns 0 on success, -1 on failure.
 */
static int list_tree(unsigned char *sha1)
{
	struct tree *tree = parse_tree_indirect(sha1);
	if (!tree)
		return -1;
	object_list_append(&tree->object, &trees);
	return 0;
}
438
439 static int same(struct cache_entry *a, struct cache_entry *b)
440 {
441 if (!!a != !!b)
442 return 0;
443 if (!a && !b)
444 return 1;
445 return a->ce_mode == b->ce_mode &&
446 !memcmp(a->sha1, b->sha1, 20);
447 }
448
449
/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static void verify_uptodate(struct cache_entry *ce)
{
	struct stat st;

	if (index_only || reset)
		return;

	if (!lstat(ce->name, &st)) {
		unsigned changed = ce_match_stat(ce, &st, 1);
		if (!changed)
			return;
		/* clear errno so a stale value can't satisfy the ENOENT
		 * check below when lstat() itself succeeded */
		errno = 0;
	}
	/* NOTE(review): this branch looks unreachable -- reset already
	 * returned at the top of the function; left as-is, confirm intent */
	if (reset) {
		ce->ce_flags |= htons(CE_UPDATE);
		return;
	}
	if (errno == ENOENT)
		return;
	die("Entry '%s' not uptodate. Cannot merge.", ce->name);
}
475
/* Drop any cached cache-tree data covering ce's path (no-op for NULL). */
static void invalidate_ce_path(struct cache_entry *ce)
{
	if (ce)
		cache_tree_invalidate_path(active_cache_tree, ce->name);
}
481
/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked.
 */
static void verify_absent(const char *path, const char *action)
{
	struct stat st;

	/* irrelevant when the working tree won't be touched */
	if (index_only || reset || !update)
		return;
	if (!lstat(path, &st))
		die("Untracked working tree file '%s' "
		    "would be %s by merge.", path, action);
}
496
/*
 * Record "merge" as the merge result for its path, replacing "old"
 * (the previous index entry at that path, may be NULL).  Returns 1
 * (one index position consumed).
 */
static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
{
	merge->ce_flags |= htons(CE_UPDATE);
	if (old) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on
		 * a match.
		 */
		if (same(old, merge)) {
			*merge = *old;
		} else {
			verify_uptodate(old);
			invalidate_ce_path(old);
		}
	}
	else {
		verify_absent(merge->name, "overwritten");
		invalidate_ce_path(merge);
	}

	/* the merge result lives at stage 0 */
	merge->ce_flags &= ~htons(CE_STAGEMASK);
	add_cache_entry(merge, ADD_CACHE_OK_TO_ADD);
	return 1;
}
524
/*
 * Record the deletion of ce's path: mode 0 marks the entry so that
 * check_updates() removes it from the working tree.  Returns 1.
 */
static int deleted_entry(struct cache_entry *ce, struct cache_entry *old)
{
	if (old)
		verify_uptodate(old);
	else
		verify_absent(ce->name, "removed");
	ce->ce_mode = 0;	/* deletion marker */
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	invalidate_ce_path(ce);
	return 1;
}
536
/* Keep "ce" in the index unchanged.  Returns 1. */
static int keep_entry(struct cache_entry *ce)
{
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	return 1;
}
542
#if DBRT_DEBUG
/* Debug helper: print one stage entry (or "(missing)") to stream "o". */
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ntohl(ce->ce_mode),
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
}
#endif
558
/*
 * Three-way merge of one path.  stages[0] is the index entry,
 * stages[1..head_idx-1] the ancestor(s), stages[head_idx] our HEAD,
 * and stages[head_idx + 1] the remote.  Case numbers (#2, #14ALT, ...)
 * refer to the trivial-merge table in the git-read-tree documentation.
 * Returns the number of index entries consumed, or dies on a reject.
 */
static int threeway_merge(struct cache_entry **stages)
{
	struct cache_entry *index;
	struct cache_entry *head;
	struct cache_entry *remote = stages[head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;
	const char *path = NULL;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	/* scan the ancestor stages */
	for (i = 1; i < head_idx; i++) {
		if (!stages[i])
			any_anc_missing = 1;
		else {
			if (!path)
				path = stages[i]->name;
			no_anc_exists = 0;
		}
	}

	index = stages[0];
	head = stages[head_idx];

	/* a df_conflict sentinel means "blocked by a directory/file
	 * conflict", not a real entry */
	if (head == &df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == &df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	if (!path && index)
		path = index->name;
	if (!path && head)
		path = head->name;
	if (!path && remote)
		path = remote->name;

	/* First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/* We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			reject_merge(index);
		return merged_entry(remote, index);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head)) {
		reject_merge(index);
	}

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/* Under the new "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (aggressive) {
		int head_deleted = !head && !df_conflict_head;
		int remote_deleted = !remote && !df_conflict_remote;
		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index);
			else if (path)
				verify_absent(path, "removed");
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		verify_uptodate(index);
	}
	else if (path)
		verify_absent(path, "overwritten");

	nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		/* keep one ancestor stage so the conflict is recorded */
		for (i = 1; i < head_idx; i++) {
			if (stages[i]) {
				keep_entry(stages[i]);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head); }
	if (remote) { count += keep_entry(remote); }
	return count;
}
712
/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 * src[0] is the current index entry, src[1] the old tree, src[2] the
 * new tree; the numbered comments refer to the cases in that document.
 */
static int twoway_merge(struct cache_entry **src)
{
	struct cache_entry *current = src[0];
	struct cache_entry *oldtree = src[1], *newtree = src[2];

	if (merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     merge_size);

	if (current) {
		if ((!oldtree && !newtree) || /* 4 and 5 */
		    (!oldtree && newtree &&
		     same(current, newtree)) || /* 6 and 7 */
		    (oldtree && newtree &&
		     same(oldtree, newtree)) || /* 14 and 15 */
		    (oldtree && newtree &&
		     !same(oldtree, newtree) && /* 18 and 19*/
		     same(current, newtree))) {
			return keep_entry(current);
		}
		else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current);
		}
		else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current);
		}
		else {
			/* all other failures */
			if (oldtree)
				reject_merge(oldtree);
			if (current)
				reject_merge(current);
			if (newtree)
				reject_merge(newtree);
			return -1;
		}
	}
	else if (newtree)
		return merged_entry(newtree, current);
	else
		return deleted_entry(oldtree, current);
}
767
/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
static int oneway_merge(struct cache_entry **src)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     merge_size);

	if (!a)
		return deleted_entry(old, old);
	if (old && same(old, a)) {
		if (reset) {
			/* --reset: force re-checkout of stale entries */
			struct stat st;
			if (lstat(old->name, &st) ||
			    ce_match_stat(old, &st, 1))
				old->ce_flags |= htons(CE_UPDATE);
		}
		return keep_entry(old);
	}
	return merged_entry(a, old);
}
796
/*
 * Read the index, dropping any unmerged (staged) entries in place and
 * invalidating their cache-tree paths.  Returns the number of entries
 * dropped (non-zero means the index had unresolved conflicts).
 */
static int read_cache_unmerged(void)
{
	int i, deleted;
	struct cache_entry **dst;

	read_cache();
	dst = active_cache;
	deleted = 0;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			/* unmerged: drop it */
			deleted++;
			invalidate_ce_path(ce);
			continue;
		}
		/* compact kept entries over the dropped ones */
		if (deleted)
			*dst = ce;
		dst++;
	}
	active_nr -= deleted;
	return deleted;
}
819
/*
 * Populate cache-tree node "it" from the (already parsed) "tree",
 * recursing into subtrees; entry_count becomes the total number of
 * non-tree entries in the whole subtree.
 */
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
	struct tree_desc desc;
	int cnt;

	memcpy(it->sha1, tree->object.sha1, 20);
	desc.buf = tree->buffer;
	desc.size = tree->size;
	cnt = 0;
	while (desc.size) {
		unsigned mode;
		const char *name;
		const unsigned char *sha1;

		sha1 = tree_entry_extract(&desc, &name, &mode);
		update_tree_entry(&desc);
		if (!S_ISDIR(mode))
			cnt++;	/* non-tree entry counts as one */
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(sha1);
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, name);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}
851
/*
 * Build a fresh cache-tree for the whole index from the first tree
 * that was read; valid because the index matches that tree exactly.
 */
static void prime_cache_tree(void)
{
	struct tree *tree = (struct tree *)trees->item;
	if (!tree)
		return;
	active_cache_tree = cache_tree();
	prime_cache_tree_rec(active_cache_tree, tree);

}
861
/* usage string shown on invalid command-line combinations */
static const char read_tree_usage[] = "git-read-tree (<sha> | -m [--aggressive] [-u | -i] <sha1> [<sha2> [<sha3>]])";

/* lockfile handle for the index file being rewritten */
static struct cache_file cache_file;
865
866 int cmd_read_tree(int argc, const char **argv, char **envp)
867 {
868 int i, newfd, stage = 0;
869 unsigned char sha1[20];
870 merge_fn_t fn = NULL;
871
872 setup_git_directory();
873 git_config(git_default_config);
874
875 newfd = hold_index_file_for_update(&cache_file, get_index_file());
876 if (newfd < 0)
877 die("unable to create new cachefile");
878
879 git_config(git_default_config);
880
881 merge = 0;
882 reset = 0;
883 for (i = 1; i < argc; i++) {
884 const char *arg = argv[i];
885
886 /* "-u" means "update", meaning that a merge will update
887 * the working tree.
888 */
889 if (!strcmp(arg, "-u")) {
890 update = 1;
891 continue;
892 }
893
894 if (!strcmp(arg, "-v")) {
895 verbose_update = 1;
896 continue;
897 }
898
899 /* "-i" means "index only", meaning that a merge will
900 * not even look at the working tree.
901 */
902 if (!strcmp(arg, "-i")) {
903 index_only = 1;
904 continue;
905 }
906
907 /* This differs from "-m" in that we'll silently ignore unmerged entries */
908 if (!strcmp(arg, "--reset")) {
909 if (stage || merge)
910 usage(read_tree_usage);
911 reset = 1;
912 merge = 1;
913 stage = 1;
914 read_cache_unmerged();
915 continue;
916 }
917
918 if (!strcmp(arg, "--trivial")) {
919 trivial_merges_only = 1;
920 continue;
921 }
922
923 if (!strcmp(arg, "--aggressive")) {
924 aggressive = 1;
925 continue;
926 }
927
928 /* "-m" stands for "merge", meaning we start in stage 1 */
929 if (!strcmp(arg, "-m")) {
930 if (stage || merge)
931 usage(read_tree_usage);
932 if (read_cache_unmerged())
933 die("you need to resolve your current index first");
934 stage = 1;
935 merge = 1;
936 continue;
937 }
938
939 /* using -u and -i at the same time makes no sense */
940 if (1 < index_only + update)
941 usage(read_tree_usage);
942
943 if (get_sha1(arg, sha1))
944 die("Not a valid object name %s", arg);
945 if (list_tree(sha1) < 0)
946 die("failed to unpack tree object %s", arg);
947 stage++;
948 }
949 if ((update||index_only) && !merge)
950 usage(read_tree_usage);
951
952 if (merge) {
953 if (stage < 2)
954 die("just how do you expect me to merge %d trees?", stage-1);
955 switch (stage - 1) {
956 case 1:
957 fn = oneway_merge;
958 break;
959 case 2:
960 fn = twoway_merge;
961 break;
962 case 3:
963 default:
964 fn = threeway_merge;
965 cache_tree_free(&active_cache_tree);
966 break;
967 }
968
969 if (stage - 1 >= 3)
970 head_idx = stage - 2;
971 else
972 head_idx = 1;
973 }
974
975 unpack_trees(fn);
976
977 /*
978 * When reading only one tree (either the most basic form,
979 * "-m ent" or "--reset ent" form), we can obtain a fully
980 * valid cache-tree because the index must match exactly
981 * what came from the tree.
982 */
983 if (trees && trees->item && (!merge || (stage == 2))) {
984 cache_tree_free(&active_cache_tree);
985 prime_cache_tree();
986 }
987
988 if (write_cache(newfd, active_cache, active_nr) ||
989 commit_index_file(&cache_file))
990 die("unable to write new index file");
991 return 0;
992 }