6b406f1d0081dd77bf572a6094b66458477cbd3d
[git/git.git] / fetch-pack.c
1 #include "cache.h"
2 #include "repository.h"
3 #include "config.h"
4 #include "lockfile.h"
5 #include "refs.h"
6 #include "pkt-line.h"
7 #include "commit.h"
8 #include "tag.h"
9 #include "exec-cmd.h"
10 #include "pack.h"
11 #include "sideband.h"
12 #include "fetch-pack.h"
13 #include "remote.h"
14 #include "run-command.h"
15 #include "connect.h"
16 #include "transport.h"
17 #include "version.h"
18 #include "prio-queue.h"
19 #include "sha1-array.h"
20 #include "oidset.h"
21 #include "packfile.h"
22 #include "object-store.h"
23
/* -1 means "not configured"; resolved against each other before use. */
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
/* Threshold deciding keep-pack (index-pack) vs explode (unpack-objects) in get_pack(). */
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
/* Set when the server advertised "no-done" and we are in stateless-rpc mode. */
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
/* Did the server advertise an "agent" capability? */
static int agent_supported;
static int server_supports_filtering;
/* Lock and path for the alternate shallow file used during deepening. */
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON	(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN	(1U << 3)
#define POPPED	(1U << 4)
#define ALTERNATE	(1U << 5)

/* Nonzero once find_common() has set flags on refs that need clearing on reentry. */
static int marked;

/*
 * After sending this many "have"s if we do not get any new ACK , we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

/* Candidate "have" commits, newest commit date first. */
static struct prio_queue rev_list = { compare_commits_by_commit_date };
/* non_common_revs counts queued commits not yet known to be common. */
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;
61
62 __attribute__((format (printf, 2, 3)))
63 static inline void print_verbose(const struct fetch_pack_args *args,
64 const char *fmt, ...)
65 {
66 va_list params;
67
68 if (!args->verbose)
69 return;
70
71 va_start(params, fmt);
72 vfprintf(stderr, fmt, params);
73 va_end(params);
74 fputc('\n', stderr);
75 }
76
/* Growable array of objects reachable from alternate object stores. */
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};
81
82 static void cache_one_alternate(const char *refname,
83 const struct object_id *oid,
84 void *vcache)
85 {
86 struct alternate_object_cache *cache = vcache;
87 struct object *obj = parse_object(the_repository, oid);
88
89 if (!obj || (obj->flags & ALTERNATE))
90 return;
91
92 obj->flags |= ALTERNATE;
93 ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
94 cache->items[cache->nr++] = obj;
95 }
96
97 static void for_each_cached_alternate(void (*cb)(struct object *))
98 {
99 static int initialized;
100 static struct alternate_object_cache cache;
101 size_t i;
102
103 if (!initialized) {
104 for_each_alternate_ref(cache_one_alternate, &cache);
105 initialized = 1;
106 }
107
108 for (i = 0; i < cache.nr; i++)
109 cb(cache.items[i]);
110 }
111
112 static void rev_list_push(struct commit *commit, int mark)
113 {
114 if (!(commit->object.flags & mark)) {
115 commit->object.flags |= mark;
116
117 if (parse_commit(commit))
118 return;
119
120 prio_queue_put(&rev_list, commit);
121
122 if (!(commit->object.flags & COMMON))
123 non_common_revs++;
124 }
125 }
126
127 static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
128 {
129 struct object *o = deref_tag(parse_object(the_repository, oid),
130 refname, 0);
131
132 if (o && o->type == OBJ_COMMIT)
133 rev_list_push((struct commit *)o, SEEN);
134
135 return 0;
136 }
137
/* each_ref_fn adapter around rev_list_insert_ref(); flag/cb_data unused. */
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}
143
144 static int clear_marks(const char *refname, const struct object_id *oid,
145 int flag, void *cb_data)
146 {
147 struct object *o = deref_tag(parse_object(the_repository, oid),
148 refname, 0);
149
150 if (o && o->type == OBJ_COMMIT)
151 clear_commit_marks((struct commit *)o,
152 COMMON | COMMON_REF | SEEN | POPPED);
153 return 0;
154 }
155
/*
 * This function marks a rev and its ancestors as common.
 * In some cases, it is desirable to mark only the ancestors (for example
 * when only the server does not yet know that they are common).
 *
 * dont_parse skips parsing unparsed commits, so ancestry past them is
 * not walked.  Note the recursion: deep histories recurse per parent.
 */
static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			/* It was counted as non-common when queued; undo that. */
			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}
189
/*
 * Get the next rev to send, ignoring the common.
 *
 * Pops commits from rev_list until one worth a "have" is found, pushing
 * parents back with a mark that encodes whether their ancestry still
 * needs to be advertised.  Returns NULL when the queue is exhausted or
 * nothing non-common remains.
 */
static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}
235
/* Server responses during "have" negotiation (see get_ack()). */
enum ack_type {
	NAK = 0,
	ACK,		/* plain ACK: negotiation is over */
	ACK_continue,	/* multi_ack: keep sending haves */
	ACK_common,	/* multi_ack_detailed: object is common */
	ACK_ready	/* multi_ack_detailed: server can make a pack */
};
243
244 static void consume_shallow_list(struct fetch_pack_args *args, int fd)
245 {
246 if (args->stateless_rpc && args->deepen) {
247 /* If we sent a depth we will get back "duplicate"
248 * shallow and unshallow commands every time there
249 * is a block of have lines exchanged.
250 */
251 char *line;
252 while ((line = packet_read_line(fd, NULL))) {
253 if (starts_with(line, "shallow "))
254 continue;
255 if (starts_with(line, "unshallow "))
256 continue;
257 die(_("git fetch-pack: expected shallow list"));
258 }
259 }
260 }
261
262 static enum ack_type get_ack(int fd, struct object_id *result_oid)
263 {
264 int len;
265 char *line = packet_read_line(fd, &len);
266 const char *arg;
267
268 if (!line)
269 die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
270 if (!strcmp(line, "NAK"))
271 return NAK;
272 if (skip_prefix(line, "ACK ", &arg)) {
273 if (!get_oid_hex(arg, result_oid)) {
274 arg += 40;
275 len -= arg - line;
276 if (len < 1)
277 return ACK;
278 if (strstr(arg, "continue"))
279 return ACK_continue;
280 if (strstr(arg, "common"))
281 return ACK_common;
282 if (strstr(arg, "ready"))
283 return ACK_ready;
284 return ACK;
285 }
286 }
287 if (skip_prefix(line, "ERR ", &arg))
288 die(_("remote error: %s"), arg);
289 die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
290 }
291
292 static void send_request(struct fetch_pack_args *args,
293 int fd, struct strbuf *buf)
294 {
295 if (args->stateless_rpc) {
296 send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
297 packet_flush(fd);
298 } else
299 write_or_die(fd, buf->buf, buf->len);
300 }
301
/* for_each_cached_alternate() callback: treat an alternate tip like a local ref. */
static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}
306
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

/*
 * Given the number of "have"s sent so far, compute the count at which
 * the next flush should happen.  Stateless RPC grows the window
 * exponentially up to LARGE_FLUSH, then by 10% per round; the
 * bidirectional case grows exponentially up to PIPESAFE_FLUSH, then
 * linearly to avoid overfilling the pipe.
 */
static int next_flush(int stateless_rpc, int count)
{
	if (stateless_rpc)
		return (count < LARGE_FLUSH) ? count * 2 : count * 11 / 10;

	return (count < PIPESAFE_FLUSH) ? count * 2 : count + PIPESAFE_FLUSH;
}
326
/*
 * Run the protocol v0 negotiation: send "want"s (with capabilities on
 * the first one), optional shallow/deepen parameters, then alternate
 * between batches of "have"s and reading ACK/NAK until the server is
 * ready or we give up.  Returns 0 if a common base was found, -1
 * otherwise (callers treat -1 as "no common commits", which is normal
 * for a clone).
 */
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	/* Seed the "have" queue with our ref tips and alternates. */
	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
				(o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			/* First want line carries the capability list. */
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		/* Nothing to fetch; tell the server we are done. */
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow(the_repository))
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	/* Remember the want/deepen prefix for stateless-rpc replays. */
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		/* The server answers a deepen request with shallow/unshallow lines. */
		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(the_repository, &oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(the_repository, &oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					/* Plain ACK ends the whole negotiation. */
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	/* Drain any remaining ACKs for the flushes still in flight. */
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
570
571 static struct commit_list *complete;
572
573 static int mark_complete(const struct object_id *oid)
574 {
575 struct object *o = parse_object(the_repository, oid);
576
577 while (o && o->type == OBJ_TAG) {
578 struct tag *t = (struct tag *) o;
579 if (!t->tagged)
580 break; /* broken repository */
581 o->flags |= COMPLETE;
582 o = parse_object(the_repository, &t->tagged->oid);
583 }
584 if (o && o->type == OBJ_COMMIT) {
585 struct commit *commit = (struct commit *)o;
586 if (!(commit->object.flags & COMPLETE)) {
587 commit->object.flags |= COMPLETE;
588 commit_list_insert(commit, &complete);
589 }
590 }
591 return 0;
592 }
593
/* each_ref_fn adapter around mark_complete(); refname/flag/cb_data unused. */
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
599
/*
 * Walk the date-sorted "complete" list from newest down to the cutoff
 * timestamp, propagating the COMPLETE flag to ancestors via
 * pop_most_recent_commit().
 */
static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}
609
610 static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
611 {
612 for (; refs; refs = refs->next)
613 oidset_insert(oids, &refs->old_oid);
614 }
615
/*
 * Lazily build a set of all advertised tip oids (from both ref lists)
 * and test membership of "id" in it.
 */
static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}
632
/*
 * Reduce *refs to the refs we actually want: those matching "sought"
 * (both lists are sorted by name, so one merge-style pass suffices),
 * plus everything when fetch_all is set.  Requests for raw object ids
 * are appended afterwards if the server allows them or the id is an
 * advertised tip.  Unmatched refs are freed; *refs is replaced by the
 * filtered list.
 */
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			/* Advance through sorted "sought" in lockstep. */
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		/* Only full-hex names that agree with old_oid qualify. */
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}
712
/* for_each_cached_alternate() callback: alternates count as complete. */
static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}
717
struct loose_object_iter {
	struct oidset *loose_object_set;
	/* Walked in lockstep with the loose objects to bound the work. */
	struct ref *refs;
};

/*
 * If the number of refs is not larger than the number of loose objects,
 * this function stops inserting.
 *
 * Returning nonzero makes for_each_loose_object() abort early, which
 * the caller detects to decide whether the set is usable.
 */
static int add_loose_objects_to_set(const struct object_id *oid,
				    const char *path,
				    void *data)
{
	struct loose_object_iter *iter = data;
	oidset_insert(iter->loose_object_set, oid);
	if (iter->refs == NULL)
		return 1;

	iter->refs = iter->refs->next;
	return 0;
}
739
/*
 * Determine which wanted refs we already have locally, marking objects
 * COMPLETE and seeding the negotiation queue with complete remote refs.
 * Also filters *refs down to what we actually want.  Returns 1 when
 * every remaining ref is already present (nothing to fetch), else 0.
 */
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	int use_oidset = 0;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/* Enumerate all loose objects or know refs are not so many. */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,
					    &iter, 0);

	save_commit_buffer = 0;

	/* Find the newest commit date among refs we already have. */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		unsigned int flags = OBJECT_INFO_QUICK;

		if (use_oidset &&
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		}

		if (!has_object_file_with_flags(&ref->old_oid, flags))
			continue;
		o = parse_object(the_repository, &ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	oidset_clear(&loose_oid_set);

	if (!args->no_dependents) {
		if (!args->deepen) {
			for_each_ref(mark_complete_oid, NULL);
			for_each_cached_alternate(mark_alternate_complete);
			commit_list_sort_by_date(&complete);
			if (cutoff)
				mark_recent_complete_commits(args, cutoff);
		}

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			if (!(o->flags & SEEN)) {
				rev_list_push((struct commit *)o, COMMON_REF | SEEN);

				mark_common((struct commit *)o, 1, 1);
			}
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	/* retval stays 1 only if every surviving ref is COMPLETE. */
	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	save_commit_buffer = old_save_commit_buffer;

	return retval;
}
839
/*
 * Async callback: demultiplex sideband data from the upload-pack fd
 * (band #1 to "out", band #2 to stderr), then close the output end.
 */
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret = recv_sideband("fetch-pack", xd[0], out);

	close(out);
	return ret;
}
849
/*
 * Receive the pack data on xd and feed it to index-pack (keeping the
 * pack) or unpack-objects (exploding it), based on keep_pack, the
 * unpack limit vs. the object count in the pack header, and whether
 * the pack came from a promisor remote.  Optionally demultiplexes
 * sideband first.  Returns 0 on success, dies on failure.
 */
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {
		/* Peek at the object count to pick keep vs. explode. */
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	/* We consumed the header above; hand it to the child explicitly. */
	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}
968
969 static int cmp_ref_by_name(const void *a_, const void *b_)
970 {
971 const struct ref *a = *((const struct ref **)a_);
972 const struct ref *b = *((const struct ref **)b_);
973 return strcmp(a->name, b->name);
974 }
975
/*
 * Protocol v0 fetch driver: sort the ref lists, reconcile advertised
 * server capabilities with the requested options, then negotiate
 * (find_common) and receive the pack (get_pack) unless everything is
 * already local.  Returns the (filtered) list of refs to be updated.
 */
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	/* Both lists sorted so filter_refs() can merge them in one pass. */
	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow(the_repository)) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	/* Drop requested options the server cannot honor. */
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}
1085
1086 static void add_shallow_requests(struct strbuf *req_buf,
1087 const struct fetch_pack_args *args)
1088 {
1089 if (is_repository_shallow(the_repository))
1090 write_shallow_commits(req_buf, 1, NULL);
1091 if (args->depth > 0)
1092 packet_buf_write(req_buf, "deepen %d", args->depth);
1093 if (args->deepen_since) {
1094 timestamp_t max_age = approxidate(args->deepen_since);
1095 packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
1096 }
1097 if (args->deepen_not) {
1098 int i;
1099 for (i = 0; i < args->deepen_not->nr; i++) {
1100 struct string_list_item *s = args->deepen_not->items + i;
1101 packet_buf_write(req_buf, "deepen-not %s", s->string);
1102 }
1103 }
1104 }
1105
1106 static void add_wants(const struct ref *wants, struct strbuf *req_buf)
1107 {
1108 for ( ; wants ; wants = wants->next) {
1109 const struct object_id *remote = &wants->old_oid;
1110 const char *remote_hex;
1111 struct object *o;
1112
1113 /*
1114 * If that object is complete (i.e. it is an ancestor of a
1115 * local ref), we tell them we have it but do not have to
1116 * tell them about its ancestors, which they already know
1117 * about.
1118 *
1119 * We use lookup_object here because we are only
1120 * interested in the case we *know* the object is
1121 * reachable and we have already scanned it.
1122 */
1123 if (((o = lookup_object(remote->hash)) != NULL) &&
1124 (o->flags & COMPLETE)) {
1125 continue;
1126 }
1127
1128 remote_hex = oid_to_hex(remote);
1129 packet_buf_write(req_buf, "want %s\n", remote_hex);
1130 }
1131 }
1132
1133 static void add_common(struct strbuf *req_buf, struct oidset *common)
1134 {
1135 struct oidset_iter iter;
1136 const struct object_id *oid;
1137 oidset_iter_init(common, &iter);
1138
1139 while ((oid = oidset_iter_next(&iter))) {
1140 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1141 }
1142 }
1143
1144 static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
1145 {
1146 int ret = 0;
1147 int haves_added = 0;
1148 const struct object_id *oid;
1149
1150 while ((oid = get_rev())) {
1151 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1152 if (++haves_added >= *haves_to_send)
1153 break;
1154 }
1155
1156 *in_vain += haves_added;
1157 if (!haves_added || *in_vain >= MAX_IN_VAIN) {
1158 /* Send Done */
1159 packet_buf_write(req_buf, "done\n");
1160 ret = 1;
1161 }
1162
1163 /* Increase haves to send on next round */
1164 *haves_to_send = next_flush(1, *haves_to_send);
1165
1166 return ret;
1167 }
1168
1169 static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
1170 const struct ref *wants, struct oidset *common,
1171 int *haves_to_send, int *in_vain)
1172 {
1173 int ret = 0;
1174 struct strbuf req_buf = STRBUF_INIT;
1175
1176 if (server_supports_v2("fetch", 1))
1177 packet_buf_write(&req_buf, "command=fetch");
1178 if (server_supports_v2("agent", 0))
1179 packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
1180 if (args->server_options && args->server_options->nr &&
1181 server_supports_v2("server-option", 1)) {
1182 int i;
1183 for (i = 0; i < args->server_options->nr; i++)
1184 packet_write_fmt(fd_out, "server-option=%s",
1185 args->server_options->items[i].string);
1186 }
1187
1188 packet_buf_delim(&req_buf);
1189 if (args->use_thin_pack)
1190 packet_buf_write(&req_buf, "thin-pack");
1191 if (args->no_progress)
1192 packet_buf_write(&req_buf, "no-progress");
1193 if (args->include_tag)
1194 packet_buf_write(&req_buf, "include-tag");
1195 if (prefer_ofs_delta)
1196 packet_buf_write(&req_buf, "ofs-delta");
1197
1198 /* Add shallow-info and deepen request */
1199 if (server_supports_feature("fetch", "shallow", 0))
1200 add_shallow_requests(&req_buf, args);
1201 else if (is_repository_shallow(the_repository) || args->deepen)
1202 die(_("Server does not support shallow requests"));
1203
1204 /* Add filter */
1205 if (server_supports_feature("fetch", "filter", 0) &&
1206 args->filter_options.choice) {
1207 print_verbose(args, _("Server supports filter"));
1208 packet_buf_write(&req_buf, "filter %s",
1209 args->filter_options.filter_spec);
1210 } else if (args->filter_options.choice) {
1211 warning("filtering not recognized by server, ignoring");
1212 }
1213
1214 /* add wants */
1215 add_wants(wants, &req_buf);
1216
1217 if (args->no_dependents) {
1218 packet_buf_write(&req_buf, "done");
1219 ret = 1;
1220 } else {
1221 /* Add all of the common commits we've found in previous rounds */
1222 add_common(&req_buf, common);
1223
1224 /* Add initial haves */
1225 ret = add_haves(&req_buf, haves_to_send, in_vain);
1226 }
1227
1228 /* Send request */
1229 packet_buf_flush(&req_buf);
1230 write_or_die(fd_out, req_buf.buf, req_buf.len);
1231
1232 strbuf_release(&req_buf);
1233 return ret;
1234 }
1235
1236 /*
1237 * Processes a section header in a server's response and checks if it matches
1238 * `section`. If the value of `peek` is 1, the header line will be peeked (and
1239 * not consumed); if 0, the line will be consumed and the function will die if
1240 * the section header doesn't match what was expected.
1241 */
1242 static int process_section_header(struct packet_reader *reader,
1243 const char *section, int peek)
1244 {
1245 int ret;
1246
1247 if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
1248 die("error reading section header '%s'", section);
1249
1250 ret = !strcmp(reader->line, section);
1251
1252 if (!peek) {
1253 if (!ret)
1254 die("expected '%s', received '%s'",
1255 section, reader->line);
1256 packet_reader_read(reader);
1257 }
1258
1259 return ret;
1260 }
1261
1262 static int process_acks(struct packet_reader *reader, struct oidset *common)
1263 {
1264 /* received */
1265 int received_ready = 0;
1266 int received_ack = 0;
1267
1268 process_section_header(reader, "acknowledgments", 0);
1269 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1270 const char *arg;
1271
1272 if (!strcmp(reader->line, "NAK"))
1273 continue;
1274
1275 if (skip_prefix(reader->line, "ACK ", &arg)) {
1276 struct object_id oid;
1277 if (!get_oid_hex(arg, &oid)) {
1278 struct commit *commit;
1279 oidset_insert(common, &oid);
1280 commit = lookup_commit(&oid);
1281 mark_common(commit, 0, 1);
1282 }
1283 continue;
1284 }
1285
1286 if (!strcmp(reader->line, "ready")) {
1287 clear_prio_queue(&rev_list);
1288 received_ready = 1;
1289 continue;
1290 }
1291
1292 die("unexpected acknowledgment line: '%s'", reader->line);
1293 }
1294
1295 if (reader->status != PACKET_READ_FLUSH &&
1296 reader->status != PACKET_READ_DELIM)
1297 die("error processing acks: %d", reader->status);
1298
1299 /* return 0 if no common, 1 if there are common, or 2 if ready */
1300 return received_ready ? 2 : (received_ack ? 1 : 0);
1301 }
1302
1303 static void receive_shallow_info(struct fetch_pack_args *args,
1304 struct packet_reader *reader)
1305 {
1306 process_section_header(reader, "shallow-info", 0);
1307 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1308 const char *arg;
1309 struct object_id oid;
1310
1311 if (skip_prefix(reader->line, "shallow ", &arg)) {
1312 if (get_oid_hex(arg, &oid))
1313 die(_("invalid shallow line: %s"), reader->line);
1314 register_shallow(the_repository, &oid);
1315 continue;
1316 }
1317 if (skip_prefix(reader->line, "unshallow ", &arg)) {
1318 if (get_oid_hex(arg, &oid))
1319 die(_("invalid unshallow line: %s"), reader->line);
1320 if (!lookup_object(oid.hash))
1321 die(_("object not found: %s"), reader->line);
1322 /* make sure that it is parsed as shallow */
1323 if (!parse_object(the_repository, &oid))
1324 die(_("error in object: %s"), reader->line);
1325 if (unregister_shallow(&oid))
1326 die(_("no shallow found: %s"), reader->line);
1327 continue;
1328 }
1329 die(_("expected shallow/unshallow, got %s"), reader->line);
1330 }
1331
1332 if (reader->status != PACKET_READ_FLUSH &&
1333 reader->status != PACKET_READ_DELIM)
1334 die("error processing shallow info: %d", reader->status);
1335
1336 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
1337 args->deepen = 1;
1338 }
1339
/* States of the protocol v2 fetch state machine in do_fetch_pack_v2(). */
enum fetch_state {
	FETCH_CHECK_LOCAL = 0,	/* filter out wants we already have locally */
	FETCH_SEND_REQUEST,	/* send a fetch request (wants + haves) */
	FETCH_PROCESS_ACKS,	/* read the server's acknowledgments section */
	FETCH_GET_PACK,		/* read shallow-info (if any) and the packfile */
	FETCH_DONE,		/* terminal state */
};
1347
/*
 * Protocol v2 fetch driver: a state machine that first checks which
 * wants are already present locally, then alternates between sending
 * fetch requests and processing acknowledgments until the server is
 * ready to send a pack, and finally reads the pack.
 *
 * Returns a (sorted) copy of orig_ref.  Dies on protocol error or if
 * the pack cannot be fetched.
 */
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0;
	int haves_to_send = INITIAL_FLUSH;
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			/* Reset COMMON_REF marks left over from an earlier round. */
			if (marked)
				for_each_ref(clear_marks, NULL);
			marked = 1;

			/* Seed the negotiation queue with our local ref tips. */
			for_each_ref(rev_list_insert_ref_oid, NULL);
			for_each_cached_alternate(insert_one_alternate_object);

			/* Filter 'ref' by 'sought' and those that aren't local */
			if (everything_local(args, &ref, sought, nr_sought))
				state = FETCH_DONE;
			else
				state = FETCH_SEND_REQUEST;
			break;
		case FETCH_SEND_REQUEST:
			/* Nonzero return means "done" was sent: expect a pack. */
			if (send_fetch_request(fd[1], args, ref, &common,
					       &haves_to_send, &in_vain))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			/* 2 = server ready, 1 = got at least one ACK, 0 = nothing new */
			switch (process_acks(&reader, &common)) {
			case 2:
				state = FETCH_GET_PACK;
				break;
			case 1:
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	oidset_clear(&common);
	return ref;
}
1429
1430 static void fetch_pack_config(void)
1431 {
1432 git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
1433 git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
1434 git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
1435 git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
1436 git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
1437
1438 git_config(git_default_config, NULL);
1439 }
1440
1441 static void fetch_pack_setup(void)
1442 {
1443 static int did_setup;
1444 if (did_setup)
1445 return;
1446 fetch_pack_config();
1447 if (0 <= transfer_unpack_limit)
1448 unpack_limit = transfer_unpack_limit;
1449 else if (0 <= fetch_unpack_limit)
1450 unpack_limit = fetch_unpack_limit;
1451 did_setup = 1;
1452 }
1453
1454 static int remove_duplicates_in_refs(struct ref **ref, int nr)
1455 {
1456 struct string_list names = STRING_LIST_INIT_NODUP;
1457 int src, dst;
1458
1459 for (src = dst = 0; src < nr; src++) {
1460 struct string_list_item *item;
1461 item = string_list_insert(&names, ref[src]->name);
1462 if (item->util)
1463 continue; /* already have it */
1464 item->util = ref[src];
1465 if (src != dst)
1466 ref[dst] = ref[src];
1467 dst++;
1468 }
1469 for (src = dst; src < nr; src++)
1470 ref[src] = NULL;
1471 string_list_clear(&names, 0);
1472 return dst;
1473 }
1474
/*
 * After the pack has been fetched, reconcile .git/shallow with the
 * shallow information negotiated with the (possibly shallow) remote:
 * commit or roll back the shallow lock file, and/or mark sought refs
 * that cannot be updated without changing .git/shallow.
 */
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		/*
		 * We deepened/shortened our own history: the temporary
		 * shallow file becomes the real one, or for --unshallow
		 * the shallow file is removed entirely.
		 */
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow(the_repository));
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	/* Nothing to do unless the remote advertised shallow roots. */
	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	/* Drop remote shallow roots whose objects we do not even have. */
	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	/* Collect the tips of the refs we asked for. */
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		/* Refs that would require new shallow roots are rejected. */
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
1568
1569 struct ref *fetch_pack(struct fetch_pack_args *args,
1570 int fd[], struct child_process *conn,
1571 const struct ref *ref,
1572 const char *dest,
1573 struct ref **sought, int nr_sought,
1574 struct oid_array *shallow,
1575 char **pack_lockfile,
1576 enum protocol_version version)
1577 {
1578 struct ref *ref_cpy;
1579 struct shallow_info si;
1580
1581 fetch_pack_setup();
1582 if (nr_sought)
1583 nr_sought = remove_duplicates_in_refs(sought, nr_sought);
1584
1585 if (!ref) {
1586 packet_flush(fd[1]);
1587 die(_("no matching remote head"));
1588 }
1589 prepare_shallow_info(&si, shallow);
1590 if (version == protocol_v2)
1591 ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
1592 pack_lockfile);
1593 else
1594 ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
1595 &si, pack_lockfile);
1596 reprepare_packed_git(the_repository);
1597 update_shallow(args, sought, nr_sought, &si);
1598 clear_shallow_info(&si);
1599 return ref_cpy;
1600 }
1601
1602 int report_unmatched_refs(struct ref **sought, int nr_sought)
1603 {
1604 int i, ret = 0;
1605
1606 for (i = 0; i < nr_sought; i++) {
1607 if (!sought[i])
1608 continue;
1609 switch (sought[i]->match_status) {
1610 case REF_MATCHED:
1611 continue;
1612 case REF_NOT_MATCHED:
1613 error(_("no such remote ref %s"), sought[i]->name);
1614 break;
1615 case REF_UNADVERTISED_NOT_ALLOWED:
1616 error(_("Server does not allow request for unadvertised object %s"),
1617 sought[i]->name);
1618 break;
1619 }
1620 ret = 1;
1621 }
1622 return ret;
1623 }