fetch-pack: write shallow, then check connectivity
[git/git.git] / fetch-pack.c
1 #include "cache.h"
2 #include "repository.h"
3 #include "config.h"
4 #include "lockfile.h"
5 #include "refs.h"
6 #include "pkt-line.h"
7 #include "commit.h"
8 #include "tag.h"
9 #include "exec-cmd.h"
10 #include "pack.h"
11 #include "sideband.h"
12 #include "fetch-pack.h"
13 #include "remote.h"
14 #include "run-command.h"
15 #include "connect.h"
16 #include "transport.h"
17 #include "version.h"
18 #include "prio-queue.h"
19 #include "sha1-array.h"
20 #include "oidset.h"
21 #include "packfile.h"
22 #include "connected.h"
23
/*
 * Configuration knobs (unpack limits, fsck settings) and capability
 * flags learned from the server's advertisement in do_fetch_pack().
 * File-scope because the negotiation helpers below all share them.
 */
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)	/* reachable from a local ref (see mark_complete) */
#define COMMON		(1U << 1)	/* both sides are known to have this commit */
#define COMMON_REF	(1U << 2)	/* remote ref tip that we already have */
#define SEEN		(1U << 3)	/* has been pushed onto rev_list */
#define POPPED		(1U << 4)	/* has been popped off rev_list by get_rev() */
#define ALTERNATE	(1U << 5)	/* cached via for_each_cached_alternate() */

/* Nonzero once flags have been set on refs; find_common() clears stale marks. */
static int marked;

/*
 * After sending this many "have"s if we do not get any new ACK , we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

/* Commits still to be offered as "have" lines, most recent date first. */
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;
61
62 __attribute__((format (printf, 2, 3)))
63 static inline void print_verbose(const struct fetch_pack_args *args,
64 const char *fmt, ...)
65 {
66 va_list params;
67
68 if (!args->verbose)
69 return;
70
71 va_start(params, fmt);
72 vfprintf(stderr, fmt, params);
73 va_end(params);
74 fputc('\n', stderr);
75 }
76
/* Growable array of objects found at alternates' ref tips. */
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;	/* used / allocated entries, per ALLOC_GROW */
};
81
82 static void cache_one_alternate(const char *refname,
83 const struct object_id *oid,
84 void *vcache)
85 {
86 struct alternate_object_cache *cache = vcache;
87 struct object *obj = parse_object(oid);
88
89 if (!obj || (obj->flags & ALTERNATE))
90 return;
91
92 obj->flags |= ALTERNATE;
93 ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
94 cache->items[cache->nr++] = obj;
95 }
96
97 static void for_each_cached_alternate(void (*cb)(struct object *))
98 {
99 static int initialized;
100 static struct alternate_object_cache cache;
101 size_t i;
102
103 if (!initialized) {
104 for_each_alternate_ref(cache_one_alternate, &cache);
105 initialized = 1;
106 }
107
108 for (i = 0; i < cache.nr; i++)
109 cb(cache.items[i]);
110 }
111
112 static void rev_list_push(struct commit *commit, int mark)
113 {
114 if (!(commit->object.flags & mark)) {
115 commit->object.flags |= mark;
116
117 if (parse_commit(commit))
118 return;
119
120 prio_queue_put(&rev_list, commit);
121
122 if (!(commit->object.flags & COMMON))
123 non_common_revs++;
124 }
125 }
126
127 static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
128 {
129 struct object *o = deref_tag(parse_object(oid), refname, 0);
130
131 if (o && o->type == OBJ_COMMIT)
132 rev_list_push((struct commit *)o, SEEN);
133
134 return 0;
135 }
136
/* for_each_ref() adapter: drops the flag/cb_data arguments. */
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}
142
/*
 * for_each_ref() callback: strip the negotiation flags left on a ref's
 * commit history by a previous find_common() round, so the walk can be
 * restarted cleanly.  Always returns 0 to keep iterating.
 */
static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}
153
/*
 * This function marks a rev and its ancestors as common.
 * In some cases, it is desirable to mark only the ancestors (for example
 * when only the server does not yet know that they are common).
 */
static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			/* not on rev_list yet; queue it and let the walk
			 * reach its ancestors in due course */
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			/* it was counted as non-common while queued;
			 * correct the counter unless already popped */
			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			/* recurse, marking all parents fully common */
			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}
187
/*
 * Get the next rev to send, ignoring the common.
 *
 * Pops commits off rev_list (most recent first) until one that is not
 * already COMMON is found, pushing parents as it goes.  Returns NULL
 * when the queue is empty or nothing non-common remains.
 */
static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		/* queue parents with the chosen mark; COMMON propagates
		 * further up the ancestry via mark_common() */
		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}
233
/* Server responses parsed by get_ack() from "ACK <oid> [detail]"/"NAK". */
enum ack_type {
	NAK = 0,
	ACK,		/* bare "ACK <oid>" */
	ACK_continue,	/* "ACK <oid> continue" */
	ACK_common,	/* "ACK <oid> common" (multi_ack_detailed) */
	ACK_ready	/* "ACK <oid> ready" (multi_ack_detailed) */
};
241
242 static void consume_shallow_list(struct fetch_pack_args *args, int fd)
243 {
244 if (args->stateless_rpc && args->deepen) {
245 /* If we sent a depth we will get back "duplicate"
246 * shallow and unshallow commands every time there
247 * is a block of have lines exchanged.
248 */
249 char *line;
250 while ((line = packet_read_line(fd, NULL))) {
251 if (starts_with(line, "shallow "))
252 continue;
253 if (starts_with(line, "unshallow "))
254 continue;
255 die(_("git fetch-pack: expected shallow list"));
256 }
257 }
258 }
259
/*
 * Read one ACK/NAK packet from the server.  On any ACK variant the
 * acknowledged object id is stored in *result_oid.  Dies on a flush
 * packet, an "ERR" line, or anything else unexpected.
 */
static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			/*
			 * NOTE(review): 40 hard-codes the SHA-1 hex length;
			 * would need the_hash_algo->hexsz for other hashes.
			 */
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			/* classify the optional detail keyword */
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}
289
290 static void send_request(struct fetch_pack_args *args,
291 int fd, struct strbuf *buf)
292 {
293 if (args->stateless_rpc) {
294 send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
295 packet_flush(fd);
296 } else
297 write_or_die(fd, buf->buf, buf->len);
298 }
299
/* for_each_cached_alternate() callback: treat an alternate's object as a ref tip. */
static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}
304
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

/*
 * Grow the negotiation window: return how many "have"s to send before
 * the next flush.  Stateless (HTTP) transports double up to LARGE_FLUSH
 * and then grow by 10% per round, since each round trip is costly.
 * Bidirectional pipes double only up to PIPESAFE_FLUSH and then grow
 * linearly, to avoid stuffing the pipe too far ahead of the reader.
 */
static int next_flush(int stateless_rpc, int count)
{
	if (stateless_rpc)
		return count < LARGE_FLUSH ? count << 1 : count * 11 / 10;

	return count < PIPESAFE_FLUSH ? count << 1 : count + PIPESAFE_FLUSH;
}
324
/*
 * Run the protocol-v0 negotiation: send "want" lines (with the
 * capability list on the first one), optional shallow/deepen requests,
 * then alternate blocks of "have" lines with the server's ACK/NAK
 * replies until a common base is agreed on or we give up.
 *
 * On an ACK the acknowledged id is stored in *result_oid.  Returns 0
 * when some common base was found, 1 when there was nothing to fetch,
 * and -1 when no common commits were discovered (not an error for an
 * empty repository; see the final return).
 */
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;	/* length of the replayed stateless-rpc header */

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	/* a previous round may have left flags on our refs; reset them */
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	/* seed the "have" walk with our ref tips and alternates */
	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
				(o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			/* the first want line carries our capability list */
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		/* everything is already complete locally; nothing to ask for */
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	/* append shallow/deepen/filter requests before the first flush */
	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		/* a deepen request is answered with shallow/unshallow lines */
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	/* main negotiation loop: stream "have"s, flush, read ACKs */
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					/* single-ack protocol: we are done */
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						/* server has enough; stop walking */
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	/* drain remaining ACKs/flushes so the stream is positioned at the pack */
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
568
569 static struct commit_list *complete;
570
571 static int mark_complete(const struct object_id *oid)
572 {
573 struct object *o = parse_object(oid);
574
575 while (o && o->type == OBJ_TAG) {
576 struct tag *t = (struct tag *) o;
577 if (!t->tagged)
578 break; /* broken repository */
579 o->flags |= COMPLETE;
580 o = parse_object(&t->tagged->oid);
581 }
582 if (o && o->type == OBJ_COMMIT) {
583 struct commit *commit = (struct commit *)o;
584 if (!(commit->object.flags & COMPLETE)) {
585 commit->object.flags |= COMPLETE;
586 commit_list_insert(commit, &complete);
587 }
588 }
589 return 0;
590 }
591
/* for_each_ref() adapter: drops the name/flag/cb_data arguments. */
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
597
598 static void mark_recent_complete_commits(struct fetch_pack_args *args,
599 timestamp_t cutoff)
600 {
601 while (complete && cutoff <= complete->item->date) {
602 print_verbose(args, _("Marking %s as complete"),
603 oid_to_hex(&complete->item->object.oid));
604 pop_most_recent_commit(&complete, COMPLETE);
605 }
606 }
607
608 static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
609 {
610 for (; refs; refs = refs->next)
611 oidset_insert(oids, &refs->old_oid);
612 }
613
/*
 * Lazily-populated membership test: is `id` one of the advertised ref
 * tips (matched or unmatched)?  Used by filter_refs() to decide whether
 * a raw-oid request names an advertised tip.
 */
static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		/* tablesize == 0 means the set was never populated */
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}
630
/*
 * Reduce the advertised ref list (*refs) to the refs we actually want:
 * those matching the sorted `sought` array, plus (with --all) everything
 * except tags on deepening fetches.  Raw object-id requests in `sought`
 * are appended when allowed by server capability or when they name an
 * advertised tip.  Updates each sought ref's match_status, frees refs
 * that were filtered out, and stores the new list back into *refs.
 */
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	/* both lists are sorted by name, so march through them in lockstep */
	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			/* stash rejects; their tips may still legitimize
			 * raw-oid requests below */
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		/* only full hex object names are eligible here */
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}
710
/* for_each_cached_alternate() callback: alternates' objects count as complete. */
static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}
715
/* State for add_loose_objects_to_set() (for_each_loose_object callback). */
struct loose_object_iter {
	struct oidset *loose_object_set;	/* set being populated */
	struct ref *refs;	/* cursor used to cap enumeration at #refs objects */
};
720
721 /*
722 * If the number of refs is not larger than the number of loose objects,
723 * this function stops inserting.
724 */
725 static int add_loose_objects_to_set(const struct object_id *oid,
726 const char *path,
727 void *data)
728 {
729 struct loose_object_iter *iter = data;
730 oidset_insert(iter->loose_object_set, oid);
731 if (iter->refs == NULL)
732 return 1;
733
734 iter->refs = iter->refs->next;
735 return 0;
736 }
737
/*
 * Decide whether the fetch can be satisfied entirely from local
 * objects.  Marks locally-known remote tips, computes a commit-date
 * cutoff, flags complete refs as COMMON_REF for the negotiation, and
 * filters *refs down to what is actually wanted.  Returns nonzero when
 * every remaining ref is already COMPLETE locally (nothing to fetch).
 */
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	int use_oidset = 0;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/* Enumerate all loose objects or know refs are not so many. */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,
					    &iter, 0);

	/* we only need commit dates below, not the full buffers */
	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		unsigned int flags = OBJECT_INFO_QUICK;

		if (use_oidset &&
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		}

		if (!has_object_file_with_flags(&ref->old_oid, flags))
			continue;
		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	oidset_clear(&loose_oid_set);

	if (!args->no_dependents) {
		if (!args->deepen) {
			/* mark everything reachable from local refs (and
			 * alternates) newer than the cutoff as COMPLETE */
			for_each_ref(mark_complete_oid, NULL);
			for_each_cached_alternate(mark_alternate_complete);
			commit_list_sort_by_date(&complete);
			if (cutoff)
				mark_recent_complete_commits(args, cutoff);
		}

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			if (!(o->flags & SEEN)) {
				rev_list_push((struct commit *)o, COMMON_REF | SEEN);

				mark_common((struct commit *)o, 1, 1);
			}
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	/* anything still not COMPLETE must actually be fetched */
	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	save_commit_buffer = old_save_commit_buffer;

	return retval;
}
837
/*
 * start_async() worker: strip sideband framing from the upload-pack
 * stream (xd[0]), forwarding band #1 to `out` (band #2 goes to stderr
 * inside recv_sideband).  Closes `out` when the stream ends.
 */
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret = recv_sideband("fetch-pack", xd[0], out);

	close(out);
	return ret;
}
847
/*
 * Receive the incoming pack on xd[] and feed it to a child process:
 * index-pack (keeping the pack on disk) when it is large, we asked to
 * keep it, or it came from a promisor remote; unpack-objects otherwise.
 * Optionally demultiplexes sideband traffic first.  Stores the keep
 * file's lockfile path in *pack_lockfile when requested.  Returns 0 on
 * success, dies on failure.
 */
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {
		/* peek at the object count to choose index-pack vs unpack-objects */
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;	/* capture index-pack's keep-file path */
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		/* we consumed the header above; hand it to the child explicitly */
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}
966
967 static int cmp_ref_by_name(const void *a_, const void *b_)
968 {
969 const struct ref *a = *((const struct ref **)a_);
970 const struct ref *b = *((const struct ref **)b_);
971 return strcmp(a->name, b->name);
972 }
973
/*
 * Protocol-v0 driver: parse the server's capability advertisement into
 * the file-scope flags, decide whether anything needs fetching, run the
 * have/want negotiation, set up shallow bookkeeping, and receive the
 * pack.  Returns the (filtered) list of refs that were fetched; the
 * caller owns it.  Dies on protocol or capability mismatches.
 */
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	/* both lists must be name-sorted for filter_refs()'s lockstep walk */
	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		/* nothing to fetch; terminate the conversation politely */
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	/* choose which shallow file the pack-indexing child should see */
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}
1083
1084 static void add_shallow_requests(struct strbuf *req_buf,
1085 const struct fetch_pack_args *args)
1086 {
1087 if (is_repository_shallow())
1088 write_shallow_commits(req_buf, 1, NULL);
1089 if (args->depth > 0)
1090 packet_buf_write(req_buf, "deepen %d", args->depth);
1091 if (args->deepen_since) {
1092 timestamp_t max_age = approxidate(args->deepen_since);
1093 packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
1094 }
1095 if (args->deepen_not) {
1096 int i;
1097 for (i = 0; i < args->deepen_not->nr; i++) {
1098 struct string_list_item *s = args->deepen_not->items + i;
1099 packet_buf_write(req_buf, "deepen-not %s", s->string);
1100 }
1101 }
1102 }
1103
1104 static void add_wants(const struct ref *wants, struct strbuf *req_buf)
1105 {
1106 int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);
1107
1108 for ( ; wants ; wants = wants->next) {
1109 const struct object_id *remote = &wants->old_oid;
1110 struct object *o;
1111
1112 /*
1113 * If that object is complete (i.e. it is an ancestor of a
1114 * local ref), we tell them we have it but do not have to
1115 * tell them about its ancestors, which they already know
1116 * about.
1117 *
1118 * We use lookup_object here because we are only
1119 * interested in the case we *know* the object is
1120 * reachable and we have already scanned it.
1121 */
1122 if (((o = lookup_object(remote->hash)) != NULL) &&
1123 (o->flags & COMPLETE)) {
1124 continue;
1125 }
1126
1127 if (!use_ref_in_want || wants->exact_oid)
1128 packet_buf_write(req_buf, "want %s\n", oid_to_hex(remote));
1129 else
1130 packet_buf_write(req_buf, "want-ref %s\n", wants->name);
1131 }
1132 }
1133
1134 static void add_common(struct strbuf *req_buf, struct oidset *common)
1135 {
1136 struct oidset_iter iter;
1137 const struct object_id *oid;
1138 oidset_iter_init(common, &iter);
1139
1140 while ((oid = oidset_iter_next(&iter))) {
1141 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1142 }
1143 }
1144
1145 static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
1146 {
1147 int ret = 0;
1148 int haves_added = 0;
1149 const struct object_id *oid;
1150
1151 while ((oid = get_rev())) {
1152 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1153 if (++haves_added >= *haves_to_send)
1154 break;
1155 }
1156
1157 *in_vain += haves_added;
1158 if (!haves_added || *in_vain >= MAX_IN_VAIN) {
1159 /* Send Done */
1160 packet_buf_write(req_buf, "done\n");
1161 ret = 1;
1162 }
1163
1164 /* Increase haves to send on next round */
1165 *haves_to_send = next_flush(1, *haves_to_send);
1166
1167 return ret;
1168 }
1169
1170 static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
1171 const struct ref *wants, struct oidset *common,
1172 int *haves_to_send, int *in_vain)
1173 {
1174 int ret = 0;
1175 struct strbuf req_buf = STRBUF_INIT;
1176
1177 if (server_supports_v2("fetch", 1))
1178 packet_buf_write(&req_buf, "command=fetch");
1179 if (server_supports_v2("agent", 0))
1180 packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
1181 if (args->server_options && args->server_options->nr &&
1182 server_supports_v2("server-option", 1)) {
1183 int i;
1184 for (i = 0; i < args->server_options->nr; i++)
1185 packet_write_fmt(fd_out, "server-option=%s",
1186 args->server_options->items[i].string);
1187 }
1188
1189 packet_buf_delim(&req_buf);
1190 if (args->use_thin_pack)
1191 packet_buf_write(&req_buf, "thin-pack");
1192 if (args->no_progress)
1193 packet_buf_write(&req_buf, "no-progress");
1194 if (args->include_tag)
1195 packet_buf_write(&req_buf, "include-tag");
1196 if (prefer_ofs_delta)
1197 packet_buf_write(&req_buf, "ofs-delta");
1198
1199 /* Add shallow-info and deepen request */
1200 if (server_supports_feature("fetch", "shallow", 0))
1201 add_shallow_requests(&req_buf, args);
1202 else if (is_repository_shallow() || args->deepen)
1203 die(_("Server does not support shallow requests"));
1204
1205 /* Add filter */
1206 if (server_supports_feature("fetch", "filter", 0) &&
1207 args->filter_options.choice) {
1208 print_verbose(args, _("Server supports filter"));
1209 packet_buf_write(&req_buf, "filter %s",
1210 args->filter_options.filter_spec);
1211 } else if (args->filter_options.choice) {
1212 warning("filtering not recognized by server, ignoring");
1213 }
1214
1215 /* add wants */
1216 add_wants(wants, &req_buf);
1217
1218 if (args->no_dependents) {
1219 packet_buf_write(&req_buf, "done");
1220 ret = 1;
1221 } else {
1222 /* Add all of the common commits we've found in previous rounds */
1223 add_common(&req_buf, common);
1224
1225 /* Add initial haves */
1226 ret = add_haves(&req_buf, haves_to_send, in_vain);
1227 }
1228
1229 /* Send request */
1230 packet_buf_flush(&req_buf);
1231 write_or_die(fd_out, req_buf.buf, req_buf.len);
1232
1233 strbuf_release(&req_buf);
1234 return ret;
1235 }
1236
1237 /*
1238 * Processes a section header in a server's response and checks if it matches
1239 * `section`. If the value of `peek` is 1, the header line will be peeked (and
1240 * not consumed); if 0, the line will be consumed and the function will die if
1241 * the section header doesn't match what was expected.
1242 */
1243 static int process_section_header(struct packet_reader *reader,
1244 const char *section, int peek)
1245 {
1246 int ret;
1247
1248 if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
1249 die("error reading section header '%s'", section);
1250
1251 ret = !strcmp(reader->line, section);
1252
1253 if (!peek) {
1254 if (!ret)
1255 die("expected '%s', received '%s'",
1256 section, reader->line);
1257 packet_reader_read(reader);
1258 }
1259
1260 return ret;
1261 }
1262
1263 static int process_acks(struct packet_reader *reader, struct oidset *common)
1264 {
1265 /* received */
1266 int received_ready = 0;
1267 int received_ack = 0;
1268
1269 process_section_header(reader, "acknowledgments", 0);
1270 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1271 const char *arg;
1272
1273 if (!strcmp(reader->line, "NAK"))
1274 continue;
1275
1276 if (skip_prefix(reader->line, "ACK ", &arg)) {
1277 struct object_id oid;
1278 if (!get_oid_hex(arg, &oid)) {
1279 struct commit *commit;
1280 oidset_insert(common, &oid);
1281 commit = lookup_commit(&oid);
1282 mark_common(commit, 0, 1);
1283 }
1284 continue;
1285 }
1286
1287 if (!strcmp(reader->line, "ready")) {
1288 clear_prio_queue(&rev_list);
1289 received_ready = 1;
1290 continue;
1291 }
1292
1293 die("unexpected acknowledgment line: '%s'", reader->line);
1294 }
1295
1296 if (reader->status != PACKET_READ_FLUSH &&
1297 reader->status != PACKET_READ_DELIM)
1298 die("error processing acks: %d", reader->status);
1299
1300 /* return 0 if no common, 1 if there are common, or 2 if ready */
1301 return received_ready ? 2 : (received_ack ? 1 : 0);
1302 }
1303
1304 static void receive_shallow_info(struct fetch_pack_args *args,
1305 struct packet_reader *reader)
1306 {
1307 process_section_header(reader, "shallow-info", 0);
1308 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1309 const char *arg;
1310 struct object_id oid;
1311
1312 if (skip_prefix(reader->line, "shallow ", &arg)) {
1313 if (get_oid_hex(arg, &oid))
1314 die(_("invalid shallow line: %s"), reader->line);
1315 register_shallow(&oid);
1316 continue;
1317 }
1318 if (skip_prefix(reader->line, "unshallow ", &arg)) {
1319 if (get_oid_hex(arg, &oid))
1320 die(_("invalid unshallow line: %s"), reader->line);
1321 if (!lookup_object(oid.hash))
1322 die(_("object not found: %s"), reader->line);
1323 /* make sure that it is parsed as shallow */
1324 if (!parse_object(&oid))
1325 die(_("error in object: %s"), reader->line);
1326 if (unregister_shallow(&oid))
1327 die(_("no shallow found: %s"), reader->line);
1328 continue;
1329 }
1330 die(_("expected shallow/unshallow, got %s"), reader->line);
1331 }
1332
1333 if (reader->status != PACKET_READ_FLUSH &&
1334 reader->status != PACKET_READ_DELIM)
1335 die("error processing shallow info: %d", reader->status);
1336
1337 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
1338 args->deepen = 1;
1339 }
1340
1341 static void receive_wanted_refs(struct packet_reader *reader, struct ref *refs)
1342 {
1343 process_section_header(reader, "wanted-refs", 0);
1344 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1345 struct object_id oid;
1346 const char *end;
1347 struct ref *r = NULL;
1348
1349 if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
1350 die("expected wanted-ref, got '%s'", reader->line);
1351
1352 for (r = refs; r; r = r->next) {
1353 if (!strcmp(end, r->name)) {
1354 oidcpy(&r->old_oid, &oid);
1355 break;
1356 }
1357 }
1358
1359 if (!r)
1360 die("unexpected wanted-ref: '%s'", reader->line);
1361 }
1362
1363 if (reader->status != PACKET_READ_DELIM)
1364 die("error processing wanted refs: %d", reader->status);
1365 }
1366
/*
 * States of the protocol v2 fetch state machine driven by
 * do_fetch_pack_v2().
 */
enum fetch_state {
	FETCH_CHECK_LOCAL = 0,	/* figure out which wants we already have */
	FETCH_SEND_REQUEST,	/* send wants/haves (and possibly "done") */
	FETCH_PROCESS_ACKS,	/* read the acknowledgments section */
	FETCH_GET_PACK,		/* read shallow-info/wanted-refs, then the pack */
	FETCH_DONE,
};
1374
/*
 * Drive a protocol v2 fetch over fd[] (fd[0] from server, fd[1] to
 * server): establish what is already local, then loop sending
 * want/have requests and processing acknowledgments until the server
 * is ready to send (or we give up negotiating), and finally receive
 * the pack.
 *
 * Returns a newly allocated copy of orig_ref; the caller owns it.
 */
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0;	/* haves sent without receiving any ACK */
	int haves_to_send = INITIAL_FLUSH;
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			/* clear flags left over from a previous negotiation */
			if (marked)
				for_each_ref(clear_marks, NULL);
			marked = 1;

			/* seed the revision walk that produces our haves */
			for_each_ref(rev_list_insert_ref_oid, NULL);
			for_each_cached_alternate(insert_one_alternate_object);

			/* Filter 'ref' by 'sought' and those that aren't local */
			if (everything_local(args, &ref, sought, nr_sought))
				state = FETCH_DONE;
			else
				state = FETCH_SEND_REQUEST;
			break;
		case FETCH_SEND_REQUEST:
			/* returns nonzero when "done" was sent */
			if (send_fetch_request(fd[1], args, ref, &common,
					       &haves_to_send, &in_vain))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&reader, &common)) {
			case 2:
				/* server said "ready" */
				state = FETCH_GET_PACK;
				break;
			case 1:
				/* common commits found; negotiation is working */
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader);

			if (process_section_header(&reader, "wanted-refs", 1))
				receive_wanted_refs(&reader, ref);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	oidset_clear(&common);
	return ref;
}
1459
/*
 * Load fetch-pack related configuration into the file-scope statics.
 * The specific keys are read first; git_default_config then handles
 * the remaining core settings.
 */
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}
1470
1471 static void fetch_pack_setup(void)
1472 {
1473 static int did_setup;
1474 if (did_setup)
1475 return;
1476 fetch_pack_config();
1477 if (0 <= transfer_unpack_limit)
1478 unpack_limit = transfer_unpack_limit;
1479 else if (0 <= fetch_unpack_limit)
1480 unpack_limit = fetch_unpack_limit;
1481 did_setup = 1;
1482 }
1483
1484 static int remove_duplicates_in_refs(struct ref **ref, int nr)
1485 {
1486 struct string_list names = STRING_LIST_INIT_NODUP;
1487 int src, dst;
1488
1489 for (src = dst = 0; src < nr; src++) {
1490 struct string_list_item *item;
1491 item = string_list_insert(&names, ref[src]->name);
1492 if (item->util)
1493 continue; /* already have it */
1494 item->util = ref[src];
1495 if (src != dst)
1496 ref[dst] = ref[src];
1497 dst++;
1498 }
1499 for (src = dst; src < nr; src++)
1500 ref[src] = NULL;
1501 string_list_clear(&names, 0);
1502 return dst;
1503 }
1504
/*
 * Reconcile .git/shallow with the result of the fetch. Depending on
 * the situation this commits or rolls back the shallow lockfile taken
 * earlier, accepts the remote's shallow roots (clone / update-shallow),
 * or rejects refs that would need a .git/shallow update we are not
 * allowed to make.
 */
static void update_shallow(struct fetch_pack_args *args,
			   struct ref *refs,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;
	struct ref *r;

	/* A deepening fetch already wrote the new shallow file; finalize it. */
	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	/* Nothing to do unless the remote advertised shallow roots. */
	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	/* Drop "theirs" shallow roots we did not actually receive. */
	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (r = refs; r; r = r->next)
		oid_array_append(&ref, &r->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(ref.nr, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		/* status[i] set => ref i depends on a shallow root we refuse */
		for (r = refs, i = 0; r; r = r->next, i++)
			if (status[i])
				r->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
1599
1600 static int iterate_ref_map(void *cb_data, struct object_id *oid)
1601 {
1602 struct ref **rm = cb_data;
1603 struct ref *ref = *rm;
1604
1605 if (!ref)
1606 return -1; /* end of the list */
1607 *rm = ref->next;
1608 oidcpy(oid, &ref->old_oid);
1609 return 0;
1610 }
1611
1612 struct ref *fetch_pack(struct fetch_pack_args *args,
1613 int fd[], struct child_process *conn,
1614 const struct ref *ref,
1615 const char *dest,
1616 struct ref **sought, int nr_sought,
1617 struct oid_array *shallow,
1618 char **pack_lockfile,
1619 enum protocol_version version)
1620 {
1621 struct ref *ref_cpy;
1622 struct shallow_info si;
1623
1624 fetch_pack_setup();
1625 if (nr_sought)
1626 nr_sought = remove_duplicates_in_refs(sought, nr_sought);
1627
1628 if (!ref) {
1629 packet_flush(fd[1]);
1630 die(_("no matching remote head"));
1631 }
1632 prepare_shallow_info(&si, shallow);
1633 if (version == protocol_v2)
1634 ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
1635 pack_lockfile);
1636 else
1637 ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
1638 &si, pack_lockfile);
1639 reprepare_packed_git(the_repository);
1640
1641 if (!args->cloning && args->deepen) {
1642 struct check_connected_options opt = CHECK_CONNECTED_INIT;
1643 struct ref *iterator = ref_cpy;
1644 opt.shallow_file = alternate_shallow_file;
1645 if (args->deepen)
1646 opt.is_deepening_fetch = 1;
1647 if (check_connected(iterate_ref_map, &iterator, &opt)) {
1648 error(_("remote did not send all necessary objects"));
1649 free_refs(ref_cpy);
1650 ref_cpy = NULL;
1651 rollback_lock_file(&shallow_lock);
1652 goto cleanup;
1653 }
1654 args->connectivity_checked = 1;
1655 }
1656
1657 update_shallow(args, ref_cpy, &si);
1658 cleanup:
1659 clear_shallow_info(&si);
1660 return ref_cpy;
1661 }
1662
1663 int report_unmatched_refs(struct ref **sought, int nr_sought)
1664 {
1665 int i, ret = 0;
1666
1667 for (i = 0; i < nr_sought; i++) {
1668 if (!sought[i])
1669 continue;
1670 switch (sought[i]->match_status) {
1671 case REF_MATCHED:
1672 continue;
1673 case REF_NOT_MATCHED:
1674 error(_("no such remote ref %s"), sought[i]->name);
1675 break;
1676 case REF_UNADVERTISED_NOT_ALLOWED:
1677 error(_("Server does not allow request for unadvertised object %s"),
1678 sought[i]->name);
1679 break;
1680 }
1681 ret = 1;
1682 }
1683 return ret;
1684 }