pack-objects: don't check size when the object is bad
builtin/pack-objects.c
#include "builtin.h"
#include "cache.h"
#include "repository.h"
#include "config.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
#include "list.h"
#include "packfile.h"
#include "object-store.h"

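/*
 * Shorthands for the oe_* accessors on to_pack. Delta relationships are
 * stored in the packing data as 1-based indices into the object array
 * (note the *_idx fields used elsewhere in this file) rather than as
 * pointers, which keeps each object_entry small; these macros hide that
 * indirection.
 */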
#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
#define DELTA(obj) oe_delta(&to_pack, obj)
#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)

static const char *pack_usage[] = {
	N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
	N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
	NULL
};

/*
 * Objects we are going to pack are collected in the `to_pack` structure.
 * It contains an array (dynamically expanded) of the object data, and a map
 * that can resolve SHA1s to their position in the array.
 */
static struct packing_data to_pack;

static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static timestamp_t unpack_unreachable_expiration;
static int pack_loose_unreachable;
static int local;
static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int num_preferred_base;
static struct progress *progress_state;

static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
static off_t reuse_packfile_offset;

static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
static int write_bitmap_index;
static uint16_t write_bitmap_options;

static int exclude_promisor_objects;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

static struct list_objects_filter_options filter_options;

enum missing_action {
	MA_ERROR = 0,      /* fail if any missing objects are encountered */
	MA_ALLOW_ANY,      /* silently allow ALL missing objects */
	MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;

/*
 * Indexed commits
 */
static struct commit **indexed_commits;
static unsigned int indexed_commits_nr;
static unsigned int indexed_commits_alloc;

static void index_commit_for_bitmap(struct commit *commit)
{
	if (indexed_commits_nr >= indexed_commits_alloc) {
		indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
		REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
	}

	indexed_commits[indexed_commits_nr++] = commit;
}

static void *get_delta(struct object_entry *entry)
{
	unsigned long size, base_size, delta_size;
	void *buf, *base_buf, *delta_buf;
	enum object_type type;

	buf = read_object_file(&entry->idx.oid, &type, &size);
	if (!buf)
		die("unable to read %s", oid_to_hex(&entry->idx.oid));
	base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
				    &base_size);
	if (!base_buf)
		die("unable to read %s",
		    oid_to_hex(&DELTA(entry)->idx.oid));
	delta_buf = diff_delta(base_buf, base_size,
			       buf, size, &delta_size, 0);
	if (!delta_buf || delta_size != entry->delta_size)
		die("delta size changed");
	free(buf);
	free(base_buf);
	return delta_buf;
}

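/*
 * Deflate the buffer at *pptr in one shot. On return *pptr points to a
 * newly allocated buffer holding the compressed data and the original
 * input buffer has been freed; returns the compressed size.
 */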
static unsigned long do_compress(void **pptr, unsigned long size)
{
	git_zstream stream;
	void *in, *out;
	unsigned long maxsize;

	git_deflate_init(&stream, pack_compression_level);
	maxsize = git_deflate_bound(&stream, size);

	in = *pptr;
	out = xmalloc(maxsize);
	*pptr = out;

	stream.next_in = in;
	stream.avail_in = size;
	stream.next_out = out;
	stream.avail_out = maxsize;
	while (git_deflate(&stream, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&stream);

	free(in);
	return stream.total_out;
}

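/*
 * Stream a large blob from the object database and deflate it into the
 * pack in 16kB chunks, so the whole blob never needs to be held in
 * memory at once. Returns the number of compressed bytes written.
 */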
static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
					   const struct object_id *oid)
{
	git_zstream stream;
	unsigned char ibuf[1024 * 16];
	unsigned char obuf[1024 * 16];
	unsigned long olen = 0;

	git_deflate_init(&stream, pack_compression_level);

	for (;;) {
		ssize_t readlen;
		int zret = Z_OK;
		readlen = read_istream(st, ibuf, sizeof(ibuf));
		if (readlen == -1)
			die(_("unable to read %s"), oid_to_hex(oid));

		stream.next_in = ibuf;
		stream.avail_in = readlen;
		while ((stream.avail_in || readlen == 0) &&
		       (zret == Z_OK || zret == Z_BUF_ERROR)) {
			stream.next_out = obuf;
			stream.avail_out = sizeof(obuf);
			zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
			hashwrite(f, obuf, stream.next_out - obuf);
			olen += stream.next_out - obuf;
		}
		if (stream.avail_in)
			die(_("deflate error (%d)"), zret);
		if (readlen == 0) {
			if (zret != Z_STREAM_END)
				die(_("deflate error (%d)"), zret);
			break;
		}
	}
	git_deflate_end(&stream);
	return olen;
}

/*
 * we are going to reuse the existing object data as is.  make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct hashfile *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		hashwrite(f, in, avail);
		offset += avail;
		len -= avail;
	}
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{
	unsigned long size, datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	enum object_type type;
	void *buf;
	struct git_istream *st = NULL;

	if (!usable_delta) {
		if (oe_type(entry) == OBJ_BLOB &&
		    entry->size > big_file_threshold &&
		    (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
			buf = NULL;
		else {
			buf = read_object_file(&entry->idx.oid, &type, &size);
			if (!buf)
				die(_("unable to read %s"),
				    oid_to_hex(&entry->idx.oid));
		}
		/*
		 * make sure no cached delta data remains from a
		 * previous attempt before a pack split occurred.
		 */
		FREE_AND_NULL(entry->delta_data);
		entry->z_delta_size = 0;
	} else if (entry->delta_data) {
		size = entry->delta_size;
		buf = entry->delta_data;
		entry->delta_data = NULL;
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	} else {
		buf = get_delta(entry);
		size = entry->delta_size;
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	}

	if (st)	/* large blob case, just assume we don't compress well */
		datalen = size;
	else if (entry->z_delta_size)
		datalen = entry->z_delta_size;
	else
		datalen = do_compress(&buf, size);

	/*
	 * The object header is a byte of 'type' followed by zero or
	 * more bytes of length.
	 */
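	/*
	 * For example, encode_in_pack_object_header() turns a blob
	 * (type 3) of size 100 into the two bytes 0xb4 0x06: the first
	 * byte carries a continuation bit, the type in bits 6-4 and the
	 * low four bits of the size; each following byte carries seven
	 * more size bits, least significant group first.
	 */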
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, size);

	if (type == OBJ_OFS_DELTA) {
		/*
		 * Deltas with relative base contain an additional
		 * encoding of the relative offset for the delta
		 * base from this object's position in the pack.
		 */
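		/*
		 * The offset is written as a big-endian sequence of
		 * 7-bit groups, the high bit of each byte flagging a
		 * continuation, with every group except the last biased
		 * by one so that each offset has exactly one encoding:
		 * e.g. ofs = 130 becomes 0x80 0x02 (decode: 0, +1,
		 * <<7, +2 = 130), and two-byte values start at 128.
		 */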
		off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
	} else if (type == OBJ_REF_DELTA) {
		/*
		 * Deltas with a base reference contain
		 * an additional 20 bytes for the base sha1.
		 */
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
		hdrlen += 20;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
	}
	if (st) {
		datalen = write_large_blob_data(st, f, &entry->idx.oid);
		close_istream(st);
	} else {
		hashwrite(f, buf, datalen);
		free(buf);
	}

	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{
	struct packed_git *p = IN_PACK(entry);
	struct pack_window *w_curs = NULL;
	struct revindex_entry *revidx;
	off_t offset;
	enum object_type type = oe_type(entry);
	off_t datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;

	if (DELTA(entry))
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, entry->size);

	offset = entry->in_pack_offset;
	revidx = find_pack_revindex(p, offset);
	datalen = revidx[1].offset - offset;
	if (!pack_to_stdout && p->index_version > 1 &&
	    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
		error("bad packed object CRC for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	offset += entry->in_pack_header_size;
	datalen -= entry->in_pack_header_size;

	if (!pack_to_stdout && p->index_version == 1 &&
	    check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
		error("corrupt packed object for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	if (type == OBJ_OFS_DELTA) {
		off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
		reused_delta++;
	} else if (type == OBJ_REF_DELTA) {
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
		hdrlen += 20;
		reused_delta++;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
	}
	copy_pack_data(f, p, &w_curs, offset, datalen);
	unuse_pack(&w_curs);
	reused++;
	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct hashfile *f,
			  struct object_entry *entry,
			  off_t write_offset)
{
	unsigned long limit;
	off_t len;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!DELTA(entry))
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (DELTA(entry)->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (DELTA(entry)->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!IN_PACK(entry))
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (oe_type(entry) == OBJ_REF_DELTA ||
		 oe_type(entry) == OBJ_OFS_DELTA)
				/* check_object() decided it for us ... */
		to_reuse = usable_delta;
				/* ... but pack split may override that */
	else if (oe_type(entry) != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (DELTA(entry))
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse)
		len = write_no_reuse_object(f, entry, limit, usable_delta);
	else
		len = write_reuse_object(f, entry, limit, usable_delta);
	if (!len)
		return 0;

	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return len;
}

enum write_one_status {
	WRITE_ONE_SKIP = -1, /* already written */
	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
	WRITE_ONE_WRITTEN = 1, /* normal */
	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct hashfile *f,
				       struct object_entry *e,
				       off_t *offset)
{
	off_t size;
	int recursing;

	/*
	 * we set offset to 1 (which is an impossible value) to mark
	 * the fact that this object is involved in "write its base
	 * first before writing a deltified object" recursion.
	 */
	recursing = (e->idx.offset == 1);
	if (recursing) {
		warning("recursive delta detected for object %s",
			oid_to_hex(&e->idx.oid));
		return WRITE_ONE_RECURSIVE;
	} else if (e->idx.offset || e->preferred_base) {
		/* offset is non zero if object is written already. */
		return WRITE_ONE_SKIP;
	}

	/* if we are deltified, write out base object first. */
	if (DELTA(e)) {
		e->idx.offset = 1; /* now recurse */
		switch (write_one(f, DELTA(e), offset)) {
		case WRITE_ONE_RECURSIVE:
			/* we cannot depend on this one */
			SET_DELTA(e, NULL);
			break;
		default:
			break;
		case WRITE_ONE_BREAK:
			e->idx.offset = recursing;
			return WRITE_ONE_BREAK;
		}
	}

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = recursing;
		return WRITE_ONE_BREAK;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die("pack too large for current definition of off_t");
	*offset += size;
	return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const struct object_id *oid, int flag,
		       void *cb_data)
{
	struct object_id peeled;
	struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, &peeled)) {
		entry = packlist_find(&to_pack, peeled.hash, NULL);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	if (e->filled)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

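/*
 * Iterative pre-order walk of a delta tree: visiting a node adds it and
 * all of its siblings, then we descend to the first child if there is
 * one, otherwise move to the next sibling or climb back up via the
 * DELTA() (parent) links. Doing this without recursion keeps arbitrarily
 * deep delta chains from overflowing the stack.
 */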
static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (DELTA_CHILD(e)) {
			add_to_order = 1;
			e = DELTA_CHILD(e);
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (DELTA_SIBLING(e)) {
				e = DELTA_SIBLING(e);
				continue;
			}
			/* go back to our parent node */
			e = DELTA(e);
			while (e && !DELTA_SIBLING(e)) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = DELTA(e);
			}
			if (!e) {
				/* done- we hit our original root node */
				return;
			}
			/* pass it off to sibling at this level */
			e = DELTA_SIBLING(e);
		}
	};
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; DELTA(root); root = DELTA(root))
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}

static struct object_entry **compute_write_order(void)
{
	unsigned int i, wo_end, last_untagged;

	struct object_entry **wo;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		SET_DELTA_CHILD(&objects[i], NULL);
		SET_DELTA_SIBLING(&objects[i], NULL);
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = to_pack.nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!DELTA(e))
			continue;
		/* Mark me as the first child */
		e->delta_sibling_idx = DELTA(e)->delta_child_idx;
		SET_DELTA_CHILD(DELTA(e), e);
	}

	/*
	 * Mark objects that are at the tip of tags.
	 */
	for_each_tag_ref(mark_tagged, NULL);

	/*
	 * Give the objects in the original recency order until
	 * we see a tagged tip.
	 */
	ALLOC_ARRAY(wo, to_pack.nr_objects);
	for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (oe_type(&objects[i]) != OBJ_COMMIT &&
		    oe_type(&objects[i]) != OBJ_TAG)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (oe_type(&objects[i]) != OBJ_TREE)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (!objects[i].filled)
			add_family_to_write_order(wo, &wo_end, &objects[i]);
	}

	if (wo_end != to_pack.nr_objects)
		die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

	return wo;
}

static off_t write_reused_pack(struct hashfile *f)
{
	unsigned char buffer[8192];
	off_t to_write, total;
	int fd;

	if (!is_pack_valid(reuse_packfile))
		die("packfile is invalid: %s", reuse_packfile->pack_name);

	fd = git_open(reuse_packfile->pack_name);
	if (fd < 0)
		die_errno("unable to open packfile for reuse: %s",
			  reuse_packfile->pack_name);

	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
		die_errno("unable to seek in reused packfile");

	if (reuse_packfile_offset < 0)
		reuse_packfile_offset = reuse_packfile->pack_size - 20;

	total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

	while (to_write) {
		int read_pack = xread(fd, buffer, sizeof(buffer));

		if (read_pack <= 0)
			die_errno("unable to read from reused packfile");

		if (read_pack > to_write)
			read_pack = to_write;

		hashwrite(f, buffer, read_pack);
		to_write -= read_pack;

		/*
		 * We don't know the actual number of objects written,
		 * only how many bytes written, how many bytes total, and
		 * how many objects total. So we can fake it by pretending all
		 * objects we are writing are the same size. This gives us a
		 * smooth progress meter, and at the end it matches the true
		 * answer.
		 */
		written = reuse_packfile_objects *
				(((double)(total - to_write)) / total);
		display_progress(progress_state, written);
	}

	close(fd);
	written = reuse_packfile_objects;
	display_progress(progress_state, written);
	return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct hashfile *f;
	off_t offset;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress(_("Writing objects"), nr_result);
	ALLOC_ARRAY(written_list, to_pack.nr_objects);
	write_order = compute_write_order();

	do {
		struct object_id oid;
		char *pack_tmp_name = NULL;

		if (pack_to_stdout)
			f = hashfd_throughput(1, "<stdout>", progress_state);
		else
			f = create_tmp_packfile(&pack_tmp_name);

		offset = write_pack_header(f, nr_remaining);

		if (reuse_packfile) {
			off_t packfile_size;
			assert(pack_to_stdout);

			packfile_size = write_reused_pack(f);
			offset += packfile_size;
		}

		nr_written = 0;
		for (; i < to_pack.nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import
		 */
		if (pack_to_stdout) {
			hashclose(f, oid.hash, CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			hashclose(f, oid.hash, CSUM_FSYNC);
		} else {
			int fd = hashclose(f, oid.hash, 0);
			fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
						 nr_written, oid.hash, offset);
			close(fd);
			if (write_bitmap_index) {
				warning(_(no_split_warning));
				write_bitmap_index = 0;
			}
		}

		if (!pack_to_stdout) {
			struct stat st;
			struct strbuf tmpname = STRBUF_INIT;

			/*
			 * Packs are runtime accessed in their mtime
			 * order since newer packs are more likely to contain
			 * younger objects.  So if we are creating multiple
			 * packs then we should modify the mtime of later ones
			 * to preserve this property.
			 */
			if (stat(pack_tmp_name, &st) < 0) {
				warning_errno("failed to stat %s", pack_tmp_name);
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(pack_tmp_name, &utb) < 0)
					warning_errno("failed utime() on %s", pack_tmp_name);
			}

			strbuf_addf(&tmpname, "%s-", base_name);

			if (write_bitmap_index) {
				bitmap_writer_set_checksum(oid.hash);
				bitmap_writer_build_type_index(
					&to_pack, written_list, nr_written);
			}

			finish_tmp_packfile(&tmpname, pack_tmp_name,
					    written_list, nr_written,
					    &pack_idx_opts, oid.hash);

			if (write_bitmap_index) {
				strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid));

				stop_progress(&progress_state);

				bitmap_writer_show_progress(progress);
				bitmap_writer_reuse_bitmaps(&to_pack);
				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
				bitmap_writer_build(&to_pack);
				bitmap_writer_finish(written_list, nr_written,
						     tmpname.buf, write_bitmap_options);
				write_bitmap_index = 0;
			}

			strbuf_release(&tmpname);
			free(pack_tmp_name);
			puts(oid_to_hex(&oid));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < to_pack.nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die("wrote %"PRIu32" objects while expecting %"PRIu32,
		    written, nr_result);
}

static int no_try_delta(const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("delta", NULL);
	if (git_check_attr(path, check))
		return 0;
	if (ATTR_FALSE(check->items[0].value))
		return 1;
	return 0;
}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const struct object_id *oid,
				int exclude,
				uint32_t *index_pos)
{
	struct object_entry *entry;

	entry = packlist_find(&to_pack, oid->hash, index_pos);
	if (!entry)
		return 0;

	if (exclude) {
		if (!entry->preferred_base)
			nr_result--;
		entry->preferred_base = 1;
	}

	return 1;
}

static int want_found_object(int exclude, struct packed_git *p)
{
	if (exclude)
		return 1;
	if (incremental)
		return 0;

	/*
	 * When asked to do --local (do not include an object that appears in a
	 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
	 * an object that appears in a pack marked with .keep), finding a pack
	 * that matches the criteria is sufficient for us to decide to omit it.
	 * However, even if this pack does not satisfy the criteria, we need to
	 * make sure no copy of this object appears in _any_ pack that would
	 * make us omit the object, so we need to check all the packs.
	 *
	 * We can however first check whether these options can possibly matter;
	 * if they do not matter we know we want the object in the generated
	 * pack. Otherwise, we signal "-1" at the end to tell the caller that
	 * we do not know either way, and it needs to check more packs.
	 */
	if (!ignore_packed_keep &&
	    (!local || !have_non_local_packs))
		return 1;

	if (local && !p->pack_local)
		return 0;
	if (ignore_packed_keep && p->pack_local && p->pack_keep)
		return 0;

	/* we don't know yet; keep looking for more packs */
	return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const struct object_id *oid,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{
	int want;
	struct list_head *pos;

	if (!exclude && local && has_loose_object_nonlocal(oid->hash))
		return 0;

	/*
	 * If we already know the pack object lives in, start checks from that
	 * pack - in the usual case when neither --local was given nor .keep files
	 * are present we will determine the answer right now.
	 */
	if (*found_pack) {
		want = want_found_object(exclude, *found_pack);
		if (want != -1)
			return want;
	}
	list_for_each(pos, get_packed_git_mru(the_repository)) {
		struct packed_git *p = list_entry(pos, struct packed_git, mru);
		off_t offset;

		if (p == *found_pack)
			offset = *found_offset;
		else
			offset = find_pack_entry_one(oid->hash, p);

		if (offset) {
			if (!*found_pack) {
				if (!is_pack_valid(p))
					continue;
				*found_offset = offset;
				*found_pack = p;
			}
			want = want_found_object(exclude, p);
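			/*
			 * We found a copy we want to pack: promote this
			 * pack to the front of the MRU list so that
			 * subsequent lookups try it first.
			 */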
			if (!exclude && want > 0)
				list_move(&p->mru,
					  get_packed_git_mru(the_repository));
			if (want != -1)
				return want;
		}
	}

	return 1;
}

static void create_object_entry(const struct object_id *oid,
				enum object_type type,
				uint32_t hash,
				int exclude,
				int no_try_delta,
				uint32_t index_pos,
				struct packed_git *found_pack,
				off_t found_offset)
{
	struct object_entry *entry;

	entry = packlist_alloc(&to_pack, oid->hash, index_pos);
	entry->hash = hash;
	oe_set_type(entry, type);
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		oe_set_in_pack(&to_pack, entry, found_pack);
		entry->in_pack_offset = found_offset;
	}

	entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const struct object_id *oid, enum object_type type,
			    const char *name, int exclude)
{
	struct packed_git *found_pack = NULL;
	off_t found_offset = 0;
	uint32_t index_pos;

	if (have_duplicate_entry(oid, exclude, &index_pos))
		return 0;

	if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {
		/* The pack is missing an object, so it will not have closure */
		if (write_bitmap_index) {
			warning(_(no_closure_warning));
			write_bitmap_index = 0;
		}
		return 0;
	}

	create_object_entry(oid, type, pack_name_hash(name),
			    exclude, name && no_try_delta(name),
			    index_pos, found_pack, found_offset);

	display_progress(progress_state, nr_result);
	return 1;
}

static int add_object_entry_from_bitmap(const struct object_id *oid,
					enum object_type type,
					int flags, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{
	uint32_t index_pos;

	if (have_duplicate_entry(oid, 0, &index_pos))
		return 0;

	if (!want_object_in_pack(oid, 0, &pack, &offset))
		return 0;

	create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);

	display_progress(progress_state, nr_result);
	return 1;
}

struct pbase_tree_cache {
	struct object_id oid;
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const struct object_id *oid)
{
	return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it or find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(oid);
	int available_ix = -1;

	/* pbase-tree-cache acts as a limited hashtable.
	 * your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && !oidcmp(&ent->oid, oid)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_object_file(oid, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	oidcpy(&nent->oid, oid);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

	while (tree_entry(tree,&entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(&entry) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
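		/*
		 * Tree entries come sorted by name: while this entry
		 * still sorts before the name we are after, keep
		 * scanning; once we sort past it, it cannot be here.
		 */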
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(entry.oid,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name+cmplen+1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(entry.oid);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
1298static int done_pbase_paths_alloc;
1299static int done_pbase_path_pos(unsigned hash)
1300{
1301 int lo = 0;
1302 int hi = done_pbase_paths_num;
1303 while (lo < hi) {
19716b21 1304 int mi = lo + (hi - lo) / 2;
5379a5c5
JH
1305 if (done_pbase_paths[mi] == hash)
1306 return mi;
1307 if (done_pbase_paths[mi] < hash)
1308 hi = mi;
1309 else
1310 lo = mi + 1;
1311 }
1312 return -lo-1;
1313}
1314
1315static int check_pbase_path(unsigned hash)
1316{
c7b07805 1317 int pos = done_pbase_path_pos(hash);
5379a5c5
JH
1318 if (0 <= pos)
1319 return 1;
1320 pos = -pos - 1;
25e19407
DD
1321 ALLOC_GROW(done_pbase_paths,
1322 done_pbase_paths_num + 1,
1323 done_pbase_paths_alloc);
5379a5c5
JH
1324 done_pbase_paths_num++;
1325 if (pos < done_pbase_paths_num)
f331ab9d
RS
1326 MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,
1327 done_pbase_paths_num - pos - 1);
5379a5c5
JH
1328 done_pbase_paths[pos] = hash;
1329 return 0;
1330}
1331
bc32fed5 1332static void add_preferred_base_object(const char *name)
5379a5c5
JH
1333{
1334 struct pbase_tree *it;
8a5a8d6c 1335 int cmplen;
68fb36eb 1336 unsigned hash = pack_name_hash(name);
5379a5c5 1337
8a5a8d6c 1338 if (!num_preferred_base || check_pbase_path(hash))
5379a5c5
JH
1339 return;
1340
8a5a8d6c 1341 cmplen = name_cmp_len(name);
5379a5c5
JH
1342 for (it = pbase_tree; it; it = it->next) {
1343 if (cmplen == 0) {
188960b4 1344 add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);
5379a5c5
JH
1345 }
1346 else {
1347 struct tree_desc tree;
6fda5e51 1348 init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
ce0bd642 1349 add_pbase_object(&tree, name, cmplen, name);
7a979d99 1350 }
3f9ac8d2 1351 }
3f9ac8d2
JH
1352}
1353
188960b4 1354static void add_preferred_base(struct object_id *oid)
3f9ac8d2 1355{
5379a5c5
JH
1356 struct pbase_tree *it;
1357 void *data;
1358 unsigned long size;
188960b4 1359 struct object_id tree_oid;
1d6b38cc 1360
8d1d8f83
JH
1361 if (window <= num_preferred_base++)
1362 return;
1363
02f0547e 1364 data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
5379a5c5 1365 if (!data)
7a979d99 1366 return;
5379a5c5
JH
1367
1368 for (it = pbase_tree; it; it = it->next) {
188960b4 1369 if (!oidcmp(&it->pcache.oid, &tree_oid)) {
5379a5c5
JH
1370 free(data);
1371 return;
1372 }
1373 }
1374
1375 it = xcalloc(1, sizeof(*it));
1376 it->next = pbase_tree;
1377 pbase_tree = it;
1378
188960b4 1379 oidcpy(&it->pcache.oid, &tree_oid);
5379a5c5
JH
1380 it->pcache.tree_data = data;
1381 it->pcache.tree_size = size;
3f9ac8d2
JH
1382}
1383
0ef95f72
NP
1384static void cleanup_preferred_base(void)
1385{
1386 struct pbase_tree *it;
1387 unsigned i;
1388
1389 it = pbase_tree;
1390 pbase_tree = NULL;
1391 while (it) {
095b3b2c
BW
1392 struct pbase_tree *tmp = it;
1393 it = tmp->next;
1394 free(tmp->pcache.tree_data);
1395 free(tmp);
0ef95f72
NP
1396 }
1397
1398 for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
1399 if (!pbase_tree_cache[i])
1400 continue;
1401 free(pbase_tree_cache[i]->tree_data);
6a83d902 1402 FREE_AND_NULL(pbase_tree_cache[i]);
0ef95f72
NP
1403 }
1404
6a83d902 1405 FREE_AND_NULL(done_pbase_paths);
0ef95f72
NP
1406 done_pbase_paths_num = done_pbase_paths_alloc = 0;
1407}
1408
c323ac7d
LT
1409static void check_object(struct object_entry *entry)
1410{
43fa44fa
NTND
1411 if (IN_PACK(entry)) {
1412 struct packed_git *p = IN_PACK(entry);
03e79c88 1413 struct pack_window *w_curs = NULL;
5c49c116
NP
1414 const unsigned char *base_ref = NULL;
1415 struct object_entry *base_entry;
1416 unsigned long used, used_0;
ef49a7a0 1417 unsigned long avail;
5c49c116
NP
1418 off_t ofs;
1419 unsigned char *buf, c;
fd9b1bae 1420 enum object_type type;
780e6e73 1421
6777a59f 1422 buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
ab7cd7bb 1423
5c49c116 1424 /*
fa736f72
NP
1425 * We want in_pack_type even if we do not reuse delta
1426 * since non-delta representations could still be reused.
ab7cd7bb 1427 */
09ded04b 1428 used = unpack_object_header_buffer(buf, avail,
fd9b1bae 1429 &type,
5c49c116 1430 &entry->size);
03d66015
NP
1431 if (used == 0)
1432 goto give_up;
ab7cd7bb 1433
fd9b1bae
NTND
1434 if (type < 0)
1435 BUG("invalid type %d", type);
1436 entry->in_pack_type = type;
1437
5c49c116
NP
1438 /*
1439 * Determine if this is a delta and if so whether we can
1440 * reuse it or not. Otherwise let's find out as cheaply as
1441 * possible what the actual type and size for this object is.
3f9ac8d2 1442 */
5c49c116
NP
1443 switch (entry->in_pack_type) {
1444 default:
1445 /* Not a delta hence we've already got all we need. */
fd9b1bae 1446 oe_set_type(entry, entry->in_pack_type);
5c49c116 1447 entry->in_pack_header_size = used;
fd9b1bae 1448 if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
03d66015 1449 goto give_up;
5c49c116
NP
1450 unuse_pack(&w_curs);
1451 return;
1452 case OBJ_REF_DELTA:
a7de7130 1453 if (reuse_delta && !entry->preferred_base)
5c49c116
NP
1454 base_ref = use_pack(p, &w_curs,
1455 entry->in_pack_offset + used, NULL);
1456 entry->in_pack_header_size = used + 20;
1457 break;
1458 case OBJ_OFS_DELTA:
1459 buf = use_pack(p, &w_curs,
1460 entry->in_pack_offset + used, NULL);
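			/*
			 * Decode the base offset, undoing the biased
			 * big-endian varint that write_reuse_object()
			 * emits: each continuation byte implies an
			 * extra +1, so every offset has a unique
			 * encoding.
			 */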
			used_0 = 0;
			c = buf[used_0++];
			ofs = c & 127;
			while (c & 128) {
				ofs += 1;
				if (!ofs || MSB(ofs, 7)) {
					error("delta base offset overflow in pack for %s",
					      oid_to_hex(&entry->idx.oid));
					goto give_up;
				}
				c = buf[used_0++];
				ofs = (ofs << 7) + (c & 127);
			}
			ofs = entry->in_pack_offset - ofs;
			if (ofs <= 0 || ofs >= entry->in_pack_offset) {
				error("delta base offset out of bound for %s",
				      oid_to_hex(&entry->idx.oid));
				goto give_up;
			}
			if (reuse_delta && !entry->preferred_base) {
				struct revindex_entry *revidx;
				revidx = find_pack_revindex(p, ofs);
				if (!revidx)
					goto give_up;
				base_ref = nth_packed_object_sha1(p, revidx->nr);
			}
			entry->in_pack_header_size = used + used_0;
			break;
		}

		if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
			/*
			 * If base_ref was set above that means we wish to
			 * reuse delta data, and we even found that base
			 * in the list of objects we want to pack. Goodie!
			 *
			 * Depth value does not matter - find_deltas() will
			 * never consider reused delta as the base object to
			 * deltify other objects against, in order to avoid
			 * circular deltas.
			 */
			oe_set_type(entry, entry->in_pack_type);
			SET_DELTA(entry, base_entry);
			entry->delta_size = entry->size;
			entry->delta_sibling_idx = base_entry->delta_child_idx;
			SET_DELTA_CHILD(base_entry, entry);
			unuse_pack(&w_curs);
			return;
		}

		if (oe_type(entry)) {
			/*
			 * This must be a delta and we already know what the
			 * final object type is.  Let's extract the actual
			 * object size from the delta header.
			 */
			entry->size = get_size_from_delta(p, &w_curs,
					entry->in_pack_offset + entry->in_pack_header_size);
			if (entry->size == 0)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		}

		/*
		 * No choice but to fall back to the recursive delta walk
		 * with sha1_object_info() to find out the object type
		 * at this point...
		 */
give_up:
		unuse_pack(&w_curs);
	}

	oe_set_type(entry, oid_object_info(&entry->idx.oid, &entry->size));
	/*
	 * The error condition is checked in prepare_pack().  This is
	 * to permit a missing preferred base object to be ignored
	 * as a preferred base.  Doing so can result in a larger
	 * pack file, but the transfer will still take place.
	 */
}

static int pack_offset_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;
	const struct packed_git *a_in_pack = IN_PACK(a);
	const struct packed_git *b_in_pack = IN_PACK(b);

	/* avoid filesystem thrashing with loose objects */
	if (!a_in_pack && !b_in_pack)
		return oidcmp(&a->idx.oid, &b->idx.oid);

	if (a_in_pack < b_in_pack)
		return -1;
	if (a_in_pack > b_in_pack)
		return 1;
	return a->in_pack_offset < b->in_pack_offset ? -1 :
	       (a->in_pack_offset > b->in_pack_offset);
}

/*
 * Drop an on-disk delta we were planning to reuse. Naively, this would
 * just involve blanking out the "delta" field, but we have to deal
 * with some extra book-keeping:
 *
 *   1. Removing ourselves from the delta_sibling linked list.
 *
 *   2. Updating our size/type to the non-delta representation. These were
 *      either not recorded initially (size) or overwritten with the delta type
 *      (type) when check_object() decided to reuse the delta.
 *
 *   3. Resetting our delta depth, as we are now a base object.
 */
static void drop_reused_delta(struct object_entry *entry)
{
	unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
	struct object_info oi = OBJECT_INFO_INIT;
	enum object_type type;

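	/*
	 * Unlink ourselves from our base's child list: 'idx' starts at
	 * the base's delta_child_idx and follows the delta_sibling
	 * chain (both stored as 1-based indices into to_pack.objects)
	 * until it reaches the link pointing at us, which is then
	 * redirected past us.
	 */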
898eba5e
NTND
1581 while (*idx) {
1582 struct object_entry *oe = &to_pack.objects[*idx - 1];
1583
1584 if (oe == entry)
1585 *idx = oe->delta_sibling_idx;
4cf2143e 1586 else
898eba5e 1587 idx = &oe->delta_sibling_idx;
4cf2143e 1588 }
898eba5e 1589 SET_DELTA(entry, NULL);
7dbabbbe 1590 entry->depth = 0;
4cf2143e
JK
1591
1592 oi.sizep = &entry->size;
fd9b1bae 1593 oi.typep = &type;
43fa44fa 1594 if (packed_object_info(IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
4cf2143e
JK
1595 /*
1596 * We failed to get the info from this pack for some reason;
1597 * fall back to sha1_object_info, which may find another copy.
fd9b1bae 1598 * And if that fails, the error will be recorded in oe_type(entry)
4cf2143e
JK
1599 * and dealt with in prepare_pack().
1600 */
fd9b1bae
NTND
1601 oe_set_type(entry, oid_object_info(&entry->idx.oid,
1602 &entry->size));
1603 } else {
1604 oe_set_type(entry, type);
4cf2143e
JK
1605 }
1606}
1607
1608/*
1609 * Follow the chain of deltas from this entry onward, throwing away any links
1610 * that cause us to hit a cycle (as determined by the DFS state flags in
1611 * the entries).
7dbabbbe
JK
1612 *
1613 * We also detect too-long reused chains that would violate our --depth
1614 * limit.
4cf2143e
JK
1615 */
1616static void break_delta_chains(struct object_entry *entry)
1617{
42b766d7
JK
1618 /*
1619 * The actual depth of each object we will write is stored as an int,
1620 * as it cannot exceed our int "depth" limit. But before we break
1621 * changes based no that limit, we may potentially go as deep as the
1622 * number of objects, which is elsewhere bounded to a uint32_t.
1623 */
1624 uint32_t total_depth;
1625 struct object_entry *cur, *next;
1626
1627 for (cur = entry, total_depth = 0;
1628 cur;
898eba5e 1629 cur = DELTA(cur), total_depth++) {
42b766d7
JK
1630 if (cur->dfs_state == DFS_DONE) {
1631 /*
1632 * We've already seen this object and know it isn't
1633 * part of a cycle. We do need to append its depth
1634 * to our count.
1635 */
1636 total_depth += cur->depth;
1637 break;
1638 }
4cf2143e 1639
4cf2143e 1640 /*
42b766d7
JK
1641 * We break cycles before looping, so an ACTIVE state (or any
1642 * other cruft which made its way into the state variable)
1643 * is a bug.
4cf2143e 1644 */
42b766d7
JK
1645 if (cur->dfs_state != DFS_NONE)
1646 die("BUG: confusing delta dfs state in first pass: %d",
1647 cur->dfs_state);
4cf2143e 1648
4cf2143e 1649 /*
42b766d7
JK
1650 * Now we know this is the first time we've seen the object. If
1651 * it's not a delta, we're done traversing, but we'll mark it
1652 * done to save time on future traversals.
4cf2143e 1653 */
898eba5e 1654 if (!DELTA(cur)) {
42b766d7
JK
1655 cur->dfs_state = DFS_DONE;
1656 break;
1657 }
4cf2143e 1658
4cf2143e 1659 /*
42b766d7
JK
1660 * Mark ourselves as active and see if the next step causes
1661 * us to cycle to another active object. It's important to do
1662 * this _before_ we loop, because it impacts where we make the
1663 * cut, and thus how our total_depth counter works.
1664 * E.g., We may see a partial loop like:
1665 *
1666 * A -> B -> C -> D -> B
1667 *
1668 * Cutting B->C breaks the cycle. But now the depth of A is
1669 * only 1, and our total_depth counter is at 3. The size of the
1670 * error is always one less than the size of the cycle we
1671 * broke. Commits C and D were "lost" from A's chain.
1672 *
1673 * If we instead cut D->B, then the depth of A is correct at 3.
1674 * We keep all commits in the chain that we examined.
4cf2143e 1675 */
42b766d7 1676 cur->dfs_state = DFS_ACTIVE;
898eba5e 1677 if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
42b766d7
JK
1678 drop_reused_delta(cur);
1679 cur->dfs_state = DFS_DONE;
1680 break;
7dbabbbe 1681 }
42b766d7 1682 }
7dbabbbe 1683
42b766d7
JK
1684 /*
1685 * And now that we've gone all the way to the bottom of the chain, we
1686 * need to clear the active flags and set the depth fields as
1687 * appropriate. Unlike the loop above, which can quit when it drops a
1688 * delta, we need to keep going to look for more depth cuts. So we need
1689 * an extra "next" pointer to keep going after we reset cur->delta.
1690 */
1691 for (cur = entry; cur; cur = next) {
898eba5e 1692 next = DELTA(cur);
4cf2143e 1693
42b766d7
JK
1694 /*
1695 * We should have a chain of zero or more ACTIVE states down to
1696 * a final DONE. We can quit after the DONE, because either it
1697 * has no bases, or we've already handled them in a previous
1698 * call.
1699 */
1700 if (cur->dfs_state == DFS_DONE)
1701 break;
1702 else if (cur->dfs_state != DFS_ACTIVE)
1703 die("BUG: confusing delta dfs state in second pass: %d",
1704 cur->dfs_state);
4cf2143e 1705
4cf2143e 1706 /*
42b766d7
JK
1707 * If the total_depth is more than depth, then we need to snip
1708 * the chain into two or more smaller chains that don't exceed
1709 * the maximum depth. Most of the resulting chains will contain
1710 * (depth + 1) entries (i.e., depth deltas plus one base), and
1711 * the last chain (i.e., the one containing entry) will contain
1712 * whatever entries are left over, namely
1713 * (total_depth % (depth + 1)) of them.
1714 *
1715 * Since we are iterating towards decreasing depth, we need to
1716 * decrement total_depth as we go, and we need to write to the
1717 * entry what its final depth will be after all of the
1718 * snipping. Since we're snipping into chains of length (depth
1719 * + 1) entries, the final depth of an entry will be its
1720 * original depth modulo (depth + 1). Any time we encounter an
1721 * entry whose final depth is supposed to be zero, we snip it
1722 * from its delta base, thereby making it so.
4cf2143e 1723 */
42b766d7
JK
1724 cur->depth = (total_depth--) % (depth + 1);
1725 if (!cur->depth)
1726 drop_reused_delta(cur);
1727
1728 cur->dfs_state = DFS_DONE;
4cf2143e
JK
1729 }
1730}
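
/*
 * Editorial sketch, not part of the original file: the depth-snipping
 * arithmetic from the second pass above, modeled standalone (the
 * toy_snip_count name is hypothetical). Any entry whose assigned depth
 * comes out to zero is cut from its base, so the chain is split into
 * pieces of at most (depth + 1) objects.
 */
static unsigned toy_snip_count(unsigned total_depth, int depth)
{
	unsigned snips = 0;
	while (total_depth) {
		if (!((total_depth--) % (depth + 1)))
			snips++; /* final depth 0: cut this entry loose */
	}
	return snips;
}
/*
 * With depth = 2 and total_depth = 7, the assigned depths read, from
 * the entry toward its base: 1, 0 (snip), 2, 1, 0 (snip), 2, 1 -- two
 * cuts, and the leftover short chain (7 % 3 = 1 delta) holds the entry
 * itself, matching the comment above.
 */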
1731
c323ac7d
LT
1732static void get_object_details(void)
1733{
7cadf491 1734 uint32_t i;
5c49c116
NP
1735 struct object_entry **sorted_by_offset;
1736
2834bc27
VM
1737 sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
1738 for (i = 0; i < to_pack.nr_objects; i++)
1739 sorted_by_offset[i] = to_pack.objects + i;
9ed0d8d6 1740 QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);
c323ac7d 1741
2834bc27 1742 for (i = 0; i < to_pack.nr_objects; i++) {
15366280
JH
1743 struct object_entry *entry = sorted_by_offset[i];
1744 check_object(entry);
660b3735 1745 if (entry->type_valid && big_file_threshold < entry->size)
15366280
JH
1746 entry->no_try_delta = 1;
1747 }
3449f8c4 1748
4cf2143e
JK
1749 /*
1750 * This must happen in a second pass, since we rely on the delta
1751 * information for the whole list being completed.
1752 */
1753 for (i = 0; i < to_pack.nr_objects; i++)
1754 break_delta_chains(&to_pack.objects[i]);
1755
5c49c116 1756 free(sorted_by_offset);
c323ac7d
LT
1757}
1758
b904166c
NP
1759/*
1760 * We search for deltas in a list sorted by type, by filename hash, and then
1761 * by size, so that we see progressively smaller and smaller files.
1762 * That's because we prefer deltas to be from the bigger file
1763 * to the smaller -- deletes are potentially cheaper, but perhaps
1764 * more importantly, the bigger file is likely the more recent
 1765 * one. The deepest deltas are therefore the oldest objects, which are
 1766 * the least likely to be accessed often.
1767 */
9668cf59 1768static int type_size_sort(const void *_a, const void *_b)
c323ac7d 1769{
9668cf59
NP
1770 const struct object_entry *a = *(struct object_entry **)_a;
1771 const struct object_entry *b = *(struct object_entry **)_b;
fd9b1bae
NTND
1772 enum object_type a_type = oe_type(a);
1773 enum object_type b_type = oe_type(b);
9668cf59 1774
fd9b1bae 1775 if (a_type > b_type)
27225f2e 1776 return -1;
fd9b1bae 1777 if (a_type < b_type)
27225f2e 1778 return 1;
b904166c 1779 if (a->hash > b->hash)
7a979d99 1780 return -1;
b904166c 1781 if (a->hash < b->hash)
7a979d99 1782 return 1;
b904166c 1783 if (a->preferred_base > b->preferred_base)
c323ac7d 1784 return -1;
b904166c
NP
1785 if (a->preferred_base < b->preferred_base)
1786 return 1;
c323ac7d 1787 if (a->size > b->size)
b904166c
NP
1788 return -1;
1789 if (a->size < b->size)
c323ac7d 1790 return 1;
b904166c 1791 return a < b ? -1 : (a > b); /* newest first */
c323ac7d
LT
1792}
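
/*
 * Editorial sketch, not part of the original file: type_size_sort() is
 * a chain of tie-breakers, each key consulted only when all earlier
 * keys compare equal. The same idiom, reduced to a standalone model
 * (the toy_* names are hypothetical):
 */
struct toy_entry { int type; unsigned hash; unsigned long size; };

static int toy_type_size_cmp(const void *a_, const void *b_)
{
	const struct toy_entry *a = a_;
	const struct toy_entry *b = b_;
	if (a->type != b->type)
		return a->type > b->type ? -1 : 1; /* group by type */
	if (a->hash != b->hash)
		return a->hash > b->hash ? -1 : 1; /* cluster by name hash */
	if (a->size != b->size)
		return a->size > b->size ? -1 : 1; /* bigger objects first */
	return 0;
}
/*
 * Sorting with this comparator (e.g. via qsort) yields type groups,
 * name-hash clusters within each group, and biggest-to-smallest within
 * each cluster -- the "progressively smaller files" order described in
 * the comment above.
 */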
1793
1794struct unpacked {
1795 struct object_entry *entry;
1796 void *data;
f6c7081a 1797 struct delta_index *index;
5a235b5e 1798 unsigned depth;
c323ac7d
LT
1799};
1800
d250626c
NP
1801static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
1802 unsigned long delta_size)
074b2eea
MK
1803{
1804 if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
1805 return 0;
1806
e3dfddb3
MK
1807 if (delta_size < cache_max_small_delta_size)
1808 return 1;
1809
074b2eea
MK
1810 /* cache delta, if objects are large enough compared to delta size */
1811 if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
1812 return 1;
1813
1814 return 0;
1815}
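
/*
 * Editorial worked example, not in the original file: the shift
 * heuristic above compares (src_size/1M + trg_size/2M) against
 * delta_size/1K, i.e. a delta is cached when it is roughly a thousand
 * times smaller than the objects it connects. With a 64 MiB source, a
 * 32 MiB target and a 100 KiB delta: (64 + 16) > 100 is false, so the
 * delta is not cached; a 50 KiB delta gives (64 + 16) > 50, so it is.
 */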
1816
7eb151d6 1817#ifndef NO_PTHREADS
8ecce684 1818
44626dc7 1819static pthread_mutex_t read_mutex;
8ecce684
NP
1820#define read_lock() pthread_mutex_lock(&read_mutex)
1821#define read_unlock() pthread_mutex_unlock(&read_mutex)
1822
44626dc7 1823static pthread_mutex_t cache_mutex;
3c701839
NP
1824#define cache_lock() pthread_mutex_lock(&cache_mutex)
1825#define cache_unlock() pthread_mutex_unlock(&cache_mutex)
1826
44626dc7 1827static pthread_mutex_t progress_mutex;
8ecce684
NP
1828#define progress_lock() pthread_mutex_lock(&progress_mutex)
1829#define progress_unlock() pthread_mutex_unlock(&progress_mutex)
1830
1831#else
1832
e1ef8673
JH
1833#define read_lock() (void)0
1834#define read_unlock() (void)0
1835#define cache_lock() (void)0
1836#define cache_unlock() (void)0
1837#define progress_lock() (void)0
1838#define progress_unlock() (void)0
8ecce684
NP
1839
1840#endif
1841
f6c7081a 1842static int try_delta(struct unpacked *trg, struct unpacked *src,
ef0316fc 1843 unsigned max_depth, unsigned long *mem_usage)
c323ac7d 1844{
f6c7081a
NP
1845 struct object_entry *trg_entry = trg->entry;
1846 struct object_entry *src_entry = src->entry;
560b25a8 1847 unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
c83f032e 1848 unsigned ref_depth;
21666f1a 1849 enum object_type type;
c323ac7d
LT
1850 void *delta_buf;
1851
1852 /* Don't bother doing diffs between different types */
fd9b1bae 1853 if (oe_type(trg_entry) != oe_type(src_entry))
c323ac7d
LT
1854 return -1;
1855
51d1e83f 1856 /*
15f07e06
JK
1857 * We do not bother to try a delta that we discarded on an
1858 * earlier try, but only when reusing delta data. Note that
1859 * src_entry that is marked as the preferred_base should always
1860 * be considered, as even if we produce a suboptimal delta against
1861 * it, we will still save the transfer cost, as we already know
1862 * the other side has it and we won't send src_entry at all.
51d1e83f 1863 */
43fa44fa
NTND
1864 if (reuse_delta && IN_PACK(trg_entry) &&
1865 IN_PACK(trg_entry) == IN_PACK(src_entry) &&
15f07e06 1866 !src_entry->preferred_base &&
e9195b58
JH
1867 trg_entry->in_pack_type != OBJ_REF_DELTA &&
1868 trg_entry->in_pack_type != OBJ_OFS_DELTA)
51d1e83f
LT
1869 return 0;
1870
898b14ce 1871 /* Let's not bust the allowed depth. */
5a235b5e 1872 if (src->depth >= max_depth)
d116a45a 1873 return 0;
c323ac7d 1874
c3b06a69 1875 /* Now some size filtering heuristics. */
560b25a8 1876 trg_size = trg_entry->size;
898eba5e 1877 if (!DELTA(trg_entry)) {
c83f032e
NP
1878 max_size = trg_size/2 - 20;
1879 ref_depth = 1;
1880 } else {
1881 max_size = trg_entry->delta_size;
5a235b5e 1882 ref_depth = trg->depth;
c83f032e 1883 }
720fe22d 1884 max_size = (uint64_t)max_size * (max_depth - src->depth) /
c83f032e 1885 (max_depth - ref_depth + 1);
c3b06a69
NP
1886 if (max_size == 0)
1887 return 0;
f6c7081a 1888 src_size = src_entry->size;
560b25a8 1889 sizediff = src_size < trg_size ? trg_size - src_size : 0;
27225f2e 1890 if (sizediff >= max_size)
f527cb8c 1891 return 0;
a1dab41a
BD
1892 if (trg_size < src_size / 32)
1893 return 0;
f6c7081a 1894
560b25a8
NP
1895 /* Load data if not already done */
1896 if (!trg->data) {
8ecce684 1897 read_lock();
b4f5aca4 1898 trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
8ecce684 1899 read_unlock();
2e3404c3
JH
1900 if (!trg->data)
1901 die("object %s cannot be read",
e6a492b7 1902 oid_to_hex(&trg_entry->idx.oid));
560b25a8
NP
1903 if (sz != trg_size)
1904 die("object %s inconsistent object length (%lu vs %lu)",
e6a492b7 1905 oid_to_hex(&trg_entry->idx.oid), sz,
1906 trg_size);
ef0316fc 1907 *mem_usage += sz;
560b25a8
NP
1908 }
1909 if (!src->data) {
8ecce684 1910 read_lock();
b4f5aca4 1911 src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
8ecce684 1912 read_unlock();
71064a95
NP
1913 if (!src->data) {
1914 if (src_entry->preferred_base) {
1915 static int warned = 0;
1916 if (!warned++)
1917 warning("object %s cannot be read",
e6a492b7 1918 oid_to_hex(&src_entry->idx.oid));
71064a95
NP
1919 /*
1920 * Those objects are not included in the
1921 * resulting pack. Be resilient and ignore
 1922 * them if they can't be read, so that the
 1923 * pack can still be created.
1924 */
1925 return 0;
1926 }
2e3404c3 1927 die("object %s cannot be read",
e6a492b7 1928 oid_to_hex(&src_entry->idx.oid));
71064a95 1929 }
560b25a8
NP
1930 if (sz != src_size)
1931 die("object %s inconsistent object length (%lu vs %lu)",
e6a492b7 1932 oid_to_hex(&src_entry->idx.oid), sz,
1933 src_size);
ef0316fc 1934 *mem_usage += sz;
560b25a8
NP
1935 }
1936 if (!src->index) {
1937 src->index = create_delta_index(src->data, src_size);
a588d88a
MK
1938 if (!src->index) {
1939 static int warned = 0;
1940 if (!warned++)
1941 warning("suboptimal pack - out of memory");
1942 return 0;
1943 }
ef0316fc 1944 *mem_usage += sizeof_delta_index(src->index);
560b25a8
NP
1945 }
1946
1947 delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
c323ac7d 1948 if (!delta_buf)
75c42d8c 1949 return 0;
f6c7081a 1950
898eba5e 1951 if (DELTA(trg_entry)) {
848d732c
BD
1952 /* Prefer only shallower same-sized deltas. */
1953 if (delta_size == trg_entry->delta_size &&
5a235b5e 1954 src->depth + 1 >= trg->depth) {
848d732c
BD
1955 free(delta_buf);
1956 return 0;
1957 }
074b2eea 1958 }
9e2d57a0 1959
3c701839
NP
1960 /*
1961 * Handle memory allocation outside of the cache
1962 * accounting lock. Compiler will optimize the strangeness
7eb151d6 1963 * away when NO_PTHREADS is defined.
3c701839 1964 */
8e0f7003 1965 free(trg_entry->delta_data);
3c701839 1966 cache_lock();
9e2d57a0
NP
1967 if (trg_entry->delta_data) {
1968 delta_cache_size -= trg_entry->delta_size;
9e2d57a0
NP
1969 trg_entry->delta_data = NULL;
1970 }
d250626c 1971 if (delta_cacheable(src_size, trg_size, delta_size)) {
b7a28f78 1972 delta_cache_size += delta_size;
3c701839
NP
1973 cache_unlock();
1974 trg_entry->delta_data = xrealloc(delta_buf, delta_size);
1975 } else {
1976 cache_unlock();
074b2eea 1977 free(delta_buf);
3c701839
NP
1978 }
1979
898eba5e 1980 SET_DELTA(trg_entry, src_entry);
b7a28f78
NP
1981 trg_entry->delta_size = delta_size;
1982 trg->depth = src->depth + 1;
1983
f6c7081a 1984 return 1;
c323ac7d
LT
1985}
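
/*
 * Editorial worked example, not in the original file: the max_size
 * scaling in try_delta() above penalizes deep bases. With --depth 10,
 * a 2000-byte target that has no delta yet starts at
 * max_size = 2000/2 - 20 = 980 with ref_depth = 1. Deltifying against
 * a base already at depth 4 scales that to
 * 980 * (10 - 4) / (10 - 1 + 1) = 588 bytes, so a delta built on the
 * deeper base is accepted only if it is markedly smaller.
 */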
1986
898b14ce 1987static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
b2504a0d 1988{
898eba5e 1989 struct object_entry *child = DELTA_CHILD(me);
898b14ce
NP
1990 unsigned int m = n;
1991 while (child) {
1992 unsigned int c = check_delta_limit(child, n + 1);
1993 if (m < c)
1994 m = c;
898eba5e 1995 child = DELTA_SIBLING(child);
898b14ce
NP
1996 }
1997 return m;
b2504a0d
NP
1998}
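
/*
 * Editorial illustration, not in the original file: for a delta family
 *
 *     A <- B <- C      (C is a delta against B, B against A)
 *          ^
 *          D           (D is another delta against B)
 *
 * check_delta_limit(A, 0) walks DELTA_CHILD/DELTA_SIBLING and returns
 * 2, the longest dependent chain (A -> B -> C). find_deltas() below
 * subtracts this from the --depth budget so that reused children never
 * end up deeper than the limit.
 */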
1999
75ad235c 2000static unsigned long free_unpacked(struct unpacked *n)
a97773ce 2001{
ef0316fc 2002 unsigned long freed_mem = sizeof_delta_index(n->index);
a97773ce
BD
2003 free_delta_index(n->index);
2004 n->index = NULL;
2005 if (n->data) {
ef0316fc 2006 freed_mem += n->entry->size;
6a83d902 2007 FREE_AND_NULL(n->data);
a97773ce
BD
2008 }
2009 n->entry = NULL;
7d7baa5e 2010 n->depth = 0;
ef0316fc 2011 return freed_mem;
a97773ce
BD
2012}
2013
384b32c0 2014static void find_deltas(struct object_entry **list, unsigned *list_size,
e334977d 2015 int window, int depth, unsigned *processed)
c323ac7d 2016{
384b32c0 2017 uint32_t i, idx = 0, count = 0;
7cadf491 2018 struct unpacked *array;
ef0316fc 2019 unsigned long mem_usage = 0;
c323ac7d 2020
19d4b416 2021 array = xcalloc(window, sizeof(struct unpacked));
21fcd1bd 2022
384b32c0 2023 for (;;) {
421b488a 2024 struct object_entry *entry;
c323ac7d 2025 struct unpacked *n = array + idx;
ef0316fc 2026 int j, max_depth, best_base = -1;
c323ac7d 2027
384b32c0
NP
2028 progress_lock();
2029 if (!*list_size) {
2030 progress_unlock();
2031 break;
2032 }
421b488a 2033 entry = *list++;
384b32c0
NP
2034 (*list_size)--;
2035 if (!entry->preferred_base) {
2036 (*processed)++;
2037 display_progress(progress_state, *processed);
2038 }
2039 progress_unlock();
2040
ef0316fc 2041 mem_usage -= free_unpacked(n);
c323ac7d 2042 n->entry = entry;
ab7cd7bb 2043
a97773ce 2044 while (window_memory_limit &&
ef0316fc 2045 mem_usage > window_memory_limit &&
a97773ce
BD
2046 count > 1) {
2047 uint32_t tail = (idx + window - count) % window;
75ad235c 2048 mem_usage -= free_unpacked(array + tail);
a97773ce
BD
2049 count--;
2050 }
2051
75d39853
NP
2052 /* We do not compute delta to *create* objects we are not
2053 * going to pack.
2054 */
2055 if (entry->preferred_base)
2056 goto next;
2057
898b14ce
NP
2058 /*
 2059 * If the current object is at a pack edge, take the depth of the
 2060 * objects that depend on the current object into account;
 2061 * otherwise they would become too deep.
2062 */
2063 max_depth = depth;
898eba5e 2064 if (DELTA_CHILD(entry)) {
898b14ce
NP
2065 max_depth -= check_delta_limit(entry, 0);
2066 if (max_depth <= 0)
2067 goto next;
2068 }
2069
78817c15
LT
2070 j = window;
2071 while (--j > 0) {
77639870 2072 int ret;
7cadf491 2073 uint32_t other_idx = idx + j;
c323ac7d 2074 struct unpacked *m;
78817c15
LT
2075 if (other_idx >= window)
2076 other_idx -= window;
c323ac7d
LT
2077 m = array + other_idx;
2078 if (!m->entry)
2079 break;
ef0316fc 2080 ret = try_delta(n, m, max_depth, &mem_usage);
77639870 2081 if (ret < 0)
c323ac7d 2082 break;
77639870
JH
2083 else if (ret > 0)
2084 best_base = other_idx;
c323ac7d 2085 }
898b14ce 2086
ed4a9031
NP
2087 /*
2088 * If we decided to cache the delta data, then it is best
2089 * to compress it right away. First because we have to do
2090 * it anyway, and doing it here while we're threaded will
2091 * save a lot of time in the non threaded write phase,
2092 * as well as allow for caching more deltas within
2093 * the same cache size limit.
2094 * ...
2095 * But only if not writing to stdout, since in that case
2096 * the network is most likely throttling writes anyway,
2097 * and therefore it is best to go to the write phase ASAP
2098 * instead, as we can afford spending more time compressing
2099 * between writes at that moment.
2100 */
2101 if (entry->delta_data && !pack_to_stdout) {
0cb3c142
NTND
2102 unsigned long size;
2103
2104 size = do_compress(&entry->delta_data, entry->delta_size);
2105 if (size < (1U << OE_Z_DELTA_BITS)) {
2106 entry->z_delta_size = size;
2107 cache_lock();
2108 delta_cache_size -= entry->delta_size;
2109 delta_cache_size += entry->z_delta_size;
2110 cache_unlock();
2111 } else {
2112 FREE_AND_NULL(entry->delta_data);
2113 entry->z_delta_size = 0;
2114 }
ed4a9031
NP
2115 }
2116
70ca1a3f
JH
 2117 /* If we made n a delta, and if n is already at max
 2118 * depth, leaving it in the window is pointless; we
 2119 * should evict it first.
70ca1a3f 2120 */
898eba5e 2121 if (DELTA(entry) && max_depth <= n->depth)
70ca1a3f 2122 continue;
ff45715c 2123
77639870
JH
2124 /*
2125 * Move the best delta base up in the window, after the
2126 * currently deltified object, to keep it longer. It will
2127 * be the first base object to be attempted next.
2128 */
898eba5e 2129 if (DELTA(entry)) {
77639870
JH
2130 struct unpacked swap = array[best_base];
2131 int dist = (window + idx - best_base) % window;
2132 int dst = best_base;
2133 while (dist--) {
2134 int src = (dst + 1) % window;
2135 array[dst] = array[src];
2136 dst = src;
2137 }
2138 array[dst] = swap;
2139 }
2140
898b14ce 2141 next:
521a4f4c 2142 idx++;
a97773ce
BD
2143 if (count + 1 < window)
2144 count++;
521a4f4c
LT
2145 if (idx >= window)
2146 idx = 0;
384b32c0 2147 }
adee7bdf 2148
f6c7081a 2149 for (i = 0; i < window; ++i) {
ff45715c 2150 free_delta_index(array[i].index);
adee7bdf 2151 free(array[i].data);
f6c7081a 2152 }
adee7bdf 2153 free(array);
c323ac7d
LT
2154}
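
/*
 * Editorial worked example, not in the original file: the rotation at
 * the end of the loop above is a move-to-front of the best delta base.
 * With window = 4, slots [m0, m1, m2, n] (n being the entry just
 * deltified at idx = 3) and best_base = 1, the swap walk produces
 * [m0, m2, n, m1]: m1 now sits right after n, so it is the first base
 * tried for the next object entering the window.
 */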
2155
7eb151d6 2156#ifndef NO_PTHREADS
8ecce684 2157
a9a74636
NP
2158static void try_to_free_from_threads(size_t size)
2159{
2160 read_lock();
7c3ecb32 2161 release_pack_memory(size);
a9a74636
NP
2162 read_unlock();
2163}
2164
bc9b2175 2165static try_to_free_t old_try_to_free_routine;
851c34b0 2166
50f22ada
JS
2167/*
2168 * The main thread waits on the condition that (at least) one of the workers
2169 * has stopped working (which is indicated in the .working member of
2170 * struct thread_params).
 2171 * When a work thread has completed its work, it sets .working to 0,
 2172 * signals the main thread, and waits on the condition that .data_ready
 2173 * becomes 1.
2174 */
2175
8ecce684
NP
2176struct thread_params {
2177 pthread_t thread;
2178 struct object_entry **list;
2179 unsigned list_size;
384b32c0 2180 unsigned remaining;
8ecce684
NP
2181 int window;
2182 int depth;
50f22ada
JS
2183 int working;
2184 int data_ready;
2185 pthread_mutex_t mutex;
2186 pthread_cond_t cond;
8ecce684
NP
2187 unsigned *processed;
2188};
2189
44626dc7
AH
2190static pthread_cond_t progress_cond;
2191
2192/*
 2193 * Mutexes and condition variables can't be statically initialized on Windows.
2194 */
2195static void init_threaded_search(void)
2196{
93749194 2197 init_recursive_mutex(&read_mutex);
44626dc7
AH
2198 pthread_mutex_init(&cache_mutex, NULL);
2199 pthread_mutex_init(&progress_mutex, NULL);
2200 pthread_cond_init(&progress_cond, NULL);
851c34b0 2201 old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
44626dc7
AH
2202}
2203
2204static void cleanup_threaded_search(void)
2205{
851c34b0 2206 set_try_to_free_routine(old_try_to_free_routine);
44626dc7
AH
2207 pthread_cond_destroy(&progress_cond);
2208 pthread_mutex_destroy(&read_mutex);
2209 pthread_mutex_destroy(&cache_mutex);
2210 pthread_mutex_destroy(&progress_mutex);
2211}
c2a33679 2212
8ecce684
NP
2213static void *threaded_find_deltas(void *arg)
2214{
c2a33679
NP
2215 struct thread_params *me = arg;
2216
0c2ad00b 2217 progress_lock();
50f22ada 2218 while (me->remaining) {
0c2ad00b
2219 progress_unlock();
2220
384b32c0 2221 find_deltas(me->list, &me->remaining,
c2a33679 2222 me->window, me->depth, me->processed);
50f22ada
JS
2223
2224 progress_lock();
2225 me->working = 0;
2226 pthread_cond_signal(&progress_cond);
2227 progress_unlock();
2228
2229 /*
2230 * We must not set ->data_ready before we wait on the
2231 * condition because the main thread may have set it to 1
2232 * before we get here. In order to be sure that new
2233 * work is available if we see 1 in ->data_ready, it
2234 * was initialized to 0 before this thread was spawned
2235 * and we reset it to 0 right away.
2236 */
2237 pthread_mutex_lock(&me->mutex);
2238 while (!me->data_ready)
2239 pthread_cond_wait(&me->cond, &me->mutex);
2240 me->data_ready = 0;
2241 pthread_mutex_unlock(&me->mutex);
0c2ad00b
2242
2243 progress_lock();
c2a33679 2244 }
0c2ad00b 2245 progress_unlock();
50f22ada
JS
2246 /* leave ->working 1 so that this doesn't get more work assigned */
2247 return NULL;
8ecce684
NP
2248}
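
/*
 * Editorial sketch, not part of the original file: the worker/main
 * handshake implemented by threaded_find_deltas() above and
 * ll_find_deltas() below, reduced to its skeleton (names mirror the
 * fields of struct thread_params):
 *
 *   worker:                            main thread:
 *     find_deltas(...);                  lock(progress_mutex);
 *     lock(progress_mutex);              while (no idle worker)
 *     me->working = 0;                     cond_wait(&progress_cond);
 *     signal(&progress_cond);            hand out stolen work;
 *     unlock(progress_mutex);            lock(&target->mutex);
 *     lock(&me->mutex);                  target->data_ready = 1;
 *     while (!me->data_ready)            signal(&target->cond);
 *       cond_wait(&me->cond);            unlock(&target->mutex);
 *     me->data_ready = 0;
 *     unlock(&me->mutex);
 */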
2249
8ecce684
NP
2250static void ll_find_deltas(struct object_entry **list, unsigned list_size,
2251 int window, int depth, unsigned *processed)
2252{
dcda3614 2253 struct thread_params *p;
384b32c0 2254 int i, ret, active_threads = 0;
c2a33679 2255
44626dc7
AH
2256 init_threaded_search();
2257
367f4a43 2258 if (delta_search_threads <= 1) {
384b32c0 2259 find_deltas(list, &list_size, window, depth, processed);
44626dc7 2260 cleanup_threaded_search();
367f4a43
NP
2261 return;
2262 }
43cc2b42 2263 if (progress > pack_to_stdout)
b6c29915 2264 fprintf(stderr, "Delta compression using up to %d threads.\n",
43cc2b42 2265 delta_search_threads);
dcda3614 2266 p = xcalloc(delta_search_threads, sizeof(*p));
367f4a43 2267
50f22ada 2268 /* Partition the work amongst work threads. */
367f4a43 2269 for (i = 0; i < delta_search_threads; i++) {
50f22ada
JS
2270 unsigned sub_size = list_size / (delta_search_threads - i);
2271
bf874896
NP
2272 /* don't use too small segments or no deltas will be found */
2273 if (sub_size < 2*window && i+1 < delta_search_threads)
2274 sub_size = 0;
2275
8ecce684
NP
2276 p[i].window = window;
2277 p[i].depth = depth;
2278 p[i].processed = processed;
50f22ada
JS
2279 p[i].working = 1;
2280 p[i].data_ready = 0;
c2a33679 2281
59921b4b 2282 /* try to split chunks on "path" boundaries */
6fc74703
NP
2283 while (sub_size && sub_size < list_size &&
2284 list[sub_size]->hash &&
384b32c0
NP
2285 list[sub_size]->hash == list[sub_size-1]->hash)
2286 sub_size++;
2287
50f22ada
JS
2288 p[i].list = list;
2289 p[i].list_size = sub_size;
2290 p[i].remaining = sub_size;
59921b4b 2291
384b32c0
NP
2292 list += sub_size;
2293 list_size -= sub_size;
2294 }
2295
50f22ada
JS
2296 /* Start work threads. */
2297 for (i = 0; i < delta_search_threads; i++) {
2298 if (!p[i].list_size)
2299 continue;
68e6a4f8
JS
2300 pthread_mutex_init(&p[i].mutex, NULL);
2301 pthread_cond_init(&p[i].cond, NULL);
50f22ada
JS
2302 ret = pthread_create(&p[i].thread, NULL,
2303 threaded_find_deltas, &p[i]);
2304 if (ret)
2305 die("unable to create thread: %s", strerror(ret));
2306 active_threads++;
2307 }
2308
384b32c0
NP
2309 /*
2310 * Now let's wait for work completion. Each time a thread is done
2311 * with its work, we steal half of the remaining work from the
2312 * thread with the largest number of unprocessed objects and give
 2313 * it to that newly idle thread. This ensures good load balancing
2314 * until the remaining object list segments are simply too short
2315 * to be worth splitting anymore.
2316 */
50f22ada
JS
2317 while (active_threads) {
2318 struct thread_params *target = NULL;
384b32c0
NP
2319 struct thread_params *victim = NULL;
2320 unsigned sub_size = 0;
384b32c0
NP
2321
2322 progress_lock();
50f22ada
JS
2323 for (;;) {
2324 for (i = 0; !target && i < delta_search_threads; i++)
2325 if (!p[i].working)
2326 target = &p[i];
2327 if (target)
2328 break;
2329 pthread_cond_wait(&progress_cond, &progress_mutex);
2330 }
2331
384b32c0
NP
2332 for (i = 0; i < delta_search_threads; i++)
2333 if (p[i].remaining > 2*window &&
2334 (!victim || victim->remaining < p[i].remaining))
2335 victim = &p[i];
2336 if (victim) {
2337 sub_size = victim->remaining / 2;
2338 list = victim->list + victim->list_size - sub_size;
2339 while (sub_size && list[0]->hash &&
2340 list[0]->hash == list[-1]->hash) {
2341 list++;
2342 sub_size--;
2343 }
eb9688ff
NP
2344 if (!sub_size) {
2345 /*
2346 * It is possible for some "paths" to have
2347 * so many objects that no hash boundary
2348 * might be found. Let's just steal the
2349 * exact half in that case.
2350 */
2351 sub_size = victim->remaining / 2;
2352 list -= sub_size;
2353 }
384b32c0
NP
2354 target->list = list;
2355 victim->list_size -= sub_size;
2356 victim->remaining -= sub_size;
2357 }
384b32c0
NP
2358 target->list_size = sub_size;
2359 target->remaining = sub_size;
50f22ada
JS
2360 target->working = 1;
2361 progress_unlock();
2362
2363 pthread_mutex_lock(&target->mutex);
2364 target->data_ready = 1;
2365 pthread_cond_signal(&target->cond);
2366 pthread_mutex_unlock(&target->mutex);
c2a33679 2367
384b32c0 2368 if (!sub_size) {
b81d9af7 2369 pthread_join(target->thread, NULL);
50f22ada
JS
2370 pthread_cond_destroy(&target->cond);
2371 pthread_mutex_destroy(&target->mutex);
384b32c0 2372 active_threads--;
c2a33679 2373 }
50f22ada 2374 }
44626dc7 2375 cleanup_threaded_search();
dcda3614 2376 free(p);
8ecce684
NP
2377}
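
/*
 * Editorial worked example, not in the original file: with four threads
 * and 1000 delta candidates, each thread initially receives 250.
 * Suppose one thread drains its segment while another still has 400
 * objects remaining: the idle thread steals 400 / 2 = 200 objects from
 * the tail of the victim's list (nudged to a name-hash boundary so
 * related objects stay together) and both continue. Once no victim has
 * more than 2*window objects left, idle threads are joined instead.
 */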
2378
2379#else
384b32c0 2380#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)
8ecce684
NP
2381#endif
2382
b773ddea
JK
2383static void add_tag_chain(const struct object_id *oid)
2384{
2385 struct tag *tag;
2386
2387 /*
2388 * We catch duplicates already in add_object_entry(), but we'd
2389 * prefer to do this extra check to avoid having to parse the
2390 * tag at all if we already know that it's being packed (e.g., if
2391 * it was included via bitmaps, we would not have parsed it
2392 * previously).
2393 */
2394 if (packlist_find(&to_pack, oid->hash, NULL))
2395 return;
2396
d3101b53 2397 tag = lookup_tag(oid);
b773ddea
JK
2398 while (1) {
2399 if (!tag || parse_tag(tag) || !tag->tagged)
2400 die("unable to pack objects reachable from tag %s",
2401 oid_to_hex(oid));
2402
188960b4 2403 add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
b773ddea
JK
2404
2405 if (tag->tagged->type != OBJ_TAG)
2406 return;
2407
2408 tag = (struct tag *)tag->tagged;
2409 }
2410}
2411
d155254c 2412static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
f0a24aa5 2413{
d155254c 2414 struct object_id peeled;
f0a24aa5 2415
59556548 2416 if (starts_with(path, "refs/tags/") && /* is a tag? */
b420d909 2417 !peel_ref(path, &peeled) && /* peelable? */
d155254c 2418 packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */
b773ddea 2419 add_tag_chain(oid);
f0a24aa5
SP
2420 return 0;
2421}
2422
f3123c4a
JH
2423static void prepare_pack(int window, int depth)
2424{
9668cf59 2425 struct object_entry **delta_list;
6e1c2344
RJ
2426 uint32_t i, nr_deltas;
2427 unsigned n;
9668cf59 2428
3f9ac8d2 2429 get_object_details();
9668cf59 2430
0e8189e2
NP
2431 /*
2432 * If we're locally repacking then we need to be doubly careful
2433 * from now on in order to make sure no stealth corruption gets
2434 * propagated to the new pack. Clients receiving streamed packs
2435 * should validate everything they get anyway so no need to incur
2436 * the additional cost here in that case.
2437 */
2438 if (!pack_to_stdout)
2439 do_check_packed_object_crc = 1;
2440
2834bc27 2441 if (!to_pack.nr_objects || !window || !depth)
9668cf59
NP
2442 return;
2443
b32fa95f 2444 ALLOC_ARRAY(delta_list, to_pack.nr_objects);
75d39853
NP
2445 nr_deltas = n = 0;
2446
2834bc27
VM
2447 for (i = 0; i < to_pack.nr_objects; i++) {
2448 struct object_entry *entry = to_pack.objects + i;
75d39853 2449
898eba5e 2450 if (DELTA(entry))
75d39853 2451 /* This happens if we decided to reuse existing
a7de7130 2452 * delta from a pack. "reuse_delta &&" is implied.
75d39853
NP
2453 */
2454 continue;
2455
660b3735 2456 if (!entry->type_valid || entry->size < 50)
75d39853
NP
2457 continue;
2458
2459 if (entry->no_try_delta)
2460 continue;
2461
6d6f9cdd 2462 if (!entry->preferred_base) {
75d39853 2463 nr_deltas++;
fd9b1bae 2464 if (oe_type(entry) < 0)
6d6f9cdd 2465 die("unable to get type of object %s",
e6a492b7 2466 oid_to_hex(&entry->idx.oid));
eede9f42 2467 } else {
fd9b1bae 2468 if (oe_type(entry) < 0) {
eede9f42
NP
2469 /*
2470 * This object is not found, but we
2471 * don't have to include it anyway.
2472 */
2473 continue;
2474 }
6d6f9cdd 2475 }
75d39853
NP
2476
2477 delta_list[n++] = entry;
2478 }
2479
2f8b8947 2480 if (nr_deltas && n > 1) {
e334977d
NP
2481 unsigned nr_done = 0;
2482 if (progress)
754dbc43 2483 progress_state = start_progress(_("Compressing objects"),
dc6a0757 2484 nr_deltas);
9ed0d8d6 2485 QSORT(delta_list, n, type_size_sort);
8ecce684 2486 ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
4d4fcc54 2487 stop_progress(&progress_state);
e334977d
NP
2488 if (nr_done != nr_deltas)
2489 die("inconsistency with delta count");
75d39853 2490 }
9668cf59 2491 free(delta_list);
f3123c4a
JH
2492}
2493
ef90d6d4 2494static int git_pack_config(const char *k, const char *v, void *cb)
4812a93a 2495{
eeefa7c9 2496 if (!strcmp(k, "pack.window")) {
4812a93a
JK
2497 window = git_config_int(k, v);
2498 return 0;
2499 }
a97773ce
BD
2500 if (!strcmp(k, "pack.windowmemory")) {
2501 window_memory_limit = git_config_ulong(k, v);
2502 return 0;
2503 }
2504 if (!strcmp(k, "pack.depth")) {
842aaf93
TT
2505 depth = git_config_int(k, v);
2506 return 0;
2507 }
074b2eea
MK
2508 if (!strcmp(k, "pack.deltacachesize")) {
2509 max_delta_cache_size = git_config_int(k, v);
2510 return 0;
2511 }
e3dfddb3
MK
2512 if (!strcmp(k, "pack.deltacachelimit")) {
2513 cache_max_small_delta_size = git_config_int(k, v);
2514 return 0;
2515 }
ae4f07fb
VM
2516 if (!strcmp(k, "pack.writebitmaphashcache")) {
2517 if (git_config_bool(k, v))
2518 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
2519 else
2520 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
2521 }
6b8fda2d 2522 if (!strcmp(k, "pack.usebitmaps")) {
645c432d 2523 use_bitmap_index_default = git_config_bool(k, v);
6b8fda2d
VM
2524 return 0;
2525 }
693b86ff
NP
2526 if (!strcmp(k, "pack.threads")) {
2527 delta_search_threads = git_config_int(k, v);
833e3df1 2528 if (delta_search_threads < 0)
693b86ff
NP
2529 die("invalid number of threads specified (%d)",
2530 delta_search_threads);
7eb151d6 2531#ifdef NO_PTHREADS
2e96d815 2532 if (delta_search_threads != 1) {
693b86ff 2533 warning("no threads support, ignoring %s", k);
2e96d815
ÆAB
2534 delta_search_threads = 0;
2535 }
693b86ff
NP
2536#endif
2537 return 0;
2538 }
4d00bda2 2539 if (!strcmp(k, "pack.indexversion")) {
ebcfb379
JH
2540 pack_idx_opts.version = git_config_int(k, v);
2541 if (pack_idx_opts.version > 2)
6e1c2344 2542 die("bad pack.indexversion=%"PRIu32,
ebcfb379 2543 pack_idx_opts.version);
4d00bda2
NP
2544 return 0;
2545 }
ef90d6d4 2546 return git_default_config(k, v, cb);
4812a93a
JK
2547}
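
/*
 * Editorial example, not in the original file: a configuration block
 * exercising the keys parsed above (values are illustrative, not
 * recommendations):
 *
 *   [pack]
 *       window = 20
 *       depth = 50
 *       threads = 4
 *       windowMemory = 256m
 *       deltaCacheSize = 512m
 *       deltaCacheLimit = 1000
 *       useBitmaps = true
 *       writeBitmapHashCache = true
 *       indexVersion = 2
 */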
2548
b5d97e6b 2549static void read_object_list_from_stdin(void)
c323ac7d 2550{
188960b4 2551 char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
2552 struct object_id oid;
2553 const char *p;
b2504a0d 2554
da93d12b 2555 for (;;) {
da93d12b
LT
2556 if (!fgets(line, sizeof(line), stdin)) {
2557 if (feof(stdin))
2558 break;
2559 if (!ferror(stdin))
2560 die("fgets returned NULL, not EOF, not error!");
687dd75c 2561 if (errno != EINTR)
d824cbba 2562 die_errno("fgets");
687dd75c
JH
2563 clearerr(stdin);
2564 continue;
da93d12b 2565 }
7a979d99 2566 if (line[0] == '-') {
188960b4 2567 if (get_oid_hex(line+1, &oid))
2568 die("expected edge object ID, got garbage:\n %s",
b5d97e6b 2569 line);
188960b4 2570 add_preferred_base(&oid);
7a979d99 2571 continue;
21fcd1bd 2572 }
188960b4 2573 if (parse_oid_hex(line, &oid, &p))
2574 die("expected object ID, got garbage:\n %s", line);
b5d97e6b 2575
188960b4 2576 add_preferred_base_object(p + 1);
fd9b1bae 2577 add_object_entry(&oid, OBJ_NONE, p + 1, 0);
c323ac7d 2578 }
b5d97e6b
JH
2579}
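
/*
 * Editorial example, not in the original file: the stdin format parsed
 * above -- one object per line, "<oid> <name>" for objects to pack and
 * "-<oid>" for preferred-base edges (hashes are placeholders):
 *
 *   -3f786850e387550fdab836ed7e6dc881de23001b
 *   89e6c98d92887913cadf06b2adb97f26cde4849b v2.0.0
 *   2b297e643c88e5dcb9ccef3f2ff142105d1db26e Makefile
 */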
2580
95308d64 2581/* Remember to update object flag allocation in object.h */
08cdfb13
JH
2582#define OBJECT_ADDED (1u<<20)
2583
11c211fa 2584static void show_commit(struct commit *commit, void *data)
b5d97e6b 2585{
188960b4 2586 add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);
08cdfb13 2587 commit->object.flags |= OBJECT_ADDED;
7cc8f971
VM
2588
2589 if (write_bitmap_index)
2590 index_commit_for_bitmap(commit);
b5d97e6b
JH
2591}
2592
de1e67d0 2593static void show_object(struct object *obj, const char *name, void *data)
b5d97e6b 2594{
8d2dfc49 2595 add_preferred_base_object(name);
188960b4 2596 add_object_entry(&obj->oid, obj->type, name, 0);
8d2dfc49 2597 obj->flags |= OBJECT_ADDED;
b5d97e6b
JH
2598}
2599
9535ce73
JH
2600static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
2601{
2602 assert(arg_missing_action == MA_ALLOW_ANY);
2603
2604 /*
2605 * Quietly ignore ALL missing objects. This avoids problems with
2606 * staging them now and getting an odd error later.
2607 */
2608 if (!has_object_file(&obj->oid))
2609 return;
2610
2611 show_object(obj, name, data);
2612}
2613
0c16cd49
JT
2614static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
2615{
2616 assert(arg_missing_action == MA_ALLOW_PROMISOR);
2617
2618 /*
2619 * Quietly ignore EXPECTED missing objects. This avoids problems with
2620 * staging them now and getting an odd error later.
2621 */
2622 if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
2623 return;
2624
2625 show_object(obj, name, data);
2626}
2627
9535ce73
JH
2628static int option_parse_missing_action(const struct option *opt,
2629 const char *arg, int unset)
2630{
2631 assert(arg);
2632 assert(!unset);
2633
2634 if (!strcmp(arg, "error")) {
2635 arg_missing_action = MA_ERROR;
2636 fn_show_object = show_object;
2637 return 0;
2638 }
2639
2640 if (!strcmp(arg, "allow-any")) {
2641 arg_missing_action = MA_ALLOW_ANY;
0c16cd49 2642 fetch_if_missing = 0;
9535ce73
JH
2643 fn_show_object = show_object__ma_allow_any;
2644 return 0;
2645 }
2646
0c16cd49
JT
2647 if (!strcmp(arg, "allow-promisor")) {
2648 arg_missing_action = MA_ALLOW_PROMISOR;
2649 fetch_if_missing = 0;
2650 fn_show_object = show_object__ma_allow_promisor;
2651 return 0;
2652 }
2653
9535ce73
JH
2654 die(_("invalid value for --missing"));
2655 return 0;
2656}
2657
8d1d8f83
JH
2658static void show_edge(struct commit *commit)
2659{
188960b4 2660 add_preferred_base(&commit->object.oid);
8d1d8f83
JH
2661}
2662
08cdfb13
JH
2663struct in_pack_object {
2664 off_t offset;
2665 struct object *object;
2666};
2667
2668struct in_pack {
071bcaab
RJ
2669 unsigned int alloc;
2670 unsigned int nr;
08cdfb13
JH
2671 struct in_pack_object *array;
2672};
2673
2674static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
2675{
ed1c9977 2676 in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
08cdfb13
JH
2677 in_pack->array[in_pack->nr].object = object;
2678 in_pack->nr++;
2679}
2680
2681/*
 2682 * Compare the objects in offset order, to emulate the
f18d244a 2683 * "git rev-list --objects" output that produced the pack originally.
08cdfb13
JH
2684 */
2685static int ofscmp(const void *a_, const void *b_)
2686{
2687 struct in_pack_object *a = (struct in_pack_object *)a_;
2688 struct in_pack_object *b = (struct in_pack_object *)b_;
2689
2690 if (a->offset < b->offset)
2691 return -1;
2692 else if (a->offset > b->offset)
2693 return 1;
2694 else
f2fd0760 2695 return oidcmp(&a->object->oid, &b->object->oid);
08cdfb13
JH
2696}
2697
2698static void add_objects_in_unpacked_packs(struct rev_info *revs)
2699{
2700 struct packed_git *p;
2701 struct in_pack in_pack;
2702 uint32_t i;
2703
2704 memset(&in_pack, 0, sizeof(in_pack));
2705
a80d72db 2706 for (p = get_packed_git(the_repository); p; p = p->next) {
188960b4 2707 struct object_id oid;
08cdfb13
JH
2708 struct object *o;
2709
79bc4c71 2710 if (!p->pack_local || p->pack_keep)
08cdfb13
JH
2711 continue;
2712 if (open_pack_index(p))
2713 die("cannot open pack index");
2714
2715 ALLOC_GROW(in_pack.array,
2716 in_pack.nr + p->num_objects,
2717 in_pack.alloc);
2718
2719 for (i = 0; i < p->num_objects; i++) {
188960b4 2720 nth_packed_object_oid(&oid, p, i);
2721 o = lookup_unknown_object(oid.hash);
08cdfb13
JH
2722 if (!(o->flags & OBJECT_ADDED))
2723 mark_in_pack_object(o, p, &in_pack);
2724 o->flags |= OBJECT_ADDED;
2725 }
2726 }
2727
2728 if (in_pack.nr) {
9ed0d8d6 2729 QSORT(in_pack.array, in_pack.nr, ofscmp);
08cdfb13
JH
2730 for (i = 0; i < in_pack.nr; i++) {
2731 struct object *o = in_pack.array[i].object;
188960b4 2732 add_object_entry(&o->oid, o->type, "", 0);
08cdfb13
JH
2733 }
2734 }
2735 free(in_pack.array);
2736}
2737
76c1d9a0 2738static int add_loose_object(const struct object_id *oid, const char *path,
e26a8c47
JK
2739 void *data)
2740{
abef9020 2741 enum object_type type = oid_object_info(oid, NULL);
e26a8c47
JK
2742
2743 if (type < 0) {
2744 warning("loose object at %s could not be examined", path);
2745 return 0;
2746 }
2747
188960b4 2748 add_object_entry(oid, type, "", 0);
e26a8c47
JK
2749 return 0;
2750}
2751
2752/*
2753 * We actually don't even have to worry about reachability here.
2754 * add_object_entry will weed out duplicates, so we just add every
2755 * loose object we find.
2756 */
2757static void add_unreachable_loose_objects(void)
2758{
2759 for_each_loose_file_in_objdir(get_object_directory(),
2760 add_loose_object,
2761 NULL, NULL, NULL);
2762}
2763
188960b4 2764static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
094085e3
BC
2765{
2766 static struct packed_git *last_found = (void *)1;
2767 struct packed_git *p;
2768
a80d72db
SB
2769 p = (last_found != (void *)1) ? last_found :
2770 get_packed_git(the_repository);
094085e3
BC
2771
2772 while (p) {
2773 if ((!p->pack_local || p->pack_keep) &&
188960b4 2774 find_pack_entry_one(oid->hash, p)) {
094085e3
BC
2775 last_found = p;
2776 return 1;
2777 }
2778 if (p == last_found)
a80d72db 2779 p = get_packed_git(the_repository);
094085e3
BC
2780 else
2781 p = p->next;
2782 if (p == last_found)
2783 p = p->next;
2784 }
2785 return 0;
2786}
2787
abcb8655
JK
2788/*
 2789 * Store a list of sha1s that should not be discarded
2790 * because they are either written too recently, or are
2791 * reachable from another object that was.
2792 *
2793 * This is filled by get_object_list.
2794 */
910650d2 2795static struct oid_array recent_objects;
abcb8655 2796
4ce3621a 2797static int loosened_object_can_be_discarded(const struct object_id *oid,
dddbad72 2798 timestamp_t mtime)
d0d46abc
JK
2799{
2800 if (!unpack_unreachable_expiration)
2801 return 0;
2802 if (mtime > unpack_unreachable_expiration)
2803 return 0;
910650d2 2804 if (oid_array_lookup(&recent_objects, oid) >= 0)
abcb8655 2805 return 0;
d0d46abc
JK
2806 return 1;
2807}
2808
ca11b212
NP
2809static void loosen_unused_packed_objects(struct rev_info *revs)
2810{
2811 struct packed_git *p;
2812 uint32_t i;
4ce3621a 2813 struct object_id oid;
ca11b212 2814
a80d72db 2815 for (p = get_packed_git(the_repository); p; p = p->next) {
79bc4c71 2816 if (!p->pack_local || p->pack_keep)
ca11b212
NP
2817 continue;
2818
2819 if (open_pack_index(p))
2820 die("cannot open pack index");
2821
2822 for (i = 0; i < p->num_objects; i++) {
4ce3621a 2823 nth_packed_object_oid(&oid, p, i);
2824 if (!packlist_find(&to_pack, oid.hash, NULL) &&
188960b4 2825 !has_sha1_pack_kept_or_nonlocal(&oid) &&
4ce3621a 2826 !loosened_object_can_be_discarded(&oid, p->mtime))
4bdb70a4 2827 if (force_object_loose(&oid, p->mtime))
ca11b212
NP
2828 die("unable to force loose object");
2829 }
2830 }
2831}
2832
69e4b342 2833/*
645c432d
KS
2834 * This tracks any options which pack-reuse code expects to be on, or which a
2835 * reader of the pack might not understand, and which would therefore prevent
2836 * blind reuse of what we have on disk.
69e4b342
JK
2837 */
2838static int pack_options_allow_reuse(void)
2839{
9df4a607
JK
2840 return pack_to_stdout &&
2841 allow_ofs_delta &&
2842 !ignore_packed_keep &&
2843 (!local || !have_non_local_packs) &&
2844 !incremental;
69e4b342
JK
2845}
2846
6b8fda2d
VM
2847static int get_object_list_from_bitmap(struct rev_info *revs)
2848{
2849 if (prepare_bitmap_walk(revs) < 0)
2850 return -1;
2851
69e4b342
JK
2852 if (pack_options_allow_reuse() &&
2853 !reuse_partial_packfile_from_bitmap(
6b8fda2d
VM
2854 &reuse_packfile,
2855 &reuse_packfile_objects,
2856 &reuse_packfile_offset)) {
2857 assert(reuse_packfile_objects);
2858 nr_result += reuse_packfile_objects;
78d2214e 2859 display_progress(progress_state, nr_result);
6b8fda2d
VM
2860 }
2861
2862 traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
2863 return 0;
2864}
2865
abcb8655 2866static void record_recent_object(struct object *obj,
de1e67d0 2867 const char *name,
abcb8655
JK
2868 void *data)
2869{
910650d2 2870 oid_array_append(&recent_objects, &obj->oid);
abcb8655
JK
2871}
2872
2873static void record_recent_commit(struct commit *commit, void *data)
2874{
910650d2 2875 oid_array_append(&recent_objects, &commit->object.oid);
abcb8655
JK
2876}
2877
8d1d8f83 2878static void get_object_list(int ac, const char **av)
b5d97e6b
JH
2879{
2880 struct rev_info revs;
2881 char line[1000];
b5d97e6b
JH
2882 int flags = 0;
2883
b5d97e6b
JH
2884 init_revisions(&revs, NULL);
2885 save_commit_buffer = 0;
b5d97e6b
JH
2886 setup_revisions(ac, av, &revs, NULL);
2887
b790e0f6
NTND
2888 /* make sure shallows are read */
2889 is_repository_shallow();
2890
b5d97e6b
JH
2891 while (fgets(line, sizeof(line), stdin) != NULL) {
2892 int len = strlen(line);
872c930d 2893 if (len && line[len - 1] == '\n')
b5d97e6b
JH
2894 line[--len] = 0;
2895 if (!len)
2896 break;
2897 if (*line == '-') {
2898 if (!strcmp(line, "--not")) {
2899 flags ^= UNINTERESTING;
7cc8f971 2900 write_bitmap_index = 0;
b5d97e6b
JH
2901 continue;
2902 }
b790e0f6 2903 if (starts_with(line, "--shallow ")) {
e92b848c 2904 struct object_id oid;
2905 if (get_oid_hex(line + 10, &oid))
b790e0f6 2906 die("not an SHA-1 '%s'", line + 10);
e92b848c 2907 register_shallow(&oid);
f7f91086 2908 use_bitmap_index = 0;
b790e0f6
NTND
2909 continue;
2910 }
b5d97e6b
JH
2911 die("not a rev '%s'", line);
2912 }
8e676e8b 2913 if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
b5d97e6b
JH
2914 die("bad revision '%s'", line);
2915 }
2916
6b8fda2d
VM
2917 if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
2918 return;
2919
3d51e1b5
MK
2920 if (prepare_revision_walk(&revs))
2921 die("revision walk setup failed");
e76a5fb4 2922 mark_edges_uninteresting(&revs, show_edge);
9535ce73
JH
2923
2924 if (!fn_show_object)
2925 fn_show_object = show_object;
2926 traverse_commit_list_filtered(&filter_options, &revs,
2927 show_commit, fn_show_object, NULL,
2928 NULL);
08cdfb13 2929
abcb8655
JK
2930 if (unpack_unreachable_expiration) {
2931 revs.ignore_missing_links = 1;
2932 if (add_unseen_recent_objects_to_traversal(&revs,
2933 unpack_unreachable_expiration))
2934 die("unable to add recent objects");
2935 if (prepare_revision_walk(&revs))
2936 die("revision walk setup failed");
2937 traverse_commit_list(&revs, record_recent_commit,
2938 record_recent_object, NULL);
2939 }
2940
08cdfb13
JH
2941 if (keep_unreachable)
2942 add_objects_in_unpacked_packs(&revs);
e26a8c47
JK
2943 if (pack_loose_unreachable)
2944 add_unreachable_loose_objects();
ca11b212
NP
2945 if (unpack_unreachable)
2946 loosen_unused_packed_objects(&revs);
abcb8655 2947
910650d2 2948 oid_array_clear(&recent_objects);
b5d97e6b
JH
2949}
2950
99fb6e04
NTND
2951static int option_parse_index_version(const struct option *opt,
2952 const char *arg, int unset)
2953{
2954 char *c;
2955 const char *val = arg;
2956 pack_idx_opts.version = strtoul(val, &c, 10);
2957 if (pack_idx_opts.version > 2)
2958 die(_("unsupported index version %s"), val);
2959 if (*c == ',' && c[1])
2960 pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
2961 if (*c || pack_idx_opts.off32_limit & 0x80000000)
2962 die(_("bad index version '%s'"), val);
2963 return 0;
2964}
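
/*
 * Editorial example, not in the original file: the syntax accepted by
 * the parser above, as it would appear on the command line:
 *
 *   git pack-objects --index-version=2 ...
 *   git pack-objects --index-version=2,0x40000000 ...
 *
 * The optional second field sets the offset threshold at which a v2
 * index switches to 64-bit entries; it must fit in 31 bits.
 */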
2965
7e52f566
JK
2966static int option_parse_unpack_unreachable(const struct option *opt,
2967 const char *arg, int unset)
2968{
2969 if (unset) {
2970 unpack_unreachable = 0;
2971 unpack_unreachable_expiration = 0;
2972 }
2973 else {
2974 unpack_unreachable = 1;
2975 if (arg)
2976 unpack_unreachable_expiration = approxidate(arg);
2977 }
2978 return 0;
2979}
2980
b5d97e6b
JH
2981int cmd_pack_objects(int argc, const char **argv, const char *prefix)
2982{
b5d97e6b 2983 int use_internal_rev_list = 0;
8d1d8f83 2984 int thin = 0;
2dacf26d 2985 int shallow = 0;
4f366275 2986 int all_progress_implied = 0;
edfbb2aa 2987 struct argv_array rp = ARGV_ARRAY_INIT;
99fb6e04 2988 int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
c90f9e13 2989 int rev_list_index = 0;
99fb6e04
NTND
2990 struct option pack_objects_options[] = {
2991 OPT_SET_INT('q', "quiet", &progress,
4c688120 2992 N_("do not show progress meter"), 0),
99fb6e04 2993 OPT_SET_INT(0, "progress", &progress,
4c688120 2994 N_("show progress meter"), 1),
99fb6e04 2995 OPT_SET_INT(0, "all-progress", &progress,
4c688120 2996 N_("show progress meter during object writing phase"), 2),
99fb6e04
NTND
2997 OPT_BOOL(0, "all-progress-implied",
2998 &all_progress_implied,
4c688120
NTND
2999 N_("similar to --all-progress when progress meter is shown")),
3000 { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
3001 N_("write the pack index file in the specified idx format version"),
99fb6e04 3002 0, option_parse_index_version },
2a514ed8
CB
3003 OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
3004 N_("maximum size of each output pack file")),
99fb6e04 3005 OPT_BOOL(0, "local", &local,
4c688120 3006 N_("ignore borrowed objects from alternate object store")),
99fb6e04 3007 OPT_BOOL(0, "incremental", &incremental,
4c688120 3008 N_("ignore packed objects")),
99fb6e04 3009 OPT_INTEGER(0, "window", &window,
4c688120 3010 N_("limit pack window by objects")),
2a514ed8
CB
3011 OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
3012 N_("limit pack window by memory in addition to object limit")),
99fb6e04 3013 OPT_INTEGER(0, "depth", &depth,
4c688120 3014 N_("maximum length of delta chain allowed in the resulting pack")),
99fb6e04 3015 OPT_BOOL(0, "reuse-delta", &reuse_delta,
4c688120 3016 N_("reuse existing deltas")),
99fb6e04 3017 OPT_BOOL(0, "reuse-object", &reuse_object,
4c688120 3018 N_("reuse existing objects")),
99fb6e04 3019 OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
4c688120 3020 N_("use OFS_DELTA objects")),
99fb6e04 3021 OPT_INTEGER(0, "threads", &delta_search_threads,
4c688120 3022 N_("use threads when searching for best delta matches")),
99fb6e04 3023 OPT_BOOL(0, "non-empty", &non_empty,
4c688120 3024 N_("do not create an empty pack output")),
99fb6e04 3025 OPT_BOOL(0, "revs", &use_internal_rev_list,
4c688120 3026 N_("read revision arguments from standard input")),
99fb6e04 3027 { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
4c688120 3028 N_("limit the objects to those that are not yet packed"),
99fb6e04
NTND
3029 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
3030 { OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
4c688120 3031 N_("include objects reachable from any reference"),
99fb6e04
NTND
3032 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
3033 { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
4c688120 3034 N_("include objects referred to by reflog entries"),
99fb6e04 3035 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
c90f9e13
JK
3036 { OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
3037 N_("include objects referred to by the index"),
3038 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
99fb6e04 3039 OPT_BOOL(0, "stdout", &pack_to_stdout,
4c688120 3040 N_("output pack to stdout")),
99fb6e04 3041 OPT_BOOL(0, "include-tag", &include_tag,
4c688120 3042 N_("include tag objects that refer to objects to be packed")),
99fb6e04 3043 OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
4c688120 3044 N_("keep unreachable objects")),
e26a8c47
JK
3045 OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
3046 N_("pack loose unreachable objects")),
4c688120
NTND
3047 { OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
3048 N_("unpack unreachable objects newer than <time>"),
7e52f566 3049 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
99fb6e04 3050 OPT_BOOL(0, "thin", &thin,
4c688120 3051 N_("create thin packs")),
2dacf26d 3052 OPT_BOOL(0, "shallow", &shallow,
3053 N_("create packs suitable for shallow fetches")),
99fb6e04 3054 OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
4c688120 3055 N_("ignore packs that have companion .keep file")),
99fb6e04 3056 OPT_INTEGER(0, "compression", &pack_compression_level,
4c688120 3057 N_("pack compression level")),
99fb6e04 3058 OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
4c688120 3059 N_("do not hide commits by grafts"), 0),
6b8fda2d
VM
3060 OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
3061 N_("use a bitmap index if available to speed up counting objects")),
7cc8f971
VM
3062 OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
3063 N_("write a bitmap index together with the pack index")),
9535ce73
JH
3064 OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
3065 { OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
3066 N_("handling for missing objects"), PARSE_OPT_NONEG,
3067 option_parse_missing_action },
0c16cd49
JT
3068 OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
3069 N_("do not pack objects in promisor packfiles")),
99fb6e04
NTND
3070 OPT_END(),
3071 };
8d1d8f83 3072
0c6804ab
NTND
3073 if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
3074 BUG("too many dfs states, increase OE_DFS_STATE_BITS");
3075
afc711b8 3076 check_replace_refs = 0;
dae556bd 3077
ebcfb379 3078 reset_pack_idx_option(&pack_idx_opts);
ef90d6d4 3079 git_config(git_pack_config, NULL);
b5d97e6b
JH
3080
3081 progress = isatty(2);
99fb6e04
NTND
3082 argc = parse_options(argc, argv, prefix, pack_objects_options,
3083 pack_usage, 0);
b5d97e6b 3084
99fb6e04
NTND
3085 if (argc) {
3086 base_name = argv[0];
3087 argc--;
b5d97e6b 3088 }
99fb6e04
NTND
3089 if (pack_to_stdout != !base_name || argc)
3090 usage_with_options(pack_usage, pack_objects_options);
b5d97e6b 3091
b5c0cbd8
NTND
3092 if (depth >= (1 << OE_DEPTH_BITS)) {
3093 warning(_("delta chain depth %d is too deep, forcing %d"),
3094 depth, (1 << OE_DEPTH_BITS) - 1);
3095 depth = (1 << OE_DEPTH_BITS) - 1;
3096 }
0cb3c142
NTND
3097 if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
3098 warning(_("pack.deltaCacheLimit is too high, forcing %d"),
3099 (1U << OE_Z_DELTA_BITS) - 1);
3100 cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
3101 }
b5c0cbd8 3102
edfbb2aa 3103 argv_array_push(&rp, "pack-objects");
99fb6e04
NTND
3104 if (thin) {
3105 use_internal_rev_list = 1;
2dacf26d 3106 argv_array_push(&rp, shallow
3107 ? "--objects-edge-aggressive"
3108 : "--objects-edge");
99fb6e04 3109 } else
edfbb2aa 3110 argv_array_push(&rp, "--objects");
b5d97e6b 3111
99fb6e04
NTND
3112 if (rev_list_all) {
3113 use_internal_rev_list = 1;
edfbb2aa 3114 argv_array_push(&rp, "--all");
99fb6e04
NTND
3115 }
3116 if (rev_list_reflog) {
3117 use_internal_rev_list = 1;
edfbb2aa 3118 argv_array_push(&rp, "--reflog");
99fb6e04 3119 }
c90f9e13
JK
3120 if (rev_list_index) {
3121 use_internal_rev_list = 1;
3122 argv_array_push(&rp, "--indexed-objects");
99fb6e04
NTND
3123 }
3124 if (rev_list_unpacked) {
3125 use_internal_rev_list = 1;
edfbb2aa 3126 argv_array_push(&rp, "--unpacked");
99fb6e04 3127 }
b5d97e6b 3128
0c16cd49
JT
3129 if (exclude_promisor_objects) {
3130 use_internal_rev_list = 1;
3131 fetch_if_missing = 0;
3132 argv_array_push(&rp, "--exclude-promisor-objects");
3133 }
3134
99fb6e04
NTND
3135 if (!reuse_object)
3136 reuse_delta = 0;
3137 if (pack_compression_level == -1)
3138 pack_compression_level = Z_DEFAULT_COMPRESSION;
3139 else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
3140 die("bad pack compression level %d", pack_compression_level);
0c45d258
JH
3141
3142 if (!delta_search_threads) /* --threads=0 means autodetect */
3143 delta_search_threads = online_cpus();
3144
99fb6e04
NTND
3145#ifdef NO_PTHREADS
3146 if (delta_search_threads != 1)
2b34e486 3147 warning("no threads support, ignoring --threads");
99fb6e04 3148#endif
2b84b5a8
JS
3149 if (!pack_to_stdout && !pack_size_limit)
3150 pack_size_limit = pack_size_limit_cfg;
01c12a23
DH
3151 if (pack_to_stdout && pack_size_limit)
3152 die("--max-pack-size cannot be used to build a pack for transfer.");
07cf0f24
NP
3153 if (pack_size_limit && pack_size_limit < 1024*1024) {
3154 warning("minimum pack size limit is 1 MiB");
3155 pack_size_limit = 1024*1024;
3156 }
01c12a23 3157
8d1d8f83
JH
3158 if (!pack_to_stdout && thin)
3159 die("--thin cannot be used to build an indexable pack.");
b5d97e6b 3160
ca11b212
NP
3161 if (keep_unreachable && unpack_unreachable)
3162 die("--keep-unreachable and --unpack-unreachable are incompatible.");
b1e757f3
JK
3163 if (!rev_list_all || !rev_list_reflog || !rev_list_index)
3164 unpack_unreachable_expiration = 0;
ca11b212 3165
9535ce73
JH
3166 if (filter_options.choice) {
3167 if (!pack_to_stdout)
3168 die("cannot use --filter without --stdout.");
3169 use_bitmap_index = 0;
3170 }
3171
645c432d
KS
3172 /*
3173 * "soft" reasons not to use bitmaps - for on-disk repack by default we want
3174 *
3175 * - to produce good pack (with bitmap index not-yet-packed objects are
3176 * packed in suboptimal order).
3177 *
3178 * - to use more robust pack-generation codepath (avoiding possible
3179 * bugs in bitmap code and possible bitmap index corruption).
3180 */
3181 if (!pack_to_stdout)
3182 use_bitmap_index_default = 0;
3183
3184 if (use_bitmap_index < 0)
3185 use_bitmap_index = use_bitmap_index_default;
3186
3187 /* "hard" reasons not to use bitmaps; these just won't work at all */
3188 if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
6b8fda2d
VM
3189 use_bitmap_index = 0;
3190
7cc8f971
VM
3191 if (pack_to_stdout || !rev_list_all)
3192 write_bitmap_index = 0;
3193
4f366275
NP
3194 if (progress && all_progress_implied)
3195 progress = 2;
3196
56dfeb62
JK
3197 if (ignore_packed_keep) {
3198 struct packed_git *p;
a80d72db 3199 for (p = get_packed_git(the_repository); p; p = p->next)
56dfeb62
JK
3200 if (p->pack_local && p->pack_keep)
3201 break;
3202 if (!p) /* no keep-able packs found */
3203 ignore_packed_keep = 0;
3204 }
3205 if (local) {
3206 /*
3207 * unlike ignore_packed_keep above, we do not want to
3208 * unset "local" based on looking at packs, as it
3209 * also covers non-local objects
3210 */
3211 struct packed_git *p;
a80d72db 3212 for (p = get_packed_git(the_repository); p; p = p->next) {
56dfeb62
JK
3213 if (!p->pack_local) {
3214 have_non_local_packs = 1;
3215 break;
3216 }
3217 }
3218 }
b5d97e6b 3219
43fa44fa
NTND
3220 prepare_packing_data(&to_pack);