Update messages in preparation for i18n
[git/git.git] / builtin / pack-objects.c
#include "builtin.h"
#include "cache.h"
#include "repository.h"
#include "config.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
#include "list.h"
#include "packfile.h"
#include "object-store.h"
#include "dir.h"

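/*
 * The macros below are thin wrappers around the oe_*() accessors from
 * pack-objects.h; they all operate on the file-scope "to_pack" structure
 * declared further down, so callers can refer to an entry's size, delta
 * and delta-family links without repeating the &to_pack argument.
 */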
#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
#define SIZE(obj) oe_size(&to_pack, obj)
#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
#define DELTA(obj) oe_delta(&to_pack, obj)
#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)

static const char *pack_usage[] = {
	N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
	N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
	NULL
};

/*
 * Objects we are going to pack are collected in the `to_pack` structure.
 * It contains an array (dynamically expanded) of the object data, and a map
 * that can resolve SHA1s to their position in the array.
 */
static struct packing_data to_pack;

static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written, nr_seen;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static timestamp_t unpack_unreachable_expiration;
static int pack_loose_unreachable;
static int local;
static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep_on_disk;
static int ignore_packed_keep_in_core;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int num_preferred_base;
static struct progress *progress_state;

static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
static off_t reuse_packfile_offset;

static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
static int write_bitmap_index;
static uint16_t write_bitmap_options;

static int exclude_promisor_objects;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

static struct list_objects_filter_options filter_options;

enum missing_action {
	MA_ERROR = 0,      /* fail if any missing objects are encountered */
	MA_ALLOW_ANY,      /* silently allow ALL missing objects */
	MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;

/*
 * Indexed commits
 */
static struct commit **indexed_commits;
static unsigned int indexed_commits_nr;
static unsigned int indexed_commits_alloc;

static void index_commit_for_bitmap(struct commit *commit)
{
	if (indexed_commits_nr >= indexed_commits_alloc) {
		indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
		REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
	}

	indexed_commits[indexed_commits_nr++] = commit;
}

static void *get_delta(struct object_entry *entry)
{
	unsigned long size, base_size, delta_size;
	void *buf, *base_buf, *delta_buf;
	enum object_type type;

	buf = read_object_file(&entry->idx.oid, &type, &size);
	if (!buf)
		die("unable to read %s", oid_to_hex(&entry->idx.oid));
	base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
				    &base_size);
	if (!base_buf)
		die("unable to read %s",
		    oid_to_hex(&DELTA(entry)->idx.oid));
	delta_buf = diff_delta(base_buf, base_size,
			       buf, size, &delta_size, 0);
	/*
	 * We successfully computed this delta once but dropped it for
	 * memory reasons. Something is very wrong if this time we
	 * recompute and create a different delta.
	 */
	if (!delta_buf || delta_size != DELTA_SIZE(entry))
		BUG("delta size changed");
	free(buf);
	free(base_buf);
	return delta_buf;
}

static unsigned long do_compress(void **pptr, unsigned long size)
{
	git_zstream stream;
	void *in, *out;
	unsigned long maxsize;

	git_deflate_init(&stream, pack_compression_level);
	maxsize = git_deflate_bound(&stream, size);

	in = *pptr;
	out = xmalloc(maxsize);
	*pptr = out;

	stream.next_in = in;
	stream.avail_in = size;
	stream.next_out = out;
	stream.avail_out = maxsize;
	while (git_deflate(&stream, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&stream);

	free(in);
	return stream.total_out;
}

static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
					   const struct object_id *oid)
{
	git_zstream stream;
	unsigned char ibuf[1024 * 16];
	unsigned char obuf[1024 * 16];
	unsigned long olen = 0;

	git_deflate_init(&stream, pack_compression_level);

	for (;;) {
		ssize_t readlen;
		int zret = Z_OK;
		readlen = read_istream(st, ibuf, sizeof(ibuf));
		if (readlen == -1)
			die(_("unable to read %s"), oid_to_hex(oid));

		stream.next_in = ibuf;
		stream.avail_in = readlen;
		while ((stream.avail_in || readlen == 0) &&
		       (zret == Z_OK || zret == Z_BUF_ERROR)) {
			stream.next_out = obuf;
			stream.avail_out = sizeof(obuf);
			zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
			hashwrite(f, obuf, stream.next_out - obuf);
			olen += stream.next_out - obuf;
		}
		if (stream.avail_in)
			die(_("deflate error (%d)"), zret);
		if (readlen == 0) {
			if (zret != Z_STREAM_END)
				die(_("deflate error (%d)"), zret);
			break;
		}
	}
	git_deflate_end(&stream);
	return olen;
}

/*
 * we are going to reuse the existing object data as is. make
 * sure it is not corrupt.
 */
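/*
 * check_pack_inflate() below does that by inflating the candidate pack
 * data into a small scratch buffer (the bytes themselves are thrown away)
 * and verifying that the stream ends cleanly after consuming exactly
 * "len" input bytes and producing the expected number of output bytes.
 */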
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct hashfile *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		hashwrite(f, in, avail);
		offset += avail;
		len -= avail;
	}
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{
	unsigned long size, datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	enum object_type type;
	void *buf;
	struct git_istream *st = NULL;
	const unsigned hashsz = the_hash_algo->rawsz;

	if (!usable_delta) {
		if (oe_type(entry) == OBJ_BLOB &&
		    oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
		    (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
			buf = NULL;
		else {
			buf = read_object_file(&entry->idx.oid, &type, &size);
			if (!buf)
				die(_("unable to read %s"),
				    oid_to_hex(&entry->idx.oid));
		}
		/*
		 * make sure no cached delta data remains from a
		 * previous attempt before a pack split occurred.
		 */
		FREE_AND_NULL(entry->delta_data);
		entry->z_delta_size = 0;
	} else if (entry->delta_data) {
		size = DELTA_SIZE(entry);
		buf = entry->delta_data;
		entry->delta_data = NULL;
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	} else {
		buf = get_delta(entry);
		size = DELTA_SIZE(entry);
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	}

	if (st)	/* large blob case, just assume we don't compress well */
		datalen = size;
	else if (entry->z_delta_size)
		datalen = entry->z_delta_size;
	else
		datalen = do_compress(&buf, size);

	/*
	 * The object header is a byte of 'type' followed by zero or
	 * more bytes of length.
	 */
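	/*
	 * For illustration, with the usual pack encoding the first byte
	 * packs the type into bits 6-4 and the low four bits of the size
	 * into bits 3-0, with bit 7 set when more size bytes follow (each
	 * later byte carrying seven further bits).  A 10-byte blob
	 * (type 3) is thus the single byte 0x3a, while a 300-byte blob
	 * becomes 0xbc 0x12 (0x80|0x30|0x0c, then 300 >> 4 == 0x12).
	 */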
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, size);

	if (type == OBJ_OFS_DELTA) {
		/*
		 * Deltas with relative base contain an additional
		 * encoding of the relative offset for the delta
		 * base from this object's position in the pack.
		 */
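		/*
		 * The loop below emits that offset in the big-endian,
		 * base-128 form the pack format expects: seven value bits
		 * per byte, high bit set on every byte except the last,
		 * and each continuation byte stores (chunk - 1) so small
		 * offsets stay short.  For example, an offset of 1000 is
		 * written as 0x86 0x68 and decoded back as
		 * ((0x06 + 1) << 7) + 0x68 == 1000.
		 */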
		off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
	} else if (type == OBJ_REF_DELTA) {
		/*
		 * Deltas with a base reference contain
		 * additional bytes for the base object ID.
		 */
		if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
		hdrlen += hashsz;
	} else {
		if (limit && hdrlen + datalen + hashsz >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
	}
	if (st) {
		datalen = write_large_blob_data(st, f, &entry->idx.oid);
		close_istream(st);
	} else {
		hashwrite(f, buf, datalen);
		free(buf);
	}

	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{
	struct packed_git *p = IN_PACK(entry);
	struct pack_window *w_curs = NULL;
	struct revindex_entry *revidx;
	off_t offset;
	enum object_type type = oe_type(entry);
	off_t datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	const unsigned hashsz = the_hash_algo->rawsz;
	unsigned long entry_size = SIZE(entry);

	if (DELTA(entry))
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, entry_size);

	offset = entry->in_pack_offset;
	revidx = find_pack_revindex(p, offset);
	datalen = revidx[1].offset - offset;
	if (!pack_to_stdout && p->index_version > 1 &&
	    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
		error("bad packed object CRC for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	offset += entry->in_pack_header_size;
	datalen -= entry->in_pack_header_size;

	if (!pack_to_stdout && p->index_version == 1 &&
	    check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
		error("corrupt packed object for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	if (type == OBJ_OFS_DELTA) {
		off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
		reused_delta++;
	} else if (type == OBJ_REF_DELTA) {
		if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
		hdrlen += hashsz;
		reused_delta++;
	} else {
		if (limit && hdrlen + datalen + hashsz >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
	}
	copy_pack_data(f, p, &w_curs, offset, datalen);
	unuse_pack(&w_curs);
	reused++;
	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct hashfile *f,
			  struct object_entry *entry,
			  off_t write_offset)
{
	unsigned long limit;
	off_t len;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!DELTA(entry))
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (DELTA(entry)->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (DELTA(entry)->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!IN_PACK(entry))
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (oe_type(entry) == OBJ_REF_DELTA ||
		 oe_type(entry) == OBJ_OFS_DELTA)
				/* check_object() decided it for us ... */
		to_reuse = usable_delta;
				/* ... but pack split may override that */
	else if (oe_type(entry) != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (DELTA(entry))
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse)
		len = write_no_reuse_object(f, entry, limit, usable_delta);
	else
		len = write_reuse_object(f, entry, limit, usable_delta);
	if (!len)
		return 0;

	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return len;
}

enum write_one_status {
	WRITE_ONE_SKIP = -1, /* already written */
	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
	WRITE_ONE_WRITTEN = 1, /* normal */
	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct hashfile *f,
				       struct object_entry *e,
				       off_t *offset)
{
	off_t size;
	int recursing;

	/*
	 * we set offset to 1 (which is an impossible value) to mark
	 * the fact that this object is involved in "write its base
	 * first before writing a deltified object" recursion.
	 */
	recursing = (e->idx.offset == 1);
	if (recursing) {
		warning("recursive delta detected for object %s",
			oid_to_hex(&e->idx.oid));
		return WRITE_ONE_RECURSIVE;
	} else if (e->idx.offset || e->preferred_base) {
		/* offset is non zero if object is written already. */
		return WRITE_ONE_SKIP;
	}

	/* if we are deltified, write out base object first. */
	if (DELTA(e)) {
		e->idx.offset = 1; /* now recurse */
		switch (write_one(f, DELTA(e), offset)) {
		case WRITE_ONE_RECURSIVE:
			/* we cannot depend on this one */
			SET_DELTA(e, NULL);
			break;
		default:
			break;
		case WRITE_ONE_BREAK:
			e->idx.offset = recursing;
			return WRITE_ONE_BREAK;
		}
	}

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = recursing;
		return WRITE_ONE_BREAK;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die("pack too large for current definition of off_t");
	*offset += size;
	return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const struct object_id *oid, int flag,
		       void *cb_data)
{
	struct object_id peeled;
	struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, &peeled)) {
		entry = packlist_find(&to_pack, peeled.hash, NULL);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	if (e->filled)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

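/*
 * add_descendants_to_write_order() below walks an entire delta family
 * iteratively rather than recursively: delta_child links take it down a
 * level, delta_sibling links move it across a level, and delta links walk
 * it back up once a subtree is exhausted, so even very deep delta chains
 * do not require recursion.
 */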
static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (DELTA_CHILD(e)) {
			add_to_order = 1;
			e = DELTA_CHILD(e);
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (DELTA_SIBLING(e)) {
				e = DELTA_SIBLING(e);
				continue;
			}
			/* go back to our parent node */
			e = DELTA(e);
			while (e && !DELTA_SIBLING(e)) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = DELTA(e);
			}
			if (!e) {
				/* done- we hit our original root node */
				return;
			}
			/* pass it off to sibling at this level */
			e = DELTA_SIBLING(e);
		}
	};
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; DELTA(root); root = DELTA(root))
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}

static struct object_entry **compute_write_order(void)
{
	unsigned int i, wo_end, last_untagged;

	struct object_entry **wo;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		SET_DELTA_CHILD(&objects[i], NULL);
		SET_DELTA_SIBLING(&objects[i], NULL);
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = to_pack.nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!DELTA(e))
			continue;
		/* Mark me as the first child */
		e->delta_sibling_idx = DELTA(e)->delta_child_idx;
		SET_DELTA_CHILD(DELTA(e), e);
	}

	/*
	 * Mark objects that are at the tip of tags.
	 */
	for_each_tag_ref(mark_tagged, NULL);

	/*
	 * Give the objects in the original recency order until
	 * we see a tagged tip.
	 */
	ALLOC_ARRAY(wo, to_pack.nr_objects);
	for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (oe_type(&objects[i]) != OBJ_COMMIT &&
		    oe_type(&objects[i]) != OBJ_TAG)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (oe_type(&objects[i]) != OBJ_TREE)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (!objects[i].filled)
			add_family_to_write_order(wo, &wo_end, &objects[i]);
	}

	if (wo_end != to_pack.nr_objects)
		die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

	return wo;
}

static off_t write_reused_pack(struct hashfile *f)
{
	unsigned char buffer[8192];
	off_t to_write, total;
	int fd;

	if (!is_pack_valid(reuse_packfile))
		die("packfile is invalid: %s", reuse_packfile->pack_name);

	fd = git_open(reuse_packfile->pack_name);
	if (fd < 0)
		die_errno("unable to open packfile for reuse: %s",
			  reuse_packfile->pack_name);

	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
		die_errno("unable to seek in reused packfile");

	if (reuse_packfile_offset < 0)
		reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;

	total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

	while (to_write) {
		int read_pack = xread(fd, buffer, sizeof(buffer));

		if (read_pack <= 0)
			die_errno("unable to read from reused packfile");

		if (read_pack > to_write)
			read_pack = to_write;

		hashwrite(f, buffer, read_pack);
		to_write -= read_pack;

		/*
		 * We don't know the actual number of objects written,
		 * only how many bytes written, how many bytes total, and
		 * how many objects total. So we can fake it by pretending all
		 * objects we are writing are the same size. This gives us a
		 * smooth progress meter, and at the end it matches the true
		 * answer.
		 */
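		/*
		 * For example, when 1000 objects are being reused and half
		 * of the reusable bytes have gone out, the meter shows
		 * 1000 * 0.5 == 500 objects, and it reaches exactly 1000
		 * once to_write drops to zero.
		 */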
		written = reuse_packfile_objects *
				(((double)(total - to_write)) / total);
		display_progress(progress_state, written);
	}

	close(fd);
	written = reuse_packfile_objects;
	display_progress(progress_state, written);
	return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct hashfile *f;
	off_t offset;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress(_("Writing objects"), nr_result);
	ALLOC_ARRAY(written_list, to_pack.nr_objects);
	write_order = compute_write_order();

	do {
		struct object_id oid;
		char *pack_tmp_name = NULL;

		if (pack_to_stdout)
			f = hashfd_throughput(1, "<stdout>", progress_state);
		else
			f = create_tmp_packfile(&pack_tmp_name);

		offset = write_pack_header(f, nr_remaining);

		if (reuse_packfile) {
			off_t packfile_size;
			assert(pack_to_stdout);

			packfile_size = write_reused_pack(f);
			offset += packfile_size;
		}

		nr_written = 0;
		for (; i < to_pack.nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import
		 */
		if (pack_to_stdout) {
			finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
		} else {
			int fd = finalize_hashfile(f, oid.hash, 0);
			fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
						 nr_written, oid.hash, offset);
			close(fd);
			if (write_bitmap_index) {
				warning(_(no_split_warning));
				write_bitmap_index = 0;
			}
		}

		if (!pack_to_stdout) {
			struct stat st;
			struct strbuf tmpname = STRBUF_INIT;

			/*
			 * Packs are runtime accessed in their mtime
			 * order since newer packs are more likely to contain
			 * younger objects.  So if we are creating multiple
			 * packs then we should modify the mtime of later ones
			 * to preserve this property.
			 */
			if (stat(pack_tmp_name, &st) < 0) {
				warning_errno("failed to stat %s", pack_tmp_name);
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(pack_tmp_name, &utb) < 0)
					warning_errno("failed utime() on %s", pack_tmp_name);
			}

			strbuf_addf(&tmpname, "%s-", base_name);

			if (write_bitmap_index) {
				bitmap_writer_set_checksum(oid.hash);
				bitmap_writer_build_type_index(
					&to_pack, written_list, nr_written);
			}

			finish_tmp_packfile(&tmpname, pack_tmp_name,
					    written_list, nr_written,
					    &pack_idx_opts, oid.hash);

			if (write_bitmap_index) {
				strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid));

				stop_progress(&progress_state);

				bitmap_writer_show_progress(progress);
				bitmap_writer_reuse_bitmaps(&to_pack);
				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
				bitmap_writer_build(&to_pack);
				bitmap_writer_finish(written_list, nr_written,
						     tmpname.buf, write_bitmap_options);
				write_bitmap_index = 0;
			}

			strbuf_release(&tmpname);
			free(pack_tmp_name);
			puts(oid_to_hex(&oid));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < to_pack.nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die("wrote %"PRIu32" objects while expecting %"PRIu32,
		    written, nr_result);
}

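/*
 * no_try_delta() consults the "delta" gitattribute for the path; a path
 * marked "-delta" in .gitattributes reports 1 here, which makes the
 * packing code skip delta compression for that object entirely.
 */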
static int no_try_delta(const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("delta", NULL);
	if (git_check_attr(path, check))
		return 0;
	if (ATTR_FALSE(check->items[0].value))
		return 1;
	return 0;
}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const struct object_id *oid,
				int exclude,
				uint32_t *index_pos)
{
	struct object_entry *entry;

	entry = packlist_find(&to_pack, oid->hash, index_pos);
	if (!entry)
		return 0;

	if (exclude) {
		if (!entry->preferred_base)
			nr_result--;
		entry->preferred_base = 1;
	}

	return 1;
}

static int want_found_object(int exclude, struct packed_git *p)
{
	if (exclude)
		return 1;
	if (incremental)
		return 0;

	/*
	 * When asked to do --local (do not include an object that appears in a
	 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
	 * an object that appears in a pack marked with .keep), finding a pack
	 * that matches the criteria is sufficient for us to decide to omit it.
	 * However, even if this pack does not satisfy the criteria, we need to
	 * make sure no copy of this object appears in _any_ pack that makes us
	 * omit the object, so we need to check all the packs.
	 *
	 * We can however first check whether these options can possibly matter;
	 * if they do not matter we know we want the object in the generated pack.
	 * Otherwise, we signal "-1" at the end to tell the caller that we do
	 * not know either way, and it needs to check more packs.
	 */
	if (!ignore_packed_keep_on_disk &&
	    !ignore_packed_keep_in_core &&
	    (!local || !have_non_local_packs))
		return 1;

	if (local && !p->pack_local)
		return 0;
	if (p->pack_local &&
	    ((ignore_packed_keep_on_disk && p->pack_keep) ||
	     (ignore_packed_keep_in_core && p->pack_keep_in_core)))
		return 0;

	/* we don't know yet; keep looking for more packs */
	return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const struct object_id *oid,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{
	int want;
	struct list_head *pos;

	if (!exclude && local && has_loose_object_nonlocal(oid))
		return 0;

	/*
	 * If we already know the pack the object lives in, start checks from that
	 * pack - in the usual case when neither --local was given nor .keep files
	 * are present we will determine the answer right now.
	 */
	if (*found_pack) {
		want = want_found_object(exclude, *found_pack);
		if (want != -1)
			return want;
	}
	list_for_each(pos, get_packed_git_mru(the_repository)) {
		struct packed_git *p = list_entry(pos, struct packed_git, mru);
		off_t offset;

		if (p == *found_pack)
			offset = *found_offset;
		else
			offset = find_pack_entry_one(oid->hash, p);

		if (offset) {
			if (!*found_pack) {
				if (!is_pack_valid(p))
					continue;
				*found_offset = offset;
				*found_pack = p;
			}
			want = want_found_object(exclude, p);
			if (!exclude && want > 0)
				list_move(&p->mru,
					  get_packed_git_mru(the_repository));
			if (want != -1)
				return want;
		}
	}

	return 1;
}

static void create_object_entry(const struct object_id *oid,
				enum object_type type,
				uint32_t hash,
				int exclude,
				int no_try_delta,
				uint32_t index_pos,
				struct packed_git *found_pack,
				off_t found_offset)
{
	struct object_entry *entry;

	entry = packlist_alloc(&to_pack, oid->hash, index_pos);
	entry->hash = hash;
	oe_set_type(entry, type);
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		oe_set_in_pack(&to_pack, entry, found_pack);
		entry->in_pack_offset = found_offset;
	}

	entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const struct object_id *oid, enum object_type type,
			    const char *name, int exclude)
{
	struct packed_git *found_pack = NULL;
	off_t found_offset = 0;
	uint32_t index_pos;

	display_progress(progress_state, ++nr_seen);

	if (have_duplicate_entry(oid, exclude, &index_pos))
		return 0;

	if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {
		/* The pack is missing an object, so it will not have closure */
		if (write_bitmap_index) {
			warning(_(no_closure_warning));
			write_bitmap_index = 0;
		}
		return 0;
	}

	create_object_entry(oid, type, pack_name_hash(name),
			    exclude, name && no_try_delta(name),
			    index_pos, found_pack, found_offset);
	return 1;
}

static int add_object_entry_from_bitmap(const struct object_id *oid,
					enum object_type type,
					int flags, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{
	uint32_t index_pos;

	display_progress(progress_state, ++nr_seen);

	if (have_duplicate_entry(oid, 0, &index_pos))
		return 0;

	if (!want_object_in_pack(oid, 0, &pack, &offset))
		return 0;

	create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
	return 1;
}

struct pbase_tree_cache {
	struct object_id oid;
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

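/*
 * The 256-entry table below is indexed by the first byte of a tree's
 * object id; pbase_tree_get() then probes up to eight consecutive slots
 * (wrapping around) before either evicting an unreferenced entry or
 * handing back an uncached, temporary copy.
 */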
static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const struct object_id *oid)
{
	return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it or find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(oid);
	int available_ix = -1;

	/* pbase-tree-cache acts as a limited hashtable.
	 * your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && !oidcmp(&ent->oid, oid)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_object_file(oid, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	oidcpy(&nent->oid, oid);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

	while (tree_entry(tree,&entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(&entry) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(entry.oid,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name+cmplen+1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(entry.oid);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
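/*
 * done_pbase_paths[] holds one hash per pathname we have already fed to
 * add_preferred_base_object().  The array is kept sorted (descending) so
 * that done_pbase_path_pos() can binary-search it; a negative return of
 * -pos-1 encodes the insertion point for a hash that is not yet present.
 */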
static int done_pbase_path_pos(unsigned hash)
{
	int lo = 0;
	int hi = done_pbase_paths_num;
	while (lo < hi) {
		int mi = lo + (hi - lo) / 2;
		if (done_pbase_paths[mi] == hash)
			return mi;
		if (done_pbase_paths[mi] < hash)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}

static int check_pbase_path(unsigned hash)
{
	int pos = done_pbase_path_pos(hash);
	if (0 <= pos)
		return 1;
	pos = -pos - 1;
	ALLOC_GROW(done_pbase_paths,
		   done_pbase_paths_num + 1,
		   done_pbase_paths_alloc);
	done_pbase_paths_num++;
	if (pos < done_pbase_paths_num)
		MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,
			   done_pbase_paths_num - pos - 1);
	done_pbase_paths[pos] = hash;
	return 0;
}

static void add_preferred_base_object(const char *name)
{
	struct pbase_tree *it;
	int cmplen;
	unsigned hash = pack_name_hash(name);

	if (!num_preferred_base || check_pbase_path(hash))
		return;

	cmplen = name_cmp_len(name);
	for (it = pbase_tree; it; it = it->next) {
		if (cmplen == 0) {
			add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);
		}
		else {
			struct tree_desc tree;
			init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
			add_pbase_object(&tree, name, cmplen, name);
		}
	}
}

static void add_preferred_base(struct object_id *oid)
{
	struct pbase_tree *it;
	void *data;
	unsigned long size;
	struct object_id tree_oid;

	if (window <= num_preferred_base++)
		return;

	data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
	if (!data)
		return;

	for (it = pbase_tree; it; it = it->next) {
		if (!oidcmp(&it->pcache.oid, &tree_oid)) {
			free(data);
			return;
		}
	}

	it = xcalloc(1, sizeof(*it));
	it->next = pbase_tree;
	pbase_tree = it;

	oidcpy(&it->pcache.oid, &tree_oid);
	it->pcache.tree_data = data;
	it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
	struct pbase_tree *it;
	unsigned i;

	it = pbase_tree;
	pbase_tree = NULL;
	while (it) {
		struct pbase_tree *tmp = it;
		it = tmp->next;
		free(tmp->pcache.tree_data);
		free(tmp);
	}

	for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
		if (!pbase_tree_cache[i])
			continue;
		free(pbase_tree_cache[i]->tree_data);
		FREE_AND_NULL(pbase_tree_cache[i]);
	}

	FREE_AND_NULL(done_pbase_paths);
	done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

static void check_object(struct object_entry *entry)
{
	unsigned long canonical_size;

	if (IN_PACK(entry)) {
		struct packed_git *p = IN_PACK(entry);
		struct pack_window *w_curs = NULL;
		const unsigned char *base_ref = NULL;
		struct object_entry *base_entry;
		unsigned long used, used_0;
		unsigned long avail;
		off_t ofs;
		unsigned char *buf, c;
		enum object_type type;
		unsigned long in_pack_size;

		buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

		/*
		 * We want in_pack_type even if we do not reuse delta
		 * since non-delta representations could still be reused.
		 */
		used = unpack_object_header_buffer(buf, avail,
						   &type,
						   &in_pack_size);
		if (used == 0)
			goto give_up;

		if (type < 0)
			BUG("invalid type %d", type);
		entry->in_pack_type = type;

		/*
		 * Determine if this is a delta and if so whether we can
		 * reuse it or not.  Otherwise let's find out as cheaply as
		 * possible what the actual type and size for this object is.
		 */
		switch (entry->in_pack_type) {
		default:
			/* Not a delta hence we've already got all we need. */
			oe_set_type(entry, entry->in_pack_type);
			SET_SIZE(entry, in_pack_size);
			entry->in_pack_header_size = used;
			if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		case OBJ_REF_DELTA:
			if (reuse_delta && !entry->preferred_base)
				base_ref = use_pack(p, &w_curs,
						    entry->in_pack_offset + used, NULL);
			entry->in_pack_header_size = used + the_hash_algo->rawsz;
			break;
		case OBJ_OFS_DELTA:
			buf = use_pack(p, &w_curs,
				       entry->in_pack_offset + used, NULL);
			used_0 = 0;
			c = buf[used_0++];
			ofs = c & 127;
			while (c & 128) {
				ofs += 1;
				if (!ofs || MSB(ofs, 7)) {
					error("delta base offset overflow in pack for %s",
					      oid_to_hex(&entry->idx.oid));
					goto give_up;
				}
				c = buf[used_0++];
				ofs = (ofs << 7) + (c & 127);
			}
			ofs = entry->in_pack_offset - ofs;
			if (ofs <= 0 || ofs >= entry->in_pack_offset) {
				error("delta base offset out of bound for %s",
				      oid_to_hex(&entry->idx.oid));
				goto give_up;
			}
			if (reuse_delta && !entry->preferred_base) {
				struct revindex_entry *revidx;
				revidx = find_pack_revindex(p, ofs);
				if (!revidx)
					goto give_up;
				base_ref = nth_packed_object_sha1(p, revidx->nr);
			}
			entry->in_pack_header_size = used + used_0;
			break;
		}

		if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
			/*
			 * If base_ref was set above that means we wish to
			 * reuse delta data, and we even found that base
			 * in the list of objects we want to pack. Goodie!
			 *
			 * Depth value does not matter - find_deltas() will
			 * never consider reused delta as the base object to
			 * deltify other objects against, in order to avoid
			 * circular deltas.
			 */
			oe_set_type(entry, entry->in_pack_type);
			SET_SIZE(entry, in_pack_size); /* delta size */
			SET_DELTA(entry, base_entry);
			SET_DELTA_SIZE(entry, in_pack_size);
			entry->delta_sibling_idx = base_entry->delta_child_idx;
			SET_DELTA_CHILD(base_entry, entry);
			unuse_pack(&w_curs);
			return;
		}

		if (oe_type(entry)) {
			off_t delta_pos;

			/*
			 * This must be a delta and we already know what the
			 * final object type is.  Let's extract the actual
			 * object size from the delta header.
			 */
			delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
			canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
			if (canonical_size == 0)
				goto give_up;
			SET_SIZE(entry, canonical_size);
			unuse_pack(&w_curs);
			return;
		}

		/*
		 * No choice but to fall back to the recursive delta walk
		 * with sha1_object_info() to find out about the object type
		 * at this point...
		 */
	give_up:
		unuse_pack(&w_curs);
	}

	oe_set_type(entry,
		    oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
	if (entry->type_valid) {
		SET_SIZE(entry, canonical_size);
	} else {
		/*
		 * Bad object type is checked in prepare_pack(). This is
		 * to permit a missing preferred base object to be ignored
		 * as a preferred base.  Doing so can result in a larger
		 * pack file, but the transfer will still take place.
		 */
	}
}

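/*
 * Comparison function for ordering objects by the pack that holds them and
 * then by their offset inside that pack, so that reading back the packed
 * representations proceeds roughly sequentially on disk; objects that are
 * not packed at all compare by object id instead.
 */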
static int pack_offset_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;
	const struct packed_git *a_in_pack = IN_PACK(a);
	const struct packed_git *b_in_pack = IN_PACK(b);

	/* avoid filesystem trashing with loose objects */
	if (!a_in_pack && !b_in_pack)
		return oidcmp(&a->idx.oid, &b->idx.oid);

	if (a_in_pack < b_in_pack)
		return -1;
	if (a_in_pack > b_in_pack)
		return 1;
	return a->in_pack_offset < b->in_pack_offset ? -1 :
	       (a->in_pack_offset > b->in_pack_offset);
}

/*
 * Drop an on-disk delta we were planning to reuse. Naively, this would
 * just involve blanking out the "delta" field, but we have to deal
 * with some extra book-keeping:
 *
 *   1. Removing ourselves from the delta_sibling linked list.
 *
 *   2. Updating our size/type to the non-delta representation. These were
 *      either not recorded initially (size) or overwritten with the delta type
 *      (type) when check_object() decided to reuse the delta.
 *
 *   3. Resetting our delta depth, as we are now a base object.
 */
static void drop_reused_delta(struct object_entry *entry)
{
	unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
	struct object_info oi = OBJECT_INFO_INIT;
	enum object_type type;
	unsigned long size;

	while (*idx) {
		struct object_entry *oe = &to_pack.objects[*idx - 1];

		if (oe == entry)
			*idx = oe->delta_sibling_idx;
		else
			idx = &oe->delta_sibling_idx;
	}
	SET_DELTA(entry, NULL);
	entry->depth = 0;

	oi.sizep = &size;
	oi.typep = &type;
	if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
		/*
		 * We failed to get the info from this pack for some reason;
		 * fall back to sha1_object_info, which may find another copy.
		 * And if that fails, the error will be recorded in oe_type(entry)
		 * and dealt with in prepare_pack().
		 */
		oe_set_type(entry,
			    oid_object_info(the_repository, &entry->idx.oid, &size));
	} else {
		oe_set_type(entry, type);
	}
	SET_SIZE(entry, size);
}

/*
 * Follow the chain of deltas from this entry onward, throwing away any links
 * that cause us to hit a cycle (as determined by the DFS state flags in
 * the entries).
 *
 * We also detect too-long reused chains that would violate our --depth
 * limit.
 */
static void break_delta_chains(struct object_entry *entry)
{
	/*
	 * The actual depth of each object we will write is stored as an int,
	 * as it cannot exceed our int "depth" limit. But before we break
	 * chains based on that limit, we may potentially go as deep as the
	 * number of objects, which is elsewhere bounded to a uint32_t.
	 */
	uint32_t total_depth;
	struct object_entry *cur, *next;

	for (cur = entry, total_depth = 0;
	     cur;
	     cur = DELTA(cur), total_depth++) {
		if (cur->dfs_state == DFS_DONE) {
			/*
			 * We've already seen this object and know it isn't
			 * part of a cycle. We do need to append its depth
			 * to our count.
			 */
			total_depth += cur->depth;
			break;
		}

		/*
		 * We break cycles before looping, so an ACTIVE state (or any
		 * other cruft which made its way into the state variable)
		 * is a bug.
		 */
		if (cur->dfs_state != DFS_NONE)
			BUG("confusing delta dfs state in first pass: %d",
			    cur->dfs_state);

		/*
		 * Now we know this is the first time we've seen the object. If
		 * it's not a delta, we're done traversing, but we'll mark it
		 * done to save time on future traversals.
		 */
		if (!DELTA(cur)) {
			cur->dfs_state = DFS_DONE;
			break;
		}

		/*
		 * Mark ourselves as active and see if the next step causes
		 * us to cycle to another active object. It's important to do
		 * this _before_ we loop, because it impacts where we make the
		 * cut, and thus how our total_depth counter works.
		 * E.g., We may see a partial loop like:
		 *
		 *   A -> B -> C -> D -> B
		 *
		 * Cutting B->C breaks the cycle. But now the depth of A is
		 * only 1, and our total_depth counter is at 3. The size of the
		 * error is always one less than the size of the cycle we
		 * broke. Commits C and D were "lost" from A's chain.
		 *
		 * If we instead cut D->B, then the depth of A is correct at 3.
		 * We keep all commits in the chain that we examined.
		 */
		cur->dfs_state = DFS_ACTIVE;
		if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
			drop_reused_delta(cur);
			cur->dfs_state = DFS_DONE;
			break;
		}
	}

	/*
	 * And now that we've gone all the way to the bottom of the chain, we
	 * need to clear the active flags and set the depth fields as
	 * appropriate. Unlike the loop above, which can quit when it drops a
	 * delta, we need to keep going to look for more depth cuts. So we need
	 * an extra "next" pointer to keep going after we reset cur->delta.
	 */
	for (cur = entry; cur; cur = next) {
		next = DELTA(cur);

		/*
		 * We should have a chain of zero or more ACTIVE states down to
		 * a final DONE. We can quit after the DONE, because either it
		 * has no bases, or we've already handled them in a previous
		 * call.
		 */
		if (cur->dfs_state == DFS_DONE)
			break;
		else if (cur->dfs_state != DFS_ACTIVE)
			BUG("confusing delta dfs state in second pass: %d",
			    cur->dfs_state);

		/*
		 * If the total_depth is more than depth, then we need to snip
		 * the chain into two or more smaller chains that don't exceed
		 * the maximum depth. Most of the resulting chains will contain
		 * (depth + 1) entries (i.e., depth deltas plus one base), and
		 * the last chain (i.e., the one containing entry) will contain
		 * whatever entries are left over, namely
		 * (total_depth % (depth + 1)) of them.
		 *
		 * Since we are iterating towards decreasing depth, we need to
		 * decrement total_depth as we go, and we need to write to the
		 * entry what its final depth will be after all of the
		 * snipping. Since we're snipping into chains of length (depth
		 * + 1) entries, the final depth of an entry will be its
		 * original depth modulo (depth + 1). Any time we encounter an
1753 * entry whose final depth is supposed to be zero, we snip it
1754 * from its delta base, thereby making it so.
4cf2143e 1755 */
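/*
 * Illustrative example (not from the original comments): with --depth=3
 * and total_depth=9 at "entry", the entries end up at depths
 * 1, 0, 3, 2, 1, 0, 3, 2, 1, 0 walking from "entry" down to the original
 * base; each entry that lands on 0 is snipped from its delta base and
 * becomes a base itself, yielding chains of 2, 4 and 4 objects, none
 * deeper than the limit.
 */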
42b766d7
JK
1756 cur->depth = (total_depth--) % (depth + 1);
1757 if (!cur->depth)
1758 drop_reused_delta(cur);
1759
1760 cur->dfs_state = DFS_DONE;
4cf2143e
JK
1761 }
1762}
1763
c323ac7d
LT
1764static void get_object_details(void)
1765{
7cadf491 1766 uint32_t i;
5c49c116
NP
1767 struct object_entry **sorted_by_offset;
1768
5af05043
NTND
1769 if (progress)
1770 progress_state = start_progress(_("Counting objects"),
1771 to_pack.nr_objects);
1772
2834bc27
VM
1773 sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
1774 for (i = 0; i < to_pack.nr_objects; i++)
1775 sorted_by_offset[i] = to_pack.objects + i;
9ed0d8d6 1776 QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);
c323ac7d 1777
2834bc27 1778 for (i = 0; i < to_pack.nr_objects; i++) {
15366280
JH
1779 struct object_entry *entry = sorted_by_offset[i];
1780 check_object(entry);
ac77d0c3
NTND
1781 if (entry->type_valid &&
1782 oe_size_greater_than(&to_pack, entry, big_file_threshold))
15366280 1783 entry->no_try_delta = 1;
5af05043 1784 display_progress(progress_state, i + 1);
15366280 1785 }
5af05043 1786 stop_progress(&progress_state);
3449f8c4 1787
4cf2143e
JK
1788 /*
1789 * This must happen in a second pass, since we rely on the delta
1790 * information for the whole list being completed.
1791 */
1792 for (i = 0; i < to_pack.nr_objects; i++)
1793 break_delta_chains(&to_pack.objects[i]);
1794
5c49c116 1795 free(sorted_by_offset);
c323ac7d
LT
1796}
1797
b904166c
NP
1798/*
1799 * We search for deltas in a list sorted by type, by filename hash, and then
1800 * by size, so that we see progressively smaller and smaller files.
1801 * That's because we prefer deltas to be from the bigger file
1802 * to the smaller -- deletes are potentially cheaper, but perhaps
1803 * more importantly, the bigger file is likely the more recent
1804 * one. The deepest deltas are therefore the oldest objects which are
1805 * less likely to be accessed often.
1806 */
9668cf59 1807static int type_size_sort(const void *_a, const void *_b)
c323ac7d 1808{
9668cf59
NP
1809 const struct object_entry *a = *(struct object_entry **)_a;
1810 const struct object_entry *b = *(struct object_entry **)_b;
fd9b1bae
NTND
1811 enum object_type a_type = oe_type(a);
1812 enum object_type b_type = oe_type(b);
ac77d0c3
NTND
1813 unsigned long a_size = SIZE(a);
1814 unsigned long b_size = SIZE(b);
9668cf59 1815
fd9b1bae 1816 if (a_type > b_type)
27225f2e 1817 return -1;
fd9b1bae 1818 if (a_type < b_type)
27225f2e 1819 return 1;
b904166c 1820 if (a->hash > b->hash)
7a979d99 1821 return -1;
b904166c 1822 if (a->hash < b->hash)
7a979d99 1823 return 1;
b904166c 1824 if (a->preferred_base > b->preferred_base)
c323ac7d 1825 return -1;
b904166c
NP
1826 if (a->preferred_base < b->preferred_base)
1827 return 1;
ac77d0c3 1828 if (a_size > b_size)
b904166c 1829 return -1;
ac77d0c3 1830 if (a_size < b_size)
c323ac7d 1831 return 1;
b904166c 1832 return a < b ? -1 : (a > b); /* newest first */
c323ac7d
LT
1833}
1834
1835struct unpacked {
1836 struct object_entry *entry;
1837 void *data;
f6c7081a 1838 struct delta_index *index;
5a235b5e 1839 unsigned depth;
c323ac7d
LT
1840};
1841
d250626c
NP
1842static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
1843 unsigned long delta_size)
074b2eea
MK
1844{
1845 if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
1846 return 0;
1847
e3dfddb3
MK
1848 if (delta_size < cache_max_small_delta_size)
1849 return 1;
1850
074b2eea
MK
1851 /* cache delta, if objects are large enough compared to delta size */
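 /*
  * E.g. (illustrative numbers): a 4 MiB source (>>20 == 4) and a 4 MiB
  * target (>>21 == 2) give 6, so any delta smaller than about 6 KiB
  * (delta_size >> 10 < 6) is considered cheap enough to cache.
  */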
1852 if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
1853 return 1;
1854
1855 return 0;
1856}
1857
7eb151d6 1858#ifndef NO_PTHREADS
8ecce684 1859
44626dc7 1860static pthread_mutex_t read_mutex;
8ecce684
NP
1861#define read_lock() pthread_mutex_lock(&read_mutex)
1862#define read_unlock() pthread_mutex_unlock(&read_mutex)
1863
44626dc7 1864static pthread_mutex_t cache_mutex;
3c701839
NP
1865#define cache_lock() pthread_mutex_lock(&cache_mutex)
1866#define cache_unlock() pthread_mutex_unlock(&cache_mutex)
1867
44626dc7 1868static pthread_mutex_t progress_mutex;
8ecce684
NP
1869#define progress_lock() pthread_mutex_lock(&progress_mutex)
1870#define progress_unlock() pthread_mutex_unlock(&progress_mutex)
1871
1872#else
1873
e1ef8673
JH
1874#define read_lock() (void)0
1875#define read_unlock() (void)0
1876#define cache_lock() (void)0
1877#define cache_unlock() (void)0
1878#define progress_lock() (void)0
1879#define progress_unlock() (void)0
8ecce684
NP
1880
1881#endif
1882
ac77d0c3
NTND
1883/*
1884 * Return the size of the object without doing any delta
1885 * reconstruction (so non-deltas are true object sizes, but deltas
1886 * return the size of the delta data).
1887 */
1888unsigned long oe_get_size_slow(struct packing_data *pack,
1889 const struct object_entry *e)
1890{
1891 struct packed_git *p;
1892 struct pack_window *w_curs;
1893 unsigned char *buf;
1894 enum object_type type;
1895 unsigned long used, avail, size;
1896
1897 if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
1898 read_lock();
ad635e82 1899 if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
ac77d0c3
NTND
1900 die(_("unable to get size of %s"),
1901 oid_to_hex(&e->idx.oid));
1902 read_unlock();
1903 return size;
1904 }
1905
1906 p = oe_in_pack(pack, e);
1907 if (!p)
1908 BUG("when e->type is a delta, it must belong to a pack");
1909
1910 read_lock();
1911 w_curs = NULL;
1912 buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
1913 used = unpack_object_header_buffer(buf, avail, &type, &size);
1914 if (used == 0)
1915 die(_("unable to parse object header of %s"),
1916 oid_to_hex(&e->idx.oid));
1917
1918 unuse_pack(&w_curs);
1919 read_unlock();
1920 return size;
1921}
1922
f6c7081a 1923static int try_delta(struct unpacked *trg, struct unpacked *src,
ef0316fc 1924 unsigned max_depth, unsigned long *mem_usage)
c323ac7d 1925{
f6c7081a
NP
1926 struct object_entry *trg_entry = trg->entry;
1927 struct object_entry *src_entry = src->entry;
560b25a8 1928 unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
c83f032e 1929 unsigned ref_depth;
21666f1a 1930 enum object_type type;
c323ac7d
LT
1931 void *delta_buf;
1932
1933 /* Don't bother doing diffs between different types */
fd9b1bae 1934 if (oe_type(trg_entry) != oe_type(src_entry))
c323ac7d
LT
1935 return -1;
1936
51d1e83f 1937 /*
15f07e06
JK
1938 * We do not bother to try a delta that we discarded on an
1939 * earlier try, but only when reusing delta data. Note that
1940 * src_entry that is marked as the preferred_base should always
1941 * be considered, as even if we produce a suboptimal delta against
1942 * it, we will still save the transfer cost, as we already know
1943 * the other side has it and we won't send src_entry at all.
51d1e83f 1944 */
43fa44fa
NTND
1945 if (reuse_delta && IN_PACK(trg_entry) &&
1946 IN_PACK(trg_entry) == IN_PACK(src_entry) &&
15f07e06 1947 !src_entry->preferred_base &&
e9195b58
JH
1948 trg_entry->in_pack_type != OBJ_REF_DELTA &&
1949 trg_entry->in_pack_type != OBJ_OFS_DELTA)
51d1e83f
LT
1950 return 0;
1951
898b14ce 1952 /* Let's not bust the allowed depth. */
5a235b5e 1953 if (src->depth >= max_depth)
d116a45a 1954 return 0;
c323ac7d 1955
c3b06a69 1956 /* Now some size filtering heuristics. */
ac77d0c3 1957 trg_size = SIZE(trg_entry);
898eba5e 1958 if (!DELTA(trg_entry)) {
41179100 1959 max_size = trg_size/2 - the_hash_algo->rawsz;
c83f032e
NP
1960 ref_depth = 1;
1961 } else {
0aca34e8 1962 max_size = DELTA_SIZE(trg_entry);
5a235b5e 1963 ref_depth = trg->depth;
c83f032e 1964 }
720fe22d 1965 max_size = (uint64_t)max_size * (max_depth - src->depth) /
c83f032e 1966 (max_depth - ref_depth + 1);
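 /*
  * Worked example (illustrative only): with max_depth=10, an existing
  * 3000-byte delta at ref_depth=2, and a candidate base at depth 7, the
  * cap becomes 3000 * (10 - 7) / (10 - 2 + 1) = 1000 bytes, so a delta
  * against the deeper base only wins if it is considerably smaller.
  */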
c3b06a69
NP
1967 if (max_size == 0)
1968 return 0;
ac77d0c3 1969 src_size = SIZE(src_entry);
560b25a8 1970 sizediff = src_size < trg_size ? trg_size - src_size : 0;
27225f2e 1971 if (sizediff >= max_size)
f527cb8c 1972 return 0;
a1dab41a
BD
1973 if (trg_size < src_size / 32)
1974 return 0;
f6c7081a 1975
560b25a8
NP
1976 /* Load data if not already done */
1977 if (!trg->data) {
8ecce684 1978 read_lock();
b4f5aca4 1979 trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
8ecce684 1980 read_unlock();
2e3404c3
JH
1981 if (!trg->data)
1982 die("object %s cannot be read",
e6a492b7 1983 oid_to_hex(&trg_entry->idx.oid));
560b25a8
NP
1984 if (sz != trg_size)
1985 die("object %s inconsistent object length (%lu vs %lu)",
e6a492b7 1986 oid_to_hex(&trg_entry->idx.oid), sz,
1987 trg_size);
ef0316fc 1988 *mem_usage += sz;
560b25a8
NP
1989 }
1990 if (!src->data) {
8ecce684 1991 read_lock();
b4f5aca4 1992 src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
8ecce684 1993 read_unlock();
71064a95
NP
1994 if (!src->data) {
1995 if (src_entry->preferred_base) {
1996 static int warned = 0;
1997 if (!warned++)
1998 warning("object %s cannot be read",
e6a492b7 1999 oid_to_hex(&src_entry->idx.oid));
71064a95
NP
2000 /*
2001 * Those objects are not included in the
2002 * resulting pack. Be resilient and ignore
2003 * them if they can't be read, in case the
2004 * pack could be created nevertheless.
2005 */
2006 return 0;
2007 }
2e3404c3 2008 die("object %s cannot be read",
e6a492b7 2009 oid_to_hex(&src_entry->idx.oid));
71064a95 2010 }
560b25a8
NP
2011 if (sz != src_size)
2012 die("object %s inconsistent object length (%lu vs %lu)",
e6a492b7 2013 oid_to_hex(&src_entry->idx.oid), sz,
2014 src_size);
ef0316fc 2015 *mem_usage += sz;
560b25a8
NP
2016 }
2017 if (!src->index) {
2018 src->index = create_delta_index(src->data, src_size);
a588d88a
MK
2019 if (!src->index) {
2020 static int warned = 0;
2021 if (!warned++)
2022 warning("suboptimal pack - out of memory");
2023 return 0;
2024 }
ef0316fc 2025 *mem_usage += sizeof_delta_index(src->index);
560b25a8
NP
2026 }
2027
2028 delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
c323ac7d 2029 if (!delta_buf)
75c42d8c 2030 return 0;
0aca34e8
NTND
2031 if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {
2032 free(delta_buf);
2033 return 0;
2034 }
f6c7081a 2035
898eba5e 2036 if (DELTA(trg_entry)) {
848d732c 2037 /* Prefer only shallower same-sized deltas. */
0aca34e8 2038 if (delta_size == DELTA_SIZE(trg_entry) &&
5a235b5e 2039 src->depth + 1 >= trg->depth) {
848d732c
BD
2040 free(delta_buf);
2041 return 0;
2042 }
074b2eea 2043 }
9e2d57a0 2044
3c701839
NP
2045 /*
2046 * Handle memory allocation outside of the cache
2047 * accounting lock. Compiler will optimize the strangeness
7eb151d6 2048 * away when NO_PTHREADS is defined.
3c701839 2049 */
8e0f7003 2050 free(trg_entry->delta_data);
3c701839 2051 cache_lock();
9e2d57a0 2052 if (trg_entry->delta_data) {
0aca34e8 2053 delta_cache_size -= DELTA_SIZE(trg_entry);
9e2d57a0
NP
2054 trg_entry->delta_data = NULL;
2055 }
d250626c 2056 if (delta_cacheable(src_size, trg_size, delta_size)) {
b7a28f78 2057 delta_cache_size += delta_size;
3c701839
NP
2058 cache_unlock();
2059 trg_entry->delta_data = xrealloc(delta_buf, delta_size);
2060 } else {
2061 cache_unlock();
074b2eea 2062 free(delta_buf);
3c701839
NP
2063 }
2064
898eba5e 2065 SET_DELTA(trg_entry, src_entry);
0aca34e8 2066 SET_DELTA_SIZE(trg_entry, delta_size);
b7a28f78
NP
2067 trg->depth = src->depth + 1;
2068
f6c7081a 2069 return 1;
c323ac7d
LT
2070}
2071
898b14ce 2072static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
b2504a0d 2073{
898eba5e 2074 struct object_entry *child = DELTA_CHILD(me);
898b14ce
NP
2075 unsigned int m = n;
2076 while (child) {
2077 unsigned int c = check_delta_limit(child, n + 1);
2078 if (m < c)
2079 m = c;
898eba5e 2080 child = DELTA_SIBLING(child);
898b14ce
NP
2081 }
2082 return m;
b2504a0d
NP
2083}
2084
75ad235c 2085static unsigned long free_unpacked(struct unpacked *n)
a97773ce 2086{
ef0316fc 2087 unsigned long freed_mem = sizeof_delta_index(n->index);
a97773ce
BD
2088 free_delta_index(n->index);
2089 n->index = NULL;
2090 if (n->data) {
ac77d0c3 2091 freed_mem += SIZE(n->entry);
6a83d902 2092 FREE_AND_NULL(n->data);
a97773ce
BD
2093 }
2094 n->entry = NULL;
7d7baa5e 2095 n->depth = 0;
ef0316fc 2096 return freed_mem;
a97773ce
BD
2097}
2098
384b32c0 2099static void find_deltas(struct object_entry **list, unsigned *list_size,
e334977d 2100 int window, int depth, unsigned *processed)
c323ac7d 2101{
384b32c0 2102 uint32_t i, idx = 0, count = 0;
7cadf491 2103 struct unpacked *array;
ef0316fc 2104 unsigned long mem_usage = 0;
c323ac7d 2105
19d4b416 2106 array = xcalloc(window, sizeof(struct unpacked));
21fcd1bd 2107
384b32c0 2108 for (;;) {
421b488a 2109 struct object_entry *entry;
c323ac7d 2110 struct unpacked *n = array + idx;
ef0316fc 2111 int j, max_depth, best_base = -1;
c323ac7d 2112
384b32c0
NP
2113 progress_lock();
2114 if (!*list_size) {
2115 progress_unlock();
2116 break;
2117 }
421b488a 2118 entry = *list++;
384b32c0
NP
2119 (*list_size)--;
2120 if (!entry->preferred_base) {
2121 (*processed)++;
2122 display_progress(progress_state, *processed);
2123 }
2124 progress_unlock();
2125
ef0316fc 2126 mem_usage -= free_unpacked(n);
c323ac7d 2127 n->entry = entry;
ab7cd7bb 2128
a97773ce 2129 while (window_memory_limit &&
ef0316fc 2130 mem_usage > window_memory_limit &&
a97773ce
BD
2131 count > 1) {
2132 uint32_t tail = (idx + window - count) % window;
75ad235c 2133 mem_usage -= free_unpacked(array + tail);
a97773ce
BD
2134 count--;
2135 }
2136
75d39853
NP
2137 /* We do not compute delta to *create* objects we are not
2138 * going to pack.
2139 */
2140 if (entry->preferred_base)
2141 goto next;
2142
898b14ce
NP
2143 /*
2144 * If the current object is at pack edge, take the depth of the
2145 * objects that depend on the current object into account;
2146 * otherwise they would become too deep.
2147 */
2148 max_depth = depth;
898eba5e 2149 if (DELTA_CHILD(entry)) {
898b14ce
NP
2150 max_depth -= check_delta_limit(entry, 0);
2151 if (max_depth <= 0)
2152 goto next;
2153 }
2154
78817c15
LT
2155 j = window;
2156 while (--j > 0) {
77639870 2157 int ret;
7cadf491 2158 uint32_t other_idx = idx + j;
c323ac7d 2159 struct unpacked *m;
78817c15
LT
2160 if (other_idx >= window)
2161 other_idx -= window;
c323ac7d
LT
2162 m = array + other_idx;
2163 if (!m->entry)
2164 break;
ef0316fc 2165 ret = try_delta(n, m, max_depth, &mem_usage);
77639870 2166 if (ret < 0)
c323ac7d 2167 break;
77639870
JH
2168 else if (ret > 0)
2169 best_base = other_idx;
c323ac7d 2170 }
898b14ce 2171
ed4a9031
NP
2172 /*
2173 * If we decided to cache the delta data, then it is best
2174 * to compress it right away. First because we have to do
2175 * it anyway, and doing it here while we're threaded will
2176 * save a lot of time in the non threaded write phase,
2177 * as well as allow for caching more deltas within
2178 * the same cache size limit.
2179 * ...
2180 * But only if not writing to stdout, since in that case
2181 * the network is most likely throttling writes anyway,
2182 * and therefore it is best to go to the write phase ASAP
2183 * instead, as we can afford spending more time compressing
2184 * between writes at that moment.
2185 */
2186 if (entry->delta_data && !pack_to_stdout) {
0cb3c142
NTND
2187 unsigned long size;
2188
0aca34e8 2189 size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
0cb3c142
NTND
2190 if (size < (1U << OE_Z_DELTA_BITS)) {
2191 entry->z_delta_size = size;
2192 cache_lock();
0aca34e8 2193 delta_cache_size -= DELTA_SIZE(entry);
0cb3c142
NTND
2194 delta_cache_size += entry->z_delta_size;
2195 cache_unlock();
2196 } else {
2197 FREE_AND_NULL(entry->delta_data);
2198 entry->z_delta_size = 0;
2199 }
ed4a9031
NP
2200 }
2201
70ca1a3f
JH
2202 /* if we made n a delta, and if n is already at max
2203 * depth, leaving it in the window is pointless. we
2204 * should evict it first.
70ca1a3f 2205 */
898eba5e 2206 if (DELTA(entry) && max_depth <= n->depth)
70ca1a3f 2207 continue;
ff45715c 2208
77639870
JH
2209 /*
2210 * Move the best delta base up in the window, after the
2211 * currently deltified object, to keep it longer. It will
2212 * be the first base object to be attempted next.
2213 */
898eba5e 2214 if (DELTA(entry)) {
77639870
JH
2215 struct unpacked swap = array[best_base];
2216 int dist = (window + idx - best_base) % window;
2217 int dst = best_base;
2218 while (dist--) {
2219 int src = (dst + 1) % window;
2220 array[dst] = array[src];
2221 dst = src;
2222 }
2223 array[dst] = swap;
2224 }
2225
898b14ce 2226 next:
521a4f4c 2227 idx++;
a97773ce
BD
2228 if (count + 1 < window)
2229 count++;
521a4f4c
LT
2230 if (idx >= window)
2231 idx = 0;
384b32c0 2232 }
adee7bdf 2233
f6c7081a 2234 for (i = 0; i < window; ++i) {
ff45715c 2235 free_delta_index(array[i].index);
adee7bdf 2236 free(array[i].data);
f6c7081a 2237 }
adee7bdf 2238 free(array);
c323ac7d
LT
2239}
2240
7eb151d6 2241#ifndef NO_PTHREADS
8ecce684 2242
a9a74636
NP
2243static void try_to_free_from_threads(size_t size)
2244{
2245 read_lock();
7c3ecb32 2246 release_pack_memory(size);
a9a74636
NP
2247 read_unlock();
2248}
2249
bc9b2175 2250static try_to_free_t old_try_to_free_routine;
851c34b0 2251
50f22ada
JS
2252/*
2253 * The main thread waits on the condition that (at least) one of the workers
2254 * has stopped working (which is indicated in the .working member of
2255 * struct thread_params).
2256 * When a work thread has completed its work, it sets .working to 0 and
2257 * signals the main thread and waits on the condition that .data_ready
2258 * becomes 1.
2259 */
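/*
 * A rough sketch of one hand-off, based on the code below (not taken
 * from the original comments):
 *
 *   worker:                           main thread:
 *     me->working = 0;                  wait on progress_cond
 *     signal progress_cond      ---->   pick the idle target, refill
 *     wait until me->data_ready  <----   target->data_ready = 1;
 *     me->data_ready = 0;               signal target->cond
 *     resume find_deltas()
 */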
2260
8ecce684
NP
2261struct thread_params {
2262 pthread_t thread;
2263 struct object_entry **list;
2264 unsigned list_size;
384b32c0 2265 unsigned remaining;
8ecce684
NP
2266 int window;
2267 int depth;
50f22ada
JS
2268 int working;
2269 int data_ready;
2270 pthread_mutex_t mutex;
2271 pthread_cond_t cond;
8ecce684
NP
2272 unsigned *processed;
2273};
2274
44626dc7
AH
2275static pthread_cond_t progress_cond;
2276
2277/*
2278 * Mutex and condition variable can't be statically-initialized on Windows.
2279 */
2280static void init_threaded_search(void)
2281{
93749194 2282 init_recursive_mutex(&read_mutex);
44626dc7
AH
2283 pthread_mutex_init(&cache_mutex, NULL);
2284 pthread_mutex_init(&progress_mutex, NULL);
2285 pthread_cond_init(&progress_cond, NULL);
851c34b0 2286 old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
44626dc7
AH
2287}
2288
2289static void cleanup_threaded_search(void)
2290{
851c34b0 2291 set_try_to_free_routine(old_try_to_free_routine);
44626dc7
AH
2292 pthread_cond_destroy(&progress_cond);
2293 pthread_mutex_destroy(&read_mutex);
2294 pthread_mutex_destroy(&cache_mutex);
2295 pthread_mutex_destroy(&progress_mutex);
2296}
c2a33679 2297
8ecce684
NP
2298static void *threaded_find_deltas(void *arg)
2299{
c2a33679
NP
2300 struct thread_params *me = arg;
2301
0c2ad00b 2302 progress_lock();
50f22ada 2303 while (me->remaining) {
0c2ad00b
2304 progress_unlock();
2305
384b32c0 2306 find_deltas(me->list, &me->remaining,
c2a33679 2307 me->window, me->depth, me->processed);
50f22ada
JS
2308
2309 progress_lock();
2310 me->working = 0;
2311 pthread_cond_signal(&progress_cond);
2312 progress_unlock();
2313
2314 /*
2315 * We must not set ->data_ready before we wait on the
2316 * condition because the main thread may have set it to 1
2317 * before we get here. In order to be sure that new
2318 * work is available if we see 1 in ->data_ready, it
2319 * was initialized to 0 before this thread was spawned
2320 * and we reset it to 0 right away.
2321 */
2322 pthread_mutex_lock(&me->mutex);
2323 while (!me->data_ready)
2324 pthread_cond_wait(&me->cond, &me->mutex);
2325 me->data_ready = 0;
2326 pthread_mutex_unlock(&me->mutex);
0c2ad00b
2327
2328 progress_lock();
c2a33679 2329 }
0c2ad00b 2330 progress_unlock();
50f22ada
JS
2331 /* leave ->working 1 so that this doesn't get more work assigned */
2332 return NULL;
8ecce684
NP
2333}
2334
8ecce684
NP
2335static void ll_find_deltas(struct object_entry **list, unsigned list_size,
2336 int window, int depth, unsigned *processed)
2337{
dcda3614 2338 struct thread_params *p;
384b32c0 2339 int i, ret, active_threads = 0;
c2a33679 2340
44626dc7
AH
2341 init_threaded_search();
2342
367f4a43 2343 if (delta_search_threads <= 1) {
384b32c0 2344 find_deltas(list, &list_size, window, depth, processed);
44626dc7 2345 cleanup_threaded_search();
367f4a43
NP
2346 return;
2347 }
43cc2b42 2348 if (progress > pack_to_stdout)
1a07e59c
NTND
2349 fprintf_ln(stderr, "Delta compression using up to %d threads",
2350 delta_search_threads);
dcda3614 2351 p = xcalloc(delta_search_threads, sizeof(*p));
367f4a43 2352
50f22ada 2353 /* Partition the work amongst work threads. */
367f4a43 2354 for (i = 0; i < delta_search_threads; i++) {
50f22ada
JS
2355 unsigned sub_size = list_size / (delta_search_threads - i);
2356
bf874896
NP
2357 /* don't use too small segments or no deltas will be found */
2358 if (sub_size < 2*window && i+1 < delta_search_threads)
2359 sub_size = 0;
2360
8ecce684
NP
2361 p[i].window = window;
2362 p[i].depth = depth;
2363 p[i].processed = processed;
50f22ada
JS
2364 p[i].working = 1;
2365 p[i].data_ready = 0;
c2a33679 2366
59921b4b 2367 /* try to split chunks on "path" boundaries */
6fc74703
NP
2368 while (sub_size && sub_size < list_size &&
2369 list[sub_size]->hash &&
384b32c0
NP
2370 list[sub_size]->hash == list[sub_size-1]->hash)
2371 sub_size++;
2372
50f22ada
JS
2373 p[i].list = list;
2374 p[i].list_size = sub_size;
2375 p[i].remaining = sub_size;
59921b4b 2376
384b32c0
NP
2377 list += sub_size;
2378 list_size -= sub_size;
2379 }
2380
50f22ada
JS
2381 /* Start work threads. */
2382 for (i = 0; i < delta_search_threads; i++) {
2383 if (!p[i].list_size)
2384 continue;
68e6a4f8
JS
2385 pthread_mutex_init(&p[i].mutex, NULL);
2386 pthread_cond_init(&p[i].cond, NULL);
50f22ada
JS
2387 ret = pthread_create(&p[i].thread, NULL,
2388 threaded_find_deltas, &p[i]);
2389 if (ret)
2390 die("unable to create thread: %s", strerror(ret));
2391 active_threads++;
2392 }
2393
384b32c0
NP
2394 /*
2395 * Now let's wait for work completion. Each time a thread is done
2396 * with its work, we steal half of the remaining work from the
2397 * thread with the largest number of unprocessed objects and give
2398 * it to that newly idle thread. This ensures good load balancing
2399 * until the remaining object list segments are simply too short
2400 * to be worth splitting anymore.
2401 */
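/*
 * For instance (illustrative numbers): if an idle thread appears while
 * the busiest thread still has 1000 unprocessed objects, the idle one
 * takes roughly the last 500 of them, with the split point nudged
 * forward so that objects sharing a path hash stay on the same thread.
 */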
50f22ada
JS
2402 while (active_threads) {
2403 struct thread_params *target = NULL;
384b32c0
NP
2404 struct thread_params *victim = NULL;
2405 unsigned sub_size = 0;
384b32c0
NP
2406
2407 progress_lock();
50f22ada
JS
2408 for (;;) {
2409 for (i = 0; !target && i < delta_search_threads; i++)
2410 if (!p[i].working)
2411 target = &p[i];
2412 if (target)
2413 break;
2414 pthread_cond_wait(&progress_cond, &progress_mutex);
2415 }
2416
384b32c0
NP
2417 for (i = 0; i < delta_search_threads; i++)
2418 if (p[i].remaining > 2*window &&
2419 (!victim || victim->remaining < p[i].remaining))
2420 victim = &p[i];
2421 if (victim) {
2422 sub_size = victim->remaining / 2;
2423 list = victim->list + victim->list_size - sub_size;
2424 while (sub_size && list[0]->hash &&
2425 list[0]->hash == list[-1]->hash) {
2426 list++;
2427 sub_size--;
2428 }
eb9688ff
NP
2429 if (!sub_size) {
2430 /*
2431 * It is possible for some "paths" to have
2432 * so many objects that no hash boundary
2433 * might be found. Let's just steal the
2434 * exact half in that case.
2435 */
2436 sub_size = victim->remaining / 2;
2437 list -= sub_size;
2438 }
384b32c0
NP
2439 target->list = list;
2440 victim->list_size -= sub_size;
2441 victim->remaining -= sub_size;
2442 }
384b32c0
NP
2443 target->list_size = sub_size;
2444 target->remaining = sub_size;
50f22ada
JS
2445 target->working = 1;
2446 progress_unlock();
2447
2448 pthread_mutex_lock(&target->mutex);
2449 target->data_ready = 1;
2450 pthread_cond_signal(&target->cond);
2451 pthread_mutex_unlock(&target->mutex);
c2a33679 2452
384b32c0 2453 if (!sub_size) {
b81d9af7 2454 pthread_join(target->thread, NULL);
50f22ada
JS
2455 pthread_cond_destroy(&target->cond);
2456 pthread_mutex_destroy(&target->mutex);
384b32c0 2457 active_threads--;
c2a33679 2458 }
50f22ada 2459 }
44626dc7 2460 cleanup_threaded_search();
dcda3614 2461 free(p);
8ecce684
NP
2462}
2463
2464#else
384b32c0 2465#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)
8ecce684
NP
2466#endif
2467
b773ddea
JK
2468static void add_tag_chain(const struct object_id *oid)
2469{
2470 struct tag *tag;
2471
2472 /*
2473 * We catch duplicates already in add_object_entry(), but we'd
2474 * prefer to do this extra check to avoid having to parse the
2475 * tag at all if we already know that it's being packed (e.g., if
2476 * it was included via bitmaps, we would not have parsed it
2477 * previously).
2478 */
2479 if (packlist_find(&to_pack, oid->hash, NULL))
2480 return;
2481
d3101b53 2482 tag = lookup_tag(oid);
b773ddea
JK
2483 while (1) {
2484 if (!tag || parse_tag(tag) || !tag->tagged)
2485 die("unable to pack objects reachable from tag %s",
2486 oid_to_hex(oid));
2487
188960b4 2488 add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
b773ddea
JK
2489
2490 if (tag->tagged->type != OBJ_TAG)
2491 return;
2492
2493 tag = (struct tag *)tag->tagged;
2494 }
2495}
2496
d155254c 2497static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
f0a24aa5 2498{
d155254c 2499 struct object_id peeled;
f0a24aa5 2500
59556548 2501 if (starts_with(path, "refs/tags/") && /* is a tag? */
b420d909 2502 !peel_ref(path, &peeled) && /* peelable? */
d155254c 2503 packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */
b773ddea 2504 add_tag_chain(oid);
f0a24aa5
SP
2505 return 0;
2506}
2507
f3123c4a
JH
2508static void prepare_pack(int window, int depth)
2509{
9668cf59 2510 struct object_entry **delta_list;
6e1c2344
RJ
2511 uint32_t i, nr_deltas;
2512 unsigned n;
9668cf59 2513
3f9ac8d2 2514 get_object_details();
9668cf59 2515
0e8189e2
NP
2516 /*
2517 * If we're locally repacking then we need to be doubly careful
2518 * from now on in order to make sure no stealth corruption gets
2519 * propagated to the new pack. Clients receiving streamed packs
2520 * should validate everything they get anyway so no need to incur
2521 * the additional cost here in that case.
2522 */
2523 if (!pack_to_stdout)
2524 do_check_packed_object_crc = 1;
2525
2834bc27 2526 if (!to_pack.nr_objects || !window || !depth)
9668cf59
NP
2527 return;
2528
b32fa95f 2529 ALLOC_ARRAY(delta_list, to_pack.nr_objects);
75d39853
NP
2530 nr_deltas = n = 0;
2531
2834bc27
VM
2532 for (i = 0; i < to_pack.nr_objects; i++) {
2533 struct object_entry *entry = to_pack.objects + i;
75d39853 2534
898eba5e 2535 if (DELTA(entry))
75d39853 2536 /* This happens if we decided to reuse existing
a7de7130 2537 * delta from a pack. "reuse_delta &&" is implied.
75d39853
NP
2538 */
2539 continue;
2540
ac77d0c3
NTND
2541 if (!entry->type_valid ||
2542 oe_size_less_than(&to_pack, entry, 50))
75d39853
NP
2543 continue;
2544
2545 if (entry->no_try_delta)
2546 continue;
2547
6d6f9cdd 2548 if (!entry->preferred_base) {
75d39853 2549 nr_deltas++;
fd9b1bae 2550 if (oe_type(entry) < 0)
6d6f9cdd 2551 die("unable to get type of object %s",
e6a492b7 2552 oid_to_hex(&entry->idx.oid));
eede9f42 2553 } else {
fd9b1bae 2554 if (oe_type(entry) < 0) {
eede9f42
NP
2555 /*
2556 * This object is not found, but we
2557 * don't have to include it anyway.
2558 */
2559 continue;
2560 }
6d6f9cdd 2561 }
75d39853
NP
2562
2563 delta_list[n++] = entry;
2564 }
2565
2f8b8947 2566 if (nr_deltas && n > 1) {
e334977d
NP
2567 unsigned nr_done = 0;
2568 if (progress)
754dbc43 2569 progress_state = start_progress(_("Compressing objects"),
dc6a0757 2570 nr_deltas);
9ed0d8d6 2571 QSORT(delta_list, n, type_size_sort);
8ecce684 2572 ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
4d4fcc54 2573 stop_progress(&progress_state);
e334977d
NP
2574 if (nr_done != nr_deltas)
2575 die("inconsistency with delta count");
75d39853 2576 }
9668cf59 2577 free(delta_list);
f3123c4a
JH
2578}
2579
ef90d6d4 2580static int git_pack_config(const char *k, const char *v, void *cb)
4812a93a 2581{
eeefa7c9 2582 if (!strcmp(k, "pack.window")) {
4812a93a
JK
2583 window = git_config_int(k, v);
2584 return 0;
2585 }
a97773ce
BD
2586 if (!strcmp(k, "pack.windowmemory")) {
2587 window_memory_limit = git_config_ulong(k, v);
2588 return 0;
2589 }
2590 if (!strcmp(k, "pack.depth")) {
842aaf93
TT
2591 depth = git_config_int(k, v);
2592 return 0;
2593 }
074b2eea
MK
2594 if (!strcmp(k, "pack.deltacachesize")) {
2595 max_delta_cache_size = git_config_int(k, v);
2596 return 0;
2597 }
e3dfddb3
MK
2598 if (!strcmp(k, "pack.deltacachelimit")) {
2599 cache_max_small_delta_size = git_config_int(k, v);
2600 return 0;
2601 }
ae4f07fb
VM
2602 if (!strcmp(k, "pack.writebitmaphashcache")) {
2603 if (git_config_bool(k, v))
2604 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
2605 else
2606 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
2607 }
6b8fda2d 2608 if (!strcmp(k, "pack.usebitmaps")) {
645c432d 2609 use_bitmap_index_default = git_config_bool(k, v);
6b8fda2d
VM
2610 return 0;
2611 }
693b86ff
NP
2612 if (!strcmp(k, "pack.threads")) {
2613 delta_search_threads = git_config_int(k, v);
833e3df1 2614 if (delta_search_threads < 0)
693b86ff
NP
2615 die("invalid number of threads specified (%d)",
2616 delta_search_threads);
7eb151d6 2617#ifdef NO_PTHREADS
2e96d815 2618 if (delta_search_threads != 1) {
693b86ff 2619 warning("no threads support, ignoring %s", k);
2e96d815
ÆAB
2620 delta_search_threads = 0;
2621 }
693b86ff
NP
2622#endif
2623 return 0;
2624 }
4d00bda2 2625 if (!strcmp(k, "pack.indexversion")) {
ebcfb379
JH
2626 pack_idx_opts.version = git_config_int(k, v);
2627 if (pack_idx_opts.version > 2)
6e1c2344 2628 die("bad pack.indexversion=%"PRIu32,
ebcfb379 2629 pack_idx_opts.version);
4d00bda2
NP
2630 return 0;
2631 }
ef90d6d4 2632 return git_default_config(k, v, cb);
4812a93a
JK
2633}
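/*
 * Illustrative configuration exercising the branches above (values are
 * examples, not defaults):
 *
 *   [pack]
 *       window = 250
 *       depth = 50
 *       threads = 0          ; 0 lets pack-objects pick a thread count
 *       windowMemory = 256m
 *       deltaCacheSize = 512m
 */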
2634
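/*
 * A minimal sketch of the expected stdin format (hashes shortened here
 * for illustration; real input uses full object IDs, e.g. as produced
 * by "git rev-list --objects"):
 *
 *   b5d97e6b... builtin/pack-objects.c    object to pack, with its name
 *   -5c49c116...                          edge object, recorded only as
 *                                         a preferred delta base
 */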
b5d97e6b 2635static void read_object_list_from_stdin(void)
c323ac7d 2636{
188960b4 2637 char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
2638 struct object_id oid;
2639 const char *p;
b2504a0d 2640
da93d12b 2641 for (;;) {
da93d12b
LT
2642 if (!fgets(line, sizeof(line), stdin)) {
2643 if (feof(stdin))
2644 break;
2645 if (!ferror(stdin))
1a07e59c 2646 die("BUG: fgets returned NULL, not EOF, not error!");
687dd75c 2647 if (errno != EINTR)
d824cbba 2648 die_errno("fgets");
687dd75c
JH
2649 clearerr(stdin);
2650 continue;
da93d12b 2651 }
7a979d99 2652 if (line[0] == '-') {
188960b4 2653 if (get_oid_hex(line+1, &oid))
2654 die("expected edge object ID, got garbage:\n %s",
b5d97e6b 2655 line);
188960b4 2656 add_preferred_base(&oid);
7a979d99 2657 continue;
21fcd1bd 2658 }
188960b4 2659 if (parse_oid_hex(line, &oid, &p))
2660 die("expected object ID, got garbage:\n %s", line);
b5d97e6b 2661
188960b4 2662 add_preferred_base_object(p + 1);
fd9b1bae 2663 add_object_entry(&oid, OBJ_NONE, p + 1, 0);
c323ac7d 2664 }
b5d97e6b
JH
2665}
2666
95308d64 2667/* Remember to update object flag allocation in object.h */
08cdfb13
JH
2668#define OBJECT_ADDED (1u<<20)
2669
11c211fa 2670static void show_commit(struct commit *commit, void *data)
b5d97e6b 2671{
188960b4 2672 add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);
08cdfb13 2673 commit->object.flags |= OBJECT_ADDED;
7cc8f971
VM
2674
2675 if (write_bitmap_index)
2676 index_commit_for_bitmap(commit);
b5d97e6b
JH
2677}
2678
de1e67d0 2679static void show_object(struct object *obj, const char *name, void *data)
b5d97e6b 2680{
8d2dfc49 2681 add_preferred_base_object(name);
188960b4 2682 add_object_entry(&obj->oid, obj->type, name, 0);
8d2dfc49 2683 obj->flags |= OBJECT_ADDED;
b5d97e6b
JH
2684}
2685
9535ce73
JH
2686static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
2687{
2688 assert(arg_missing_action == MA_ALLOW_ANY);
2689
2690 /*
2691 * Quietly ignore ALL missing objects. This avoids problems with
2692 * staging them now and getting an odd error later.
2693 */
2694 if (!has_object_file(&obj->oid))
2695 return;
2696
2697 show_object(obj, name, data);
2698}
2699
0c16cd49
JT
2700static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
2701{
2702 assert(arg_missing_action == MA_ALLOW_PROMISOR);
2703
2704 /*
2705 * Quietly ignore EXPECTED missing objects. This avoids problems with
2706 * staging them now and getting an odd error later.
2707 */
2708 if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
2709 return;
2710
2711 show_object(obj, name, data);
2712}
2713
9535ce73
JH
2714static int option_parse_missing_action(const struct option *opt,
2715 const char *arg, int unset)
2716{
2717 assert(arg);
2718 assert(!unset);
2719
2720 if (!strcmp(arg, "error")) {
2721 arg_missing_action = MA_ERROR;
2722 fn_show_object = show_object;
2723 return 0;
2724 }
2725
2726 if (!strcmp(arg, "allow-any")) {
2727 arg_missing_action = MA_ALLOW_ANY;
0c16cd49 2728 fetch_if_missing = 0;
9535ce73
JH
2729 fn_show_object = show_object__ma_allow_any;
2730 return 0;
2731 }
2732
0c16cd49
JT
2733 if (!strcmp(arg, "allow-promisor")) {
2734 arg_missing_action = MA_ALLOW_PROMISOR;
2735 fetch_if_missing = 0;
2736 fn_show_object = show_object__ma_allow_promisor;
2737 return 0;
2738 }
2739
9535ce73
JH
2740 die(_("invalid value for --missing"));
2741 return 0;
2742}
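/*
 * Illustrative invocations of the option parsed above (pack name and
 * input file are placeholders):
 *
 *   git pack-objects --revs --missing=error          my-pack <refs.txt
 *   git pack-objects --revs --missing=allow-any      my-pack <refs.txt
 *   git pack-objects --revs --missing=allow-promisor my-pack <refs.txt
 */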
2743
8d1d8f83
JH
2744static void show_edge(struct commit *commit)
2745{
188960b4 2746 add_preferred_base(&commit->object.oid);
8d1d8f83
JH
2747}
2748
08cdfb13
JH
2749struct in_pack_object {
2750 off_t offset;
2751 struct object *object;
2752};
2753
2754struct in_pack {
071bcaab
RJ
2755 unsigned int alloc;
2756 unsigned int nr;
08cdfb13
JH
2757 struct in_pack_object *array;
2758};
2759
2760static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
2761{
ed1c9977 2762 in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
08cdfb13
JH
2763 in_pack->array[in_pack->nr].object = object;
2764 in_pack->nr++;
2765}
2766
2767/*
2768 * Compare the objects in the offset order, in order to emulate the
f18d244a 2769 * "git rev-list --objects" output that produced the pack originally.
08cdfb13
JH
2770 */
2771static int ofscmp(const void *a_, const void *b_)
2772{
2773 struct in_pack_object *a = (struct in_pack_object *)a_;
2774 struct in_pack_object *b = (struct in_pack_object *)b_;
2775
2776 if (a->offset < b->offset)
2777 return -1;
2778 else if (a->offset > b->offset)
2779 return 1;
2780 else
f2fd0760 2781 return oidcmp(&a->object->oid, &b->object->oid);
08cdfb13
JH
2782}
2783
2784static void add_objects_in_unpacked_packs(struct rev_info *revs)
2785{
2786 struct packed_git *p;
2787 struct in_pack in_pack;
2788 uint32_t i;
2789
2790 memset(&in_pack, 0, sizeof(in_pack));
2791
a80d72db 2792 for (p = get_packed_git(the_repository); p; p = p->next) {
188960b4 2793 struct object_id oid;
08cdfb13
JH
2794 struct object *o;
2795
ed7e5fc3 2796 if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
08cdfb13
JH
2797 continue;
2798 if (open_pack_index(p))
2799 die("cannot open pack index");
2800
2801 ALLOC_GROW(in_pack.array,
2802 in_pack.nr + p->num_objects,
2803 in_pack.alloc);
2804
2805 for (i = 0; i < p->num_objects; i++) {
188960b4 2806 nth_packed_object_oid(&oid, p, i);
2807 o = lookup_unknown_object(oid.hash);
08cdfb13
JH
2808 if (!(o->flags & OBJECT_ADDED))
2809 mark_in_pack_object(o, p, &in_pack);
2810 o->flags |= OBJECT_ADDED;
2811 }
2812 }
2813
2814 if (in_pack.nr) {
9ed0d8d6 2815 QSORT(in_pack.array, in_pack.nr, ofscmp);
08cdfb13
JH
2816 for (i = 0; i < in_pack.nr; i++) {
2817 struct object *o = in_pack.array[i].object;
188960b4 2818 add_object_entry(&o->oid, o->type, "", 0);
08cdfb13
JH
2819 }
2820 }
2821 free(in_pack.array);
2822}
2823
76c1d9a0 2824static int add_loose_object(const struct object_id *oid, const char *path,
e26a8c47
JK
2825 void *data)
2826{
0df8e965 2827 enum object_type type = oid_object_info(the_repository, oid, NULL);
e26a8c47
JK
2828
2829 if (type < 0) {
2830 warning("loose object at %s could not be examined", path);
2831 return 0;
2832 }
2833
188960b4 2834 add_object_entry(oid, type, "", 0);
e26a8c47
JK
2835 return 0;
2836}
2837
2838/*
2839 * We actually don't even have to worry about reachability here.
2840 * add_object_entry will weed out duplicates, so we just add every
2841 * loose object we find.
2842 */
2843static void add_unreachable_loose_objects(void)
2844{
2845 for_each_loose_file_in_objdir(get_object_directory(),
2846 add_loose_object,
2847 NULL, NULL, NULL);
2848}
2849
188960b4 2850static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
094085e3
BC
2851{
2852 static struct packed_git *last_found = (void *)1;
2853 struct packed_git *p;
2854
a80d72db
SB
2855 p = (last_found != (void *)1) ? last_found :
2856 get_packed_git(the_repository);
094085e3
BC
2857
2858 while (p) {
ed7e5fc3
NTND
2859 if ((!p->pack_local || p->pack_keep ||
2860 p->pack_keep_in_core) &&
188960b4 2861 find_pack_entry_one(oid->hash, p)) {
094085e3
BC
2862 last_found = p;
2863 return 1;
2864 }
2865 if (p == last_found)
a80d72db 2866 p = get_packed_git(the_repository);
094085e3
BC
2867 else
2868 p = p->next;
2869 if (p == last_found)
2870 p = p->next;
2871 }
2872 return 0;
2873}
2874
abcb8655
JK
2875/*
2876 * Store a list of sha1s that should not be discarded
2877 * because they are either written too recently, or are
2878 * reachable from another object that was.
2879 *
2880 * This is filled by get_object_list.
2881 */
910650d2 2882static struct oid_array recent_objects;
abcb8655 2883
4ce3621a 2884static int loosened_object_can_be_discarded(const struct object_id *oid,
dddbad72 2885 timestamp_t mtime)
d0d46abc
JK
2886{
2887 if (!unpack_unreachable_expiration)
2888 return 0;
2889 if (mtime > unpack_unreachable_expiration)
2890 return 0;
910650d2 2891 if (oid_array_lookup(&recent_objects, oid) >= 0)
abcb8655 2892 return 0;
d0d46abc
JK
2893 return 1;
2894}
2895
ca11b212
NP
2896static void loosen_unused_packed_objects(struct rev_info *revs)
2897{
2898 struct packed_git *p;
2899 uint32_t i;
4ce3621a 2900 struct object_id oid;
ca11b212 2901
a80d72db 2902 for (p = get_packed_git(the_repository); p; p = p->next) {
ed7e5fc3 2903 if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
ca11b212
NP
2904 continue;
2905
2906 if (open_pack_index(p))
2907 die("cannot open pack index");
2908
2909 for (i = 0; i < p->num_objects; i++) {
4ce3621a 2910 nth_packed_object_oid(&oid, p, i);
2911 if (!packlist_find(&to_pack, oid.hash, NULL) &&
188960b4 2912 !has_sha1_pack_kept_or_nonlocal(&oid) &&
4ce3621a 2913 !loosened_object_can_be_discarded(&oid, p->mtime))
4bdb70a4 2914 if (force_object_loose(&oid, p->mtime))
ca11b212
NP
2915 die("unable to force loose object");
2916 }
2917 }
2918}
2919
69e4b342 2920/*
645c432d
KS
2921 * This tracks any options which pack-reuse code expects to be on, or which a
2922 * reader of the pack might not understand, and which would therefore prevent
2923 * blind reuse of what we have on disk.
69e4b342
JK
2924 */
2925static int pack_options_allow_reuse(void)
2926{
9df4a607
JK
2927 return pack_to_stdout &&
2928 allow_ofs_delta &&
ed7e5fc3
NTND
2929 !ignore_packed_keep_on_disk &&
2930 !ignore_packed_keep_in_core &&
9df4a607
JK
2931 (!local || !have_non_local_packs) &&
2932 !incremental;
69e4b342
JK
2933}
2934
6b8fda2d
VM
2935static int get_object_list_from_bitmap(struct rev_info *revs)
2936{
2937 if (prepare_bitmap_walk(revs) < 0)
2938 return -1;
2939
69e4b342
JK
2940 if (pack_options_allow_reuse() &&
2941 !reuse_partial_packfile_from_bitmap(
6b8fda2d
VM
2942 &reuse_packfile,
2943 &reuse_packfile_objects,
2944 &reuse_packfile_offset)) {
2945 assert(reuse_packfile_objects);
2946 nr_result += reuse_packfile_objects;
78d2214e 2947 display_progress(progress_state, nr_result);
6b8fda2d
VM
2948 }
2949
2950 traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
2951 return 0;
2952}
2953
abcb8655 2954static void record_recent_object(struct object *obj,
de1e67d0 2955 const char *name,
abcb8655
JK
2956 void *data)
2957{
910650d2 2958 oid_array_append(&recent_objects, &obj->oid);
abcb8655
JK
2959}
2960
2961static void record_recent_commit(struct commit *commit, void *data)
2962{
910650d2 2963 oid_array_append(&recent_objects, &commit->object.oid);
abcb8655
JK
2964}
2965
8d1d8f83 2966static void get_object_list(int ac, const char **av)
b5d97e6b
JH
2967{
2968 struct rev_info revs;
2969 char line[1000];
b5d97e6b
JH
2970 int flags = 0;
2971
b5d97e6b
JH
2972 init_revisions(&revs, NULL);
2973 save_commit_buffer = 0;
b5d97e6b
JH
2974 setup_revisions(ac, av, &revs, NULL);
2975
b790e0f6
NTND
2976 /* make sure shallows are read */
2977 is_repository_shallow();
2978
b5d97e6b
JH
2979 while (fgets(line, sizeof(line), stdin) != NULL) {
2980 int len = strlen(line);
872c930d 2981 if (len && line[len - 1] == '\n')
b5d97e6b
JH
2982 line[--len] = 0;
2983 if (!len)
2984 break;
2985 if (*line == '-') {
2986 if (!strcmp(line, "--not")) {
2987 flags ^= UNINTERESTING;
7cc8f971 2988 write_bitmap_index = 0;
b5d97e6b
JH
2989 continue;
2990 }
b790e0f6 2991 if (starts_with(line, "--shallow ")) {
e92b848c 2992 struct object_id oid;
2993 if (get_oid_hex(line + 10, &oid))
b790e0f6 2994 die("not an SHA-1 '%s'", line + 10);
e92b848c 2995 register_shallow(&oid);
f7f91086 2996 use_bitmap_index = 0;
b790e0f6
NTND
2997 continue;
2998 }
b5d97e6b
JH
2999 die("not a rev '%s'", line);
3000 }
8e676e8b 3001 if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
b5d97e6b
JH
3002 die("bad revision '%s'", line);
3003 }
3004
6b8fda2d
VM
3005 if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
3006 return;
3007
3d51e1b5
MK
3008 if (prepare_revision_walk(&revs))
3009 die("revision walk setup failed");
e76a5fb4 3010 mark_edges_uninteresting(&revs, show_edge);
9535ce73
JH
3011
3012 if (!fn_show_object)
3013 fn_show_object = show_object;
3014 traverse_commit_list_filtered(&filter_options, &revs,
3015 show_commit, fn_show_object, NULL,
3016 NULL);
08cdfb13 3017
abcb8655
JK
3018 if (unpack_unreachable_expiration) {
3019 revs.ignore_missing_links = 1;
3020 if (add_unseen_recent_objects_to_traversal(&revs,
3021 unpack_unreachable_expiration))
3022 die("unable to add recent objects");
3023 if (prepare_revision_walk(&revs))
3024 die("revision walk setup failed");
3025 traverse_commit_list(&revs, record_recent_commit,
3026 record_recent_object, NULL);
3027 }
3028
08cdfb13
JH
3029 if (keep_unreachable)
3030 add_objects_in_unpacked_packs(&revs);
e26a8c47
JK
3031 if (pack_loose_unreachable)
3032 add_unreachable_loose_objects();
ca11b212
NP
3033 if (unpack_unreachable)
3034 loosen_unused_packed_objects(&revs);
abcb8655 3035
910650d2 3036 oid_array_clear(&recent_objects);
b5d97e6b
JH
3037}
3038
ed7e5fc3
NTND
3039static void add_extra_kept_packs(const struct string_list *names)
3040{
3041 struct packed_git *p;
3042
3043 if (!names->nr)
3044 return;
3045
3046 for (p = get_packed_git(the_repository); p; p = p->next) {
3047 const char *name = basename(p->pack_name);
3048 int i;
3049
3050 if (!p->pack_local)
3051 continue;
3052
3053 for (i = 0; i < names->nr; i++)
3054 if (!fspathcmp(name, names->items[i].string))
3055 break;
3056
3057 if (i < names->nr) {
3058 p->pack_keep_in_core = 1;
3059 ignore_packed_keep_in_core = 1;
3060 continue;
3061 }
3062 }
3063}
3064
99fb6e04
NTND
3065static int option_parse_index_version(const struct option *opt,
3066 const char *arg, int unset)
3067{
3068 char *c;
3069 const char *val = arg;
3070 pack_idx_opts.version = strtoul(val, &c, 10);
3071 if (pack_idx_opts.version > 2)
3072 die(_("unsupported index version %s"), val);
3073 if (*c == ',' && c[1])
3074 pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
3075 if (*c || pack_idx_opts.off32_limit & 0x80000000)
3076 die(_("bad index version '%s'"), val);
3077 return 0;
3078}
3079
7e52f566
JK
3080static int option_parse_unpack_unreachable(const struct option *opt,
3081 const char *arg, int unset)
3082{
3083 if (unset) {
3084 unpack_unreachable = 0;
3085 unpack_unreachable_expiration = 0;
3086 }
3087 else {
3088 unpack_unreachable = 1;
3089 if (arg)
3090 unpack_unreachable_expiration = approxidate(arg);
3091 }
3092 return 0;
3093}
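/*
 * Example (illustrative): with "--unpack-unreachable=2.weeks.ago",
 * unreachable packed objects newer than two weeks are written out as
 * loose objects while older ones are simply dropped; a bare
 * "--unpack-unreachable" loosens all of them regardless of age, and
 * "--no-unpack-unreachable" turns the behaviour off again.
 */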
3094
b5d97e6b
JH
3095int cmd_pack_objects(int argc, const char **argv, const char *prefix)
3096{
b5d97e6b 3097 int use_internal_rev_list = 0;
8d1d8f83 3098 int thin = 0;
2dacf26d 3099 int shallow = 0;
4f366275 3100 int all_progress_implied = 0;
edfbb2aa 3101 struct argv_array rp = ARGV_ARRAY_INIT;
99fb6e04 3102 int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
c90f9e13 3103 int rev_list_index = 0;
ed7e5fc3 3104 struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
99fb6e04
NTND
3105 struct option pack_objects_options[] = {
3106 OPT_SET_INT('q', "quiet", &progress,
4c688120 3107 N_("do not show progress meter"), 0),
99fb6e04 3108 OPT_SET_INT(0, "progress", &progress,
4c688120 3109 N_("show progress meter"), 1),
99fb6e04 3110 OPT_SET_INT(0, "all-progress", &progress,
4c688120 3111 N_("show progress meter during object writing phase"), 2),
99fb6e04
NTND
3112 OPT_BOOL(0, "all-progress-implied",
3113 &all_progress_implied,
4c688120
NTND
3114 N_("similar to --all-progress when progress meter is shown")),
3115 { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
3116 N_("write the pack index file in the specified idx format version"),
99fb6e04 3117 0, option_parse_index_version },
2a514ed8
CB
3118 OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
3119 N_("maximum size of each output pack file")),
99fb6e04 3120 OPT_BOOL(0, "local", &local,
4c688120 3121 N_("ignore borrowed objects from alternate object store")),
99fb6e04 3122 OPT_BOOL(0, "incremental", &incremental,
4c688120 3123 N_("ignore packed objects")),
99fb6e04 3124 OPT_INTEGER(0, "window", &window,
4c688120 3125 N_("limit pack window by objects")),
2a514ed8
CB
3126 OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
3127 N_("limit pack window by memory in addition to object limit")),
99fb6e04 3128 OPT_INTEGER(0, "depth", &depth,
4c688120 3129 N_("maximum length of delta chain allowed in the resulting pack")),
99fb6e04 3130 OPT_BOOL(0, "reuse-delta", &reuse_delta,
4c688120 3131 N_("reuse existing deltas")),
99fb6e04 3132 OPT_BOOL(0, "reuse-object", &reuse_object,
4c688120 3133 N_("reuse existing objects")),
99fb6e04 3134 OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
4c688120 3135 N_("use OFS_DELTA objects")),
99fb6e04 3136 OPT_INTEGER(0, "threads", &delta_search_threads,
4c688120 3137 N_("use threads when searching for best delta matches")),
99fb6e04 3138 OPT_BOOL(0, "non-empty", &non_empty,
4c688120 3139 N_("do not create an empty pack output")),
99fb6e04 3140 OPT_BOOL(0, "revs", &use_internal_rev_list,
4c688120 3141 N_("read revision arguments from standard input")),
3e4a67b4
NTND
3142 OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,
3143 N_("limit the objects to those that are not yet packed"),
3144 1, PARSE_OPT_NONEG),
3145 OPT_SET_INT_F(0, "all", &rev_list_all,
3146 N_("include objects reachable from any reference"),
3147 1, PARSE_OPT_NONEG),
3148 OPT_SET_INT_F(0, "reflog", &rev_list_reflog,
3149 N_("include objects referred by reflog entries"),
3150 1, PARSE_OPT_NONEG),
3151 OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
3152 N_("include objects referred to by the index"),
3153 1, PARSE_OPT_NONEG),
99fb6e04 3154 OPT_BOOL(0, "stdout", &pack_to_stdout,
4c688120 3155 N_("output pack to stdout")),
99fb6e04 3156 OPT_BOOL(0, "include-tag", &include_tag,
4c688120 3157 N_("include tag objects that refer to objects to be packed")),
99fb6e04 3158 OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
4c688120 3159 N_("keep unreachable objects")),
e26a8c47
JK
3160 OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
3161 N_("pack loose unreachable objects")),
4c688120
NTND
3162 { OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
3163 N_("unpack unreachable objects newer than <time>"),
7e52f566 3164 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
99fb6e04 3165 OPT_BOOL(0, "thin", &thin,
4c688120 3166 N_("create thin packs")),
2dacf26d 3167 OPT_BOOL(0, "shallow", &shallow,
3168 N_("create packs suitable for shallow fetches")),
ed7e5fc3 3169 OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
4c688120 3170 N_("ignore packs that have companion .keep file")),
ed7e5fc3
NTND
3171 OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
3172 N_("ignore this pack")),
99fb6e04 3173 OPT_INTEGER(0, "compression", &pack_compression_level,
4c688120 3174 N_("pack compression level")),
99fb6e04 3175 OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
4c688120 3176 N_("do not hide commits by grafts"), 0),
6b8fda2d
VM
3177 OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
3178 N_("use a bitmap index if available to speed up counting objects")),
7cc8f971
VM
3179 OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
3180 N_("write a bitmap index together with the pack index")),
9535ce73
JH
3181 OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
3182 { OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
3183 N_("handling for missing objects"), PARSE_OPT_NONEG,
3184 option_parse_missing_action },
0c16cd49
JT
3185 OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
3186 N_("do not pack objects in promisor packfiles")),
99fb6e04
NTND
3187 OPT_END(),
3188 };
8d1d8f83 3189
0c6804ab
NTND
3190 if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
3191 BUG("too many dfs states, increase OE_DFS_STATE_BITS");
3192
afc711b8 3193 check_replace_refs = 0;
dae556bd 3194
ebcfb379 3195 reset_pack_idx_option(&pack_idx_opts);
ef90d6d4 3196 git_config(git_pack_config, NULL);
b5d97e6b
JH
3197
3198 progress = isatty(2);
99fb6e04
NTND
3199 argc = parse_options(argc, argv, prefix, pack_objects_options,
3200 pack_usage, 0);
b5d97e6b 3201
99fb6e04
NTND