#include "cache.h"
#include "commit.h"
#include "tag.h"
#include "diff.h"
#include "revision.h"
#include "progress.h"
#include "list-objects.h"
#include "pack.h"
#include "pack-bitmap.h"
#include "pack-revindex.h"
#include "pack-objects.h"

/*
 * An entry on the bitmap index, representing the bitmap for a given
 * commit.
 */
struct stored_bitmap {
	unsigned char sha1[20];
	struct ewah_bitmap *root;
	struct stored_bitmap *xor;
	int flags;
};

/*
 * The currently active bitmap index. By design, repositories only have
 * a single bitmap index available (the index for the biggest packfile in
 * the repository), since bitmap indexes need full closure.
 *
 * If there is more than one bitmap index available (e.g. because of
 * alternates), the active bitmap index is the largest one.
 */
static struct bitmap_index {
	/* Packfile to which this bitmap index belongs */
	struct packed_git *pack;

	/*
	 * Mark the first `reuse_objects` in the packfile as reused:
	 * they will be sent as-is without using them for repacking
	 * calculations.
	 */
	uint32_t reuse_objects;

	/* mmapped buffer of the whole bitmap index */
	unsigned char *map;
	size_t map_size; /* size of the mmapped buffer */
	size_t map_pos; /* current position when loading the index */

	/*
	 * Type indexes.
	 *
	 * Each bitmap marks which objects in the packfile are of the given
	 * type. This provides type information when yielding the objects from
	 * the packfile during a walk, which allows for better delta bases.
	 */
	struct ewah_bitmap *commits;
	struct ewah_bitmap *trees;
	struct ewah_bitmap *blobs;
	struct ewah_bitmap *tags;

	/* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */
	khash_sha1 *bitmaps;

	/* Number of bitmapped commits */
	uint32_t entry_count;

	/* Name-hash cache (or NULL if not present). */
	uint32_t *hashes;

	/*
	 * Extended index.
	 *
	 * When trying to perform bitmap operations with objects that are not
	 * packed in `pack`, these objects are added to this "fake index" and
	 * are assumed to appear at the end of the packfile for all operations.
	 */
	struct eindex {
		struct object **objects;
		uint32_t *hashes;
		uint32_t count, alloc;
		khash_sha1_pos *positions;
	} ext_index;

	/* Bitmap result of the last performed walk */
	struct bitmap *result;

	/* Version of the bitmap index */
	unsigned int version;

	unsigned loaded : 1;

} bitmap_git;

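/*
 * Resolve a stored bitmap to a plain EWAH bitmap. Bitmaps may be stored
 * on disk as an XOR delta against another (earlier) bitmap; walk the XOR
 * chain recursively, compose the result, and cache it back into
 * `st->root` so that subsequent lookups return it directly.
 */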
static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st)
{
	struct ewah_bitmap *parent;
	struct ewah_bitmap *composed;

	if (st->xor == NULL)
		return st->root;

	composed = ewah_pool_new();
	parent = lookup_stored_bitmap(st->xor);
	ewah_xor(st->root, parent, composed);

	ewah_pool_free(st->root);
	st->root = composed;
	st->xor = NULL;

	return composed;
}

/*
 * Read a bitmap from the current read position on the mmapped
 * index, and increase the read position accordingly
 */
static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index)
{
	struct ewah_bitmap *b = ewah_pool_new();

	int bitmap_size = ewah_read_mmap(b,
		index->map + index->map_pos,
		index->map_size - index->map_pos);

	if (bitmap_size < 0) {
		error("Failed to load bitmap index (corrupted?)");
		ewah_pool_free(b);
		return NULL;
	}

	index->map_pos += bitmap_size;
	return b;
}

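/*
 * Parse and validate the fixed-size header of the .bitmap file: check
 * the magic signature and version, require the BITMAP_OPT_FULL_DAG
 * option, and, if the name-hash cache extension is present, locate it at
 * the end of the map (just before the trailing 20-byte checksum).
 * Advances map_pos past the header on success.
 */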
static int load_bitmap_header(struct bitmap_index *index)
{
	struct bitmap_disk_header *header = (void *)index->map;

	if (index->map_size < sizeof(*header) + 20)
		return error("Corrupted bitmap index (missing header data)");

	if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0)
		return error("Corrupted bitmap index file (wrong header)");

	index->version = ntohs(header->version);
	if (index->version != 1)
		return error("Unsupported version for bitmap index file (%d)", index->version);

	/* Parse known bitmap format options */
	{
		uint32_t flags = ntohs(header->options);

		if ((flags & BITMAP_OPT_FULL_DAG) == 0)
			return error("Unsupported options for bitmap index file "
				"(Git requires BITMAP_OPT_FULL_DAG)");

		if (flags & BITMAP_OPT_HASH_CACHE) {
			unsigned char *end = index->map + index->map_size - 20;
			index->hashes = ((uint32_t *)end) - index->pack->num_objects;
		}
	}

	index->entry_count = ntohl(header->entry_count);
	index->map_pos += sizeof(*header);
	return 0;
}

static struct stored_bitmap *store_bitmap(struct bitmap_index *index,
					  struct ewah_bitmap *root,
					  const unsigned char *sha1,
					  struct stored_bitmap *xor_with,
					  int flags)
{
	struct stored_bitmap *stored;
	khiter_t hash_pos;
	int ret;

	stored = xmalloc(sizeof(struct stored_bitmap));
	stored->root = root;
	stored->xor = xor_with;
	stored->flags = flags;
	hashcpy(stored->sha1, sha1);

	hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret);

	/*
	 * A return code of 0 means the insertion succeeded with no changes,
	 * because the SHA1 already existed in the map. This is bad: there
	 * shouldn't be duplicate commits in the index.
	 */
	if (ret == 0) {
		error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1));
		return NULL;
	}

	kh_value(index->bitmaps, hash_pos) = stored;
	return stored;
}

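/*
 * Load the per-commit bitmap entries of a version 1 index. Each on-disk
 * entry records the commit's position in the pack index, an XOR offset
 * (0 = stored whole; otherwise the bitmap is a delta against one of the
 * previous MAX_XOR_OFFSET entries) and a flag byte, followed by the
 * EWAH-compressed bitmap itself.
 */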
static int load_bitmap_entries_v1(struct bitmap_index *index)
{
	static const size_t MAX_XOR_OFFSET = 160;

	uint32_t i;
	struct stored_bitmap **recent_bitmaps;
	struct bitmap_disk_entry *entry;

	recent_bitmaps = xcalloc(MAX_XOR_OFFSET, sizeof(struct stored_bitmap *));

	for (i = 0; i < index->entry_count; ++i) {
		int xor_offset, flags;
		struct ewah_bitmap *bitmap = NULL;
		struct stored_bitmap *xor_bitmap = NULL;
		uint32_t commit_idx_pos;
		const unsigned char *sha1;

		entry = (struct bitmap_disk_entry *)(index->map + index->map_pos);
		index->map_pos += sizeof(struct bitmap_disk_entry);

		commit_idx_pos = ntohl(entry->object_pos);
		sha1 = nth_packed_object_sha1(index->pack, commit_idx_pos);

		xor_offset = (int)entry->xor_offset;
		flags = (int)entry->flags;

		bitmap = read_bitmap_1(index);
		if (!bitmap)
			return -1;

		if (xor_offset > MAX_XOR_OFFSET || xor_offset > i)
			return error("Corrupted bitmap pack index");

		if (xor_offset > 0) {
			xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET];

			if (xor_bitmap == NULL)
				return error("Invalid XOR offset in bitmap pack index");
		}

		recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap(
			index, bitmap, sha1, xor_bitmap, flags);
	}

	return 0;
}

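/*
 * Try to open and mmap the .bitmap file that corresponds to `packfile`.
 * Only one bitmap index may be active at a time; if one is already
 * loaded, any additional bitmap files are ignored with a warning.
 * Returns 0 on success and -1 if the file is missing or unusable.
 */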
static int open_pack_bitmap_1(struct packed_git *packfile)
{
	int fd;
	struct stat st;
	char *idx_name;

	if (open_pack_index(packfile))
		return -1;

	idx_name = pack_bitmap_filename(packfile);
	fd = git_open_noatime(idx_name);
	free(idx_name);

	if (fd < 0)
		return -1;

	if (fstat(fd, &st)) {
		close(fd);
		return -1;
	}

	if (bitmap_git.pack) {
		warning("ignoring extra bitmap file: %s", packfile->pack_name);
		close(fd);
		return -1;
	}

	bitmap_git.pack = packfile;
	bitmap_git.map_size = xsize_t(st.st_size);
	bitmap_git.map = xmmap(NULL, bitmap_git.map_size, PROT_READ, MAP_PRIVATE, fd, 0);
	bitmap_git.map_pos = 0;
	close(fd);

	if (load_bitmap_header(&bitmap_git) < 0) {
		munmap(bitmap_git.map, bitmap_git.map_size);
		bitmap_git.map = NULL;
		bitmap_git.map_size = 0;
		return -1;
	}

	return 0;
}

static int load_pack_bitmap(void)
{
	assert(bitmap_git.map && !bitmap_git.loaded);

	bitmap_git.bitmaps = kh_init_sha1();
	bitmap_git.ext_index.positions = kh_init_sha1_pos();
	load_pack_revindex(bitmap_git.pack);

	if (!(bitmap_git.commits = read_bitmap_1(&bitmap_git)) ||
	    !(bitmap_git.trees = read_bitmap_1(&bitmap_git)) ||
	    !(bitmap_git.blobs = read_bitmap_1(&bitmap_git)) ||
	    !(bitmap_git.tags = read_bitmap_1(&bitmap_git)))
		goto failed;

	if (load_bitmap_entries_v1(&bitmap_git) < 0)
		goto failed;

	bitmap_git.loaded = 1;
	return 0;

failed:
	munmap(bitmap_git.map, bitmap_git.map_size);
	bitmap_git.map = NULL;
	bitmap_git.map_size = 0;
	return -1;
}

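/*
 * Derive the bitmap filename from a packfile name by replacing the
 * ".pack" suffix with ".bitmap", e.g.
 * "pack-abc123.pack" -> "pack-abc123.bitmap".
 * The returned string is newly allocated and owned by the caller.
 */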
char *pack_bitmap_filename(struct packed_git *p)
{
	char *idx_name;
	int len;

	len = strlen(p->pack_name) - strlen(".pack");
	idx_name = xmalloc(len + strlen(".bitmap") + 1);

	memcpy(idx_name, p->pack_name, len);
	memcpy(idx_name + len, ".bitmap", strlen(".bitmap") + 1);

	return idx_name;
}

static int open_pack_bitmap(void)
{
	struct packed_git *p;
	int ret = -1;

	assert(!bitmap_git.map && !bitmap_git.loaded);

	prepare_packed_git();
	for (p = packed_git; p; p = p->next) {
		if (open_pack_bitmap_1(p) == 0)
			ret = 0;
	}

	return ret;
}

int prepare_bitmap_git(void)
{
	if (bitmap_git.loaded)
		return 0;

	if (!open_pack_bitmap())
		return load_pack_bitmap();

	return -1;
}

struct include_data {
	struct bitmap *base;
	struct bitmap *seen;
};

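/*
 * Map an object to its bit position in the bitmaps. Objects that live in
 * the bitmapped packfile use their position in the pack (through the
 * reverse index); objects known only through the extended index are
 * numbered after them, starting at pack->num_objects.
 */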
static inline int bitmap_position_extended(const unsigned char *sha1)
{
	khash_sha1_pos *positions = bitmap_git.ext_index.positions;
	khiter_t pos = kh_get_sha1_pos(positions, sha1);

	if (pos < kh_end(positions)) {
		int bitmap_pos = kh_value(positions, pos);
		return bitmap_pos + bitmap_git.pack->num_objects;
	}

	return -1;
}

static inline int bitmap_position_packfile(const unsigned char *sha1)
{
	off_t offset = find_pack_entry_one(sha1, bitmap_git.pack);
	if (!offset)
		return -1;

	return find_revindex_position(bitmap_git.pack, offset);
}

static int bitmap_position(const unsigned char *sha1)
{
	int pos = bitmap_position_packfile(sha1);
	return (pos >= 0) ? pos : bitmap_position_extended(sha1);
}

static int ext_index_add_object(struct object *object, const char *name)
{
	struct eindex *eindex = &bitmap_git.ext_index;

	khiter_t hash_pos;
	int hash_ret;
	int bitmap_pos;

	hash_pos = kh_put_sha1_pos(eindex->positions, object->sha1, &hash_ret);
	if (hash_ret > 0) {
		if (eindex->count >= eindex->alloc) {
			eindex->alloc = (eindex->alloc + 16) * 3 / 2;
			eindex->objects = xrealloc(eindex->objects,
				eindex->alloc * sizeof(struct object *));
			eindex->hashes = xrealloc(eindex->hashes,
				eindex->alloc * sizeof(uint32_t));
		}

		bitmap_pos = eindex->count;
		eindex->objects[eindex->count] = object;
		eindex->hashes[eindex->count] = pack_name_hash(name);
		kh_value(eindex->positions, hash_pos) = bitmap_pos;
		eindex->count++;
	} else {
		bitmap_pos = kh_value(eindex->positions, hash_pos);
	}

	return bitmap_pos + bitmap_git.pack->num_objects;
}

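/*
 * Callbacks for traverse_commit_list() during the fallback walk: every
 * object the walk yields gets its bit set in the `base` bitmap, after
 * being added to the extended index if it is not part of the bitmapped
 * pack. Commits are handled through should_include(), so show_commit()
 * is a no-op.
 */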
static void show_object(struct object *object, const struct name_path *path,
			const char *last, void *data)
{
	struct bitmap *base = data;
	int bitmap_pos;

	bitmap_pos = bitmap_position(object->sha1);

	if (bitmap_pos < 0) {
		char *name = path_name(path, last);
		bitmap_pos = ext_index_add_object(object, name);
		free(name);
	}

	bitmap_set(base, bitmap_pos);
}

static void show_commit(struct commit *commit, void *data)
{
}

static int add_to_include_set(struct include_data *data,
			      const unsigned char *sha1,
			      int bitmap_pos)
{
	khiter_t hash_pos;

	if (data->seen && bitmap_get(data->seen, bitmap_pos))
		return 0;

	if (bitmap_get(data->base, bitmap_pos))
		return 0;

	hash_pos = kh_get_sha1(bitmap_git.bitmaps, sha1);
	if (hash_pos < kh_end(bitmap_git.bitmaps)) {
		struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, hash_pos);
		bitmap_or_ewah(data->base, lookup_stored_bitmap(st));
		return 0;
	}

	bitmap_set(data->base, bitmap_pos);
	return 1;
}

static int should_include(struct commit *commit, void *_data)
{
	struct include_data *data = _data;
	int bitmap_pos;

	bitmap_pos = bitmap_position(commit->object.sha1);
	if (bitmap_pos < 0)
		bitmap_pos = ext_index_add_object((struct object *)commit, NULL);

	if (!add_to_include_set(data, commit->object.sha1, bitmap_pos)) {
		struct commit_list *parent = commit->parents;

		while (parent) {
			parent->item->object.flags |= SEEN;
			parent = parent->next;
		}

		return 0;
	}

	return 1;
}

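/*
 * Compute the set of objects reachable from `roots`, as a bitmap. Roots
 * that already have a stored bitmap are simply OR'ed into the result;
 * the remaining roots (minus anything the partial result can already
 * prove reachable) are handed to a regular revision walk that fills in
 * the missing bits via should_include() and show_object().
 */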
static struct bitmap *find_objects(struct rev_info *revs,
				   struct object_list *roots,
				   struct bitmap *seen)
{
	struct bitmap *base = NULL;
	int needs_walk = 0;

	struct object_list *not_mapped = NULL;

	/*
	 * Go through all the roots for the walk. The ones that have bitmaps
	 * on the bitmap index will be `or`ed together to form an initial
	 * global reachability analysis.
	 *
	 * The ones without bitmaps in the index will be stored in the
	 * `not_mapped` list for further processing.
	 */
	while (roots) {
		struct object *object = roots->item;
		roots = roots->next;

		if (object->type == OBJ_COMMIT) {
			khiter_t pos = kh_get_sha1(bitmap_git.bitmaps, object->sha1);

			if (pos < kh_end(bitmap_git.bitmaps)) {
				struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos);
				struct ewah_bitmap *or_with = lookup_stored_bitmap(st);

				if (base == NULL)
					base = ewah_to_bitmap(or_with);
				else
					bitmap_or_ewah(base, or_with);

				object->flags |= SEEN;
				continue;
			}
		}

		object_list_insert(object, &not_mapped);
	}

	/*
	 * Best case scenario: we found bitmaps for all the roots, so the
	 * resulting `or` bitmap has the full reachability analysis.
	 */
	if (not_mapped == NULL)
		return base;

	roots = not_mapped;

	/*
	 * Let's iterate through all the roots that don't have bitmaps to
	 * check if we can determine them to be reachable from the existing
	 * global bitmap.
	 *
	 * If we cannot find them in the existing global bitmap, we'll need
	 * to push them to an actual walk and run it until we can confirm
	 * they are reachable.
	 */
	while (roots) {
		struct object *object = roots->item;
		int pos;

		roots = roots->next;
		pos = bitmap_position(object->sha1);

		if (pos < 0 || base == NULL || !bitmap_get(base, pos)) {
			object->flags &= ~UNINTERESTING;
			add_pending_object(revs, object, "");
			needs_walk = 1;
		} else {
			object->flags |= SEEN;
		}
	}

	if (needs_walk) {
		struct include_data incdata;

		if (base == NULL)
			base = bitmap_new();

		incdata.base = base;
		incdata.seen = seen;

		revs->include_check = should_include;
		revs->include_check_data = &incdata;

		if (prepare_revision_walk(revs))
			die("revision walk setup failed");

		traverse_commit_list(revs, show_commit, show_object, base);
	}

	return base;
}

static void show_extended_objects(struct bitmap *objects,
				  show_reachable_fn show_reach)
{
	struct eindex *eindex = &bitmap_git.ext_index;
	uint32_t i;

	for (i = 0; i < eindex->count; ++i) {
		struct object *obj;

		if (!bitmap_get(objects, bitmap_git.pack->num_objects + i))
			continue;

		obj = eindex->objects[i];
		show_reach(obj->sha1, obj->type, 0, eindex->hashes[i], NULL, 0);
	}
}

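/*
 * Emit every object of a single type that is set in the result bitmap.
 * The result is AND'ed word by word against the on-disk type bitmap, and
 * each set bit is translated back to a SHA1 and pack offset through the
 * pack's reverse index. Bits below `reuse_objects` are skipped, since
 * those objects are streamed out of the pack verbatim.
 */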
static void show_objects_for_type(
	struct bitmap *objects,
	struct ewah_bitmap *type_filter,
	enum object_type object_type,
	show_reachable_fn show_reach)
{
	size_t pos = 0, i = 0;
	uint32_t offset;

	struct ewah_iterator it;
	eword_t filter;

	if (bitmap_git.reuse_objects == bitmap_git.pack->num_objects)
		return;

	ewah_iterator_init(&it, type_filter);

	while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) {
		eword_t word = objects->words[i] & filter;

		for (offset = 0; offset < BITS_IN_WORD; ++offset) {
			const unsigned char *sha1;
			struct revindex_entry *entry;
			uint32_t hash = 0;

			if ((word >> offset) == 0)
				break;

			offset += ewah_bit_ctz64(word >> offset);

			if (pos + offset < bitmap_git.reuse_objects)
				continue;

			entry = &bitmap_git.pack->revindex[pos + offset];
			sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr);

			if (bitmap_git.hashes)
				hash = ntohl(bitmap_git.hashes[entry->nr]);

			show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset);
		}

		pos += BITS_IN_WORD;
		i++;
	}
}

static int in_bitmapped_pack(struct object_list *roots)
{
	while (roots) {
		struct object *object = roots->item;
		roots = roots->next;

		if (find_pack_entry_one(object->sha1, bitmap_git.pack) > 0)
			return 1;
	}

	return 0;
}

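/*
 * Entry point for a bitmapped revision walk. The pending objects of
 * `revs` are split into "wants" and "haves" (peeling tags as we go), the
 * reachability closure of each side is computed with find_objects(), and
 * the final result stored in bitmap_git.result is wants AND NOT haves.
 * Returns -1 if bitmaps cannot be used for this walk, in which case the
 * caller should fall back to a regular object walk.
 */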
int prepare_bitmap_walk(struct rev_info *revs)
{
	unsigned int i;
	unsigned int pending_nr = revs->pending.nr;
	struct object_array_entry *pending_e = revs->pending.objects;

	struct object_list *wants = NULL;
	struct object_list *haves = NULL;

	struct bitmap *wants_bitmap = NULL;
	struct bitmap *haves_bitmap = NULL;

	if (!bitmap_git.loaded) {
		/* try to open a bitmapped pack, but don't parse it yet
		 * because we may not need to use it */
		if (open_pack_bitmap() < 0)
			return -1;
	}

	for (i = 0; i < pending_nr; ++i) {
		struct object *object = pending_e[i].item;

		if (object->type == OBJ_NONE)
			parse_object_or_die(object->sha1, NULL);

		while (object->type == OBJ_TAG) {
			struct tag *tag = (struct tag *) object;

			if (object->flags & UNINTERESTING)
				object_list_insert(object, &haves);
			else
				object_list_insert(object, &wants);

			if (!tag->tagged)
				die("bad tag");
			object = parse_object_or_die(tag->tagged->sha1, NULL);
		}

		if (object->flags & UNINTERESTING)
			object_list_insert(object, &haves);
		else
			object_list_insert(object, &wants);
	}

	/*
	 * if we have a HAVES list, but none of those haves is contained
	 * in the packfile that has a bitmap, we don't have anything to
	 * optimize here
	 */
	if (haves && !in_bitmapped_pack(haves))
		return -1;

	/* if we don't want anything, we're done here */
	if (!wants)
		return -1;

	/*
	 * now we're going to use bitmaps, so load the actual bitmap entries
	 * from disk. this is the point of no return; after this the rev_list
	 * becomes invalidated and we must perform the revwalk through bitmaps
	 */
	if (!bitmap_git.loaded && load_pack_bitmap() < 0)
		return -1;

	revs->pending.nr = 0;
	revs->pending.alloc = 0;
	revs->pending.objects = NULL;

	if (haves) {
		revs->ignore_missing_links = 1;
		haves_bitmap = find_objects(revs, haves, NULL);
		reset_revision_walk();
		revs->ignore_missing_links = 0;

		if (haves_bitmap == NULL)
			die("BUG: failed to perform bitmap walk");
	}

	wants_bitmap = find_objects(revs, wants, haves_bitmap);

	if (!wants_bitmap)
		die("BUG: failed to perform bitmap walk");

	if (haves_bitmap)
		bitmap_and_not(wants_bitmap, haves_bitmap);

	bitmap_git.result = wants_bitmap;

	bitmap_free(haves_bitmap);
	return 0;
}

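/*
 * Check whether a prefix of the bitmapped packfile can be reused
 * verbatim: count the run of set bits at the start of the result bitmap
 * (i.e. objects at the beginning of the pack, in pack order). If that
 * run covers the whole pack, or at least REUSE_PERCENT of the objects we
 * are going to send, report how many entries and up to which pack offset
 * the caller may copy as-is.
 */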
int reuse_partial_packfile_from_bitmap(struct packed_git **packfile,
				       uint32_t *entries,
				       off_t *up_to)
{
	/*
	 * Reuse the packfile content if we need more than
	 * 90% of its objects
	 */
	static const double REUSE_PERCENT = 0.9;

	struct bitmap *result = bitmap_git.result;
	uint32_t reuse_threshold;
	uint32_t i, reuse_objects = 0;

	assert(result);

	for (i = 0; i < result->word_alloc; ++i) {
		if (result->words[i] != (eword_t)~0) {
			reuse_objects += ewah_bit_ctz64(~result->words[i]);
			break;
		}

		reuse_objects += BITS_IN_WORD;
	}

#ifdef GIT_BITMAP_DEBUG
	{
		const unsigned char *sha1;
		struct revindex_entry *entry;

		entry = &bitmap_git.pack->revindex[reuse_objects];
		sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr);

		fprintf(stderr, "Failed to reuse at %d (%016llx)\n",
			reuse_objects, result->words[i]);
		fprintf(stderr, " %s\n", sha1_to_hex(sha1));
	}
#endif

	if (!reuse_objects)
		return -1;

	if (reuse_objects >= bitmap_git.pack->num_objects) {
		bitmap_git.reuse_objects = *entries = bitmap_git.pack->num_objects;
		*up_to = -1; /* reuse the full pack */
		*packfile = bitmap_git.pack;
		return 0;
	}

	reuse_threshold = bitmap_popcount(bitmap_git.result) * REUSE_PERCENT;

	if (reuse_objects < reuse_threshold)
		return -1;

	bitmap_git.reuse_objects = *entries = reuse_objects;
	*up_to = bitmap_git.pack->revindex[reuse_objects].offset;
	*packfile = bitmap_git.pack;

	return 0;
}

void traverse_bitmap_commit_list(show_reachable_fn show_reachable)
{
	assert(bitmap_git.result);

	show_objects_for_type(bitmap_git.result, bitmap_git.commits,
		OBJ_COMMIT, show_reachable);
	show_objects_for_type(bitmap_git.result, bitmap_git.trees,
		OBJ_TREE, show_reachable);
	show_objects_for_type(bitmap_git.result, bitmap_git.blobs,
		OBJ_BLOB, show_reachable);
	show_objects_for_type(bitmap_git.result, bitmap_git.tags,
		OBJ_TAG, show_reachable);

	show_extended_objects(bitmap_git.result, show_reachable);

	bitmap_free(bitmap_git.result);
	bitmap_git.result = NULL;
}

static uint32_t count_object_type(struct bitmap *objects,
				  enum object_type type)
{
	struct eindex *eindex = &bitmap_git.ext_index;

	uint32_t i = 0, count = 0;
	struct ewah_iterator it;
	eword_t filter;

	switch (type) {
	case OBJ_COMMIT:
		ewah_iterator_init(&it, bitmap_git.commits);
		break;

	case OBJ_TREE:
		ewah_iterator_init(&it, bitmap_git.trees);
		break;

	case OBJ_BLOB:
		ewah_iterator_init(&it, bitmap_git.blobs);
		break;

	case OBJ_TAG:
		ewah_iterator_init(&it, bitmap_git.tags);
		break;

	default:
		return 0;
	}

	while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) {
		eword_t word = objects->words[i++] & filter;
		count += ewah_bit_popcount64(word);
	}

	for (i = 0; i < eindex->count; ++i) {
		if (eindex->objects[i]->type == type &&
		    bitmap_get(objects, bitmap_git.pack->num_objects + i))
			count++;
	}

	return count;
}

void count_bitmap_commit_list(uint32_t *commits, uint32_t *trees,
			      uint32_t *blobs, uint32_t *tags)
{
	assert(bitmap_git.result);

	if (commits)
		*commits = count_object_type(bitmap_git.result, OBJ_COMMIT);

	if (trees)
		*trees = count_object_type(bitmap_git.result, OBJ_TREE);

	if (blobs)
		*blobs = count_object_type(bitmap_git.result, OBJ_BLOB);

	if (tags)
		*tags = count_object_type(bitmap_git.result, OBJ_TAG);
}

struct bitmap_test_data {
	struct bitmap *base;
	struct progress *prg;
	size_t seen;
};

static void test_show_object(struct object *object,
			     const struct name_path *path,
			     const char *last, void *data)
{
	struct bitmap_test_data *tdata = data;
	int bitmap_pos;

	bitmap_pos = bitmap_position(object->sha1);
	if (bitmap_pos < 0)
		die("Object not in bitmap: %s\n", sha1_to_hex(object->sha1));

	bitmap_set(tdata->base, bitmap_pos);
	display_progress(tdata->prg, ++tdata->seen);
}

static void test_show_commit(struct commit *commit, void *data)
{
	struct bitmap_test_data *tdata = data;
	int bitmap_pos;

	bitmap_pos = bitmap_position(commit->object.sha1);
	if (bitmap_pos < 0)
		die("Object not in bitmap: %s\n", sha1_to_hex(commit->object.sha1));

	bitmap_set(tdata->base, bitmap_pos);
	display_progress(tdata->prg, ++tdata->seen);
}

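/*
 * Verify the bitmap of a single commit against a real object walk (the
 * backing machinery for `git rev-list --test-bitmap`): look up the
 * commit's stored bitmap, run a full traversal from the same commit, and
 * report whether the two sets of objects match.
 */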
void test_bitmap_walk(struct rev_info *revs)
{
	struct object *root;
	struct bitmap *result = NULL;
	khiter_t pos;
	size_t result_popcnt;
	struct bitmap_test_data tdata;

	if (prepare_bitmap_git())
		die("failed to load bitmap indexes");

	if (revs->pending.nr != 1)
		die("you must specify exactly one commit to test");

	fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n",
		bitmap_git.version, bitmap_git.entry_count);

	root = revs->pending.objects[0].item;
	pos = kh_get_sha1(bitmap_git.bitmaps, root->sha1);

	if (pos < kh_end(bitmap_git.bitmaps)) {
		struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos);
		struct ewah_bitmap *bm = lookup_stored_bitmap(st);

		fprintf(stderr, "Found bitmap for %s. %d bits / %08x checksum\n",
			sha1_to_hex(root->sha1), (int)bm->bit_size, ewah_checksum(bm));

		result = ewah_to_bitmap(bm);
	}

	if (result == NULL)
		die("Commit %s doesn't have an indexed bitmap", sha1_to_hex(root->sha1));

	revs->tag_objects = 1;
	revs->tree_objects = 1;
	revs->blob_objects = 1;

	result_popcnt = bitmap_popcount(result);

	if (prepare_revision_walk(revs))
		die("revision walk setup failed");

	tdata.base = bitmap_new();
	tdata.prg = start_progress("Verifying bitmap entries", result_popcnt);
	tdata.seen = 0;

	traverse_commit_list(revs, &test_show_commit, &test_show_object, &tdata);

	stop_progress(&tdata.prg);

	if (bitmap_equals(result, tdata.base))
		fprintf(stderr, "OK!\n");
	else
		fprintf(stderr, "Mismatch!\n");
}

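/*
 * Support for carrying existing bitmaps over to a new pack during a
 * repack: `reposition` maps each object's old position in the bitmapped
 * pack to its position in the pack being written (offset by one, with 0
 * meaning "not present"). rebuild_bitmap() translates a stored bitmap
 * through that mapping, failing if any referenced object is missing from
 * the new pack.
 */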
static int rebuild_bitmap(uint32_t *reposition,
			  struct ewah_bitmap *source,
			  struct bitmap *dest)
{
	uint32_t pos = 0;
	struct ewah_iterator it;
	eword_t word;

	ewah_iterator_init(&it, source);

	while (ewah_iterator_next(&word, &it)) {
		uint32_t offset, bit_pos;

		for (offset = 0; offset < BITS_IN_WORD; ++offset) {
			if ((word >> offset) == 0)
				break;

			offset += ewah_bit_ctz64(word >> offset);

			bit_pos = reposition[pos + offset];
			if (bit_pos > 0)
				bitmap_set(dest, bit_pos - 1);
			else /* can't reuse, we don't have the object */
				return -1;
		}

		pos += BITS_IN_WORD;
	}
	return 0;
}

int rebuild_existing_bitmaps(struct packing_data *mapping,
			     khash_sha1 *reused_bitmaps,
			     int show_progress)
{
	uint32_t i, num_objects;
	uint32_t *reposition;
	struct bitmap *rebuild;
	struct stored_bitmap *stored;
	struct progress *progress = NULL;

	khiter_t hash_pos;
	int hash_ret;

	if (prepare_bitmap_git() < 0)
		return -1;

	num_objects = bitmap_git.pack->num_objects;
	reposition = xcalloc(num_objects, sizeof(uint32_t));

	for (i = 0; i < num_objects; ++i) {
		const unsigned char *sha1;
		struct revindex_entry *entry;
		struct object_entry *oe;

		entry = &bitmap_git.pack->revindex[i];
		sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr);
		oe = packlist_find(mapping, sha1, NULL);

		if (oe)
			reposition[i] = oe->in_pack_pos + 1;
	}

	rebuild = bitmap_new();
	i = 0;

	if (show_progress)
		progress = start_progress("Reusing bitmaps", 0);

	kh_foreach_value(bitmap_git.bitmaps, stored, {
		if (stored->flags & BITMAP_FLAG_REUSE) {
			if (!rebuild_bitmap(reposition,
					    lookup_stored_bitmap(stored),
					    rebuild)) {
				hash_pos = kh_put_sha1(reused_bitmaps,
						       stored->sha1,
						       &hash_ret);
				kh_value(reused_bitmaps, hash_pos) =
					bitmap_to_ewah(rebuild);
			}
			bitmap_reset(rebuild);
			display_progress(progress, ++i);
		}
	});

	stop_progress(&progress);

	free(reposition);
	bitmap_free(rebuild);
	return 0;
}