#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};

#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif

struct packed_ref_store;

struct packed_ref_cache {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * cache is associated:
	 */
	struct packed_ref_store *refs;

	struct ref_cache *cache;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file. If the file was
	 * already sorted, this points at the mmapped contents of the
	 * file. If not, this points at heap-allocated memory
	 * containing the contents, sorted. If there were no contents
	 * (e.g., because the file didn't exist), `buf` and `eof` are
	 * both NULL.
	 */
	char *buf, *eof;

	/* The size of the header line, if any; otherwise, 0: */
	size_t header_len;

	/*
	 * What is the peeled state of this cache? (This is usually
	 * determined from the header of the "packed-refs" file.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	struct ref_store base;

	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}

/*
 * If the buffer in `packed_refs` is active, then either munmap the
 * memory and close the file, or free the memory. Then set the buffer
 * pointers to NULL.
 */
static void release_packed_ref_buffer(struct packed_ref_cache *packed_refs)
{
	if (packed_refs->mmapped) {
		if (munmap(packed_refs->buf,
			   packed_refs->eof - packed_refs->buf))
			die_errno("error unmapping packed-refs file %s",
				  packed_refs->refs->path);
		packed_refs->mmapped = 0;
	} else {
		free(packed_refs->buf);
	}
	packed_refs->buf = packed_refs->eof = NULL;
	packed_refs->header_len = 0;
}

/*
 * Decrease the reference count of *packed_refs. If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		free_ref_cache(packed_refs->cache);
		stat_validity_clear(&packed_refs->validity);
		release_packed_ref_buffer(packed_refs);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}
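
/*
 * A sketch of the intended usage of the two functions above
 * (illustrative, not a verbatim caller from this file): they form a
 * manual reference count, so any code that stashes a
 * `struct packed_ref_cache *` pairs each acquire with one release:
 *
 *	struct packed_ref_cache *cache = get_packed_ref_cache(refs);
 *
 *	acquire_packed_ref_cache(cache);
 *	... use cache->cache, cache->buf, etc. ...
 *	release_packed_ref_cache(cache);
 *
 * The iterators below follow exactly this pattern: they acquire in
 * their begin() function and release in their abort() function.
 */
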
struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;

	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;

	refs->path = xstrdup(path);
	return ref_store;
}
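
/*
 * A minimal usage sketch (hypothetical caller; the path shown is an
 * assumption, any path to a "packed-refs" file works):
 *
 *	struct ref_store *store =
 *		packed_ref_store_create(".git/packed-refs",
 *					REF_STORE_READ | REF_STORE_WRITE);
 *
 * All further access then goes through the generic `ref_store` API,
 * which the backend recovers via packed_downcast() below.
 */
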
/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: disallowed operation (%s), requires %x, has %x",
		    caller, required_flags, refs->store_flags);

	return refs;
}

static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

static NORETURN void die_unterminated_line(const char *path,
					   const char *p, size_t len)
{
	if (len < 80)
		die("unterminated line in %s: %.*s", path, (int)len, p);
	else
		die("unterminated line in %s: %.75s...", path, p);
}

static NORETURN void die_invalid_line(const char *path,
				      const char *p, size_t len)
{
	const char *eol = memchr(p, '\n', len);

	if (!eol)
		die_unterminated_line(path, p, len);
	else if (eol - p < 80)
		die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
	else
		die("unexpected line in %s: %.75s...", path, p);
}

/*
 * An iterator over a packed-refs file that is currently mmapped.
 */
struct mmapped_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	struct object_id oid, peeled;

	struct strbuf refname_buf;
};

static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ref_iterator_abort(ref_iterator);

	iter->base.flags = REF_ISPACKED;

	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
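
/*
 * For illustration (values invented), the two record shapes that the
 * parser above accepts:
 *
 *	d1b12dc8be7b46459b70bbcc38b8d33d8b9dbf52 refs/heads/master
 *	5f6d4a4e8727e2c7d3d26e5b2a5b7f16b1b1f2e3 refs/tags/v1.0
 *	^8d7f25e51f3ae4fa07b3b8c0b8af979b9e935b88
 *
 * Each reference is "<oid> SP <refname> LF"; a line starting with '^'
 * records the peeled value of the reference on the preceding line.
 */
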
static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator,
				     struct object_id *peeled)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;

	if ((iter->base.flags & REF_KNOWS_PEELED)) {
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ? -1 : 0;
	} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
		return -1;
	} else {
		return !!peel_object(iter->oid.hash, peeled->hash);
	}
}

static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;

	release_packed_ref_cache(iter->packed_refs);
	strbuf_release(&iter->refname_buf);
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}

static struct ref_iterator_vtable mmapped_ref_iterator_vtable = {
	mmapped_ref_iterator_advance,
	mmapped_ref_iterator_peel,
	mmapped_ref_iterator_abort
};

struct ref_iterator *mmapped_ref_iterator_begin(
		struct packed_ref_cache *packed_refs,
		const char *pos, const char *eof)
{
	struct mmapped_ref_iterator *iter = xcalloc(1, sizeof(*iter));
	struct ref_iterator *ref_iterator = &iter->base;

	if (!packed_refs->buf)
		return empty_ref_iterator_begin();

	base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 1);

	iter->packed_refs = packed_refs;
	acquire_packed_ref_cache(iter->packed_refs);
	iter->pos = pos;
	iter->eof = eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	return ref_iterator;
}

struct packed_ref_entry {
	const char *start;
	size_t len;
};

static int cmp_packed_ref_entries(const void *v1, const void *v2)
{
	const struct packed_ref_entry *e1 = v1, *e2 = v2;
	const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
	const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;

	while (1) {
		if (*r1 == '\n')
			return *r2 == '\n' ? 0 : -1;
		if (*r1 != *r2) {
			if (*r2 == '\n')
				return 1;
			else
				return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		}
		r1++;
		r2++;
	}
}
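
/*
 * A worked example of the comparison above (illustrative): the
 * refnames are compared byte by byte up to their terminating LF, and
 * the LF is treated as sorting before any byte that can occur in a
 * refname. So a refname that is a proper prefix of another sorts
 * first:
 *
 *	<oid> refs/heads/foo
 *	<oid> refs/heads/foobar
 *
 * This matches plain strcmp() order on the refnames, without having
 * to copy the records out of the buffer to NUL-terminate them.
 */
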
/*
 * `packed_refs->buf` is not known to be sorted. Check whether it is,
 * and if not, sort it into new memory and munmap/free the old
 * storage.
 */
static void sort_packed_refs(struct packed_ref_cache *packed_refs)
{
	struct packed_ref_entry *entries = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	pos = packed_refs->buf + packed_refs->header_len;
	eof = packed_refs->eof;
	len = eof - pos;

	if (!len)
		return;

	/*
	 * Initialize entries based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(entries, len / 80 + 20, alloc);

	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(packed_refs->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(entries, nr + 1, alloc);
		entries[nr].start = pos;
		entries[nr].len = eol - pos;
		nr++;

		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_entries(&entries[nr - 2],
					   &entries[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the entries array: */
	QSORT(entries, nr, cmp_packed_ref_entries);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `entries` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, entries[i].start, entries[i].len);
		dst += entries[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	release_packed_ref_buffer(packed_refs);
	packed_refs->buf = new_buffer;
	packed_refs->eof = new_buffer + len;
	packed_refs->header_len = 0;

cleanup:
	free(entries);
}

/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))
		p--;
	return p;
}

/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation). But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 *
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
 */
static void verify_buffer_safe(struct packed_ref_cache *packed_refs)
{
	const char *buf = packed_refs->buf + packed_refs->header_len;
	const char *eof = packed_refs->eof;
	const char *last_line;

	if (buf == eof)
		return;

	last_line = find_start_of_record(buf, eof - 1);
	if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
		die_invalid_line(packed_refs->refs->path,
				 last_line, eof - last_line);
}
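
/*
 * An example of the guarantee (illustrative): if the file was
 * truncated mid-record, e.g. its last bytes are
 *
 *	"d1b12dc8be7b46459b70bbcc38b8d33d8b9dbf52 refs/he"
 *
 * with no trailing LF, the check above dies immediately instead of
 * letting the record parser run off the end of the mapped buffer.
 */
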
/*
 * Depending on `mmap_strategy`, either mmap or read the contents of
 * the `packed-refs` file into the `packed_refs` instance. Return 1 if
 * the file existed and was read, or 0 if the file was absent. Die on
 * errors.
 */
static int load_contents(struct packed_ref_cache *packed_refs)
{
	int fd;
	struct stat st;
	size_t size;
	ssize_t bytes_read;

	fd = open(packed_refs->refs->path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty,
			 * which is its state when initialized with
			 * zeros.
			 */
			return 0;
		} else {
			die_errno("couldn't read %s", packed_refs->refs->path);
		}
	}

	stat_validity_update(&packed_refs->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", packed_refs->refs->path);
	size = xsize_t(st.st_size);

	switch (mmap_strategy) {
	case MMAP_NONE:
		packed_refs->buf = xmalloc(size);
		bytes_read = read_in_full(fd, packed_refs->buf, size);
		if (bytes_read < 0 || bytes_read != size)
			die_errno("couldn't read %s", packed_refs->refs->path);
		packed_refs->eof = packed_refs->buf + size;
		packed_refs->mmapped = 0;
		break;
	case MMAP_TEMPORARY:
	case MMAP_OK:
		packed_refs->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		packed_refs->eof = packed_refs->buf + size;
		packed_refs->mmapped = 1;
		break;
	}
	close(fd);

	return 1;
}

/*
 * Read from the `packed-refs` file into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * Neither `peeled` nor `fully-peeled`:
 *
 *	Probably no references are peeled. But if the file contains a
 *	peeled value for a reference, we will use it.
 *
 * `peeled`:
 *
 *	References under "refs/tags/", if they *can* be peeled, *are*
 *	peeled in this file. References outside of "refs/tags/" are
 *	probably not peeled even if they could have been, but if we
 *	find a peeled value for such a reference we will use it.
 *
 * `fully-peeled`:
 *
 *	All references in the file that can be peeled are peeled.
 *	Inversely (and this is more important), any references in the
 *	file for which no peeled value is recorded are not peelable.
 *	This trait should typically be written alongside "peeled" for
 *	compatibility with older clients, but we do not require it
 *	(i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 * `sorted`:
 *
 *	The references in this file are known to be sorted by refname.
 */
static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_dir *dir;
	struct ref_iterator *iter;
	int sorted = 0;
	int ok;

	packed_refs->refs = refs;
	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;
	packed_refs->peeled = PEELED_NONE;

	if (!load_contents(packed_refs))
		return packed_refs;

	/* If the file has a header line, process it: */
	if (packed_refs->buf < packed_refs->eof && *packed_refs->buf == '#') {
		struct strbuf tmp = STRBUF_INIT;
		char *p;
		const char *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(packed_refs->buf, '\n',
			     packed_refs->eof - packed_refs->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      packed_refs->buf,
					      packed_refs->eof - packed_refs->buf);

		strbuf_add(&tmp, packed_refs->buf, eol - packed_refs->buf);

		if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 packed_refs->buf,
					 packed_refs->eof - packed_refs->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			packed_refs->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			packed_refs->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		packed_refs->header_len = eol + 1 - packed_refs->buf;

		string_list_clear(&traits, 0);
		strbuf_release(&tmp);
	}

	verify_buffer_safe(packed_refs);

	if (!sorted) {
		sort_packed_refs(packed_refs);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(packed_refs);
	}

	if (mmap_strategy != MMAP_OK && packed_refs->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = packed_refs->eof -
			(packed_refs->buf + packed_refs->header_len);
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, packed_refs->buf + packed_refs->header_len, size);
		release_packed_ref_buffer(packed_refs);
		packed_refs->buf = buf_copy;
		packed_refs->eof = buf_copy + size;
	}

	dir = get_ref_dir(packed_refs->cache->root);
	iter = mmapped_ref_iterator_begin(
			packed_refs,
			packed_refs->buf + packed_refs->header_len,
			packed_refs->eof);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct ref_entry *entry =
			create_ref_entry(iter->refname, iter->oid, iter->flags);

		if ((iter->flags & REF_KNOWS_PEELED))
			ref_iterator_peel(iter, &entry->u.value.peeled);
		add_ref_entry(dir, entry);
	}

	if (ok != ITER_DONE)
		die("error reading packed-refs file %s", refs->path);

	return packed_refs;
}
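
/*
 * For reference, a complete file accepted by the reader above might
 * look like this (values invented; note the trailing space after
 * "sorted", see PACKED_REFS_HEADER below):
 *
 *	# pack-refs with: peeled fully-peeled sorted 
 *	d1b12dc8be7b46459b70bbcc38b8d33d8b9dbf52 refs/heads/master
 *	5f6d4a4e8727e2c7d3d26e5b2a5b7f16b1b1f2e3 refs/tags/v1.0
 *	^8d7f25e51f3ae4fa07b3b8c0b8af979b9e935b88
 */
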
/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs);

	return refs->cache;
}

static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}

static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}

/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}

static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

	struct ref_entry *entry;

	*type = 0;

	entry = get_packed_ref(refs, refname);
	if (!entry) {
		errno = ENOENT;
		return -1;
	}

	hashcpy(sha1, entry->u.value.oid.hash);
	*type = REF_ISPACKED;
	return 0;
}

static int packed_peel_ref(struct ref_store *ref_store,
			   const char *refname, unsigned char *sha1)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
				"peel_ref");
	struct ref_entry *r = get_packed_ref(refs, refname);

	if (!r || peel_entry(r, 0))
		return -1;

	hashcpy(sha1, r->u.value.peeled.hash);
	return 0;
}

struct packed_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *cache;
	struct ref_iterator *iter0;
	unsigned int flags;
};

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */

	iter->cache = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(iter->cache);
	iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

	iter->flags = flags;

	return ref_iterator;
}
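
/*
 * A usage sketch (hypothetical caller, using the generic iterator API
 * from refs-internal.h):
 *
 *	struct ref_iterator *iter =
 *		packed_ref_iterator_begin(ref_store, "refs/tags/", 0);
 *	int ok;
 *
 *	while ((ok = ref_iterator_advance(iter)) == ITER_OK)
 *		printf("%s %s\n", oid_to_hex(iter->oid), iter->refname);
 *
 *	if (ok != ITER_DONE)
 *		die("error iterating over packed refs");
 */
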
/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}
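
/*
 * Sketch of the locking protocol these functions implement
 * (hypothetical caller):
 *
 *	struct strbuf err = STRBUF_INIT;
 *
 *	if (packed_refs_lock(ref_store, 0, &err))
 *		die("%s", err.buf);
 *	... read and/or rewrite the packed-refs file ...
 *	packed_refs_unlock(ref_store);
 *
 * Only "packed-refs.lock" is held; new contents go to a separate
 * tempfile (see write_with_updates() below) that is renamed into
 * place at commit time.
 */
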
/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled sorted \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

/*
 * Write the packed refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`. `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, rollback the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packed-refs file must be locked before calling this function
 * and will remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid->hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,
						     peeled.hash);

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to write packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (close_tempfile(&refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(&refs->tempfile),
			    strerror(errno));
		strbuf_release(&sb);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(&refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}
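
/*
 * A worked example of the merge loop above (illustrative, refnames
 * only): suppose the old file contains {A, C} and the sorted updates
 * are {B: create, C: delete}. The walk emits A (cmp < 0: pass the old
 * record through), then B (cmp > 0 with REF_HAVE_NEW: write the new
 * value), then consumes C from both lists (cmp == 0; the deletion
 * writes nothing), leaving {A, B} in the tempfile.
 */
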
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	struct string_list updates;
};

static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(&refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}

static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	clear_packed_ref_cache(refs);

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
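
/*
 * Putting the transaction pieces together (a hedged sketch of a
 * hypothetical caller; ref_store_transaction_begin() and the
 * ref_transaction_*() calls are the generic API from refs.h, used the
 * same way as in packed_delete_refs() below):
 *
 *	struct strbuf err = STRBUF_INIT;
 *	struct ref_transaction *t =
 *		ref_store_transaction_begin(ref_store, &err);
 *
 *	if (!t ||
 *	    ref_transaction_delete(t, "refs/heads/topic", NULL, 0,
 *				   "remove topic", &err) ||
 *	    ref_transaction_commit(t, &err))
 *		die("%s", err.buf);
 *	ref_transaction_free(t);
 *
 * The prepare/finish/abort functions above are invoked for this
 * backend by the generic commit machinery.
 */
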
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};