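/*
 * The "packed" reference backend: instead of storing each reference
 * in its own loose file, it keeps them all in a single "packed-refs"
 * file. The file's contents are held in an in-memory,
 * reference-counted cache, and rewrites happen atomically via a
 * lockfile, a tempfile, and a rename into place.
 */
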
#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

struct packed_ref_store;

struct packed_ref_cache {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * cache is associated:
	 */
	struct packed_ref_store *refs;

	struct ref_cache *cache;

	/*
	 * What is the peeled state of this cache? (This is usually
	 * determined from the header of the "packed-refs" file.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}

/*
 * Decrease the reference count of *packed_refs. If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		free_ref_cache(packed_refs->cache);
		stat_validity_clear(&packed_refs->validity);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}

/*
 * A container for `packed-refs`-related data. It is a `ref_store`;
 * its `base` member must be the first field.
 */
struct packed_ref_store {
	struct ref_store base;

	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};

struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;

	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;

	refs->path = xstrdup(path);
	return ref_store;
}

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: disallowed operation (%s), requires %x, has %x\n",
		    caller, required_flags, refs->store_flags);

	return refs;
}

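/*
 * Forget the store's cached view of the "packed-refs" file. The
 * underlying packed_ref_cache is only freed once its last referrer
 * (for example, an iterator that is still in progress) releases it.
 */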
static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

static NORETURN void die_unterminated_line(const char *path,
					   const char *p, size_t len)
{
	if (len < 80)
		die("unterminated line in %s: %.*s", path, (int)len, p);
	else
		die("unterminated line in %s: %.75s...", path, p);
}

static NORETURN void die_invalid_line(const char *path,
				      const char *p, size_t len)
{
	const char *eol = memchr(p, '\n', len);

	if (!eol)
		die_unterminated_line(path, p, len);
	else if (eol - p < 80)
		die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
	else
		die("unexpected line in %s: %.75s...", path, p);
}

/*
 * An iterator over a packed-refs file that is currently mmapped.
 */
struct mmapped_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	struct object_id oid, peeled;

	struct strbuf refname_buf;
};

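/*
 * Parse one record from the mmapped region. Each record has the form
 * "<oid> SP <refname> LF", optionally followed by a peeled line,
 * "^<oid> LF", giving the object that the preceding (tag) reference
 * peels to.
 */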
static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ref_iterator_abort(ref_iterator);

	iter->base.flags = REF_ISPACKED;

	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}

static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator,
				     struct object_id *peeled)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;

	if ((iter->base.flags & REF_KNOWS_PEELED)) {
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ? -1 : 0;
	} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
		return -1;
	} else {
		return !!peel_object(iter->oid.hash, peeled->hash);
	}
}

static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;

	release_packed_ref_cache(iter->packed_refs);
	strbuf_release(&iter->refname_buf);
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}

static struct ref_iterator_vtable mmapped_ref_iterator_vtable = {
	mmapped_ref_iterator_advance,
	mmapped_ref_iterator_peel,
	mmapped_ref_iterator_abort
};

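/*
 * Start iterating over the records in the region [pos, eof). The
 * iterator takes its own reference on `packed_refs`, so the cache
 * cannot be freed while the iteration is in progress; the caller
 * remains responsible for keeping the mmapped region itself valid.
 */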
struct ref_iterator *mmapped_ref_iterator_begin(
		const char *packed_refs_file,
		struct packed_ref_cache *packed_refs,
		const char *pos, const char *eof)
{
	struct mmapped_ref_iterator *iter = xcalloc(1, sizeof(*iter));
	struct ref_iterator *ref_iterator = &iter->base;

	base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 0);

	iter->packed_refs = packed_refs;
	acquire_packed_ref_cache(iter->packed_refs);
	iter->pos = pos;
	iter->eof = eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	return ref_iterator;
}

/*
 * Read from the `packed-refs` file into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * No traits:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 * peeled:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 * fully-peeled:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded are not peelable.
 *      This trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 */
static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	int fd;
	struct stat st;
	size_t size;
	char *buf;
	const char *pos, *eof;
	struct ref_dir *dir;
	struct ref_iterator *iter;
	int ok;

	packed_refs->refs = refs;
	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;
	packed_refs->peeled = PEELED_NONE;

	fd = open(refs->path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty.
			 */
			return packed_refs;
		} else {
			die_errno("couldn't read %s", refs->path);
		}
	}

	stat_validity_update(&packed_refs->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", refs->path);

	size = xsize_t(st.st_size);
	buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
	pos = buf;
	eof = buf + size;

	/* If the file has a header line, process it: */
	if (pos < eof && *pos == '#') {
		struct strbuf tmp = STRBUF_INIT;
		char *p;
		const char *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			die_unterminated_line(refs->path, pos, eof - pos);

		strbuf_add(&tmp, pos, eol - pos);

		if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path, pos, eof - pos);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			packed_refs->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			packed_refs->peeled = PEELED_TAGS;
		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		pos = eol + 1;

		string_list_clear(&traits, 0);
		strbuf_release(&tmp);
	}

	dir = get_ref_dir(packed_refs->cache->root);
	iter = mmapped_ref_iterator_begin(refs->path, packed_refs, pos, eof);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct ref_entry *entry =
			create_ref_entry(iter->refname, iter->oid, iter->flags);

		if ((iter->flags & REF_KNOWS_PEELED))
			ref_iterator_peel(iter, &entry->u.value.peeled);
		add_ref_entry(dir, entry);
	}

	if (ok != ITER_DONE)
		die("error reading packed-refs file %s", refs->path);

	if (munmap(buf, size))
		die_errno("error unmapping packed-refs file %s", refs->path);

	close(fd);

	return packed_refs;
}

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs);

	return refs->cache;
}

static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}

static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}

/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}

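/*
 * Look up `refname` in the packed-refs cache. References in a
 * packed-refs file can never be symbolic, so `referent` is left
 * untouched and the only possible type is REF_ISPACKED.
 */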
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

	struct ref_entry *entry;

	*type = 0;

	entry = get_packed_ref(refs, refname);
	if (!entry) {
		errno = ENOENT;
		return -1;
	}

	hashcpy(sha1, entry->u.value.oid.hash);
	*type = REF_ISPACKED;
	return 0;
}

static int packed_peel_ref(struct ref_store *ref_store,
			   const char *refname, unsigned char *sha1)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
				"peel_ref");
	struct ref_entry *r = get_packed_ref(refs, refname);

	if (!r || peel_entry(r, 0))
		return -1;

	hashcpy(sha1, r->u.value.peeled.hash);
	return 0;
}

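/*
 * An iterator over the packed-refs cache. It wraps a
 * cache_ref_iterator (`iter0`) and filters its output according to
 * `flags`: broken references are skipped unless explicitly included,
 * and non-per-worktree references are skipped if only per-worktree
 * ones were requested.
 */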
struct packed_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *cache;
	struct ref_iterator *iter0;
	unsigned int flags;
};

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

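/*
 * Begin iterating over references whose names start with `prefix`.
 * Iteration itself only needs read access, but unless broken refs
 * are included we must check that each ref resolves to an object,
 * which additionally requires access to the object database.
 */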
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */

	iter->cache = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(iter->cache);
	iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

	iter->flags = flags;

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

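/*
 * Take the lock on the "packed-refs" file, retrying for up to
 * core.packedrefstimeout milliseconds (1000 by default). On failure,
 * write a message to `err` and return -1.
 */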
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

/*
 * Write the packed refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`. `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, rollback the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packed-refs file must be locked before calling this function
 * and will remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid->hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,
						     peeled.hash);

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to write packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (close_tempfile(&refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(&refs->tempfile),
			    strerror(errno));
		strbuf_release(&sb);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(&refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}

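/*
 * Per-transaction state that the packed backend stores in
 * `transaction->backend_data` between prepare and finish/abort.
 */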
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	struct string_list updates;
};

static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(&refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}

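/*
 * Prepare the transaction: sort and validate the updates, take the
 * packed-refs lock unless the caller already holds it, and write the
 * new contents out to the tempfile. The actual commit happens later,
 * in packed_transaction_finish(), by renaming the tempfile into
 * place.
 */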
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	clear_packed_ref_cache(refs);
	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

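/*
 * The packed backend keeps no reflogs, so the reflog queries below
 * report "nothing there" and attempting to create a reflog is a bug
 * in the caller.
 */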
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

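/*
 * The ref-storage backend vtable, registered under the name "packed".
 */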
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};