files-backend: cheapen refname_available check when locking refs
[git/git.git] refs/packed-backend.c
#include "../cache.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

struct packed_ref_cache {
	struct ref_cache *cache;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from packed_ref_store::cache if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}

/*
 * Decrease the reference count of *packed_refs. If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		free_ref_cache(packed_refs->cache);
		stat_validity_clear(&packed_refs->validity);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}

/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	struct ref_store base;

	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};

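/*
 * Create a packed_ref_store for the "packed-refs" file at `path`.
 * `store_flags` determines which operations (e.g. REF_STORE_READ,
 * REF_STORE_WRITE) may later be performed via this store; see
 * packed_downcast().
 */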
struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;

	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;

	refs->path = xstrdup(path);
	return ref_store;
}

/*
 * Die if refs is not the main ref store. caller is used in any
 * necessary error messages.
 */
static void packed_assert_main_repository(struct packed_ref_store *refs,
					  const char *caller)
{
	if (refs->store_flags & REF_STORE_MAIN)
		return;

	die("BUG: operation %s only allowed for main ref store", caller);
}

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: unallowed operation (%s), requires %x, has %x\n",
		    caller, required_flags, refs->store_flags);

	return refs;
}

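/*
 * Forget the cached contents of the packed-refs file, if any,
 * releasing the cache's reference count.
 */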
static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

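/*
 * For reference, a packed-refs file looks roughly like this (the
 * object names below are made up purely for illustration):
 *
 *     # pack-refs with: peeled fully-peeled
 *     5f95c9f850b19b368c43ae399cc831b17a26a5ac refs/heads/master
 *     6e6b07c2b6f3e0ad9b74eb695d7c696dfeb4a66d refs/tags/v1.0
 *     ^aa29d8dbb903a4d8d4bd5d04dc4151cf83cfb140
 *
 * A line starting with "^" records the peeled value of the reference
 * on the preceding line. See read_packed_refs() below for how the
 * header traits are interpreted.
 */
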
/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42

/*
 * Parse one line from a packed-refs file. Write the object ID to
 * `oid`. Return a pointer to the refname within the line
 * (null-terminated), or NULL if there was a problem.
 */
static const char *parse_ref_line(struct strbuf *line, struct object_id *oid)
{
	const char *ref;

	if (parse_oid_hex(line->buf, oid, &ref) < 0)
		return NULL;
	if (!isspace(*ref++))
		return NULL;

	if (isspace(*ref))
		return NULL;

	if (line->buf[line->len - 1] != '\n')
		return NULL;
	line->buf[--line->len] = 0;

	return ref;
}

/*
 * Read from `packed_refs_file` into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 *   No traits:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 *   peeled:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 *   fully-peeled:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded are not peelable. This
 *      trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 */
static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
{
	FILE *f;
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_entry *last = NULL;
	struct strbuf line = STRBUF_INIT;
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
	struct ref_dir *dir;

	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;

	f = fopen(packed_refs_file, "r");
	if (!f) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty.
			 */
			return packed_refs;
		} else {
			die_errno("couldn't read %s", packed_refs_file);
		}
	}

	stat_validity_update(&packed_refs->validity, fileno(f));

	dir = get_ref_dir(packed_refs->cache->root);
	while (strbuf_getwholeline(&line, f, '\n') != EOF) {
		struct object_id oid;
		const char *refname;
		const char *traits;

		if (!line.len || line.buf[line.len - 1] != '\n')
			die("unterminated line in %s: %s", packed_refs_file, line.buf);

		if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
			if (strstr(traits, " fully-peeled "))
				peeled = PEELED_FULLY;
			else if (strstr(traits, " peeled "))
				peeled = PEELED_TAGS;
			/* perhaps other traits later as well */
			continue;
		}

		refname = parse_ref_line(&line, &oid);
		if (refname) {
			int flag = REF_ISPACKED;

			if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
				if (!refname_is_safe(refname))
					die("packed refname is dangerous: %s", refname);
				oidclr(&oid);
				flag |= REF_BAD_NAME | REF_ISBROKEN;
			}
			last = create_ref_entry(refname, &oid, flag);
			if (peeled == PEELED_FULLY ||
			    (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
				last->flag |= REF_KNOWS_PEELED;
			add_ref_entry(dir, last);
		} else if (last &&
			   line.buf[0] == '^' &&
			   line.len == PEELED_LINE_LENGTH &&
			   line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
			   !get_oid_hex(line.buf + 1, &oid)) {
			oidcpy(&last->u.value.peeled, &oid);
			/*
			 * Regardless of what the file header said,
			 * we definitely know the value of *this*
			 * reference:
			 */
			last->flag |= REF_KNOWS_PEELED;
		} else {
			strbuf_setlen(&line, line.len - 1);
			die("unexpected line in %s: %s", packed_refs_file, line.buf);
		}
	}

	fclose(f);
	strbuf_release(&line);

	return packed_refs;
}

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs->path);

	return refs->cache;
}

static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}

static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}

/*
 * Add or overwrite a reference in the in-memory packed reference
 * cache. This may only be called while the packed-refs file is locked
 * (see packed_refs_lock()). To actually write the packed-refs file,
 * call commit_packed_refs().
 */
void add_packed_ref(struct ref_store *ref_store,
		    const char *refname, const struct object_id *oid)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE,
				"add_packed_ref");
	struct ref_dir *packed_refs;
	struct ref_entry *packed_entry;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed refs not locked");

	if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
		die("Reference has invalid format: '%s'", refname);

	packed_refs = get_packed_refs(refs);
	packed_entry = find_ref_entry(packed_refs, refname);
	if (packed_entry) {
		/* Overwrite the existing entry: */
		oidcpy(&packed_entry->u.value.oid, oid);
		packed_entry->flag = REF_ISPACKED;
		oidclr(&packed_entry->u.value.peeled);
	} else {
		packed_entry = create_ref_entry(refname, oid, REF_ISPACKED);
		add_ref_entry(packed_refs, packed_entry);
	}
}

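/*
 * Updating the packed-refs file thus typically looks like this (an
 * illustrative sketch from the caller's side; error handling is
 * trimmed, and the refname and object ID are made up):
 *
 *     struct strbuf err = STRBUF_INIT;
 *
 *     if (packed_refs_lock(ref_store, 0, &err))
 *             die("%s", err.buf);
 *     add_packed_ref(ref_store, "refs/heads/topic", &oid);
 *     if (commit_packed_refs(ref_store, &err))
 *             die("%s", err.buf);
 *     packed_refs_unlock(ref_store);
 */
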
/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}

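/*
 * Look up `refname` among the packed references. On success, store
 * its value in `sha1`, set `*type` to REF_ISPACKED, and return 0.
 * If it is not found, set errno to ENOENT and return -1.
 */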
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

	struct ref_entry *entry;

	*type = 0;

	entry = get_packed_ref(refs, refname);
	if (!entry) {
		errno = ENOENT;
		return -1;
	}

	hashcpy(sha1, entry->u.value.oid.hash);
	*type = REF_ISPACKED;
	return 0;
}

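/*
 * Write the peeled value of the packed reference `refname` to `sha1`.
 * Return -1 if the reference does not exist or cannot be peeled.
 */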
static int packed_peel_ref(struct ref_store *ref_store,
			   const char *refname, unsigned char *sha1)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
				"peel_ref");
	struct ref_entry *r = get_packed_ref(refs, refname);

	if (!r || peel_entry(r, 0))
		return -1;

	hashcpy(sha1, r->u.value.peeled.hash);
	return 0;
}

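/*
 * An iterator over the references in a packed_ref_cache. It wraps a
 * cache iterator (`iter0`) and filters its output according to
 * `flags`; it also holds a reference count on `cache` so that the
 * cache stays alive for the lifetime of the iteration.
 */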
struct packed_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *cache;
	struct ref_iterator *iter0;
	unsigned int flags;
};

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */

	iter->cache = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(iter->cache);
	iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

	iter->flags = flags;

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;
	struct packed_ref_cache *packed_ref_cache;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	packed_ref_cache = get_packed_ref_cache(refs);
	/* Increment the reference count to prevent it from being freed: */
	acquire_packed_ref_cache(packed_ref_cache);
	return 0;
}

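/*
 * Release the lock on the packed-refs file and drop the reference to
 * the cache that packed_refs_lock() acquired.
 */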
void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
	release_packed_ref_cache(refs->cache);
}

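/* Return true if the packed-refs file is currently locked. */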
int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later. The trailing space is required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled \n";

/*
 * Write the current version of the packed refs cache from memory to
 * disk. The packed-refs file must already be locked for writing (see
 * packed_refs_lock()). Return zero on success. On errors, write an
 * error message to `err` and return a nonzero value; the lock is
 * retained and the existing packed-refs file is left unchanged.
 */
int commit_packed_refs(struct ref_store *ref_store, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"commit_packed_refs");
	struct packed_ref_cache *packed_ref_cache =
		get_packed_ref_cache(refs);
	int ok;
	int ret = -1;
	struct strbuf sb = STRBUF_INIT;
	FILE *out;
	struct ref_iterator *iter;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: commit_packed_refs() called when unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		goto out;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0) {
		strbuf_addf(err, "error writing to %s: %s",
			    get_tempfile_path(&refs->tempfile), strerror(errno));
		goto error;
	}

	iter = cache_ref_iterator_begin(packed_ref_cache->cache, NULL, 0);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct object_id peeled;
		int peel_error = ref_iterator_peel(iter, &peeled);

		if (write_packed_entry(out, iter->refname, iter->oid->hash,
				       peel_error ? NULL : peeled.hash)) {
			strbuf_addf(err, "error writing to %s: %s",
				    get_tempfile_path(&refs->tempfile),
				    strerror(errno));
			ref_iterator_abort(iter);
			goto error;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to rewrite packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto out;
	}

	ret = 0;
	goto out;

error:
	delete_tempfile(&refs->tempfile);

out:
	free(packed_refs_path);
	return ret;
}

/*
 * Rewrite the packed-refs file, omitting any refs listed in
 * 'refnames'. On error, leave packed-refs unchanged, write an error
 * message to 'err', and return a nonzero value. The packed refs lock
 * must be held when calling this function; it will still be held when
 * the function returns.
 *
 * The refs in 'refnames' needn't be sorted. `err` must not be NULL.
 */
int repack_without_refs(struct ref_store *ref_store,
			struct string_list *refnames, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"repack_without_refs");
	struct ref_dir *packed;
	struct string_list_item *refname;
	int needs_repacking = 0, removed = 0;

	packed_assert_main_repository(refs, "repack_without_refs");
	assert(err);

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: repack_without_refs called without holding lock");

	/* Look for a packed ref */
	for_each_string_list_item(refname, refnames) {
		if (get_packed_ref(refs, refname->string)) {
			needs_repacking = 1;
			break;
		}
	}

	/* Avoid rewriting the packed-refs file if we have nothing to do */
	if (!needs_repacking)
		return 0; /* no refname exists in packed refs */

	packed = get_packed_refs(refs);

	/* Remove refnames from the cache */
	for_each_string_list_item(refname, refnames)
		if (remove_entry_from_dir(packed, refname->string) != -1)
			removed = 1;
	if (!removed) {
		/*
		 * All packed entries disappeared while we were
		 * acquiring the lock.
		 */
		clear_packed_ref_cache(refs);
		return 0;
	}

	/* Write what remains */
	return commit_packed_refs(&refs->base, err);
}

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

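/*
 * The packed backend is not yet a full-fledged ref_store (see the
 * comment on `struct packed_ref_store` above), so the generic
 * transaction methods below are still unimplemented stubs; for now,
 * callers update packed refs via packed_refs_lock(), add_packed_ref(),
 * and commit_packed_refs() instead.
 */
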
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	die("BUG: not implemented yet");
}

static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	die("BUG: not implemented yet");
}

static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	die("BUG: not implemented yet");
}

static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	die("BUG: not implemented yet");
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

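/*
 * Packed references have no associated reflogs, so the reflog methods
 * below either report that nothing exists or refuse to create one.
 */
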
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};