#include "../cache.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

struct packed_ref_cache {
	struct ref_cache *cache;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}

/*
 * Decrease the reference count of *packed_refs. If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		free_ref_cache(packed_refs->cache);
		stat_validity_clear(&packed_refs->validity);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}
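
/*
 * Illustration only (not part of the original file; `cache` is a
 * hypothetical caller's variable): the pair above is used to pin the
 * cache for as long as it is in use:
 *
 *	acquire_packed_ref_cache(cache);
 *	... read entries via cache->cache ...
 *	release_packed_ref_cache(cache);  -- may free cache right here
 *
 * Every acquire_packed_ref_cache() call must be balanced by exactly
 * one release_packed_ref_cache() call; the final release frees the
 * object.
 */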

/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;
};

struct packed_ref_store *packed_ref_store_create(
	const char *path, unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));

	refs->store_flags = store_flags;
	refs->path = xstrdup(path);
	return refs;
}
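
/*
 * Usage sketch (hypothetical caller, not part of this file; the path
 * shown is an assumed example):
 *
 *	struct packed_ref_store *refs =
 *		packed_ref_store_create(".git/packed-refs", REF_STORE_MAIN);
 *
 * The returned object is never freed: its embedded lock_file must
 * remain valid for the life of the process (see the comment on `lock`
 * above).
 */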

/*
 * Die if refs is not the main ref store. `caller` is used in any
 * necessary error messages.
 */
static void packed_assert_main_repository(struct packed_ref_store *refs,
					  const char *caller)
{
	if (refs->store_flags & REF_STORE_MAIN)
		return;

	die("BUG: operation %s only allowed for main ref store", caller);
}

static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		if (is_lock_file_locked(&refs->lock))
			die("BUG: packed-ref cache cleared while locked");
		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42

/*
 * Parse one line from a packed-refs file. Write the object ID to
 * *oid. Return a pointer to the refname within the line
 * (null-terminated), or NULL if there was a problem.
 */
static const char *parse_ref_line(struct strbuf *line, struct object_id *oid)
{
	const char *ref;

	if (parse_oid_hex(line->buf, oid, &ref) < 0)
		return NULL;
	if (!isspace(*ref++))
		return NULL;

	if (isspace(*ref))
		return NULL;

	if (line->buf[line->len - 1] != '\n')
		return NULL;
	line->buf[--line->len] = 0;

	return ref;
}
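
/*
 * For illustration (fabricated object names, not from a real
 * repository): a packed-refs entry is a 40-hex-digit SHA-1, one
 * space, and the refname; an optional peeled line may follow, "^"
 * plus 40 hex digits, hence PEELED_LINE_LENGTH == 1 + 40 + 1 == 42,
 * counting the newline:
 *
 *	1234567890123456789012345678901234567890 refs/tags/v1.0
 *	^abcdefabcdefabcdefabcdefabcdefabcdefabcd
 */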

/*
 * Read from `packed_refs_file` into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * No traits:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 * peeled:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we
 *      find a peeled value for such a reference we will use it.
 *
 * fully-peeled:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded are not peelable.
 *      This trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 */
static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
{
	FILE *f;
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_entry *last = NULL;
	struct strbuf line = STRBUF_INIT;
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
	struct ref_dir *dir;

	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;

	f = fopen(packed_refs_file, "r");
	if (!f) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty.
			 */
			return packed_refs;
		} else {
			die_errno("couldn't read %s", packed_refs_file);
		}
	}

	stat_validity_update(&packed_refs->validity, fileno(f));

	dir = get_ref_dir(packed_refs->cache->root);
	while (strbuf_getwholeline(&line, f, '\n') != EOF) {
		struct object_id oid;
		const char *refname;
		const char *traits;

		if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
			if (strstr(traits, " fully-peeled "))
				peeled = PEELED_FULLY;
			else if (strstr(traits, " peeled "))
				peeled = PEELED_TAGS;
			/* perhaps other traits later as well */
			continue;
		}

		refname = parse_ref_line(&line, &oid);
		if (refname) {
			int flag = REF_ISPACKED;

			if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
				if (!refname_is_safe(refname))
					die("packed refname is dangerous: %s", refname);
				oidclr(&oid);
				flag |= REF_BAD_NAME | REF_ISBROKEN;
			}
			last = create_ref_entry(refname, &oid, flag);
			if (peeled == PEELED_FULLY ||
			    (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
				last->flag |= REF_KNOWS_PEELED;
			add_ref_entry(dir, last);
			continue;
		}
		if (last &&
		    line.buf[0] == '^' &&
		    line.len == PEELED_LINE_LENGTH &&
		    line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
		    !get_oid_hex(line.buf + 1, &oid)) {
			oidcpy(&last->u.value.peeled, &oid);
			/*
			 * Regardless of what the file header said,
			 * we definitely know the value of *this*
			 * reference:
			 */
			last->flag |= REF_KNOWS_PEELED;
		}
	}

	fclose(f);
	strbuf_release(&line);

	return packed_refs;
}
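
/*
 * For illustration (fabricated object names): a packed-refs file
 * written with both traits might look like this (note that the header
 * line carries a trailing space, so that each trait is space-delimited
 * for the strstr() checks above):
 *
 *	# pack-refs with: peeled fully-peeled
 *	1234567890123456789012345678901234567890 refs/heads/master
 *	aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd refs/tags/v1.0
 *	^abcdefabcdefabcdefabcdefabcdefabcdefabcd
 *
 * Here refs/tags/v1.0 points at an annotated tag whose peeled value is
 * recorded on the "^" line; refs/heads/master has no "^" line, so
 * under "fully-peeled" it is known not to be peelable.
 */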

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs->path);

	return refs->cache;
}

static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}

static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}

/*
 * Add or overwrite a reference in the in-memory packed reference
 * cache. This may only be called while the packed-refs file is locked
 * (see lock_packed_refs()). To actually write the packed-refs file,
 * call commit_packed_refs().
 */
void add_packed_ref(struct packed_ref_store *refs,
		    const char *refname, const struct object_id *oid)
{
	struct ref_dir *packed_refs;
	struct ref_entry *packed_entry;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed refs not locked");

	if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
		die("Reference has invalid format: '%s'", refname);

	packed_refs = get_packed_refs(refs);
	packed_entry = find_ref_entry(packed_refs, refname);
	if (packed_entry) {
		/* Overwrite the existing entry: */
		oidcpy(&packed_entry->u.value.oid, oid);
		packed_entry->flag = REF_ISPACKED;
		oidclr(&packed_entry->u.value.peeled);
	} else {
		packed_entry = create_ref_entry(refname, oid, REF_ISPACKED);
		add_ref_entry(packed_refs, packed_entry);
	}
}
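
/*
 * Sketch of the update protocol (hypothetical caller; the refname and
 * oid are assumed examples, and error handling is simplified):
 *
 *	if (lock_packed_refs(refs, 0))
 *		die("could not lock packed-refs");
 *	add_packed_ref(refs, "refs/heads/topic", &oid);
 *	if (commit_packed_refs(refs))
 *		die("could not write packed-refs");
 *
 * lock_packed_refs() and commit_packed_refs() are defined later in
 * this file.
 */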

/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}

int packed_read_raw_ref(struct packed_ref_store *refs,
			const char *refname, unsigned char *sha1,
			struct strbuf *referent, unsigned int *type)
{
	struct ref_entry *entry;

	*type = 0;

	entry = get_packed_ref(refs, refname);
	if (!entry) {
		errno = ENOENT;
		return -1;
	}

	hashcpy(sha1, entry->u.value.oid.hash);
	*type = REF_ISPACKED;
	return 0;
}

int packed_peel_ref(struct packed_ref_store *refs,
		    const char *refname, unsigned char *sha1)
{
	struct ref_entry *r = get_packed_ref(refs, refname);

	if (!r || peel_entry(r, 0))
		return -1;

	hashcpy(sha1, r->u.value.peeled.hash);
	return 0;
}
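
/*
 * Illustration only (the refname is an assumed example): for a packed
 * annotated tag, packed_peel_ref() yields the object the tag points
 * at:
 *
 *	unsigned char peeled[20];
 *
 *	if (!packed_peel_ref(refs, "refs/tags/v1.0", peeled))
 *		... peeled now holds the SHA-1 of the tagged object ...
 *
 * The call fails for refs that are not packed or cannot be peeled.
 */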

struct packed_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *cache;
	struct ref_iterator *iter0;
	unsigned int flags;
};

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

struct ref_iterator *packed_ref_iterator_begin(
		struct packed_ref_store *refs,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */

	iter->cache = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(iter->cache);
	iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

	iter->flags = flags;

	return ref_iterator;
}
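
/*
 * Usage sketch (hypothetical caller; the prefix is an assumed
 * example): iterate over all packed refs under "refs/tags/":
 *
 *	struct ref_iterator *iter =
 *		packed_ref_iterator_begin(refs, "refs/tags/", 0);
 *	int ok;
 *
 *	while ((ok = ref_iterator_advance(iter)) == ITER_OK)
 *		... use iter->refname and iter->oid ...
 *
 * When advance() returns ITER_DONE (or the caller aborts early), the
 * iterator frees itself and drops its reference on the cache.
 */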

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value.
 */
static void write_packed_entry(FILE *fh, const char *refname,
			       const unsigned char *sha1,
			       const unsigned char *peeled)
{
	fprintf_or_die(fh, "%s %s\n", sha1_to_hex(sha1), refname);
	if (peeled)
		fprintf_or_die(fh, "^%s\n", sha1_to_hex(peeled));
}

int lock_packed_refs(struct packed_ref_store *refs, int flags)
{
	static int timeout_configured = 0;
	static int timeout_value = 1000;
	struct packed_ref_cache *packed_ref_cache;

	packed_assert_main_repository(refs, "lock_packed_refs");

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0)
		return -1;

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	packed_ref_cache = get_packed_ref_cache(refs);
	/* Increment the reference count to prevent it from being freed: */
	acquire_packed_ref_cache(packed_ref_cache);
	return 0;
}

/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later. The trailing space is required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled \n";

/*
 * Write the current version of the packed refs cache from memory to
 * disk. The packed-refs file must already be locked for writing (see
 * lock_packed_refs()). Return zero on success. On errors, set errno
 * and return a nonzero value.
 */
int commit_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_ref_cache =
		get_packed_ref_cache(refs);
	int ok, error = 0;
	int save_errno = 0;
	FILE *out;
	struct ref_iterator *iter;

	packed_assert_main_repository(refs, "commit_packed_refs");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed-refs not locked");

	out = fdopen_lock_file(&refs->lock, "w");
	if (!out)
		die_errno("unable to fdopen packed-refs descriptor");

	fprintf_or_die(out, "%s", PACKED_REFS_HEADER);

	iter = cache_ref_iterator_begin(packed_ref_cache->cache, NULL, 0);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct object_id peeled;
		int peel_error = ref_iterator_peel(iter, &peeled);

		write_packed_entry(out, iter->refname, iter->oid->hash,
				   peel_error ? NULL : peeled.hash);
	}

	if (ok != ITER_DONE)
		die("error while iterating over references");

	if (commit_lock_file(&refs->lock)) {
		save_errno = errno;
		error = -1;
	}
	release_packed_ref_cache(packed_ref_cache);
	errno = save_errno;
	return error;
}

/*
 * Rollback the lockfile for the packed-refs file, and discard the
 * in-memory packed reference cache. (The packed-refs file will be
 * read anew if it is needed again after this function is called.)
 */
static void rollback_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_ref_cache = get_packed_ref_cache(refs);

	packed_assert_main_repository(refs, "rollback_packed_refs");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed-refs not locked");
	rollback_lock_file(&refs->lock);
	release_packed_ref_cache(packed_ref_cache);
	clear_packed_ref_cache(refs);
}

/*
 * Rewrite the packed-refs file, omitting any refs listed in
 * 'refnames'. On error, leave packed-refs unchanged, write an error
 * message to 'err', and return a nonzero value.
 *
 * The refs in 'refnames' needn't be sorted. `err` must not be NULL.
 */
int repack_without_refs(struct packed_ref_store *refs,
			struct string_list *refnames, struct strbuf *err)
{
	struct ref_dir *packed;
	struct string_list_item *refname;
	int ret, needs_repacking = 0, removed = 0;

	packed_assert_main_repository(refs, "repack_without_refs");
	assert(err);

	/* Look for a packed ref */
	for_each_string_list_item(refname, refnames) {
		if (get_packed_ref(refs, refname->string)) {
			needs_repacking = 1;
			break;
		}
	}

	/* Avoid locking if we have nothing to do */
	if (!needs_repacking)
		return 0; /* no refname exists in packed refs */

	if (lock_packed_refs(refs, 0)) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}
	packed = get_packed_refs(refs);

	/* Remove refnames from the cache */
	for_each_string_list_item(refname, refnames)
		if (remove_entry_from_dir(packed, refname->string) != -1)
			removed = 1;
	if (!removed) {
		/*
		 * All packed entries disappeared while we were
		 * acquiring the lock.
		 */
		rollback_packed_refs(refs);
		return 0;
	}

	/* Write what remains */
	ret = commit_packed_refs(refs);
	if (ret)
		strbuf_addf(err, "unable to overwrite old ref-pack file: %s",
			    strerror(errno));
	return ret;
}
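
/*
 * Usage sketch (hypothetical caller; the refnames are assumed
 * examples): delete two refs from packed-refs in a single rewrite:
 *
 *	struct string_list refnames = STRING_LIST_INIT_NODUP;
 *	struct strbuf err = STRBUF_INIT;
 *
 *	string_list_append(&refnames, "refs/heads/topic");
 *	string_list_append(&refnames, "refs/tags/old");
 *	if (repack_without_refs(refs, &refnames, &err))
 *		die("%s", err.buf);
 *
 * If none of the refnames is packed, the function returns early
 * without even taking the lock.
 */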