diff --git a/.tsan-suppressions b/.tsan-suppressions
index 8c85014a0a..5ba86d6845 100644
--- a/.tsan-suppressions
+++ b/.tsan-suppressions
@@ -8,3 +8,9 @@
 # in practice it (hopefully!) doesn't matter.
 race:^want_color$
 race:^transfer_debug$
+
+# A boolean value, which tells whether the replace_map has been initialized or
+# not, is read racily while another thread may be updating it. As this variable
+# is written to only once, and it is OK if the value changes right after it is
+# read, this shouldn't be a problem.
+race:^lookup_replace_object$
diff --git a/object-store.h b/object-store.h
index 55ee639350..33739c9dee 100644
--- a/object-store.h
+++ b/object-store.h
@@ -125,6 +125,8 @@ struct raw_object_store {
 	 * (see git-replace(1)).
 	 */
 	struct oidmap *replace_map;
+	unsigned replace_map_initialized : 1;
+	pthread_mutex_t replace_mutex; /* protect object replace functions */
 
 	struct commit_graph *commit_graph;
 	unsigned commit_graph_attempted : 1; /* if loading has been attempted */
diff --git a/object.c b/object.c
index 142ef69399..b4e1d3db3c 100644
--- a/object.c
+++ b/object.c
@@ -480,6 +480,7 @@ struct raw_object_store *raw_object_store_new(void)
 	memset(o, 0, sizeof(*o));
 	INIT_LIST_HEAD(&o->packed_git_mru);
 	hashmap_init(&o->pack_map, pack_map_entry_cmp, NULL, 0);
+	pthread_mutex_init(&o->replace_mutex, NULL);
 	return o;
 }
 
@@ -507,6 +508,7 @@ void raw_object_store_clear(struct raw_object_store *o)
 
 	oidmap_free(o->replace_map, 1);
 	FREE_AND_NULL(o->replace_map);
+	pthread_mutex_destroy(&o->replace_mutex);
 
 	free_commit_graph(o->commit_graph);
 	o->commit_graph = NULL;
diff --git a/replace-object.c b/replace-object.c
index e295e87943..7bd9aba6ee 100644
--- a/replace-object.c
+++ b/replace-object.c
@@ -34,14 +34,23 @@ static int register_replace_ref(struct repository *r,
 
 void prepare_replace_object(struct repository *r)
 {
-	if (r->objects->replace_map)
+	if (r->objects->replace_map_initialized)
 		return;
 
+	pthread_mutex_lock(&r->objects->replace_mutex);
+	if (r->objects->replace_map_initialized) {
+		pthread_mutex_unlock(&r->objects->replace_mutex);
+		return;
+	}
+
 	r->objects->replace_map =
 		xmalloc(sizeof(*r->objects->replace_map));
 	oidmap_init(r->objects->replace_map, 0);
 
 	for_each_replace_ref(r, register_replace_ref, NULL);
+	r->objects->replace_map_initialized = 1;
+
+	pthread_mutex_unlock(&r->objects->replace_mutex);
 }
 
 /* We allow "recursive" replacement. Only within reason, though */
diff --git a/replace-object.h b/replace-object.h
index 04ed7a85a2..3fbc32eb7b 100644
--- a/replace-object.h
+++ b/replace-object.h
@@ -24,12 +24,17 @@ const struct object_id *do_lookup_replace_object(struct repository *r,
  * name (replaced recursively, if necessary). The return value is
  * either sha1 or a pointer to a permanently-allocated value. When
  * object replacement is suppressed, always return sha1.
+ *
+ * Note: some thread debuggers might report a data race on the read of
+ * replace_map_initialized in this function. However, it is not a problem if
+ * the value is updated by one thread right after another one has read it
+ * here (and it is written to only once, anyway).
  */
 static inline const struct object_id *lookup_replace_object(struct repository *r,
 							     const struct object_id *oid)
 {
 	if (!read_replace_refs ||
-	    (r->objects->replace_map &&
+	    (r->objects->replace_map_initialized &&
 	     r->objects->replace_map->map.tablesize == 0))
 		return oid;
 	return do_lookup_replace_object(r, oid);
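
For reference, the change to prepare_replace_object() above is a double-checked locking pattern: an unsynchronized fast-path read of replace_map_initialized, followed by a re-check under replace_mutex before the one-time initialization. Below is a minimal, self-contained sketch of that pattern under the same assumptions; the names (lazy_init_example, data_ready, init_mutex, shared_data) are made up for illustration and are not part of Git's code.

#include <pthread.h>
#include <stdlib.h>

/*
 * Illustrative sketch of double-checked locking for lazy initialization;
 * not Git code, just the same shape as prepare_replace_object() above.
 */
static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
static int data_ready;   /* written exactly once, read racily on the fast path */
static int *shared_data; /* lazily allocated under init_mutex */

void lazy_init_example(void)
{
	/* Fast path: racy read; a stale 0 only costs us taking the lock below. */
	if (data_ready)
		return;

	pthread_mutex_lock(&init_mutex);
	/* Re-check under the lock so only one thread performs the initialization. */
	if (data_ready) {
		pthread_mutex_unlock(&init_mutex);
		return;
	}

	shared_data = malloc(sizeof(*shared_data));
	*shared_data = 42;

	/* Publish the flag only after the data is fully set up. */
	data_ready = 1;
	pthread_mutex_unlock(&init_mutex);
}

The property the patch relies on, mirrored here, is that the flag is written exactly once and only after the protected data is fully initialized, so a stale read on the fast path merely sends the reader through the locked slow path; that is also why the remaining racy read is suppressed in .tsan-suppressions rather than guarded.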