+ istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
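+ /*
+ * v4 prefix-compresses pathnames, so the on-disk size understates
+ * the in-memory size; estimate from the entry count instead
+ */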
+ if (istate->version == 4) {
+ mem_pool_init(istate->ce_mem_pool,
+ estimate_cache_size_from_compressed(istate->cache_nr));
+ } else {
+ mem_pool_init(istate->ce_mem_pool,
+ estimate_cache_size(mmap_size, istate->cache_nr));
+ }
+
+ consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
+ 0, istate->cache_nr, mmap, src_offset, NULL);
+ return consumed;
+}
+
+/*
+ * The maximum thread counts are mostly arbitrary: we cap the
+ * parallelism at online_cpus() threads, and we want at least
+ * 10000 cache entries per thread before it is worth starting
+ * one.
+ */
+
+#define THREAD_COST (10000)
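+/* e.g. a 1,000,000-entry index allows up to 1000000 / THREAD_COST = 100 threads before the online_cpus() cap applies */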
+
+struct load_cache_entries_thread_data {
+ pthread_t pthread;
+ struct index_state *istate;
+ struct mem_pool *ce_mem_pool; /* pool this thread allocates its cache entries from */
+ int offset; /* starting index into istate->cache for this thread's entries */
+ const char *mmap; /* the mmap'd index file */
+ struct index_entry_offset_table *ieot;
+ int ieot_start; /* starting index into the ieot array */
+ int ieot_blocks; /* count of ieot entries to process */
+ unsigned long consumed; /* returns the number of bytes of the index file processed */
+};
+
+/*
+ * A thread proc to run the cache-entry loading computation
+ * across multiple background threads.
+ */
+static void *load_cache_entries_thread(void *_data)
+{
+ struct load_cache_entries_thread_data *p = _data;
+ int i;
+
+ /* iterate across all ieot blocks assigned to this thread */
+ for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) {
+ p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
+ p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL);
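+ /* advance the destination index in istate->cache past the entries just loaded */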
+ p->offset += p->ieot->entries[i].nr;
+ }
+ return NULL;
+}
+
+static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
+ int nr_threads, struct index_entry_offset_table *ieot)
+{
+ int i, offset, ieot_blocks, ieot_start, err;
+ struct load_cache_entries_thread_data *data;
+ unsigned long consumed = 0;
+
+ /* a little sanity checking */
+ if (istate->name_hash_initialized)
+ BUG("the name hash isn't thread safe");
+
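+ /*
+ * initial size 0: entries are allocated from the per-thread pools,
+ * which are combined into this one after the join below
+ */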
+ istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
+ mem_pool_init(istate->ce_mem_pool, 0);
+
+ /* ensure we have no more threads than we have blocks to process */
+ if (nr_threads > ieot->nr)
+ nr_threads = ieot->nr;
+ data = xcalloc(nr_threads, sizeof(*data));
+
+ offset = ieot_start = 0;
+ ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads);
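+ /* split the ieot blocks evenly across threads, rounding up; the final thread may receive fewer blocks */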
+ for (i = 0; i < nr_threads; i++) {
+ struct load_cache_entries_thread_data *p = &data[i];
+ int nr, j;
+
+ if (ieot_start + ieot_blocks > ieot->nr)
+ ieot_blocks = ieot->nr - ieot_start;
+
+ p->istate = istate;
+ p->offset = offset;
+ p->mmap = mmap;
+ p->ieot = ieot;
+ p->ieot_start = ieot_start;
+ p->ieot_blocks = ieot_blocks;
+
+ /* count the entries this thread will load and size its private mem_pool up front */
+ nr = 0;
+ for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
+ nr += p->ieot->entries[j].nr;
+ p->ce_mem_pool = xmalloc(sizeof(*p->ce_mem_pool));
+ if (istate->version == 4) {
+ mem_pool_init(p->ce_mem_pool,
+ estimate_cache_size_from_compressed(nr));
+ } else {
+ mem_pool_init(p->ce_mem_pool,
+ estimate_cache_size(mmap_size, nr));
+ }
+
+ err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p);
+ if (err)
+ die(_("unable to create load_cache_entries thread: %s"), strerror(err));
+
+ /* advance the global offset past the cache entries in the ieot blocks assigned to this thread */
+ for (j = 0; j < ieot_blocks; j++)
+ offset += ieot->entries[ieot_start + j].nr;
+ ieot_start += ieot_blocks;
+ }
+
+ for (i = 0; i < nr_threads; i++) {
+ struct load_cache_entries_thread_data *p = &data[i];
+
+ err = pthread_join(p->pthread, NULL);
+ if (err)
+ die(_("unable to join load_cache_entries thread: %s"), strerror(err));
+ mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
+ consumed += p->consumed;
+ }
+
+ free(data);
+
+ return consumed;
+}
+
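+/*
+ * A simplified sketch (not part of this change) of how do_read_index()
+ * below is expected to choose a loader:
+ *
+ *   if (extension_offset && nr_threads > 1)
+ *       ieot = read_ieot_extension(mmap, mmap_size, extension_offset);
+ *   if (ieot)
+ *       src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot);
+ *   else
+ *       src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
+ */
+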
+/* remember to discard_cache() before reading a different cache! */
+int do_read_index(struct index_state *istate, const char *path, int must_exist)
+{
+ int fd;
+ struct stat st;
+ unsigned long src_offset;
+ const struct cache_header *hdr;
+ const char *mmap;
+ size_t mmap_size;
+ struct load_index_extensions p;
+ size_t extension_offset = 0;
+ int nr_threads, cpus;
+ struct index_entry_offset_table *ieot = NULL;
+
+ if (istate->initialized)
+ return istate->cache_nr;
+
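+ /* reset the timestamp; it is refreshed from fstat() only after the file has been read successfully */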
+ istate->timestamp.sec = 0;
+ istate->timestamp.nsec = 0;
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ if (!must_exist && errno == ENOENT)
+ return 0;
+ die_errno(_("%s: index file open failed"), path);
+ }
+
+ if (fstat(fd, &st))
+ die_errno(_("%s: cannot stat the open index"), path);
+
+ mmap_size = xsize_t(st.st_size);
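+ /* the smallest valid index is a header followed by the trailing hash checksum */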
+ if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
+ die(_("%s: index file smaller than expected"), path);
+
+ mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (mmap == MAP_FAILED)
+ die_errno(_("%s: unable to map index file"), path);
+ close(fd);
+
+ hdr = (const struct cache_header *)mmap;
+ if (verify_hdr(hdr, mmap_size) < 0)
+ goto unmap;
+
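+ /* the last the_hash_algo->rawsz bytes of the file hold the checksum over the rest of the index */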
+ hashcpy(istate->oid.hash, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
+ istate->version = ntohl(hdr->hdr_version);
+ istate->cache_nr = ntohl(hdr->hdr_entries);
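+ /* alloc_nr() over-allocates so entries can be appended without an immediate realloc */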
+ istate->cache_alloc = alloc_nr(istate->cache_nr);
+ istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache));
+ istate->initialized = 1;
+
+ p.istate = istate;
+ p.mmap = mmap;
+ p.mmap_size = mmap_size;
+
+ src_offset = sizeof(*hdr);
+
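+ /* nr_threads comes from the index.threads config; fall back to a single thread when it yields no value */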
+ if (git_config_get_index_threads(&nr_threads))
+ nr_threads = 1;