1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/simple/simple_backend_impl.h"
12 #include <sys/resource.h>
15 #include "base/bind.h"
16 #include "base/callback.h"
17 #include "base/file_util.h"
18 #include "base/location.h"
19 #include "base/message_loop/message_loop_proxy.h"
20 #include "base/metrics/field_trial.h"
21 #include "base/metrics/histogram.h"
22 #include "base/metrics/sparse_histogram.h"
23 #include "base/single_thread_task_runner.h"
24 #include "base/sys_info.h"
25 #include "base/task_runner_util.h"
26 #include "base/threading/sequenced_worker_pool.h"
27 #include "base/time/time.h"
28 #include "net/base/net_errors.h"
29 #include "net/disk_cache/backend_impl.h"
30 #include "net/disk_cache/simple/simple_entry_format.h"
31 #include "net/disk_cache/simple/simple_entry_impl.h"
32 #include "net/disk_cache/simple/simple_histogram_macros.h"
33 #include "net/disk_cache/simple/simple_index.h"
34 #include "net/disk_cache/simple/simple_index_file.h"
35 #include "net/disk_cache/simple/simple_synchronous_entry.h"
36 #include "net/disk_cache/simple/simple_util.h"
37 #include "net/disk_cache/simple/simple_version_upgrade.h"
42 using base::MessageLoopProxy;
43 using base::SequencedWorkerPool;
44 using base::SingleThreadTaskRunner;
46 using base::DirectoryExists;
47 using file_util::CreateDirectory;
49 namespace disk_cache {
53 // Maximum number of concurrent worker pool threads, which also is the limit
54 // on concurrent IO (as we use one thread per IO request).
55 const int kDefaultMaxWorkerThreads = 50;
// Name prefix for the worker pool threads (passed to SequencedWorkerPool).
57 const char kThreadNamePrefix[] = "SimpleCache";
59 // Cache size when all other size heuristics failed.
// 80 MiB fallback, consulted by InitCacheStructureOnDisk() when no size was
// suggested and free-disk-space probing does not produce one.
60 const uint64 kDefaultCacheSize = 80 * 1024 * 1024;
62 // Maximum fraction of the cache that one entry can consume.
// Used by GetMaxFileSize(): per-entry cap == index max size / kMaxFileRatio.
63 const int kMaxFileRatio = 8;
65 // A global sequenced worker pool to use for launching all tasks.
// Created lazily by MaybeCreateSequencedWorkerPool() and intentionally
// leaked (AddRef with no matching Release) so it survives shutdown.
66 SequencedWorkerPool* g_sequenced_worker_pool = NULL;
// Lazily creates the process-wide worker pool used for all SimpleCache IO.
// Thread count defaults to kDefaultMaxWorkerThreads but can be overridden
// via the "SimpleCacheMaxThreads" field trial (clamped to >= 1).
68 void MaybeCreateSequencedWorkerPool() {
69 if (!g_sequenced_worker_pool) {
70 int max_worker_threads = kDefaultMaxWorkerThreads;
72 const std::string thread_count_field_trial =
73 base::FieldTrialList::FindFullName("SimpleCacheMaxThreads");
74 if (!thread_count_field_trial.empty()) {
// NOTE(review): the assignment target for the std::max() expression below
// (presumably "max_worker_threads =") is missing from this paste.
76 std::max(1, std::atoi(thread_count_field_trial.c_str()));
// NOTE(review): the second constructor argument (presumably
// kThreadNamePrefix) and several closing braces appear to be missing here.
79 g_sequenced_worker_pool = new SequencedWorkerPool(max_worker_threads,
// Extra ref with no matching Release: the pool deliberately outlives the
// process teardown sequence.
81 g_sequenced_worker_pool->AddRef(); // Leak it.
// One-shot guard so the FD-limit histograms below are recorded at most once
// per process.
85 bool g_fd_limit_histogram_has_been_populated = false;
// Records the process file-descriptor limits (RLIMIT_NOFILE) to UMA, once.
// On non-POSIX platforms only the UNSUPPORTED status is reported.
87 void MaybeHistogramFdLimit(net::CacheType cache_type) {
88 if (g_fd_limit_histogram_has_been_populated)
// NOTE(review): the "return;" body of the if above appears to be missing.
91 // Used in histograms; add new entries at end.
// NOTE(review): the "enum FdLimitStatus {" opener appears to be missing.
93 FD_LIMIT_STATUS_UNSUPPORTED = 0,
94 FD_LIMIT_STATUS_FAILED = 1,
95 FD_LIMIT_STATUS_SUCCEEDED = 2,
96 FD_LIMIT_STATUS_MAX = 3
98 FdLimitStatus fd_limit_status = FD_LIMIT_STATUS_UNSUPPORTED;
99 int soft_fd_limit = 0;
100 int hard_fd_limit = 0;
102 #if defined(OS_POSIX)
103 struct rlimit nofile;
// getrlimit() returns 0 on success.
104 if (!getrlimit(RLIMIT_NOFILE, &nofile)) {
// rlim_t may be wider than int; values are narrowed for the histogram.
105 soft_fd_limit = nofile.rlim_cur;
106 hard_fd_limit = nofile.rlim_max;
107 fd_limit_status = FD_LIMIT_STATUS_SUCCEEDED;
// NOTE(review): the "} else {" between these branches appears to be missing.
109 fd_limit_status = FD_LIMIT_STATUS_FAILED;
113 SIMPLE_CACHE_UMA(ENUMERATION,
114 "FileDescriptorLimitStatus", cache_type,
115 fd_limit_status, FD_LIMIT_STATUS_MAX);
// The concrete limits are only meaningful if getrlimit() succeeded.
116 if (fd_limit_status == FD_LIMIT_STATUS_SUCCEEDED) {
117 SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
118 "FileDescriptorLimitSoft", cache_type, soft_fd_limit);
119 SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
120 "FileDescriptorLimitHard", cache_type, hard_fd_limit);
123 g_fd_limit_histogram_has_been_populated = true;
126 // Must run on IO Thread.
// Deletes *backend and then reports |result| through |callback|.
// NOTE(review): the "int result" parameter line and the delete/reset
// statements appear to be missing from this paste.
127 void DeleteBackendImpl(disk_cache::Backend** backend,
128 const net::CompletionCallback& callback,
133 callback.Run(result);
136 // Detects if the files in the cache directory match the current disk cache
137 // backend type and version. If the directory contains no cache, occupies it
138 // with the fresh structure.
139 bool FileStructureConsistent(const base::FilePath& path) {
140 if (!base::PathExists(path) && !file_util::CreateDirectory(path)) {
141 LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
// NOTE(review): a "return false;" after the error log appears to be missing
// from this paste.
// Version/format verification (and in-place upgrade if needed) is delegated
// to the simple_version_upgrade code.
144 return disk_cache::UpgradeSimpleCacheOnDisk(path);
147 // A context used by a BarrierCompletionCallback to track state.
// Holds the expected number of completions; consumed by
// BarrierCompletionCallbackImpl() below.
148 struct BarrierContext {
149 BarrierContext(int expected)
150 : expected(expected),
// NOTE(review): the rest of the struct (presumably count/had_error members
// and the closing brace) is missing from this paste.
// Helper bound by MakeBarrierCompletionCallback(): counts successful
// completions and forwards the first error (or the final net::OK) to
// |final_callback|.
159 void BarrierCompletionCallbackImpl(
160 BarrierContext* context,
161 const net::CompletionCallback& final_callback,
// NOTE(review): the "int result" parameter line appears to be missing here.
163 DCHECK_GT(context->expected, context->count);
// After the first error, all subsequent completions are ignored.
164 if (context->had_error)
166 if (result != net::OK) {
167 context->had_error = true;
168 final_callback.Run(result);
// NOTE(review): "return;" statements and the "++context->count;" increment
// appear to be missing from this paste.
172 if (context->count == context->expected)
173 final_callback.Run(net::OK);
176 // A barrier completion callback is a net::CompletionCallback that waits for
177 // |count| successful results before invoking |final_callback|. In the case of
178 // an error, the first error is passed to |final_callback| and all others
// NOTE(review): the tail of this comment and the "int count," parameter
// line appear to be missing from this paste.
180 net::CompletionCallback MakeBarrierCompletionCallback(
182 const net::CompletionCallback& final_callback) {
// base::Owned() transfers ownership of |context| to the returned callback,
// which deletes it when the callback itself is destroyed.
183 BarrierContext* context = new BarrierContext(count);
184 return base::Bind(&BarrierCompletionCallbackImpl,
185 base::Owned(context), final_callback);
188 // A short bindable thunk that ensures a completion callback is always called
189 // after running an operation asynchronously.
190 void RunOperationAndCallback(
191 const Callback<int(const net::CompletionCallback&)>& operation,
192 const net::CompletionCallback& operation_callback) {
// If |operation| completed synchronously its callback would otherwise never
// fire; invoke it here with the synchronous result.
193 const int operation_result = operation.Run(operation_callback);
194 if (operation_result != net::ERR_IO_PENDING)
195 operation_callback.Run(operation_result);
198 // A short bindable thunk that Dooms an entry if it successfully opens.
// NOTE(review): the "int open_result" parameter line appears to be missing
// from this paste.
199 void DoomOpenedEntry(scoped_ptr<Entry*> in_entry,
200 const net::CompletionCallback& doom_callback,
202 DCHECK_NE(open_result, net::ERR_IO_PENDING);
203 if (open_result == net::OK) {
205 SimpleEntryImpl* simple_entry = static_cast<SimpleEntryImpl*>(*in_entry);
// Doom before Close(): DoomEntry() is issued on the entry first, then our
// reference is released; a synchronous doom result is forwarded manually.
206 const int doom_result = simple_entry->DoomEntry(doom_callback);
207 simple_entry->Close();
208 if (doom_result != net::ERR_IO_PENDING)
209 doom_callback.Run(doom_result);
// NOTE(review): the "} else {" for the open-failure path appears to be
// missing; a failed open result is forwarded to the doom callback as-is.
211 doom_callback.Run(open_result);
// UMA-records the elapsed time from backend construction to index load
// completion, bucketed by whether the index loaded successfully.
215 void RecordIndexLoad(net::CacheType cache_type,
216 base::TimeTicks constructed_since,
// NOTE(review): the "int result" parameter line and the tail of the
// TimeDelta initializer (presumably "constructed_since;") are missing here.
218 const base::TimeDelta creation_to_index = base::TimeTicks::Now() -
220 if (result == net::OK) {
221 SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index);
223 SIMPLE_CACHE_UMA(TIMES,
224 "CreationToIndexFail", cache_type, creation_to_index);
// Constructor: records configuration only; real initialization (index
// creation, on-disk structure check) happens asynchronously in Init().
// NOTE(review): the "int max_bytes" parameter line and the start of the
// initializer list (presumably ": path_(path),") are missing from this paste.
230 SimpleBackendImpl::SimpleBackendImpl(const FilePath& path,
232 net::CacheType cache_type,
233 base::SingleThreadTaskRunner* cache_thread,
234 net::NetLog* net_log)
236 cache_type_(cache_type),
237 cache_thread_(cache_thread),
238 orig_max_size_(max_bytes),
// Only the HTTP disk cache runs with optimistic entry operations; all other
// cache types use the conservative mode.
239 entry_operations_mode_(
240 cache_type == net::DISK_CACHE ?
241 SimpleEntryImpl::OPTIMISTIC_OPERATIONS :
242 SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS),
// One-shot UMA record of the process FD limits (see helper above).
244 MaybeHistogramFdLimit(cache_type_);
// Destructor: flush the in-memory index to disk so the next startup can
// load it instead of rescanning the cache directory.
247 SimpleBackendImpl::~SimpleBackendImpl() {
248 index_->WriteToDisk();
// Asynchronously initializes the backend: creates the shared worker pool,
// builds the index, and posts the on-disk structure check to the pool.
// Always returns net::ERR_IO_PENDING; the final status reaches
// |completion_callback| via InitializeIndex().
251 int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
252 MaybeCreateSequencedWorkerPool();
// CONTINUE_ON_SHUTDOWN: cache IO tasks may be abandoned at shutdown.
254 worker_pool_ = g_sequenced_worker_pool->GetTaskRunnerWithShutdownBehavior(
255 SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);
257 index_.reset(new SimpleIndex(MessageLoopProxy::current(), this, cache_type_,
258 make_scoped_ptr(new SimpleIndexFile(
259 cache_thread_.get(), worker_pool_.get(),
260 cache_type_, path_))));
// Record index-load latency once the index becomes ready.
261 index_->ExecuteWhenReady(
262 base::Bind(&RecordIndexLoad, cache_type_, base::TimeTicks::Now()));
// NOTE(review): the task-runner/FROM_HERE arguments and part of the Bind
// argument list (presumably orig_max_size_) are missing from this paste.
264 PostTaskAndReplyWithResult(
267 base::Bind(&SimpleBackendImpl::InitCacheStructureOnDisk, path_,
269 base::Bind(&SimpleBackendImpl::InitializeIndex, AsWeakPtr(),
270 completion_callback));
271 return net::ERR_IO_PENDING;
// Records the requested maximum size and forwards it to the index, which
// enforces eviction. |orig_max_size_| is kept for use during Init().
274 bool SimpleBackendImpl::SetMaxSize(int max_bytes) {
275 orig_max_size_ = max_bytes;
276 return index_->SetMaxSize(max_bytes);
// A single entry may consume at most 1/kMaxFileRatio of the total cache.
279 int SimpleBackendImpl::GetMaxFileSize() const {
280 return index_->max_size() / kMaxFileRatio;
// Called by a SimpleEntryImpl when it becomes inactive; removes it from the
// active-entries map so a later open creates a fresh entry object.
283 void SimpleBackendImpl::OnDeactivated(const SimpleEntryImpl* entry) {
284 active_entries_.erase(entry->entry_hash());
// Marks |entry_hash| as having a doom in flight. Operations that arrive for
// this hash are queued on the closure list until OnDoomComplete() runs them.
287 void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) {
288 DCHECK_EQ(0u, entries_pending_doom_.count(entry_hash));
289 entries_pending_doom_.insert(
290 std::make_pair(entry_hash, std::vector<Closure>()));
// Ends the pending doom for |entry_hash| and runs every operation that was
// queued while the doom was in flight.
293 void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash) {
294 DCHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
295 base::hash_map<uint64, std::vector<Closure> >::iterator it =
296 entries_pending_doom_.find(entry_hash);
297 std::vector<Closure> to_run_closures;
// Swap the closures out and erase the map entry BEFORE running them, so a
// queued operation that re-dooms the same hash sees a consistent map.
298 to_run_closures.swap(it->second);
299 entries_pending_doom_.erase(it);
301 std::for_each(to_run_closures.begin(), to_run_closures.end(),
302 std::mem_fun_ref(&Closure::Run));
// Dooms the given set of entries. Entries that are open or already pending
// doom are doomed individually; the remainder are deleted en masse on the
// worker pool. |callback| fires once every doom (individual + mass)
// completes, coordinated by a barrier callback.
305 void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
306 const net::CompletionCallback& callback) {
// Take ownership of the hash list; |*entry_hashes| is left empty.
307 scoped_ptr<std::vector<uint64> >
308 mass_doom_entry_hashes(new std::vector<uint64>());
309 mass_doom_entry_hashes->swap(*entry_hashes);
311 std::vector<uint64> to_doom_individually_hashes;
313 // For each of the entry hashes, there are two cases:
314 // 1. The entry is either open or pending doom, and so it should be doomed
315 // individually to avoid flakes.
316 // 2. The entry is not in use at all, so we can call
317 // SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
// Iterate backwards so the swap-with-back/resize removal below stays valid.
318 for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
319 const uint64 entry_hash = (*mass_doom_entry_hashes)[i];
320 DCHECK(active_entries_.count(entry_hash) == 0 ||
321 entries_pending_doom_.count(entry_hash) == 0)
322 << "The entry 0x" << std::hex << entry_hash
323 << " is both active and pending doom.";
324 if (!active_entries_.count(entry_hash) &&
325 !entries_pending_doom_.count(entry_hash)) {
// NOTE(review): a "continue;" and closing brace for the not-in-use case
// appear to be missing from this paste.
329 to_doom_individually_hashes.push_back(entry_hash);
// Remove the hash from the mass-doom list by swapping in the last element.
331 (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
332 mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
// The "+ 1" accounts for the single mass doom in addition to the
// individual dooms.
335 net::CompletionCallback barrier_callback =
336 MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
338 for (std::vector<uint64>::const_iterator
339 it = to_doom_individually_hashes.begin(),
340 end = to_doom_individually_hashes.end(); it != end; ++it) {
341 const int doom_result = DoomEntryFromHash(*it, barrier_callback);
342 DCHECK_EQ(net::ERR_IO_PENDING, doom_result);
// NOTE(review): the body of the loop below (presumably marking each hash
// pending-doom via OnDoomStart) is truncated in this paste.
346 for (std::vector<uint64>::const_iterator it = mass_doom_entry_hashes->begin(),
347 end = mass_doom_entry_hashes->end();
353 // Taking this pointer here avoids undefined behaviour from calling
354 // base::Passed before mass_doom_entry_hashes.get().
355 std::vector<uint64>* mass_doom_entry_hashes_ptr =
356 mass_doom_entry_hashes.get();
// Delete the unused entries' files on the worker pool, then finish via
// DoomEntriesComplete() (which also feeds the barrier callback).
357 PostTaskAndReplyWithResult(
358 worker_pool_, FROM_HERE,
359 base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
360 mass_doom_entry_hashes_ptr, path_),
361 base::Bind(&SimpleBackendImpl::DoomEntriesComplete,
362 AsWeakPtr(), base::Passed(&mass_doom_entry_hashes),
// NOTE(review): returns net::DISK_CACHE unconditionally rather than the
// stored |cache_type_| — looks deliberate at this revision, but confirm.
366 net::CacheType SimpleBackendImpl::GetCacheType() const {
367 return net::DISK_CACHE;
// Entry count as known to the in-memory index (may lag the actual disk
// contents until the index has loaded).
370 int32 SimpleBackendImpl::GetEntryCount() const {
371 // TODO(pasko): Use directory file count when index is not ready.
372 return index_->GetEntryCount();
// Opens the entry for |key|. If a doom is pending on the same hash the open
// is queued until the doom finishes; otherwise it is routed through the
// active-entries map so concurrent operations share one SimpleEntryImpl.
// NOTE(review): the "Entry** entry," parameter line appears to be missing
// from this paste.
375 int SimpleBackendImpl::OpenEntry(const std::string& key,
377 const CompletionCallback& callback) {
378 const uint64 entry_hash = simple_util::GetEntryHashKey(key);
380 // TODO(gavinp): Factor out this (not quite completely) repetitive code
381 // block from OpenEntry/CreateEntry/DoomEntry.
382 base::hash_map<uint64, std::vector<Closure> >::iterator it =
383 entries_pending_doom_.find(entry_hash);
384 if (it != entries_pending_doom_.end()) {
// Re-issue this OpenEntry once the pending doom completes.
385 Callback<int(const net::CompletionCallback&)> operation =
386 base::Bind(&SimpleBackendImpl::OpenEntry,
387 base::Unretained(this), key, entry);
388 it->second.push_back(base::Bind(&RunOperationAndCallback,
389 operation, callback));
390 return net::ERR_IO_PENDING;
392 scoped_refptr<SimpleEntryImpl> simple_entry =
393 CreateOrFindActiveEntry(entry_hash, key);
// OnEntryOpenedFromKey verifies the opened entry's key actually matches.
// NOTE(review): the tail of this Bind argument list is missing here.
394 CompletionCallback backend_callback =
395 base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
401 return simple_entry->OpenEntry(entry, backend_callback);
// Creates the entry for |key|. Mirrors OpenEntry(): queues behind a pending
// doom on the same hash, otherwise creates through the shared active entry.
// NOTE(review): the "Entry** entry," parameter line appears to be missing
// from this paste.
404 int SimpleBackendImpl::CreateEntry(const std::string& key,
406 const CompletionCallback& callback) {
407 DCHECK_LT(0u, key.size());
408 const uint64 entry_hash = simple_util::GetEntryHashKey(key);
410 base::hash_map<uint64, std::vector<Closure> >::iterator it =
411 entries_pending_doom_.find(entry_hash);
412 if (it != entries_pending_doom_.end()) {
// Re-issue this CreateEntry once the pending doom completes.
413 Callback<int(const net::CompletionCallback&)> operation =
414 base::Bind(&SimpleBackendImpl::CreateEntry,
415 base::Unretained(this), key, entry);
416 it->second.push_back(base::Bind(&RunOperationAndCallback,
417 operation, callback));
418 return net::ERR_IO_PENDING;
420 scoped_refptr<SimpleEntryImpl> simple_entry =
421 CreateOrFindActiveEntry(entry_hash, key);
422 return simple_entry->CreateEntry(entry, callback);
// Dooms the entry for |key|, queueing behind any doom already in flight for
// the same hash; otherwise dooms through the shared active entry.
425 int SimpleBackendImpl::DoomEntry(const std::string& key,
426 const net::CompletionCallback& callback) {
427 const uint64 entry_hash = simple_util::GetEntryHashKey(key);
429 base::hash_map<uint64, std::vector<Closure> >::iterator it =
430 entries_pending_doom_.find(entry_hash);
431 if (it != entries_pending_doom_.end()) {
// Re-issue this DoomEntry once the in-flight doom completes.
432 Callback<int(const net::CompletionCallback&)> operation =
433 base::Bind(&SimpleBackendImpl::DoomEntry, base::Unretained(this), key);
434 it->second.push_back(base::Bind(&RunOperationAndCallback,
435 operation, callback));
436 return net::ERR_IO_PENDING;
438 scoped_refptr<SimpleEntryImpl> simple_entry =
439 CreateOrFindActiveEntry(entry_hash, key);
440 return simple_entry->DoomEntry(callback);
// Dooming everything == dooming all entries in the unbounded (null..null)
// time range.
443 int SimpleBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
444 return DoomEntriesBetween(Time(), Time(), callback);
// Invoked once the index has loaded: collects the hashes of entries in the
// requested time range and forwards them to DoomEntries().
// NOTE(review): the "Time end_time," and "int result" parameter lines
// appear to be missing from this paste.
447 void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
449 const CompletionCallback& callback,
451 if (result != net::OK) {
452 callback.Run(result);
// NOTE(review): a "return;" appears to be missing here.
455 scoped_ptr<std::vector<uint64> > removed_key_hashes(
456 index_->GetEntriesBetween(initial_time, end_time).release())
457 DoomEntries(removed_key_hashes.get(), callback);
// Defers the doom until the index is ready, since the set of hashes to doom
// comes from the index (see IndexReadyForDoom).
// NOTE(review): the "const Time end_time," parameter line appears to be
// missing from this paste.
460 int SimpleBackendImpl::DoomEntriesBetween(
461 const Time initial_time,
463 const CompletionCallback& callback) {
464 return index_->ExecuteWhenReady(
465 base::Bind(&SimpleBackendImpl::IndexReadyForDoom, AsWeakPtr(),
466 initial_time, end_time, callback));
// "Since |initial_time|" == between |initial_time| and the null (open-ended)
// end time.
469 int SimpleBackendImpl::DoomEntriesSince(
470 const Time initial_time,
471 const CompletionCallback& callback) {
472 return DoomEntriesBetween(initial_time, Time(), callback);
// Enumeration: waits for the index to be ready, then advances the opaque
// iterator in GetNextEntryInIterator().
// NOTE(review): the "Entry** next_entry," parameter line appears to be
// missing from this paste.
475 int SimpleBackendImpl::OpenNextEntry(void** iter,
477 const CompletionCallback& callback) {
478 CompletionCallback get_next_entry =
479 base::Bind(&SimpleBackendImpl::GetNextEntryInIterator, AsWeakPtr(), iter,
480 next_entry, callback);
481 return index_->ExecuteWhenReady(get_next_entry);
// Releases the enumeration state. The opaque |*iter| is a heap-allocated
// SimpleIndex::HashList created in GetNextEntryInIterator().
// NOTE(review): the delete of |entry_list| and the reset of |*iter| appear
// to be missing from this paste.
484 void SimpleBackendImpl::EndEnumeration(void** iter) {
485 SimpleIndex::HashList* entry_list =
486 static_cast<SimpleIndex::HashList*>(*iter);
// Reports a single static stat identifying this backend flavor.
491 void SimpleBackendImpl::GetStats(
492 std::vector<std::pair<std::string, std::string> >* stats) {
493 std::pair<std::string, std::string> item;
494 item.first = "Cache type";
495 item.second = "Simple Cache";
496 stats->push_back(item);
// Refreshes the entry's last-used state in the index when a cache hit is
// reported externally, keeping eviction ordering accurate.
499 void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
500 index_->UseIfExists(simple_util::GetEntryHashKey(key));
// Reply half of Init(): on success applies the disk-scan results (max size,
// directory mtime) to the index, then reports the net error to the caller.
503 void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback,
504 const DiskStatResult& result) {
505 if (result.net_error == net::OK) {
506 index_->SetMaxSize(result.max_size);
507 index_->Initialize(result.cache_dir_mtime);
509 callback.Run(result.net_error);
// Runs on the worker pool: verifies/creates the on-disk cache structure and
// decides the effective max size — the suggested size if non-zero, else a
// heuristic from free disk space, else kDefaultCacheSize.
512 SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
513 const base::FilePath& path,
514 uint64 suggested_max_size) {
515 DiskStatResult result;
516 result.max_size = suggested_max_size;
517 result.net_error = net::OK;
518 if (!FileStructureConsistent(path)) {
519 LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
520 << path.LossyDisplayName();
521 result.net_error = net::ERR_FAILED;
// NOTE(review): the "} else {" and the "bool mtime_result =" prefix of the
// GetMTime() call appear to be missing from this paste (the DCHECK below
// references |mtime_result|).
524 disk_cache::simple_util::GetMTime(path, &result.cache_dir_mtime);
525 DCHECK(mtime_result);
526 if (!result.max_size) {
527 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
// NOTE(review): the "if (available < 0)" guard appears to be missing here;
// a negative |available| means free-space probing failed.
529 result.max_size = kDefaultCacheSize;
531 // TODO(pasko): Move PreferedCacheSize() to cache_util.h. Also fix the
533 result.max_size = disk_cache::PreferedCacheSize(available);
535 DCHECK(result.max_size);
// NOTE(review): the trailing "return result;" (and closing braces) appear
// to be missing from this paste.
// Returns the active entry for |entry_hash|, creating and registering a new
// SimpleEntryImpl if none is live. On a hash collision with a different key
// the existing entry is evicted and the lookup retried recursively.
540 scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
541 const uint64 entry_hash,
542 const std::string& key) {
543 DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
// Insert a placeholder weak pointer; if the hash was already present,
// insert_result.second is false and |it| refers to the existing slot.
544 std::pair<EntryMap::iterator, bool> insert_result =
545 active_entries_.insert(std::make_pair(entry_hash,
546 base::WeakPtr<SimpleEntryImpl>()));
547 EntryMap::iterator& it = insert_result.first;
548 if (insert_result.second)
549 DCHECK(!it->second.get());
// Covers both a fresh insert and a stale (expired) weak pointer.
550 if (!it->second.get()) {
551 SimpleEntryImpl* entry = new SimpleEntryImpl(
552 cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
// NOTE(review): a statement (presumably entry->SetKey(key);) appears to be
// missing from this paste.
554 it->second = entry->AsWeakPtr();
556 DCHECK(it->second.get());
557 // It's possible, but unlikely, that we have an entry hash collision with a
558 // currently active entry.
559 if (key != it->second->key()) {
// NOTE(review): the doom of the colliding entry appears to be missing here;
// the DCHECK below relies on it having removed the map entry.
561 DCHECK_EQ(0U, active_entries_.count(entry_hash));
562 return CreateOrFindActiveEntry(entry_hash, key);
564 return make_scoped_refptr(it->second.get());
// Opens an entry given only its hash (used by enumeration and doom-by-hash).
// Queues behind a pending doom; reuses the active entry's key when one is
// live; otherwise opens a key-less entry, registered afterwards in
// OnEntryOpenedFromHash().
// NOTE(review): the "Entry** entry," parameter line appears to be missing
// from this paste.
567 int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash,
569 const CompletionCallback& callback) {
570 base::hash_map<uint64, std::vector<Closure> >::iterator it =
571 entries_pending_doom_.find(entry_hash);
572 if (it != entries_pending_doom_.end()) {
// Re-issue this open once the pending doom completes.
573 Callback<int(const net::CompletionCallback&)> operation =
574 base::Bind(&SimpleBackendImpl::OpenEntryFromHash,
575 base::Unretained(this), entry_hash, entry);
576 it->second.push_back(base::Bind(&RunOperationAndCallback,
577 operation, callback));
578 return net::ERR_IO_PENDING;
// An active entry knows its key, so the open can take the key-based path.
581 EntryMap::iterator has_active = active_entries_.find(entry_hash);
582 if (has_active != active_entries_.end()) {
583 return OpenEntry(has_active->second->key(), entry, callback);
586 scoped_refptr<SimpleEntryImpl> simple_entry = new SimpleEntryImpl(
587 cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
588 CompletionCallback backend_callback =
589 base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
590 AsWeakPtr(), entry_hash, entry, simple_entry, callback);
591 return simple_entry->OpenEntry(entry, backend_callback);
// Dooms the entry for |entry_hash|: queues behind an in-flight doom, routes
// through the active entry if one is open, else mass-dooms the single hash
// via DoomEntries(). The non-active paths always complete asynchronously.
594 int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash,
595 const CompletionCallback& callback) {
// NOTE(review): |entry|/|scoped_entry| are unused in the visible code;
// presumably consumed by a DoomOpenedEntry path missing from this paste.
596 Entry** entry = new Entry*();
597 scoped_ptr<Entry*> scoped_entry(entry);
599 base::hash_map<uint64, std::vector<Closure> >::iterator it =
600 entries_pending_doom_.find(entry_hash);
601 if (it != entries_pending_doom_.end()) {
// Re-issue this doom once the in-flight one completes.
602 Callback<int(const net::CompletionCallback&)> operation =
603 base::Bind(&SimpleBackendImpl::DoomEntryFromHash,
604 base::Unretained(this), entry_hash);
605 it->second.push_back(base::Bind(&RunOperationAndCallback,
606 operation, callback));
607 return net::ERR_IO_PENDING;
610 EntryMap::iterator active_it = active_entries_.find(entry_hash);
611 if (active_it != active_entries_.end())
612 return active_it->second->DoomEntry(callback);
614 // There's no pending dooms, nor any open entry. We can make a trivial
615 // call to DoomEntries() to delete this entry.
616 std::vector<uint64> entry_hash_vector;
617 entry_hash_vector.push_back(entry_hash);
618 DoomEntries(&entry_hash_vector, callback);
619 return net::ERR_IO_PENDING;
// Runs once the index is ready: lazily materializes |*iter| as a snapshot
// hash list, then pops hashes until one opens (or reports end of
// enumeration via net::ERR_FAILED).
// NOTE(review): the "void** iter"/"Entry** entry" parameter lines, the
// "int error_code" parameter, and several statements (returns, the lazy-init
// guard, Bind argument tails) are missing from this paste.
622 void SimpleBackendImpl::GetNextEntryInIterator(
625 const CompletionCallback& callback,
627 if (error_code != net::OK) {
628 callback.Run(error_code);
632 *iter = index()->GetAllHashes().release();
634 SimpleIndex::HashList* entry_list =
635 static_cast<SimpleIndex::HashList*>(*iter);
// Pop from the back; skip hashes that vanished from the index since the
// snapshot was taken.
636 while (entry_list->size() > 0) {
637 uint64 entry_hash = entry_list->back();
638 entry_list->pop_back();
639 if (index()->Has(entry_hash)) {
641 CompletionCallback continue_iteration = base::Bind(
642 &SimpleBackendImpl::CheckIterationReturnValue,
647 int error_code_open = OpenEntryFromHash(entry_hash,
// A synchronous ERR_FAILED means the candidate disappeared: keep scanning.
// Any other synchronous result ends this enumeration step.
650 if (error_code_open == net::ERR_IO_PENDING)
652 if (error_code_open != net::ERR_FAILED) {
653 callback.Run(error_code_open);
// Hash list exhausted: signal end of enumeration.
658 callback.Run(net::ERR_FAILED);
// Completion for a hash-based open: on success, publishes the entry in
// |active_entries_|; if another entry won the race for this hash, closes
// ours and re-opens through the winner instead.
// NOTE(review): the "uint64 hash"/"Entry** entry" parameter lines, the
// "int error_code" parameter, and the "if (did_insert) {"/"} else {"
// branch structure appear to be missing from this paste.
661 void SimpleBackendImpl::OnEntryOpenedFromHash(
664 scoped_refptr<SimpleEntryImpl> simple_entry,
665 const CompletionCallback& callback,
667 if (error_code != net::OK) {
668 callback.Run(error_code);
672 std::pair<EntryMap::iterator, bool> insert_result =
673 active_entries_.insert(std::make_pair(hash,
674 base::WeakPtr<SimpleEntryImpl>()));
675 EntryMap::iterator& it = insert_result.first;
676 const bool did_insert = insert_result.second;
678 // There is no active entry corresponding to this hash. The entry created
679 // is put in the map of active entries and returned to the caller.
680 it->second = simple_entry->AsWeakPtr();
681 callback.Run(error_code);
683 // The entry was made active with the key while the creation from hash
684 // occurred. The entry created from hash needs to be closed, and the one
685 // coming from the key returned to the caller.
686 simple_entry->Close();
687 it->second->OpenEntry(entry, callback);
// Completion for a key-based open: verifies the opened entry's key matches
// the requested key (guards against on-disk hash collisions); on mismatch
// dooms and closes the entry and fails the open. Records the outcome to UMA.
// NOTE(review): parameter lines ("Entry**", "int error_code") and the
// "if (!key_matches) {" guard around the mismatch handling appear to be
// missing from this paste.
691 void SimpleBackendImpl::OnEntryOpenedFromKey(
692 const std::string key,
694 scoped_refptr<SimpleEntryImpl> simple_entry,
695 const CompletionCallback& callback,
697 int final_code = error_code;
698 if (final_code == net::OK) {
699 bool key_matches = key.compare(simple_entry->key()) == 0;
701 // TODO(clamy): Add a unit test to check this code path.
702 DLOG(WARNING) << "Key mismatch on open.";
703 simple_entry->Doom();
704 simple_entry->Close();
705 final_code = net::ERR_FAILED;
707 DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
709 SIMPLE_CACHE_UMA(BOOLEAN, "KeyMatchedOnOpen", cache_type_, key_matches);
711 callback.Run(final_code);
// Enumeration step callback: ERR_FAILED means the candidate entry vanished,
// so advance to the next one; any other code is the final result.
// NOTE(review): the "void** iter"/"Entry** entry" parameter lines, the
// "int error_code" parameter, and a "return;" appear to be missing from
// this paste.
714 void SimpleBackendImpl::CheckIterationReturnValue(
717 const CompletionCallback& callback,
719 if (error_code == net::ERR_FAILED) {
720 OpenNextEntry(iter, entry, callback);
723 callback.Run(error_code);
// Reply for the mass doom posted by DoomEntries(): clears the pending-doom
// marker for every hash (running any queued operations) and forwards
// |result| to the caller.
// NOTE(review): the "int result" parameter line and the "std::for_each("
// opener (plus the bind1st receiver argument) appear to be missing from
// this paste.
726 void SimpleBackendImpl::DoomEntriesComplete(
727 scoped_ptr<std::vector<uint64> > entry_hashes,
728 const net::CompletionCallback& callback,
731 entry_hashes->begin(), entry_hashes->end(),
732 std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete),
734 callback.Run(result);
// Test-only: drains all queued worker-pool tasks so tests can synchronize
// with background cache IO.
737 void SimpleBackendImpl::FlushWorkerPoolForTesting() {
738 if (g_sequenced_worker_pool)
739 g_sequenced_worker_pool->FlushForTesting();
742 } // namespace disk_cache