1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/simple/simple_backend_impl.h"
12 #include <sys/resource.h>
15 #include "base/bind.h"
16 #include "base/callback.h"
17 #include "base/file_util.h"
18 #include "base/location.h"
19 #include "base/message_loop/message_loop_proxy.h"
20 #include "base/metrics/field_trial.h"
21 #include "base/metrics/histogram.h"
22 #include "base/metrics/sparse_histogram.h"
23 #include "base/single_thread_task_runner.h"
24 #include "base/sys_info.h"
25 #include "base/task_runner_util.h"
26 #include "base/threading/sequenced_worker_pool.h"
27 #include "base/time/time.h"
28 #include "net/base/net_errors.h"
29 #include "net/disk_cache/cache_util.h"
30 #include "net/disk_cache/simple/simple_entry_format.h"
31 #include "net/disk_cache/simple/simple_entry_impl.h"
32 #include "net/disk_cache/simple/simple_histogram_macros.h"
33 #include "net/disk_cache/simple/simple_index.h"
34 #include "net/disk_cache/simple/simple_index_file.h"
35 #include "net/disk_cache/simple/simple_synchronous_entry.h"
36 #include "net/disk_cache/simple/simple_util.h"
37 #include "net/disk_cache/simple/simple_version_upgrade.h"
42 using base::MessageLoopProxy;
43 using base::SequencedWorkerPool;
44 using base::SingleThreadTaskRunner;
46 using base::DirectoryExists;
47 using base::CreateDirectory;
49 namespace disk_cache {
53 // Maximum number of concurrent worker pool threads, which also is the limit
54 // on concurrent IO (as we use one thread per IO request).
55 const int kDefaultMaxWorkerThreads = 50;
// Prefix used for naming the worker pool's threads.
57 const char kThreadNamePrefix[] = "SimpleCache";
59 // Maximum fraction of the cache that one entry can consume.
60 const int kMaxFileRatio = 8;
62 // A global sequenced worker pool to use for launching all tasks.
// Lazily created by MaybeCreateSequencedWorkerPool() and intentionally
// leaked (see the AddRef() there).
63 SequencedWorkerPool* g_sequenced_worker_pool = NULL;
// Creates the process-wide worker pool on first use. The thread count
// defaults to kDefaultMaxWorkerThreads and may be overridden by the
// "SimpleCacheMaxThreads" field trial (clamped to at least 1 via std::max).
65 void MaybeCreateSequencedWorkerPool() {
66 if (!g_sequenced_worker_pool) {
67 int max_worker_threads = kDefaultMaxWorkerThreads;
69 const std::string thread_count_field_trial =
70 base::FieldTrialList::FindFullName("SimpleCacheMaxThreads");
71 if (!thread_count_field_trial.empty()) {
73 std::max(1, std::atoi(thread_count_field_trial.c_str()));
76 g_sequenced_worker_pool = new SequencedWorkerPool(max_worker_threads,
// Extra reference with no matching Release: the pool must outlive every
// backend, so it is deliberately leaked.
78 g_sequenced_worker_pool->AddRef();  // Leak it.
// Ensures the file-descriptor-limit histograms below are recorded at most
// once per process.
82 bool g_fd_limit_histogram_has_been_populated = false;
// Records the process's RLIMIT_NOFILE soft/hard limits to UMA, once.
// On platforms without getrlimit() the status stays UNSUPPORTED.
84 void MaybeHistogramFdLimit(net::CacheType cache_type) {
85 if (g_fd_limit_histogram_has_been_populated)
88 // Used in histograms; add new entries at end.
90 FD_LIMIT_STATUS_UNSUPPORTED = 0,
91 FD_LIMIT_STATUS_FAILED = 1,
92 FD_LIMIT_STATUS_SUCCEEDED = 2,
93 FD_LIMIT_STATUS_MAX = 3
95 FdLimitStatus fd_limit_status = FD_LIMIT_STATUS_UNSUPPORTED;
96 int soft_fd_limit = 0;
97 int hard_fd_limit = 0;
// getrlimit() returns 0 on success; limits are narrowed to int solely for
// histogram reporting.
100 struct rlimit nofile;
101 if (!getrlimit(RLIMIT_NOFILE, &nofile)) {
102 soft_fd_limit = nofile.rlim_cur;
103 hard_fd_limit = nofile.rlim_max;
104 fd_limit_status = FD_LIMIT_STATUS_SUCCEEDED;
106 fd_limit_status = FD_LIMIT_STATUS_FAILED;
110 SIMPLE_CACHE_UMA(ENUMERATION,
111 "FileDescriptorLimitStatus", cache_type,
112 fd_limit_status, FD_LIMIT_STATUS_MAX);
// Only report the numeric limits when the query actually succeeded.
113 if (fd_limit_status == FD_LIMIT_STATUS_SUCCEEDED) {
114 SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
115 "FileDescriptorLimitSoft", cache_type, soft_fd_limit);
116 SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
117 "FileDescriptorLimitHard", cache_type, hard_fd_limit);
120 g_fd_limit_histogram_has_been_populated = true;
123 // Detects if the files in the cache directory match the current disk cache
124 // backend type and version. If the directory contains no cache, occupies it
125 // with the fresh structure.
// Returns false if the directory cannot be created; otherwise delegates the
// version check/upgrade to UpgradeSimpleCacheOnDisk().
126 bool FileStructureConsistent(const base::FilePath& path) {
127 if (!base::PathExists(path) && !base::CreateDirectory(path)) {
128 LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
131 return disk_cache::UpgradeSimpleCacheOnDisk(path);
134 // A context used by a BarrierCompletionCallback to track state.
// |expected| is the number of sub-operation results the barrier waits for
// before it fires its final callback.
135 struct BarrierContext {
136 BarrierContext(int expected)
137 : expected(expected),
// Invoked once per sub-operation result. The first non-OK result is passed
// through to |final_callback| immediately and latches |had_error| so later
// results are dropped; once all |expected| results arrive OK, the final
// callback runs with net::OK.
146 void BarrierCompletionCallbackImpl(
147 BarrierContext* context,
148 const net::CompletionCallback& final_callback,
150 DCHECK_GT(context->expected, context->count);
// A previous sub-operation already reported the error; ignore this result.
151 if (context->had_error)
153 if (result != net::OK) {
154 context->had_error = true;
155 final_callback.Run(result);
159 if (context->count == context->expected)
160 final_callback.Run(net::OK);
163 // A barrier completion callback is a net::CompletionCallback that waits for
164 // |count| successful results before invoking |final_callback|. In the case of
165 // an error, the first error is passed to |final_callback| and all others
// are ignored (see BarrierCompletionCallbackImpl above).
167 net::CompletionCallback MakeBarrierCompletionCallback(
169 const net::CompletionCallback& final_callback) {
// base::Owned() transfers ownership of |context| to the returned callback,
// which deletes it when the callback itself is destroyed.
170 BarrierContext* context = new BarrierContext(count);
171 return base::Bind(&BarrierCompletionCallbackImpl,
172 base::Owned(context), final_callback);
175 // A short bindable thunk that ensures a completion callback is always called
176 // after running an operation asynchronously.
177 void RunOperationAndCallback(
178 const Callback<int(const net::CompletionCallback&)>& operation,
179 const net::CompletionCallback& operation_callback) {
180 const int operation_result = operation.Run(operation_callback);
181 if (operation_result != net::ERR_IO_PENDING)
182 operation_callback.Run(operation_result);
// Records the elapsed time between backend construction and the index
// becoming ready, in "CreationToIndex" on success or "CreationToIndexFail"
// otherwise.
185 void RecordIndexLoad(net::CacheType cache_type,
186 base::TimeTicks constructed_since,
188 const base::TimeDelta creation_to_index = base::TimeTicks::Now() -
190 if (result == net::OK) {
191 SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index);
193 SIMPLE_CACHE_UMA(TIMES,
194 "CreationToIndexFail", cache_type, creation_to_index);
// RAII token held by each active SimpleEntryImpl. On destruction it erases
// the entry's hash from the backend's |active_entries_| map; |backend_| is a
// WeakPtr, so a backend that is already gone is simply not touched.
200 class SimpleBackendImpl::ActiveEntryProxy
201 : public SimpleEntryImpl::ActiveEntryProxy {
203 virtual ~ActiveEntryProxy() {
// The hash must still be registered exactly once while this proxy lives.
205 DCHECK_EQ(1U, backend_->active_entries_.count(entry_hash_));
206 backend_->active_entries_.erase(entry_hash_);
// Factory: builds a proxy bound to |entry_hash| and a weak reference to
// |backend|.
210 static scoped_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
212 SimpleBackendImpl* backend) {
213 scoped_ptr<SimpleEntryImpl::ActiveEntryProxy>
214 proxy(new ActiveEntryProxy(entry_hash, backend));
219 ActiveEntryProxy(uint64 entry_hash,
220 SimpleBackendImpl* backend)
221 : entry_hash_(entry_hash),
222 backend_(backend->AsWeakPtr()) {}
225 base::WeakPtr<SimpleBackendImpl> backend_;
// Constructs a backend rooted at |path|. DISK_CACHE backends use optimistic
// entry operations (entry calls may reply before I/O completes); all other
// cache types use non-optimistic operations. Also records the FD-limit
// histograms once per process.
228 SimpleBackendImpl::SimpleBackendImpl(const FilePath& path,
230 net::CacheType cache_type,
231 base::SingleThreadTaskRunner* cache_thread,
232 net::NetLog* net_log)
234 cache_type_(cache_type),
235 cache_thread_(cache_thread),
236 orig_max_size_(max_bytes),
237 entry_operations_mode_(
238 cache_type == net::DISK_CACHE ?
239 SimpleEntryImpl::OPTIMISTIC_OPERATIONS :
240 SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS),
242 MaybeHistogramFdLimit(cache_type_);
// Flushes the in-memory index to disk before the backend is destroyed.
245 SimpleBackendImpl::~SimpleBackendImpl() {
246 index_->WriteToDisk();
// Asynchronously initializes the backend: ensures the shared worker pool
// exists, builds the index, verifies/creates the on-disk structure on the
// worker pool, then completes via InitializeIndex(). Always returns
// ERR_IO_PENDING; |completion_callback| receives the final status.
249 int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
250 MaybeCreateSequencedWorkerPool();
// CONTINUE_ON_SHUTDOWN: in-flight cache I/O may be abandoned at shutdown.
252 worker_pool_ = g_sequenced_worker_pool->GetTaskRunnerWithShutdownBehavior(
253 SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);
255 index_.reset(new SimpleIndex(MessageLoopProxy::current(), this, cache_type_,
256 make_scoped_ptr(new SimpleIndexFile(
257 cache_thread_.get(), worker_pool_.get(),
258 cache_type_, path_))));
// Record index-load latency whenever the index finishes loading.
259 index_->ExecuteWhenReady(
260 base::Bind(&RecordIndexLoad, cache_type_, base::TimeTicks::Now()));
// Disk inspection runs on the worker pool; the reply (InitializeIndex) runs
// back on this thread, guarded by AsWeakPtr() against backend destruction.
262 PostTaskAndReplyWithResult(
265 base::Bind(&SimpleBackendImpl::InitCacheStructureOnDisk, path_,
267 base::Bind(&SimpleBackendImpl::InitializeIndex, AsWeakPtr(),
268 completion_callback));
269 return net::ERR_IO_PENDING;
272 bool SimpleBackendImpl::SetMaxSize(int max_bytes) {
273 orig_max_size_ = max_bytes;
274 return index_->SetMaxSize(max_bytes);
// A single entry may consume at most 1/kMaxFileRatio of the total cache.
277 int SimpleBackendImpl::GetMaxFileSize() const {
278 return index_->max_size() / kMaxFileRatio;
281 void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) {
282 // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
283 CHECK_EQ(0u, entries_pending_doom_.count(entry_hash));
284 entries_pending_doom_.insert(
285 std::make_pair(entry_hash, std::vector<Closure>()));
// Ends the pending doom for |entry_hash| and replays the operations queued
// while it was in flight. The closures are swapped out and the map slot
// erased *before* running them, because the closures may re-enter the
// backend and touch |entries_pending_doom_|.
288 void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash) {
289 // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
290 CHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
291 base::hash_map<uint64, std::vector<Closure> >::iterator it =
292 entries_pending_doom_.find(entry_hash);
293 std::vector<Closure> to_run_closures;
294 to_run_closures.swap(it->second);
295 entries_pending_doom_.erase(it);
297 std::for_each(to_run_closures.begin(), to_run_closures.end(),
298 std::mem_fun_ref(&Closure::Run));
// Dooms every entry whose hash is in |entry_hashes| (the vector is consumed:
// its contents are swapped into a local). Entries that are open or already
// pending doom are doomed one-by-one through DoomEntryFromHash(); the rest
// are deleted en masse on the worker pool. A barrier callback joins the
// individual dooms plus the one mass doom before |callback| runs.
301 void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
302 const net::CompletionCallback& callback) {
303 scoped_ptr<std::vector<uint64> >
304 mass_doom_entry_hashes(new std::vector<uint64>());
305 mass_doom_entry_hashes->swap(*entry_hashes);
307 std::vector<uint64> to_doom_individually_hashes;
309 // For each of the entry hashes, there are two cases:
310 // 1. The entry is either open or pending doom, and so it should be doomed
311 // individually to avoid flakes.
312 // 2. The entry is not in use at all, so we can call
313 // SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
// Iterate backwards so swap-and-pop removal (below) is safe.
314 for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
315 const uint64 entry_hash = (*mass_doom_entry_hashes)[i];
316 // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
317 CHECK(active_entries_.count(entry_hash) == 0 ||
318 entries_pending_doom_.count(entry_hash) == 0)
319 << "The entry 0x" << std::hex << entry_hash
320 << " is both active and pending doom.";
321 if (!active_entries_.count(entry_hash) &&
322 !entries_pending_doom_.count(entry_hash)) {
323 continue;
326 to_doom_individually_hashes.push_back(entry_hash);
// Swap-and-pop: move the last element into slot |i| and shrink.
328 (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
329 mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
// +1 accounts for the single mass-doom task posted below.
332 net::CompletionCallback barrier_callback =
333 MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
335 for (std::vector<uint64>::const_iterator
336 it = to_doom_individually_hashes.begin(),
337 end = to_doom_individually_hashes.end(); it != end; ++it) {
338 const int doom_result = DoomEntryFromHash(*it, barrier_callback);
339 // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
340 CHECK_EQ(net::ERR_IO_PENDING, doom_result);
344 for (std::vector<uint64>::const_iterator it = mass_doom_entry_hashes->begin(),
345 end = mass_doom_entry_hashes->end();
351 // Taking this pointer here avoids undefined behaviour from calling
352 // base::Passed before mass_doom_entry_hashes.get().
353 std::vector<uint64>* mass_doom_entry_hashes_ptr =
354 mass_doom_entry_hashes.get();
355 PostTaskAndReplyWithResult(
356 worker_pool_, FROM_HERE,
357 base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
358 mass_doom_entry_hashes_ptr, path_),
359 base::Bind(&SimpleBackendImpl::DoomEntriesComplete,
360 AsWeakPtr(), base::Passed(&mass_doom_entry_hashes),
364 net::CacheType SimpleBackendImpl::GetCacheType() const {
365 return net::DISK_CACHE;
// Returns the number of entries currently tracked by the in-memory index.
// Before the index finishes loading this may undercount (see TODO below).
368 int32 SimpleBackendImpl::GetEntryCount() const {
369 // TODO(pasko): Use directory file count when index is not ready.
370 return index_->GetEntryCount();
// Opens the entry for |key|. If a doom is pending for the same hash, the
// whole operation is queued (via RunOperationAndCallback) until the doom
// finishes and ERR_IO_PENDING is returned. Otherwise the open is forwarded
// to the active SimpleEntryImpl; OnEntryOpenedFromKey validates the key on
// completion (hash collisions are possible).
373 int SimpleBackendImpl::OpenEntry(const std::string& key,
375 const CompletionCallback& callback) {
376 const uint64 entry_hash = simple_util::GetEntryHashKey(key);
378 // TODO(gavinp): Factor out this (not quite completely) repetitive code
379 // block from OpenEntry/CreateEntry/DoomEntry.
380 base::hash_map<uint64, std::vector<Closure> >::iterator it =
381 entries_pending_doom_.find(entry_hash);
382 if (it != entries_pending_doom_.end()) {
383 Callback<int(const net::CompletionCallback&)> operation =
384 base::Bind(&SimpleBackendImpl::OpenEntry,
385 base::Unretained(this), key, entry)
386 it->second.push_back(base::Bind(&RunOperationAndCallback,
387 operation, callback));
388 return net::ERR_IO_PENDING;
390 scoped_refptr<SimpleEntryImpl> simple_entry =
391 CreateOrFindActiveEntry(entry_hash, key);
392 CompletionCallback backend_callback =
393 base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
399 return simple_entry->OpenEntry(entry, backend_callback);
// Creates the entry for |key| (which must be non-empty). Like OpenEntry,
// the operation is queued behind any pending doom of the same hash;
// otherwise it is forwarded straight to the active SimpleEntryImpl.
402 int SimpleBackendImpl::CreateEntry(const std::string& key,
404 const CompletionCallback& callback) {
405 DCHECK_LT(0u, key.size());
406 const uint64 entry_hash = simple_util::GetEntryHashKey(key);
408 base::hash_map<uint64, std::vector<Closure> >::iterator it =
409 entries_pending_doom_.find(entry_hash);
410 if (it != entries_pending_doom_.end()) {
411 Callback<int(const net::CompletionCallback&)> operation =
412 base::Bind(&SimpleBackendImpl::CreateEntry,
413 base::Unretained(this), key, entry);
414 it->second.push_back(base::Bind(&RunOperationAndCallback,
415 operation, callback));
416 return net::ERR_IO_PENDING;
418 scoped_refptr<SimpleEntryImpl> simple_entry =
419 CreateOrFindActiveEntry(entry_hash, key);
420 return simple_entry->CreateEntry(entry, callback);
// Dooms the entry for |key|. If another doom of the same hash is already in
// flight, this one is queued behind it; otherwise the doom is delegated to
// the active SimpleEntryImpl for the key.
423 int SimpleBackendImpl::DoomEntry(const std::string& key,
424 const net::CompletionCallback& callback) {
425 const uint64 entry_hash = simple_util::GetEntryHashKey(key);
427 base::hash_map<uint64, std::vector<Closure> >::iterator it =
428 entries_pending_doom_.find(entry_hash);
429 if (it != entries_pending_doom_.end()) {
430 Callback<int(const net::CompletionCallback&)> operation =
431 base::Bind(&SimpleBackendImpl::DoomEntry, base::Unretained(this), key);
432 it->second.push_back(base::Bind(&RunOperationAndCallback,
433 operation, callback));
434 return net::ERR_IO_PENDING;
436 scoped_refptr<SimpleEntryImpl> simple_entry =
437 CreateOrFindActiveEntry(entry_hash, key);
438 return simple_entry->DoomEntry(callback);
441 int SimpleBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
442 return DoomEntriesBetween(Time(), Time(), callback);
// Continuation of DoomEntriesBetween() once the index is ready. On index
// load failure the error is forwarded to |callback|; otherwise the hashes
// in the requested time range are fetched and handed to DoomEntries().
445 void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
447 const CompletionCallback& callback,
449 if (result != net::OK) {
450 callback.Run(result);
// Take ownership of the hash list produced by the index.
453 scoped_ptr<std::vector<uint64> > removed_key_hashes(
454 index_->GetEntriesBetween(initial_time, end_time).release());
455 DoomEntries(removed_key_hashes.get(), callback);
// Dooms all entries last used in [initial_time, end_time). Waits for the
// index to finish loading, then continues in IndexReadyForDoom().
458 int SimpleBackendImpl::DoomEntriesBetween(
459 const Time initial_time,
461 const CompletionCallback& callback) {
462 return index_->ExecuteWhenReady(
463 base::Bind(&SimpleBackendImpl::IndexReadyForDoom, AsWeakPtr(),
464 initial_time, end_time, callback));
467 int SimpleBackendImpl::DoomEntriesSince(
468 const Time initial_time,
469 const CompletionCallback& callback) {
470 return DoomEntriesBetween(initial_time, Time(), callback);
// Backend enumeration step. Defers to GetNextEntryInIterator() once the
// index has loaded; |iter| is an opaque cursor owned by the enumeration.
473 int SimpleBackendImpl::OpenNextEntry(void** iter,
475 const CompletionCallback& callback) {
476 CompletionCallback get_next_entry =
477 base::Bind(&SimpleBackendImpl::GetNextEntryInIterator, AsWeakPtr(), iter,
478 next_entry, callback);
479 return index_->ExecuteWhenReady(get_next_entry);
// Releases the iterator state allocated by the enumeration. |*iter| holds a
// SimpleIndex::HashList created in GetNextEntryInIterator (may be NULL if
// iteration never started).
482 void SimpleBackendImpl::EndEnumeration(void** iter) {
483 SimpleIndex::HashList* entry_list =
484 static_cast<SimpleIndex::HashList*>(*iter);
489 void SimpleBackendImpl::GetStats(
490 std::vector<std::pair<std::string, std::string> >* stats) {
491 std::pair<std::string, std::string> item;
492 item.first = "Cache type";
493 item.second = "Simple Cache";
494 stats->push_back(item);
497 void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
498 index_->UseIfExists(simple_util::GetEntryHashKey(key));
// Reply half of Init(): runs on the backend thread with the disk-inspection
// result. On success the index gets its size limit and the directory mtime
// for consistency checking; the net error is always forwarded to |callback|.
501 void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback,
502 const DiskStatResult& result) {
503 if (result.net_error == net::OK) {
504 index_->SetMaxSize(result.max_size);
505 index_->Initialize(result.cache_dir_mtime);
507 callback.Run(result.net_error);
// Runs on the worker pool. Verifies (or creates/upgrades) the on-disk cache
// structure, reads the cache directory mtime, and picks a max size: the
// caller's |suggested_max_size| if non-zero, else PreferredCacheSize() based
// on free disk space.
510 SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
511 const base::FilePath& path,
512 uint64 suggested_max_size) {
513 DiskStatResult result;
514 result.max_size = suggested_max_size;
515 result.net_error = net::OK;
516 if (!FileStructureConsistent(path)) {
517 LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
518 << path.LossyDisplayName();
519 result.net_error = net::ERR_FAILED;
// Directory mtime is used by the index to detect external modification.
522 disk_cache::simple_util::GetMTime(path, &result.cache_dir_mtime);
523 DCHECK(mtime_result);
524 if (!result.max_size) {
525 int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
526 result.max_size = disk_cache::PreferredCacheSize(available);
528 DCHECK(result.max_size);
// Returns the active SimpleEntryImpl for |entry_hash|, creating and
// registering a new one if none exists. A newly created entry receives an
// ActiveEntryProxy so it deregisters itself on destruction. If the hash
// maps to an active entry with a *different* key (hash collision), the
// colliding entry is displaced and the lookup retried.
533 scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
534 const uint64 entry_hash,
535 const std::string& key) {
536 DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
// insert() is a find-or-create: |did_insert| tells us which happened.
537 std::pair<EntryMap::iterator, bool> insert_result =
538 active_entries_.insert(EntryMap::value_type(entry_hash, NULL));
539 EntryMap::iterator& it = insert_result.first;
540 const bool did_insert = insert_result.second;
542 SimpleEntryImpl* entry = it->second =
543 new SimpleEntryImpl(cache_type_, path_, entry_hash,
544 entry_operations_mode_,this, net_log_);
546 entry->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash, this));
549 // It's possible, but unlikely, that we have an entry hash collision with a
550 // currently active entry.
551 if (key != it->second->key()) {
// After the collision is resolved the hash must be free again, so the
// recursive call takes the creation path.
553 DCHECK_EQ(0U, active_entries_.count(entry_hash));
554 return CreateOrFindActiveEntry(entry_hash, key);
556 return make_scoped_refptr(it->second);
// Opens an entry knowing only its hash (used by enumeration). Queues behind
// any pending doom of the hash; reuses the key-based OpenEntry() when the
// entry is already active; otherwise opens a fresh SimpleEntryImpl and lets
// OnEntryOpenedFromHash() register it on success.
559 int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash,
561 const CompletionCallback& callback) {
562 base::hash_map<uint64, std::vector<Closure> >::iterator it =
563 entries_pending_doom_.find(entry_hash);
564 if (it != entries_pending_doom_.end()) {
565 Callback<int(const net::CompletionCallback&)> operation =
566 base::Bind(&SimpleBackendImpl::OpenEntryFromHash,
567 base::Unretained(this), entry_hash, entry);
568 it->second.push_back(base::Bind(&RunOperationAndCallback,
569 operation, callback));
570 return net::ERR_IO_PENDING;
// Already active: the key is known, so take the ordinary OpenEntry path.
573 EntryMap::iterator has_active = active_entries_.find(entry_hash);
574 if (has_active != active_entries_.end()) {
575 return OpenEntry(has_active->second->key(), entry, callback);
578 scoped_refptr<SimpleEntryImpl> simple_entry = new SimpleEntryImpl(
579 cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
580 CompletionCallback backend_callback =
581 base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
582 AsWeakPtr(), entry_hash, entry, simple_entry, callback);
583 return simple_entry->OpenEntry(entry, backend_callback);
// Dooms an entry identified only by hash. The heap-allocated Entry* is a
// throwaway out-parameter needed by the queued-operation signature. Queues
// behind a pending doom; delegates to the active entry's DoomEntry() when
// the entry is open; otherwise performs a trivial one-element DoomEntries().
// Always completes asynchronously (ERR_IO_PENDING).
586 int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash,
587 const CompletionCallback& callback) {
588 Entry** entry = new Entry*();
589 scoped_ptr<Entry*> scoped_entry(entry);
591 base::hash_map<uint64, std::vector<Closure> >::iterator pending_it =
592 entries_pending_doom_.find(entry_hash);
593 if (pending_it != entries_pending_doom_.end()) {
594 Callback<int(const net::CompletionCallback&)> operation =
595 base::Bind(&SimpleBackendImpl::DoomEntryFromHash,
596 base::Unretained(this), entry_hash);
597 pending_it->second.push_back(base::Bind(&RunOperationAndCallback,
598 operation, callback));
599 return net::ERR_IO_PENDING;
602 EntryMap::iterator active_it = active_entries_.find(entry_hash);
603 if (active_it != active_entries_.end())
604 return active_it->second->DoomEntry(callback);
606 // There's no pending dooms, nor any open entry. We can make a trivial
607 // call to DoomEntries() to delete this entry.
608 std::vector<uint64> entry_hash_vector;
609 entry_hash_vector.push_back(entry_hash);
610 DoomEntries(&entry_hash_vector, callback);
611 return net::ERR_IO_PENDING;
// Runs once the index is ready. On first use, materializes the iterator as
// the index's full hash list; then pops hashes until one still present in
// the index opens. A ERR_FAILED open is retried with the next hash via
// CheckIterationReturnValue; an exhausted list reports ERR_FAILED (end of
// enumeration).
614 void SimpleBackendImpl::GetNextEntryInIterator(
617 const CompletionCallback& callback,
619 if (error_code != net::OK) {
620 callback.Run(error_code);
624 *iter = index()->GetAllHashes().release();
626 SimpleIndex::HashList* entry_list =
627 static_cast<SimpleIndex::HashList*>(*iter);
628 while (entry_list->size() > 0) {
629 uint64 entry_hash = entry_list->back();
630 entry_list->pop_back();
// Skip hashes that were removed from the index since the snapshot.
631 if (index()->Has(entry_hash)) {
633 CompletionCallback continue_iteration = base::Bind(
634 &SimpleBackendImpl::CheckIterationReturnValue,
639 int error_code_open = OpenEntryFromHash(entry_hash,
642 if (error_code_open == net::ERR_IO_PENDING)
644 if (error_code_open != net::ERR_FAILED) {
645 callback.Run(error_code_open);
650 callback.Run(net::ERR_FAILED);
// Completion of a hash-based open. On success, either registers the freshly
// opened entry in |active_entries_| (attaching its proxy), or — if another
// entry for the same hash became active meanwhile — closes the duplicate
// and re-opens through the already-active entry.
653 void SimpleBackendImpl::OnEntryOpenedFromHash(
656 scoped_refptr<SimpleEntryImpl> simple_entry,
657 const CompletionCallback& callback,
659 if (error_code != net::OK) {
660 callback.Run(error_code);
664 std::pair<EntryMap::iterator, bool> insert_result =
665 active_entries_.insert(EntryMap::value_type(hash, simple_entry));
666 EntryMap::iterator& it = insert_result.first;
667 const bool did_insert = insert_result.second;
669 // There was no active entry corresponding to this hash. We've already put
670 // the entry opened from hash in the |active_entries_|. We now provide the
671 // proxy object to the entry.
672 it->second->SetActiveEntryProxy(ActiveEntryProxy::Create(hash, this));
673 callback.Run(net::OK);
675 // The entry was made active while we waiting for the open from hash to
676 // finish. The entry created from hash needs to be closed, and the one
677 // in |active_entries_| can be returned to the caller.
678 simple_entry->Close();
679 it->second->OpenEntry(entry, callback);
// Completion of a key-based open. Guards against hash collisions: when the
// opened entry's stored key does not match the requested |key|, the entry
// is doomed and closed and the open fails with ERR_FAILED. The match result
// is recorded in the "KeyMatchedOnOpen" histogram.
683 void SimpleBackendImpl::OnEntryOpenedFromKey(
684 const std::string key,
686 scoped_refptr<SimpleEntryImpl> simple_entry,
687 const CompletionCallback& callback,
689 int final_code = error_code;
690 if (final_code == net::OK) {
691 bool key_matches = key.compare(simple_entry->key()) == 0;
693 // TODO(clamy): Add a unit test to check this code path.
694 DLOG(WARNING) << "Key mismatch on open.";
695 simple_entry->Doom();
696 simple_entry->Close();
697 final_code = net::ERR_FAILED;
699 DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
701 SIMPLE_CACHE_UMA(BOOLEAN, "KeyMatchedOnOpen", cache_type_, key_matches);
703 callback.Run(final_code);
// Enumeration retry hook: an ERR_FAILED open (e.g. the entry vanished)
// advances to the next entry via OpenNextEntry(); any other result is
// forwarded to |callback| unchanged.
706 void SimpleBackendImpl::CheckIterationReturnValue(
709 const CompletionCallback& callback,
711 if (error_code == net::ERR_FAILED) {
712 OpenNextEntry(iter, entry, callback);
715 callback.Run(error_code);
// Completion of a mass doom posted by DoomEntries(): calls OnDoomComplete()
// for each doomed hash (releasing queued operations) and then reports the
// overall |result| to the caller.
718 void SimpleBackendImpl::DoomEntriesComplete(
719 scoped_ptr<std::vector<uint64> > entry_hashes,
720 const net::CompletionCallback& callback,
723 entry_hashes->begin(), entry_hashes->end(),
724 std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete),
726 callback.Run(result);
729 void SimpleBackendImpl::FlushWorkerPoolForTesting() {
730 if (g_sequenced_worker_pool)
731 g_sequenced_worker_pool->FlushForTesting();
734 } // namespace disk_cache