// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_backend_impl.h"

#include <algorithm>
#include <cstdlib>
#include <functional>

#if defined(OS_POSIX)
#include <sys/resource.h>
#endif

#include "base/bind.h"
#include "base/callback.h"
#include "base/file_util.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/sparse_histogram.h"
#include "base/single_thread_task_runner.h"
#include "base/sys_info.h"
#include "base/task_runner_util.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/disk_cache/simple/simple_version_upgrade.h"

using base::Callback;
using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::SequencedWorkerPool;
using base::SingleThreadTaskRunner;
using base::Time;
using base::DirectoryExists;
using base::CreateDirectory;

namespace disk_cache {

namespace {

// Maximum number of concurrent worker pool threads, which also is the limit
// on concurrent IO (as we use one thread per IO request).
const int kDefaultMaxWorkerThreads = 50;

const char kThreadNamePrefix[] = "SimpleCache";

// A single entry may consume at most 1/kMaxFileRatio of the cache's maximum
// size (see GetMaxFileSize()).
const int kMaxFileRatio = 8;
// A global sequenced worker pool to use for launching all tasks.
SequencedWorkerPool* g_sequenced_worker_pool = NULL;

void MaybeCreateSequencedWorkerPool() {
  if (!g_sequenced_worker_pool) {
    int max_worker_threads = kDefaultMaxWorkerThreads;

    const std::string thread_count_field_trial =
        base::FieldTrialList::FindFullName("SimpleCacheMaxThreads");
    if (!thread_count_field_trial.empty()) {
      max_worker_threads =
          std::max(1, std::atoi(thread_count_field_trial.c_str()));
    }

    g_sequenced_worker_pool = new SequencedWorkerPool(max_worker_threads,
                                                      kThreadNamePrefix);
    g_sequenced_worker_pool->AddRef();  // Leak it.
  }
}

bool g_fd_limit_histogram_has_been_populated = false;

void MaybeHistogramFdLimit(net::CacheType cache_type) {
  if (g_fd_limit_histogram_has_been_populated)
    return;

  // Used in histograms; add new entries at end.
  enum FdLimitStatus {
    FD_LIMIT_STATUS_UNSUPPORTED = 0,
    FD_LIMIT_STATUS_FAILED      = 1,
    FD_LIMIT_STATUS_SUCCEEDED   = 2,
    FD_LIMIT_STATUS_MAX         = 3
  };
  FdLimitStatus fd_limit_status = FD_LIMIT_STATUS_UNSUPPORTED;
  int soft_fd_limit = 0;
  int hard_fd_limit = 0;

#if defined(OS_POSIX)
  struct rlimit nofile;
  if (!getrlimit(RLIMIT_NOFILE, &nofile)) {
    soft_fd_limit = nofile.rlim_cur;
    hard_fd_limit = nofile.rlim_max;
    fd_limit_status = FD_LIMIT_STATUS_SUCCEEDED;
  } else {
    fd_limit_status = FD_LIMIT_STATUS_FAILED;
  }
#endif

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "FileDescriptorLimitStatus", cache_type,
                   fd_limit_status, FD_LIMIT_STATUS_MAX);
  if (fd_limit_status == FD_LIMIT_STATUS_SUCCEEDED) {
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitSoft", cache_type, soft_fd_limit);
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitHard", cache_type, hard_fd_limit);
  }

  g_fd_limit_histogram_has_been_populated = true;
}

// Detects whether the files in the cache directory match the current disk
// cache backend type and version. If the directory contains no cache, creates
// the fresh structure in it.
bool FileStructureConsistent(const base::FilePath& path) {
  if (!base::PathExists(path) && !base::CreateDirectory(path)) {
    LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
    return false;
  }
  return disk_cache::UpgradeSimpleCacheOnDisk(path);
}

// A context used by a BarrierCompletionCallback to track state.
struct BarrierContext {
  BarrierContext(int expected)
      : expected(expected),
        count(0),
        had_error(false) {}

  const int expected;
  int count;
  bool had_error;
};

void BarrierCompletionCallbackImpl(
    BarrierContext* context,
    const net::CompletionCallback& final_callback,
    int result) {
  DCHECK_GT(context->expected, context->count);
  if (context->had_error)
    return;
  if (result != net::OK) {
    context->had_error = true;
    final_callback.Run(result);
    return;
  }
  ++context->count;
  if (context->count == context->expected)
    final_callback.Run(net::OK);
}

// A barrier completion callback is a net::CompletionCallback that waits for
// |count| successful results before invoking |final_callback|. In the case of
// an error, the first error is passed to |final_callback| and all others
// are ignored.
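// The BarrierContext is owned by the returned callback via base::Owned(), so
// it is destroyed automatically when the last copy of the callback goes away.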
net::CompletionCallback MakeBarrierCompletionCallback(
    int count,
    const net::CompletionCallback& final_callback) {
  BarrierContext* context = new BarrierContext(count);
  return base::Bind(&BarrierCompletionCallbackImpl,
                    base::Owned(context), final_callback);
}

// A short bindable thunk that ensures a completion callback is always called
// after running an operation asynchronously.
void RunOperationAndCallback(
    const Callback<int(const net::CompletionCallback&)>& operation,
    const net::CompletionCallback& operation_callback) {
  const int operation_result = operation.Run(operation_callback);
  if (operation_result != net::ERR_IO_PENDING)
    operation_callback.Run(operation_result);
}

void RecordIndexLoad(net::CacheType cache_type,
                     base::TimeTicks constructed_since,
                     int result) {
  const base::TimeDelta creation_to_index = base::TimeTicks::Now() -
                                            constructed_since;
  if (result == net::OK) {
    SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index);
  } else {
    SIMPLE_CACHE_UMA(TIMES,
                     "CreationToIndexFail", cache_type, creation_to_index);
  }
}

}  // namespace

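// Proxy object handed to a SimpleEntryImpl; when it is destroyed (i.e. when
// the entry stops being active), it erases the entry's hash from the owning
// backend's |active_entries_| map.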
class SimpleBackendImpl::ActiveEntryProxy
    : public SimpleEntryImpl::ActiveEntryProxy {
 public:
  virtual ~ActiveEntryProxy() {
    if (backend_) {
      DCHECK_EQ(1U, backend_->active_entries_.count(entry_hash_));
      backend_->active_entries_.erase(entry_hash_);
    }
  }

  static scoped_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
      uint64 entry_hash,
      SimpleBackendImpl* backend) {
    scoped_ptr<SimpleEntryImpl::ActiveEntryProxy>
        proxy(new ActiveEntryProxy(entry_hash, backend));
    return proxy.Pass();
  }

 private:
  ActiveEntryProxy(uint64 entry_hash,
                   SimpleBackendImpl* backend)
      : entry_hash_(entry_hash),
        backend_(backend->AsWeakPtr()) {}

  uint64 entry_hash_;
  base::WeakPtr<SimpleBackendImpl> backend_;
};

SimpleBackendImpl::SimpleBackendImpl(const FilePath& path,
                                     int max_bytes,
                                     net::CacheType cache_type,
                                     base::SingleThreadTaskRunner* cache_thread,
                                     net::NetLog* net_log)
    : path_(path),
      cache_type_(cache_type),
      cache_thread_(cache_thread),
      orig_max_size_(max_bytes),
      entry_operations_mode_(
          cache_type == net::DISK_CACHE ?
              SimpleEntryImpl::OPTIMISTIC_OPERATIONS :
              SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS),
      net_log_(net_log) {
  MaybeHistogramFdLimit(cache_type_);
}

SimpleBackendImpl::~SimpleBackendImpl() {
  index_->WriteToDisk();
}

int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
  MaybeCreateSequencedWorkerPool();

  worker_pool_ = g_sequenced_worker_pool->GetTaskRunnerWithShutdownBehavior(
      SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);

  index_.reset(new SimpleIndex(MessageLoopProxy::current(), this, cache_type_,
                               make_scoped_ptr(new SimpleIndexFile(
                                   cache_thread_.get(), worker_pool_.get(),
                                   cache_type_, path_))));
  index_->ExecuteWhenReady(
      base::Bind(&RecordIndexLoad, cache_type_, base::TimeTicks::Now()));

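  // Scan the on-disk structure on |cache_thread_|; the reply runs
  // InitializeIndex() on the current thread, which in turn runs
  // |completion_callback|.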
  PostTaskAndReplyWithResult(
      cache_thread_,
      FROM_HERE,
      base::Bind(&SimpleBackendImpl::InitCacheStructureOnDisk, path_,
                 orig_max_size_),
      base::Bind(&SimpleBackendImpl::InitializeIndex, AsWeakPtr(),
                 completion_callback));
  return net::ERR_IO_PENDING;
}

bool SimpleBackendImpl::SetMaxSize(int max_bytes) {
  orig_max_size_ = max_bytes;
  return index_->SetMaxSize(max_bytes);
}

int SimpleBackendImpl::GetMaxFileSize() const {
  return index_->max_size() / kMaxFileRatio;
}

void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) {
  // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
  CHECK_EQ(0u, entries_pending_doom_.count(entry_hash));
  entries_pending_doom_.insert(
      std::make_pair(entry_hash, std::vector<Closure>()));
}

void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash) {
  // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
  CHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  std::vector<Closure> to_run_closures;
  to_run_closures.swap(it->second);
  entries_pending_doom_.erase(it);

  std::for_each(to_run_closures.begin(), to_run_closures.end(),
                std::mem_fun_ref(&Closure::Run));
}

void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
                                    const net::CompletionCallback& callback) {
  scoped_ptr<std::vector<uint64> >
      mass_doom_entry_hashes(new std::vector<uint64>());
  mass_doom_entry_hashes->swap(*entry_hashes);

  std::vector<uint64> to_doom_individually_hashes;

  // For each of the entry hashes, there are two cases:
  // 1. The entry is either open or pending doom, and so it should be doomed
  //    individually to avoid flakes.
  // 2. The entry is not in use at all, so we can call
  //    SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
  for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
    const uint64 entry_hash = (*mass_doom_entry_hashes)[i];
    // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
    CHECK(active_entries_.count(entry_hash) == 0 ||
          entries_pending_doom_.count(entry_hash) == 0)
        << "The entry 0x" << std::hex << entry_hash
        << " is both active and pending doom.";
    if (!active_entries_.count(entry_hash) &&
        !entries_pending_doom_.count(entry_hash)) {
      continue;
    }

    to_doom_individually_hashes.push_back(entry_hash);

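    // Remove this hash from the mass-doom list with a swap-and-pop; the order
    // of the remaining hashes does not matter.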
    (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
    mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
  }

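  // The barrier is sized for one result per individual doom plus one extra
  // slot for the DoomEntrySet task posted below for the remaining hashes.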
  net::CompletionCallback barrier_callback =
      MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
                                    callback);
  for (std::vector<uint64>::const_iterator
           it = to_doom_individually_hashes.begin(),
           end = to_doom_individually_hashes.end(); it != end; ++it) {
    const int doom_result = DoomEntryFromHash(*it, barrier_callback);
    // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
    CHECK_EQ(net::ERR_IO_PENDING, doom_result);
    index_->Remove(*it);
  }

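  // Mark the remaining hashes as pending doom, so that Open/Create/Doom calls
  // arriving before DoomEntrySet finishes are queued behind the doom (see
  // OnDoomStart()/OnDoomComplete()).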
  for (std::vector<uint64>::const_iterator it = mass_doom_entry_hashes->begin(),
                                           end = mass_doom_entry_hashes->end();
       it != end; ++it) {
    index_->Remove(*it);
    OnDoomStart(*it);
  }

  // Take the raw pointer first: the order in which the arguments below are
  // evaluated is unspecified, so base::Passed() could otherwise clear
  // |mass_doom_entry_hashes| before .get() is evaluated.
  std::vector<uint64>* mass_doom_entry_hashes_ptr =
      mass_doom_entry_hashes.get();
  PostTaskAndReplyWithResult(
      worker_pool_, FROM_HERE,
      base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
                 mass_doom_entry_hashes_ptr, path_),
      base::Bind(&SimpleBackendImpl::DoomEntriesComplete,
                 AsWeakPtr(), base::Passed(&mass_doom_entry_hashes),
                 barrier_callback));
}

net::CacheType SimpleBackendImpl::GetCacheType() const {
  return net::DISK_CACHE;
}

int32 SimpleBackendImpl::GetEntryCount() const {
  // TODO(pasko): Use directory file count when index is not ready.
  return index_->GetEntryCount();
}

int SimpleBackendImpl::OpenEntry(const std::string& key,
                                 Entry** entry,
                                 const CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

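  // If a doom for this hash is still in flight, queue the open behind it; the
  // queued closure is run from OnDoomComplete().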
  // TODO(gavinp): Factor out this (not quite completely) repetitive code
  // block from OpenEntry/CreateEntry/DoomEntry.
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
                 AsWeakPtr(),
                 key,
                 entry,
                 simple_entry,
                 callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}

int SimpleBackendImpl::CreateEntry(const std::string& key,
                                   Entry** entry,
                                   const CompletionCallback& callback) {
  DCHECK_LT(0u, key.size());
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::CreateEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->CreateEntry(entry, callback);
}

int SimpleBackendImpl::DoomEntry(const std::string& key,
                                 const net::CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntry, base::Unretained(this), key);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->DoomEntry(callback);
}

int SimpleBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  return DoomEntriesBetween(Time(), Time(), callback);
}

void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
                                          Time end_time,
                                          const CompletionCallback& callback,
                                          int result) {
  if (result != net::OK) {
    callback.Run(result);
    return;
  }
  scoped_ptr<std::vector<uint64> > removed_key_hashes(
      index_->GetEntriesBetween(initial_time, end_time).release());
  DoomEntries(removed_key_hashes.get(), callback);
}

int SimpleBackendImpl::DoomEntriesBetween(
    const Time initial_time,
    const Time end_time,
    const CompletionCallback& callback) {
  return index_->ExecuteWhenReady(
      base::Bind(&SimpleBackendImpl::IndexReadyForDoom, AsWeakPtr(),
                 initial_time, end_time, callback));
}

int SimpleBackendImpl::DoomEntriesSince(
    const Time initial_time,
    const CompletionCallback& callback) {
  return DoomEntriesBetween(initial_time, Time(), callback);
}

int SimpleBackendImpl::OpenNextEntry(void** iter,
                                     Entry** next_entry,
                                     const CompletionCallback& callback) {
  CompletionCallback get_next_entry =
      base::Bind(&SimpleBackendImpl::GetNextEntryInIterator, AsWeakPtr(), iter,
                 next_entry, callback);
  return index_->ExecuteWhenReady(get_next_entry);
}

void SimpleBackendImpl::EndEnumeration(void** iter) {
  SimpleIndex::HashList* entry_list =
      static_cast<SimpleIndex::HashList*>(*iter);
  delete entry_list;
  *iter = NULL;
}

void SimpleBackendImpl::GetStats(
    std::vector<std::pair<std::string, std::string> >* stats) {
  std::pair<std::string, std::string> item;
  item.first = "Cache type";
  item.second = "Simple Cache";
  stats->push_back(item);
}

void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
  index_->UseIfExists(simple_util::GetEntryHashKey(key));
}

void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback,
                                        const DiskStatResult& result) {
  if (result.net_error == net::OK) {
    index_->SetMaxSize(result.max_size);
    index_->Initialize(result.cache_dir_mtime);
  }
  callback.Run(result.net_error);
}

SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
    const base::FilePath& path,
    uint64 suggested_max_size) {
  DiskStatResult result;
  result.max_size = suggested_max_size;
  result.net_error = net::OK;
  if (!FileStructureConsistent(path)) {
    LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
               << path.LossyDisplayName();
    result.net_error = net::ERR_FAILED;
  } else {
    bool mtime_result =
        disk_cache::simple_util::GetMTime(path, &result.cache_dir_mtime);
    DCHECK(mtime_result);
    if (!result.max_size) {
      int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
      result.max_size = disk_cache::PreferredCacheSize(available);
    }
    DCHECK(result.max_size);
  }
  return result;
}

scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
    const uint64 entry_hash,
    const std::string& key) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(entry_hash, NULL));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    SimpleEntryImpl* entry = it->second =
        new SimpleEntryImpl(cache_type_, path_, entry_hash,
                            entry_operations_mode_, this, net_log_);
    entry->SetKey(key);
    entry->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash, this));
  }
  DCHECK(it->second);
  // It's possible, but unlikely, that we have an entry hash collision with a
  // currently active entry.
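  // If so, doom the colliding entry; that removes it from |active_entries_|
  // synchronously (hence the DCHECK below), and the recursive call then
  // creates a fresh entry for |key|.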
  if (key != it->second->key()) {
    it->second->Doom();
    DCHECK_EQ(0U, active_entries_.count(entry_hash));
    return CreateOrFindActiveEntry(entry_hash, key);
  }
  return make_scoped_refptr(it->second);
}

int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash,
                                         Entry** entry,
                                         const CompletionCallback& callback) {
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntryFromHash,
                   base::Unretained(this), entry_hash, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }

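  // If an entry with this hash is already active, reuse it through the normal
  // key-based path instead of opening a second copy from disk.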
  EntryMap::iterator has_active = active_entries_.find(entry_hash);
  if (has_active != active_entries_.end()) {
    return OpenEntry(has_active->second->key(), entry, callback);
  }

  scoped_refptr<SimpleEntryImpl> simple_entry = new SimpleEntryImpl(
      cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
                 AsWeakPtr(), entry_hash, entry, simple_entry, callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}

int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash,
                                         const CompletionCallback& callback) {
  Entry** entry = new Entry*();
  scoped_ptr<Entry*> scoped_entry(entry);

  base::hash_map<uint64, std::vector<Closure> >::iterator pending_it =
      entries_pending_doom_.find(entry_hash);
  if (pending_it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntryFromHash,
                   base::Unretained(this), entry_hash);
    pending_it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }

  EntryMap::iterator active_it = active_entries_.find(entry_hash);
  if (active_it != active_entries_.end())
    return active_it->second->DoomEntry(callback);

  // There are no pending dooms and no open entry. We can make a trivial
  // call to DoomEntries() to delete this entry.
  std::vector<uint64> entry_hash_vector;
  entry_hash_vector.push_back(entry_hash);
  DoomEntries(&entry_hash_vector, callback);
  return net::ERR_IO_PENDING;
}

void SimpleBackendImpl::GetNextEntryInIterator(
    void** iter,
    Entry** next_entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code != net::OK) {
    callback.Run(error_code);
    return;
  }
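  // The iterator is a heap-allocated snapshot of all hashes in the index,
  // created lazily on the first call and freed by EndEnumeration(). Hashes
  // whose entries can no longer be opened (ERR_FAILED) are skipped.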
  if (*iter == NULL) {
    *iter = index()->GetAllHashes().release();
  }
  SimpleIndex::HashList* entry_list =
      static_cast<SimpleIndex::HashList*>(*iter);
  while (entry_list->size() > 0) {
    uint64 entry_hash = entry_list->back();
    entry_list->pop_back();
    if (index()->Has(entry_hash)) {
      *next_entry = NULL;
      CompletionCallback continue_iteration = base::Bind(
          &SimpleBackendImpl::CheckIterationReturnValue,
          AsWeakPtr(),
          iter,
          next_entry,
          callback);
      int error_code_open = OpenEntryFromHash(entry_hash,
                                              next_entry,
                                              continue_iteration);
      if (error_code_open == net::ERR_IO_PENDING)
        return;
      if (error_code_open != net::ERR_FAILED) {
        callback.Run(error_code_open);
        return;
      }
    }
  }
  callback.Run(net::ERR_FAILED);
}

void SimpleBackendImpl::OnEntryOpenedFromHash(
    uint64 hash,
    Entry** entry,
    scoped_refptr<SimpleEntryImpl> simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code != net::OK) {
    callback.Run(error_code);
    return;
  }
  DCHECK(*entry);
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(hash, simple_entry));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    // There was no active entry corresponding to this hash. We've already put
    // the entry opened from hash in |active_entries_|. We now provide the
    // proxy object to the entry.
    it->second->SetActiveEntryProxy(ActiveEntryProxy::Create(hash, this));
    callback.Run(net::OK);
  } else {
    // The entry was made active while we were waiting for the open from hash
    // to finish. The entry created from hash needs to be closed, and the one
    // in |active_entries_| can be returned to the caller.
    simple_entry->Close();
    it->second->OpenEntry(entry, callback);
  }
}

void SimpleBackendImpl::OnEntryOpenedFromKey(
    const std::string key,
    Entry** entry,
    scoped_refptr<SimpleEntryImpl> simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  int final_code = error_code;
  if (final_code == net::OK) {
    bool key_matches = key.compare(simple_entry->key()) == 0;
    if (!key_matches) {
      // TODO(clamy): Add a unit test to check this code path.
      DLOG(WARNING) << "Key mismatch on open.";
      simple_entry->Doom();
      simple_entry->Close();
      final_code = net::ERR_FAILED;
    } else {
      DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
    }
    SIMPLE_CACHE_UMA(BOOLEAN, "KeyMatchedOnOpen", cache_type_, key_matches);
  }
  callback.Run(final_code);
}

void SimpleBackendImpl::CheckIterationReturnValue(
    void** iter,
    Entry** entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code == net::ERR_FAILED) {
    OpenNextEntry(iter, entry, callback);
    return;
  }
  callback.Run(error_code);
}

void SimpleBackendImpl::DoomEntriesComplete(
    scoped_ptr<std::vector<uint64> > entry_hashes,
    const net::CompletionCallback& callback,
    int result) {
  std::for_each(
      entry_hashes->begin(), entry_hashes->end(),
      std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete),
                   this));
  callback.Run(result);
}

void SimpleBackendImpl::FlushWorkerPoolForTesting() {
  if (g_sequenced_worker_pool)
    g_sequenced_worker_pool->FlushForTesting();
}

}  // namespace disk_cache