Upstream version 10.38.208.0
src/net/disk_cache/simple/simple_entry_impl.cc
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/disk_cache/simple/simple_entry_impl.h"
6
7 #include <algorithm>
8 #include <cstring>
9 #include <vector>
10
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/callback.h"
14 #include "base/location.h"
15 #include "base/logging.h"
16 #include "base/message_loop/message_loop_proxy.h"
17 #include "base/task_runner.h"
18 #include "base/task_runner_util.h"
19 #include "base/time/time.h"
20 #include "net/base/io_buffer.h"
21 #include "net/base/net_errors.h"
22 #include "net/disk_cache/net_log_parameters.h"
23 #include "net/disk_cache/simple/simple_backend_impl.h"
24 #include "net/disk_cache/simple/simple_histogram_macros.h"
25 #include "net/disk_cache/simple/simple_index.h"
26 #include "net/disk_cache/simple/simple_net_log_parameters.h"
27 #include "net/disk_cache/simple/simple_synchronous_entry.h"
28 #include "net/disk_cache/simple/simple_util.h"
29 #include "third_party/zlib/zlib.h"
30
31 namespace disk_cache {
32 namespace {
33
34 // An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
35 // the cache.
36 const int64 kMaxSparseDataSizeDivisor = 10;
37
38 // Used in histograms, please only add entries at the end.
39 enum ReadResult {
40   READ_RESULT_SUCCESS = 0,
41   READ_RESULT_INVALID_ARGUMENT = 1,
42   READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
43   READ_RESULT_BAD_STATE = 3,
44   READ_RESULT_FAST_EMPTY_RETURN = 4,
45   READ_RESULT_SYNC_READ_FAILURE = 5,
46   READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
47   READ_RESULT_MAX = 7,
48 };
49
50 // Used in histograms, please only add entries at the end.
51 enum WriteResult {
52   WRITE_RESULT_SUCCESS = 0,
53   WRITE_RESULT_INVALID_ARGUMENT = 1,
54   WRITE_RESULT_OVER_MAX_SIZE = 2,
55   WRITE_RESULT_BAD_STATE = 3,
56   WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
57   WRITE_RESULT_FAST_EMPTY_RETURN = 5,
58   WRITE_RESULT_MAX = 6,
59 };
60
61 // Used in histograms, please only add entries at the end.
62 enum HeaderSizeChange {
63   HEADER_SIZE_CHANGE_INITIAL,
64   HEADER_SIZE_CHANGE_SAME,
65   HEADER_SIZE_CHANGE_INCREASE,
66   HEADER_SIZE_CHANGE_DECREASE,
67   HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
68   HEADER_SIZE_CHANGE_MAX
69 };
70
71 void RecordReadResult(net::CacheType cache_type, ReadResult result) {
72   SIMPLE_CACHE_UMA(ENUMERATION,
73                    "ReadResult", cache_type, result, READ_RESULT_MAX);
74 }
75
76 void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
77   SIMPLE_CACHE_UMA(ENUMERATION,
78                    "WriteResult2", cache_type, result, WRITE_RESULT_MAX);
79 }
80
81 // TODO(ttuttle): Consider removing this once we have a good handle on header
82 // size changes.
83 void RecordHeaderSizeChange(net::CacheType cache_type,
84                             int old_size, int new_size) {
85   HeaderSizeChange size_change;
86
87   SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size);
88
89   if (old_size == 0) {
90     size_change = HEADER_SIZE_CHANGE_INITIAL;
91   } else if (new_size == old_size) {
92     size_change = HEADER_SIZE_CHANGE_SAME;
93   } else if (new_size > old_size) {
94     int delta = new_size - old_size;
95     SIMPLE_CACHE_UMA(COUNTS_10000,
96                      "HeaderSizeIncreaseAbsolute", cache_type, delta);
97     SIMPLE_CACHE_UMA(PERCENTAGE,
98                      "HeaderSizeIncreasePercentage", cache_type,
99                      delta * 100 / old_size);
100     size_change = HEADER_SIZE_CHANGE_INCREASE;
101   } else {  // new_size < old_size
102     int delta = old_size - new_size;
103     SIMPLE_CACHE_UMA(COUNTS_10000,
104                      "HeaderSizeDecreaseAbsolute", cache_type, delta);
105     SIMPLE_CACHE_UMA(PERCENTAGE,
106                      "HeaderSizeDecreasePercentage", cache_type,
107                      delta * 100 / old_size);
108     size_change = HEADER_SIZE_CHANGE_DECREASE;
109   }
110
111   SIMPLE_CACHE_UMA(ENUMERATION,
112                    "HeaderSizeChange", cache_type,
113                    size_change, HEADER_SIZE_CHANGE_MAX);
114 }
115
116 void RecordUnexpectedStream0Write(net::CacheType cache_type) {
117   SIMPLE_CACHE_UMA(ENUMERATION,
118                    "HeaderSizeChange", cache_type,
119                    HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, HEADER_SIZE_CHANGE_MAX);
120 }
121
122 int g_open_entry_count = 0;
123
124 void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) {
125   g_open_entry_count += offset;
126   SIMPLE_CACHE_UMA(COUNTS_10000,
127                    "GlobalOpenEntryCount", cache_type, g_open_entry_count);
128 }
129
130 void InvokeCallbackIfBackendIsAlive(
131     const base::WeakPtr<SimpleBackendImpl>& backend,
132     const net::CompletionCallback& completion_callback,
133     int result) {
134   DCHECK(!completion_callback.is_null());
135   if (!backend.get())
136     return;
137   completion_callback.Run(result);
138 }
139
140 }  // namespace
141
142 using base::Closure;
143 using base::FilePath;
144 using base::MessageLoopProxy;
145 using base::Time;
146 using base::TaskRunner;
147
148 // A helper class to ensure that RunNextOperationIfNeeded() is called when
149 // exiting the current stack frame.
150 class SimpleEntryImpl::ScopedOperationRunner {
151  public:
152   explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
153   }
154
155   ~ScopedOperationRunner() {
156     entry_->RunNextOperationIfNeeded();
157   }
158
159  private:
160   SimpleEntryImpl* const entry_;
161 };
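// Usage sketch (illustrative only; |SomeOperationInternal| is a hypothetical
// name): the internal operations below put a ScopedOperationRunner on the
// stack so the pending-operation queue is pumped on every return path,
// including early returns.
//
//   void SimpleEntryImpl::SomeOperationInternal() {
//     ScopedOperationRunner operation_runner(this);
//     if (state_ == STATE_FAILURE)
//       return;  // RunNextOperationIfNeeded() still runs on destruction.
//     // ... post work to |worker_pool_| ...
//   }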
162
163 SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() {}
164
165 SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
166                                  const FilePath& path,
167                                  const uint64 entry_hash,
168                                  OperationsMode operations_mode,
169                                  SimpleBackendImpl* backend,
170                                  net::NetLog* net_log)
171     : backend_(backend->AsWeakPtr()),
172       cache_type_(cache_type),
173       worker_pool_(backend->worker_pool()),
174       path_(path),
175       entry_hash_(entry_hash),
176       use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
177       last_used_(Time::Now()),
178       last_modified_(last_used_),
179       sparse_data_size_(0),
180       open_count_(0),
181       doomed_(false),
182       state_(STATE_UNINITIALIZED),
183       synchronous_entry_(NULL),
184       net_log_(net::BoundNetLog::Make(
185           net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)),
186       stream_0_data_(new net::GrowableIOBuffer()) {
187   COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
188                  arrays_should_be_same_size);
189   COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
190                  arrays_should_be_same_size);
191   COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
192                  arrays_should_be_same_size);
193   COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
194                  arrays_should_be_same_size);
195   MakeUninitialized();
196   net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
197       CreateNetLogSimpleEntryConstructionCallback(this));
198 }
199
200 void SimpleEntryImpl::SetActiveEntryProxy(
201     scoped_ptr<ActiveEntryProxy> active_entry_proxy) {
202   DCHECK(!active_entry_proxy_);
203   active_entry_proxy_.reset(active_entry_proxy.release());
204 }
205
206 int SimpleEntryImpl::OpenEntry(Entry** out_entry,
207                                const CompletionCallback& callback) {
208   DCHECK(backend_.get());
209
210   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);
211
212   bool have_index = backend_->index()->initialized();
213   // This enumeration is used in histograms; add entries only at the end.
214   enum OpenEntryIndexEnum {
215     INDEX_NOEXIST = 0,
216     INDEX_MISS = 1,
217     INDEX_HIT = 2,
218     INDEX_MAX = 3,
219   };
220   OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
221   if (have_index) {
222     if (backend_->index()->Has(entry_hash_))
223       open_entry_index_enum = INDEX_HIT;
224     else
225       open_entry_index_enum = INDEX_MISS;
226   }
227   SIMPLE_CACHE_UMA(ENUMERATION,
228                    "OpenEntryIndexState", cache_type_,
229                    open_entry_index_enum, INDEX_MAX);
230
231   // If the entry is not known to the index, initiate fast failover to the network.
232   if (open_entry_index_enum == INDEX_MISS) {
233     net_log_.AddEventWithNetErrorCode(
234         net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
235         net::ERR_FAILED);
236     return net::ERR_FAILED;
237   }
238
239   pending_operations_.push(SimpleEntryOperation::OpenOperation(
240       this, have_index, callback, out_entry));
241   RunNextOperationIfNeeded();
242   return net::ERR_IO_PENDING;
243 }
244
245 int SimpleEntryImpl::CreateEntry(Entry** out_entry,
246                                  const CompletionCallback& callback) {
247   DCHECK(backend_.get());
248   DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));
249
250   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);
251
252   bool have_index = backend_->index()->initialized();
253   int ret_value = net::ERR_FAILED;
254   if (use_optimistic_operations_ &&
255       state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
256     net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
257
258     ReturnEntryToCaller(out_entry);
259     pending_operations_.push(SimpleEntryOperation::CreateOperation(
260         this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
261     ret_value = net::OK;
262   } else {
263     pending_operations_.push(SimpleEntryOperation::CreateOperation(
264         this, have_index, callback, out_entry));
265     ret_value = net::ERR_IO_PENDING;
266   }
267
268   // We insert the entry into the index before creating the entry files in
269   // the SimpleSynchronousEntry. This way the worst-case scenario is having
270   // the entry in the index without the created files yet, which means we
271   // never leak files. CreationOperationComplete will remove the entry from
272   // the index if the creation fails.
273   backend_->index()->Insert(entry_hash_);
274
275   RunNextOperationIfNeeded();
276   return ret_value;
277 }
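// Caller-side sketch (illustrative; |simple_entry|, |entry| and |callback|
// are hypothetical): with OPTIMISTIC_OPERATIONS the entry can be handed back
// synchronously while the file creation happens later on the worker pool.
//
//   disk_cache::Entry* entry = NULL;
//   int rv = simple_entry->CreateEntry(&entry, callback);
//   if (rv == net::OK) {
//     // Optimistic path: |entry| is usable immediately.
//   } else if (rv == net::ERR_IO_PENDING) {
//     // |callback| runs with the result; |entry| is set on success.
//   }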
278
279 int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
280   if (doomed_)
281     return net::OK;
282   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
283   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);
284
285   MarkAsDoomed();
286   if (backend_.get())
287     backend_->OnDoomStart(entry_hash_);
288   pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
289   RunNextOperationIfNeeded();
290   return net::ERR_IO_PENDING;
291 }
292
293 void SimpleEntryImpl::SetKey(const std::string& key) {
294   key_ = key;
295   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
296       net::NetLog::StringCallback("key", &key));
297 }
298
299 void SimpleEntryImpl::Doom() {
300   DoomEntry(CompletionCallback());
301 }
302
303 void SimpleEntryImpl::Close() {
304   DCHECK(io_thread_checker_.CalledOnValidThread());
305   DCHECK_LT(0, open_count_);
306
307   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);
308
309   if (--open_count_ > 0) {
310     DCHECK(!HasOneRef());
311     Release();  // Balanced in ReturnEntryToCaller().
312     return;
313   }
314
315   pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
316   DCHECK(!HasOneRef());
317   Release();  // Balanced in ReturnEntryToCaller().
318   RunNextOperationIfNeeded();
319 }
320
321 std::string SimpleEntryImpl::GetKey() const {
322   DCHECK(io_thread_checker_.CalledOnValidThread());
323   return key_;
324 }
325
326 Time SimpleEntryImpl::GetLastUsed() const {
327   DCHECK(io_thread_checker_.CalledOnValidThread());
328   return last_used_;
329 }
330
331 Time SimpleEntryImpl::GetLastModified() const {
332   DCHECK(io_thread_checker_.CalledOnValidThread());
333   return last_modified_;
334 }
335
336 int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
337   DCHECK(io_thread_checker_.CalledOnValidThread());
338   DCHECK_LE(0, data_size_[stream_index]);
339   return data_size_[stream_index];
340 }
341
342 int SimpleEntryImpl::ReadData(int stream_index,
343                               int offset,
344                               net::IOBuffer* buf,
345                               int buf_len,
346                               const CompletionCallback& callback) {
347   DCHECK(io_thread_checker_.CalledOnValidThread());
348
349   if (net_log_.IsLogging()) {
350     net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
351         CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
352                                           false));
353   }
354
355   if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
356       buf_len < 0) {
357     if (net_log_.IsLogging()) {
358       net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
359           CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
360     }
361
362     RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
363     return net::ERR_INVALID_ARGUMENT;
364   }
365   if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
366                                       offset < 0 || !buf_len)) {
367     if (net_log_.IsLogging()) {
368       net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
369           CreateNetLogReadWriteCompleteCallback(0));
370     }
371
372     RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN);
373     return 0;
374   }
375
376   // TODO(clamy): return immediately when reading from stream 0.
377
378   // TODO(felipeg): Optimization: Add support for truly parallel read
379   // operations.
380   bool alone_in_queue =
381       pending_operations_.size() == 0 && state_ == STATE_READY;
382   pending_operations_.push(SimpleEntryOperation::ReadOperation(
383       this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
384   RunNextOperationIfNeeded();
385   return net::ERR_IO_PENDING;
386 }
387
388 int SimpleEntryImpl::WriteData(int stream_index,
389                                int offset,
390                                net::IOBuffer* buf,
391                                int buf_len,
392                                const CompletionCallback& callback,
393                                bool truncate) {
394   DCHECK(io_thread_checker_.CalledOnValidThread());
395
396   if (net_log_.IsLogging()) {
397     net_log_.AddEvent(
398         net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
399         CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
400                                           truncate));
401   }
402
403   if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
404       offset < 0 || buf_len < 0) {
405     if (net_log_.IsLogging()) {
406       net_log_.AddEvent(
407           net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
408           CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
409     }
410     RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
411     return net::ERR_INVALID_ARGUMENT;
412   }
413   if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
414     if (net_log_.IsLogging()) {
415       net_log_.AddEvent(
416           net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
417           CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
418     }
419     RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
420     return net::ERR_FAILED;
421   }
422   ScopedOperationRunner operation_runner(this);
423
424   // Stream 0 data is kept in memory, so it can be written immediately if
425   // there are no IO operations pending.
426   if (stream_index == 0 && state_ == STATE_READY &&
427       pending_operations_.size() == 0)
428     return SetStream0Data(buf, offset, buf_len, truncate);
429
430   // We can only do an optimistic write if there are no pending operations,
431   // so that we are sure the next call to RunNextOperationIfNeeded will
432   // actually run the write operation that sets the stream size. It also
433   // protects us from previous, possibly-conflicting writes that could be
434   // stacked in |pending_operations_|. We could optimize this for the case
435   // where only read operations are enqueued.
436   const bool optimistic =
437       (use_optimistic_operations_ && state_ == STATE_READY &&
438        pending_operations_.size() == 0);
439   CompletionCallback op_callback;
440   scoped_refptr<net::IOBuffer> op_buf;
441   int ret_value = net::ERR_FAILED;
442   if (!optimistic) {
443     op_buf = buf;
444     op_callback = callback;
445     ret_value = net::ERR_IO_PENDING;
446   } else {
447     // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
448     // here to avoid paying the price of the RefCountedThreadSafe atomic
449     // operations.
450     if (buf) {
451       op_buf = new IOBuffer(buf_len);
452       memcpy(op_buf->data(), buf->data(), buf_len);
453     }
454     op_callback = CompletionCallback();
455     ret_value = buf_len;
456     if (net_log_.IsLogging()) {
457       net_log_.AddEvent(
458           net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
459           CreateNetLogReadWriteCompleteCallback(buf_len));
460     }
461   }
462
463   pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
464                                                                 stream_index,
465                                                                 offset,
466                                                                 buf_len,
467                                                                 op_buf.get(),
468                                                                 truncate,
469                                                                 optimistic,
470                                                                 op_callback));
471   return ret_value;
472 }
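// Note on the optimistic write path above: the data has been copied into
// |op_buf| and |buf_len| is returned synchronously, so the caller may reuse
// |buf| immediately; a failure of the deferred write surfaces only through
// later operations, once the entry has entered STATE_FAILURE.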
473
474 int SimpleEntryImpl::ReadSparseData(int64 offset,
475                                     net::IOBuffer* buf,
476                                     int buf_len,
477                                     const CompletionCallback& callback) {
478   DCHECK(io_thread_checker_.CalledOnValidThread());
479
480   ScopedOperationRunner operation_runner(this);
481   pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
482       this, offset, buf_len, buf, callback));
483   return net::ERR_IO_PENDING;
484 }
485
486 int SimpleEntryImpl::WriteSparseData(int64 offset,
487                                      net::IOBuffer* buf,
488                                      int buf_len,
489                                      const CompletionCallback& callback) {
490   DCHECK(io_thread_checker_.CalledOnValidThread());
491
492   ScopedOperationRunner operation_runner(this);
493   pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
494       this, offset, buf_len, buf, callback));
495   return net::ERR_IO_PENDING;
496 }
497
498 int SimpleEntryImpl::GetAvailableRange(int64 offset,
499                                        int len,
500                                        int64* start,
501                                        const CompletionCallback& callback) {
502   DCHECK(io_thread_checker_.CalledOnValidThread());
503
504   ScopedOperationRunner operation_runner(this);
505   pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
506       this, offset, len, start, callback));
507   return net::ERR_IO_PENDING;
508 }
509
510 bool SimpleEntryImpl::CouldBeSparse() const {
511   DCHECK(io_thread_checker_.CalledOnValidThread());
512   // TODO(ttuttle): Actually check.
513   return true;
514 }
515
516 void SimpleEntryImpl::CancelSparseIO() {
517   DCHECK(io_thread_checker_.CalledOnValidThread());
518   // The Simple Cache does not return distinct objects for the same non-doomed
519   // entry, so there's no need to coordinate which object is performing sparse
520   // I/O.  Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
521 }
522
523 int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
524   DCHECK(io_thread_checker_.CalledOnValidThread());
525   // The Simple Cache does not return distinct objects for the same non-doomed
526   // entry, so there's no need to coordinate which object is performing sparse
527   // I/O.  Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
528   return net::OK;
529 }
530
531 SimpleEntryImpl::~SimpleEntryImpl() {
532   DCHECK(io_thread_checker_.CalledOnValidThread());
533   DCHECK_EQ(0U, pending_operations_.size());
534   DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
535   DCHECK(!synchronous_entry_);
536   net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
537 }
538
539 void SimpleEntryImpl::PostClientCallback(const CompletionCallback& callback,
540                                          int result) {
541   if (callback.is_null())
542     return;
543   // Note that the callback is posted rather than directly invoked to avoid
544   // reentrancy issues.
545   MessageLoopProxy::current()->PostTask(
546       FROM_HERE,
547       base::Bind(&InvokeCallbackIfBackendIsAlive, backend_, callback, result));
548 }
549
550 void SimpleEntryImpl::MakeUninitialized() {
551   state_ = STATE_UNINITIALIZED;
552   std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
553   std::memset(crc32s_, 0, sizeof(crc32s_));
554   std::memset(have_written_, 0, sizeof(have_written_));
555   std::memset(data_size_, 0, sizeof(data_size_));
556   for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
557     crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
558   }
559 }
560
561 void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
562   DCHECK(out_entry);
563   ++open_count_;
564   AddRef();  // Balanced in Close()
565   if (!backend_.get()) {
566     // This method can be called when an asynchronous operation completes.
567     // If the backend no longer exists, the callback won't be invoked, and
568     // so we must close ourselves to avoid leaking. Likewise, there's no
569     // guarantee the client-provided pointer (|out_entry|) hasn't been
570     // freed, so there is no point dereferencing it, either.
571     Close();
572     return;
573   }
574   *out_entry = this;
575 }
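// Note: the AddRef() above is balanced by a Release() in Close(), and
// |open_count_| tracks the number of handles handed out this way; the entry
// is only closed for real once the last handle is released.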
576
577 void SimpleEntryImpl::MarkAsDoomed() {
578   doomed_ = true;
579   if (!backend_.get())
580     return;
581   backend_->index()->Remove(entry_hash_);
582   active_entry_proxy_.reset();
583 }
584
585 void SimpleEntryImpl::RunNextOperationIfNeeded() {
586   DCHECK(io_thread_checker_.CalledOnValidThread());
587   SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
588                    "EntryOperationsPending", cache_type_,
589                    pending_operations_.size(), 0, 100, 20);
590   if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
591     scoped_ptr<SimpleEntryOperation> operation(
592         new SimpleEntryOperation(pending_operations_.front()));
593     pending_operations_.pop();
594     switch (operation->type()) {
595       case SimpleEntryOperation::TYPE_OPEN:
596         OpenEntryInternal(operation->have_index(),
597                           operation->callback(),
598                           operation->out_entry());
599         break;
600       case SimpleEntryOperation::TYPE_CREATE:
601         CreateEntryInternal(operation->have_index(),
602                             operation->callback(),
603                             operation->out_entry());
604         break;
605       case SimpleEntryOperation::TYPE_CLOSE:
606         CloseInternal();
607         break;
608       case SimpleEntryOperation::TYPE_READ:
609         RecordReadIsParallelizable(*operation);
610         ReadDataInternal(operation->index(),
611                          operation->offset(),
612                          operation->buf(),
613                          operation->length(),
614                          operation->callback());
615         break;
616       case SimpleEntryOperation::TYPE_WRITE:
617         RecordWriteDependencyType(*operation);
618         WriteDataInternal(operation->index(),
619                           operation->offset(),
620                           operation->buf(),
621                           operation->length(),
622                           operation->callback(),
623                           operation->truncate());
624         break;
625       case SimpleEntryOperation::TYPE_READ_SPARSE:
626         ReadSparseDataInternal(operation->sparse_offset(),
627                                operation->buf(),
628                                operation->length(),
629                                operation->callback());
630         break;
631       case SimpleEntryOperation::TYPE_WRITE_SPARSE:
632         WriteSparseDataInternal(operation->sparse_offset(),
633                                 operation->buf(),
634                                 operation->length(),
635                                 operation->callback());
636         break;
637       case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
638         GetAvailableRangeInternal(operation->sparse_offset(),
639                                   operation->length(),
640                                   operation->out_start(),
641                                   operation->callback());
642         break;
643       case SimpleEntryOperation::TYPE_DOOM:
644         DoomEntryInternal(operation->callback());
645         break;
646       default:
647         NOTREACHED();
648     }
649     // The operation is kept for histograms. Make sure it does not leak
650     // resources.
651     executing_operation_.swap(operation);
652     executing_operation_->ReleaseReferences();
653     // |this| may have been deleted.
654   }
655 }
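// Note on the state machine driving the dispatch above: entries start in
// STATE_UNINITIALIZED; Open/Create moves them through STATE_IO_PENDING to
// STATE_READY on success or STATE_FAILURE on error. While an operation is in
// flight (STATE_IO_PENDING) the queue stalls, and the *OperationComplete
// methods call RunNextOperationIfNeeded() again to resume it.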
656
657 void SimpleEntryImpl::OpenEntryInternal(bool have_index,
658                                         const CompletionCallback& callback,
659                                         Entry** out_entry) {
660   ScopedOperationRunner operation_runner(this);
661
662   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);
663
664   if (state_ == STATE_READY) {
665     ReturnEntryToCaller(out_entry);
666     PostClientCallback(callback, net::OK);
667     net_log_.AddEvent(
668         net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
669         CreateNetLogSimpleEntryCreationCallback(this, net::OK));
670     return;
671   }
672   if (state_ == STATE_FAILURE) {
673     PostClientCallback(callback, net::ERR_FAILED);
674     net_log_.AddEvent(
675         net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
676         CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
677     return;
678   }
679
680   DCHECK_EQ(STATE_UNINITIALIZED, state_);
681   DCHECK(!synchronous_entry_);
682   state_ = STATE_IO_PENDING;
683   const base::TimeTicks start_time = base::TimeTicks::Now();
684   scoped_ptr<SimpleEntryCreationResults> results(
685       new SimpleEntryCreationResults(
686           SimpleEntryStat(last_used_, last_modified_, data_size_,
687                           sparse_data_size_)));
688   Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
689                             cache_type_,
690                             path_,
691                             entry_hash_,
692                             have_index,
693                             results.get());
694   Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
695                              this,
696                              callback,
697                              start_time,
698                              base::Passed(&results),
699                              out_entry,
700                              net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
701   worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
702 }
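// Threading note: this is the pattern used throughout this file. The blocking
// file work runs on |worker_pool_| while the reply runs back on the IO
// thread; binding |this| into the reply keeps this refcounted entry alive
// until the reply has run, and |results| is kept alive by base::Passed().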
703
704 void SimpleEntryImpl::CreateEntryInternal(bool have_index,
705                                           const CompletionCallback& callback,
706                                           Entry** out_entry) {
707   ScopedOperationRunner operation_runner(this);
708
709   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);
710
711   if (state_ != STATE_UNINITIALIZED) {
712     // There is already an active normal entry.
713     net_log_.AddEvent(
714         net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
715         CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
716     PostClientCallback(callback, net::ERR_FAILED);
717     return;
718   }
719   DCHECK_EQ(STATE_UNINITIALIZED, state_);
720   DCHECK(!synchronous_entry_);
721
722   state_ = STATE_IO_PENDING;
723
724   // Since we don't know the correct values for |last_used_| and
725   // |last_modified_| yet, we make this approximation.
726   last_used_ = last_modified_ = base::Time::Now();
727
728   // If creation succeeds, we should mark all streams to be saved on close.
729   for (int i = 0; i < kSimpleEntryStreamCount; ++i)
730     have_written_[i] = true;
731
732   const base::TimeTicks start_time = base::TimeTicks::Now();
733   scoped_ptr<SimpleEntryCreationResults> results(
734       new SimpleEntryCreationResults(
735           SimpleEntryStat(last_used_, last_modified_, data_size_,
736                           sparse_data_size_)));
737   Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
738                             cache_type_,
739                             path_,
740                             key_,
741                             entry_hash_,
742                             have_index,
743                             results.get());
744   Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
745                              this,
746                              callback,
747                              start_time,
748                              base::Passed(&results),
749                              out_entry,
750                              net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
751   worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
752 }
753
754 void SimpleEntryImpl::CloseInternal() {
755   DCHECK(io_thread_checker_.CalledOnValidThread());
756   typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
757   scoped_ptr<std::vector<CRCRecord> >
758       crc32s_to_write(new std::vector<CRCRecord>());
759
760   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);
761
762   if (state_ == STATE_READY) {
763     DCHECK(synchronous_entry_);
764     state_ = STATE_IO_PENDING;
765     for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
766       if (have_written_[i]) {
767         if (GetDataSize(i) == crc32s_end_offset_[i]) {
768           int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
769           crc32s_to_write->push_back(CRCRecord(i, true, crc));
770         } else {
771           crc32s_to_write->push_back(CRCRecord(i, false, 0));
772         }
773       }
774     }
775   } else {
776     DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
777   }
778
779   if (synchronous_entry_) {
780     Closure task =
781         base::Bind(&SimpleSynchronousEntry::Close,
782                    base::Unretained(synchronous_entry_),
783                    SimpleEntryStat(last_used_, last_modified_, data_size_,
784                                    sparse_data_size_),
785                    base::Passed(&crc32s_to_write),
786                    stream_0_data_);
787     Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
788     synchronous_entry_ = NULL;
789     worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
790
791     for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
792       if (!have_written_[i]) {
793         SIMPLE_CACHE_UMA(ENUMERATION,
794                          "CheckCRCResult", cache_type_,
795                          crc_check_state_[i], CRC_CHECK_MAX);
796       }
797     }
798   } else {
799     CloseOperationComplete();
800   }
801 }
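// Note: a whole-stream checksum is persisted at close only for streams whose
// incremental CRC checkpoint (|crc32s_end_offset_|) reached the full stream
// size; streams written non-sequentially get a CRCRecord with no checksum.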
802
803 void SimpleEntryImpl::ReadDataInternal(int stream_index,
804                                        int offset,
805                                        net::IOBuffer* buf,
806                                        int buf_len,
807                                        const CompletionCallback& callback) {
808   DCHECK(io_thread_checker_.CalledOnValidThread());
809   ScopedOperationRunner operation_runner(this);
810
811   if (net_log_.IsLogging()) {
812     net_log_.AddEvent(
813         net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
814         CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
815                                           false));
816   }
817
818   if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
819     if (!callback.is_null()) {
820       RecordReadResult(cache_type_, READ_RESULT_BAD_STATE);
821       // Note that the API states that client-provided callbacks for entry-level
822       // (i.e. non-backend) operations (e.g. read, write) are invoked even if
823       // the backend was already destroyed.
824       MessageLoopProxy::current()->PostTask(
825           FROM_HERE, base::Bind(callback, net::ERR_FAILED));
826     }
827     if (net_log_.IsLogging()) {
828       net_log_.AddEvent(
829           net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
830           CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
831     }
832     return;
833   }
834   DCHECK_EQ(STATE_READY, state_);
835   if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
836     RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
837     // If there is nothing to read, we bail out before setting state_ to
838     // STATE_IO_PENDING.
839     if (!callback.is_null())
840       MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0));
841     return;
842   }
843
844   buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
845
846   // Since stream 0 data is kept in memory, it is read immediately.
847   if (stream_index == 0) {
848     int ret_value = ReadStream0Data(buf, offset, buf_len);
849     if (!callback.is_null()) {
850       MessageLoopProxy::current()->PostTask(FROM_HERE,
851                                             base::Bind(callback, ret_value));
852     }
853     return;
854   }
855
856   state_ = STATE_IO_PENDING;
857   if (!doomed_ && backend_.get())
858     backend_->index()->UseIfExists(entry_hash_);
859
860   scoped_ptr<uint32> read_crc32(new uint32());
861   scoped_ptr<int> result(new int());
862   scoped_ptr<SimpleEntryStat> entry_stat(
863       new SimpleEntryStat(last_used_, last_modified_, data_size_,
864                           sparse_data_size_));
865   Closure task = base::Bind(
866       &SimpleSynchronousEntry::ReadData,
867       base::Unretained(synchronous_entry_),
868       SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
869       make_scoped_refptr(buf),
870       read_crc32.get(),
871       entry_stat.get(),
872       result.get());
873   Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
874                              this,
875                              stream_index,
876                              offset,
877                              callback,
878                              base::Passed(&read_crc32),
879                              base::Passed(&entry_stat),
880                              base::Passed(&result));
881   worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
882 }
883
884 void SimpleEntryImpl::WriteDataInternal(int stream_index,
885                                        int offset,
886                                        net::IOBuffer* buf,
887                                        int buf_len,
888                                        const CompletionCallback& callback,
889                                        bool truncate) {
890   DCHECK(io_thread_checker_.CalledOnValidThread());
891   ScopedOperationRunner operation_runner(this);
892
893   if (net_log_.IsLogging()) {
894     net_log_.AddEvent(
895         net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
896         CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
897                                           truncate));
898   }
899
900   if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
901     RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE);
902     if (net_log_.IsLogging()) {
903       net_log_.AddEvent(
904           net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
905           CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
906     }
907     if (!callback.is_null()) {
908       MessageLoopProxy::current()->PostTask(
909           FROM_HERE, base::Bind(callback, net::ERR_FAILED));
910     }
911     // |this| may be destroyed after return here.
912     return;
913   }
914
915   DCHECK_EQ(STATE_READY, state_);
916
917   // Since stream 0 data is kept in memory, it will be written immediately.
918   if (stream_index == 0) {
919     int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
920     if (!callback.is_null()) {
921       MessageLoopProxy::current()->PostTask(FROM_HERE,
922                                             base::Bind(callback, ret_value));
923     }
924     return;
925   }
926
927   // Ignore zero-length writes that do not change the file size.
928   if (buf_len == 0) {
929     int32 data_size = data_size_[stream_index];
930     if (truncate ? (offset == data_size) : (offset <= data_size)) {
931       RecordWriteResult(cache_type_, WRITE_RESULT_FAST_EMPTY_RETURN);
932       if (!callback.is_null()) {
933         MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
934             callback, 0));
935       }
936       return;
937     }
938   }
939   state_ = STATE_IO_PENDING;
940   if (!doomed_ && backend_.get())
941     backend_->index()->UseIfExists(entry_hash_);
942
943   AdvanceCrc(buf, offset, buf_len, stream_index);
944
945   // |entry_stat| needs to be initialized before modifying |data_size_|.
946   scoped_ptr<SimpleEntryStat> entry_stat(
947       new SimpleEntryStat(last_used_, last_modified_, data_size_,
948                           sparse_data_size_));
949   if (truncate) {
950     data_size_[stream_index] = offset + buf_len;
951   } else {
952     data_size_[stream_index] = std::max(offset + buf_len,
953                                         GetDataSize(stream_index));
954   }
955
956   // Since we don't know the correct values for |last_used_| and
957   // |last_modified_| yet, we make this approximation.
958   last_used_ = last_modified_ = base::Time::Now();
959
960   have_written_[stream_index] = true;
961   // Writing to stream 1 affects the placement of stream 0 in the file, so
962   // the EOF record will have to be rewritten.
963   if (stream_index == 1)
964     have_written_[0] = true;
965
966   scoped_ptr<int> result(new int());
967   Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
968                             base::Unretained(synchronous_entry_),
969                             SimpleSynchronousEntry::EntryOperationData(
970                                 stream_index, offset, buf_len, truncate,
971                                 doomed_),
972                             make_scoped_refptr(buf),
973                             entry_stat.get(),
974                             result.get());
975   Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
976                              this,
977                              stream_index,
978                              callback,
979                              base::Passed(&entry_stat),
980                              base::Passed(&result));
981   worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
982 }
983
984 void SimpleEntryImpl::ReadSparseDataInternal(
985     int64 sparse_offset,
986     net::IOBuffer* buf,
987     int buf_len,
988     const CompletionCallback& callback) {
989   DCHECK(io_thread_checker_.CalledOnValidThread());
990   ScopedOperationRunner operation_runner(this);
991
992   DCHECK_EQ(STATE_READY, state_);
993   state_ = STATE_IO_PENDING;
994
995   scoped_ptr<int> result(new int());
996   scoped_ptr<base::Time> last_used(new base::Time());
997   Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData,
998                             base::Unretained(synchronous_entry_),
999                             SimpleSynchronousEntry::EntryOperationData(
1000                                 sparse_offset, buf_len),
1001                             make_scoped_refptr(buf),
1002                             last_used.get(),
1003                             result.get());
1004   Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
1005                              this,
1006                              callback,
1007                              base::Passed(&last_used),
1008                              base::Passed(&result));
1009   worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1010 }
1011
1012 void SimpleEntryImpl::WriteSparseDataInternal(
1013     int64 sparse_offset,
1014     net::IOBuffer* buf,
1015     int buf_len,
1016     const CompletionCallback& callback) {
1017   DCHECK(io_thread_checker_.CalledOnValidThread());
1018   ScopedOperationRunner operation_runner(this);
1019
1020   DCHECK_EQ(STATE_READY, state_);
1021   state_ = STATE_IO_PENDING;
1022
1023   int64 max_sparse_data_size = kint64max;
1024   if (backend_.get()) {
1025     int64 max_cache_size = backend_->index()->max_size();
1026     max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
1027   }
1028
1029   scoped_ptr<SimpleEntryStat> entry_stat(
1030       new SimpleEntryStat(last_used_, last_modified_, data_size_,
1031                           sparse_data_size_));
1032
1033   last_used_ = last_modified_ = base::Time::Now();
1034
1035   scoped_ptr<int> result(new int());
1036   Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData,
1037                             base::Unretained(synchronous_entry_),
1038                             SimpleSynchronousEntry::EntryOperationData(
1039                                 sparse_offset, buf_len),
1040                             make_scoped_refptr(buf),
1041                             max_sparse_data_size,
1042                             entry_stat.get(),
1043                             result.get());
1044   Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
1045                              this,
1046                              callback,
1047                              base::Passed(&entry_stat),
1048                              base::Passed(&result));
1049   worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1050 }
1051
1052 void SimpleEntryImpl::GetAvailableRangeInternal(
1053     int64 sparse_offset,
1054     int len,
1055     int64* out_start,
1056     const CompletionCallback& callback) {
1057   DCHECK(io_thread_checker_.CalledOnValidThread());
1058   ScopedOperationRunner operation_runner(this);
1059
1060   DCHECK_EQ(STATE_READY, state_);
1061   state_ = STATE_IO_PENDING;
1062
1063   scoped_ptr<int> result(new int());
1064   Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
1065                             base::Unretained(synchronous_entry_),
1066                             SimpleSynchronousEntry::EntryOperationData(
1067                                 sparse_offset, len),
1068                             out_start,
1069                             result.get());
1070   Closure reply = base::Bind(
1071       &SimpleEntryImpl::GetAvailableRangeOperationComplete,
1072       this,
1073       callback,
1074       base::Passed(&result));
1075   worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1076 }
1077
1078 void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
1079   PostTaskAndReplyWithResult(
1080       worker_pool_, FROM_HERE,
1081       base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, entry_hash_),
1082       base::Bind(&SimpleEntryImpl::DoomOperationComplete, this, callback,
1083                  state_));
1084   state_ = STATE_IO_PENDING;
1085 }
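// Note: the pre-doom state is bound into DoomOperationComplete and restored
// there, so operations queued behind the doom can still run against the
// doomed (but still open) files afterwards.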
1086
1087 void SimpleEntryImpl::CreationOperationComplete(
1088     const CompletionCallback& completion_callback,
1089     const base::TimeTicks& start_time,
1090     scoped_ptr<SimpleEntryCreationResults> in_results,
1091     Entry** out_entry,
1092     net::NetLog::EventType end_event_type) {
1093   DCHECK(io_thread_checker_.CalledOnValidThread());
1094   DCHECK_EQ(state_, STATE_IO_PENDING);
1095   DCHECK(in_results);
1096   ScopedOperationRunner operation_runner(this);
1097   SIMPLE_CACHE_UMA(BOOLEAN,
1098                    "EntryCreationResult", cache_type_,
1099                    in_results->result == net::OK);
1100   if (in_results->result != net::OK) {
1101     if (in_results->result != net::ERR_FILE_EXISTS)
1102       MarkAsDoomed();
1103
1104     net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
1105     PostClientCallback(completion_callback, net::ERR_FAILED);
1106     MakeUninitialized();
1107     return;
1108   }
1109   // If |out_entry| is NULL, it means we already called ReturnEntryToCaller
1110   // from the optimistic Create case.
1111   if (out_entry)
1112     ReturnEntryToCaller(out_entry);
1113
1114   state_ = STATE_READY;
1115   synchronous_entry_ = in_results->sync_entry;
1116   if (in_results->stream_0_data) {
1117     stream_0_data_ = in_results->stream_0_data;
1118     // The crc was read in SimpleSynchronousEntry.
1119     crc_check_state_[0] = CRC_CHECK_DONE;
1120     crc32s_[0] = in_results->stream_0_crc32;
1121     crc32s_end_offset_[0] = in_results->entry_stat.data_size(0);
1122   }
1123   if (key_.empty()) {
1124     SetKey(synchronous_entry_->key());
1125   } else {
1126     // This should only be triggered when creating an entry. The key check in
1127     // the open case is handled in SimpleBackendImpl.
1128     DCHECK_EQ(key_, synchronous_entry_->key());
1129   }
1130   UpdateDataFromEntryStat(in_results->entry_stat);
1131   SIMPLE_CACHE_UMA(TIMES,
1132                    "EntryCreationTime", cache_type_,
1133                    (base::TimeTicks::Now() - start_time));
1134   AdjustOpenEntryCountBy(cache_type_, 1);
1135
1136   net_log_.AddEvent(end_event_type);
1137   PostClientCallback(completion_callback, net::OK);
1138 }
1139
1140 void SimpleEntryImpl::EntryOperationComplete(
1141     const CompletionCallback& completion_callback,
1142     const SimpleEntryStat& entry_stat,
1143     scoped_ptr<int> result) {
1144   DCHECK(io_thread_checker_.CalledOnValidThread());
1145   DCHECK(synchronous_entry_);
1146   DCHECK_EQ(STATE_IO_PENDING, state_);
1147   DCHECK(result);
1148   if (*result < 0) {
1149     state_ = STATE_FAILURE;
1150     MarkAsDoomed();
1151   } else {
1152     state_ = STATE_READY;
1153     UpdateDataFromEntryStat(entry_stat);
1154   }
1155
1156   if (!completion_callback.is_null()) {
1157     MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
1158         completion_callback, *result));
1159   }
1160   RunNextOperationIfNeeded();
1161 }
1162
1163 void SimpleEntryImpl::ReadOperationComplete(
1164     int stream_index,
1165     int offset,
1166     const CompletionCallback& completion_callback,
1167     scoped_ptr<uint32> read_crc32,
1168     scoped_ptr<SimpleEntryStat> entry_stat,
1169     scoped_ptr<int> result) {
1170   DCHECK(io_thread_checker_.CalledOnValidThread());
1171   DCHECK(synchronous_entry_);
1172   DCHECK_EQ(STATE_IO_PENDING, state_);
1173   DCHECK(read_crc32);
1174   DCHECK(result);
1175
1176   if (*result > 0 &&
1177       crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
1178     crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
1179   }
1180
1181   if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
1182     uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
1183                                      : crc32s_[stream_index];
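    // zlib's crc32_combine() joins the CRC of [0, offset) with the CRC of the
    // |*result| bytes just read, yielding the CRC of [0, offset + *result).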
1184     crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
1185     crc32s_end_offset_[stream_index] += *result;
1186     if (!have_written_[stream_index] &&
1187         GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
1188       // We have just read a file from start to finish, and so we have
1189       // computed a crc of the entire file. We can check it now. If a cache
1190       // entry has a single reader, the normal pattern is to read from start
1191       // to finish.
1192
1193       // Other cases are possible. In the case of two readers on the same
1194       // entry, one reader can be behind the other. In this case we compute
1195       // the crc as the most advanced reader progresses, and check it for
1196       // both readers as they read the last byte.
1197
1198       net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);
1199
1200       scoped_ptr<int> new_result(new int());
1201       Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
1202                                 base::Unretained(synchronous_entry_),
1203                                 stream_index,
1204                                 *entry_stat,
1205                                 crc32s_[stream_index],
1206                                 new_result.get());
1207       Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
1208                                  this, *result, stream_index,
1209                                  completion_callback,
1210                                  base::Passed(&new_result));
1211       worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1212       crc_check_state_[stream_index] = CRC_CHECK_DONE;
1213       return;
1214     }
1215   }
1216
1217   if (*result < 0) {
1218     crc32s_end_offset_[stream_index] = 0;
1219   }
1220
1221   if (*result < 0) {
1222     RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1223   } else {
1224     RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1225     if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
1226         offset + *result == GetDataSize(stream_index)) {
1227       crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
1228     }
1229   }
1230   if (net_log_.IsLogging()) {
1231     net_log_.AddEvent(
1232         net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
1233         CreateNetLogReadWriteCompleteCallback(*result));
1234   }
1235
1236   EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1237 }
1238
1239 void SimpleEntryImpl::WriteOperationComplete(
1240     int stream_index,
1241     const CompletionCallback& completion_callback,
1242     scoped_ptr<SimpleEntryStat> entry_stat,
1243     scoped_ptr<int> result) {
1244   if (*result >= 0)
1245     RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
1246   else
1247     RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE);
1248   if (net_log_.IsLogging()) {
1249     net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
1250         CreateNetLogReadWriteCompleteCallback(*result));
1251   }
1252
1253   if (*result < 0) {
1254     crc32s_end_offset_[stream_index] = 0;
1255   }
1256
1257   EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1258 }
1259
1260 void SimpleEntryImpl::ReadSparseOperationComplete(
1261     const CompletionCallback& completion_callback,
1262     scoped_ptr<base::Time> last_used,
1263     scoped_ptr<int> result) {
1264   DCHECK(io_thread_checker_.CalledOnValidThread());
1265   DCHECK(synchronous_entry_);
1266   DCHECK(result);
1267
1268   SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
1269                              sparse_data_size_);
1270   EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1271 }
1272
1273 void SimpleEntryImpl::WriteSparseOperationComplete(
1274     const CompletionCallback& completion_callback,
1275     scoped_ptr<SimpleEntryStat> entry_stat,
1276     scoped_ptr<int> result) {
1277   DCHECK(io_thread_checker_.CalledOnValidThread());
1278   DCHECK(synchronous_entry_);
1279   DCHECK(result);
1280
1281   EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1282 }
1283
1284 void SimpleEntryImpl::GetAvailableRangeOperationComplete(
1285     const CompletionCallback& completion_callback,
1286     scoped_ptr<int> result) {
1287   DCHECK(io_thread_checker_.CalledOnValidThread());
1288   DCHECK(synchronous_entry_);
1289   DCHECK(result);
1290
1291   SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
1292                              sparse_data_size_);
1293   EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1294 }
1295
1296 void SimpleEntryImpl::DoomOperationComplete(
1297     const CompletionCallback& callback,
1298     State state_to_restore,
1299     int result) {
1300   state_ = state_to_restore;
1301   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_END);
1302   if (!callback.is_null())
1303     callback.Run(result);
1304   RunNextOperationIfNeeded();
1305   if (backend_)
1306     backend_->OnDoomComplete(entry_hash_);
1307 }
1308
1309 void SimpleEntryImpl::ChecksumOperationComplete(
1310     int orig_result,
1311     int stream_index,
1312     const CompletionCallback& completion_callback,
1313     scoped_ptr<int> result) {
1314   DCHECK(io_thread_checker_.CalledOnValidThread());
1315   DCHECK(synchronous_entry_);
1316   DCHECK_EQ(STATE_IO_PENDING, state_);
1317   DCHECK(result);
1318
1319   if (net_log_.IsLogging()) {
1320     net_log_.AddEventWithNetErrorCode(
1321         net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
1322         *result);
1323   }
1324
1325   if (*result == net::OK) {
1326     *result = orig_result;
1327     if (orig_result >= 0)
1328       RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1329     else
1330       RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1331   } else {
1332     RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE);
1333   }
1334   if (net_log_.IsLogging()) {
1335     net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
1336         CreateNetLogReadWriteCompleteCallback(*result));
1337   }
1338
1339   SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
1340                              sparse_data_size_);
1341   EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1342 }
1343
1344 void SimpleEntryImpl::CloseOperationComplete() {
1345   DCHECK(!synchronous_entry_);
1346   DCHECK_EQ(0, open_count_);
1347   DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
1348          STATE_UNINITIALIZED == state_);
1349   net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
1350   AdjustOpenEntryCountBy(cache_type_, -1);
1351   MakeUninitialized();
1352   RunNextOperationIfNeeded();
1353 }
1354
1355 void SimpleEntryImpl::UpdateDataFromEntryStat(
1356     const SimpleEntryStat& entry_stat) {
1357   DCHECK(io_thread_checker_.CalledOnValidThread());
1358   DCHECK(synchronous_entry_);
1359   DCHECK_EQ(STATE_READY, state_);
1360
1361   last_used_ = entry_stat.last_used();
1362   last_modified_ = entry_stat.last_modified();
1363   for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
1364     data_size_[i] = entry_stat.data_size(i);
1365   }
1366   sparse_data_size_ = entry_stat.sparse_data_size();
1367   if (!doomed_ && backend_.get())
1368     backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
1369 }
1370
1371 int64 SimpleEntryImpl::GetDiskUsage() const {
1372   int64 file_size = 0;
1373   for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
1374     file_size +=
1375         simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
1376   }
1377   file_size += sparse_data_size_;
1378   return file_size;
1379 }
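// Note: simple_util::GetFileSizeFromKeyAndDataSize() is expected to add the
// per-file overhead (header, key, EOF record) on top of each stream's
// payload, so the reported usage exceeds the sum of the raw stream sizes.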
1380
1381 void SimpleEntryImpl::RecordReadIsParallelizable(
1382     const SimpleEntryOperation& operation) const {
1383   if (!executing_operation_)
1384     return;
1385   // Used in histograms, please only add entries at the end.
1386   enum ReadDependencyType {
1387     // READ_STANDALONE = 0, Deprecated.
1388     READ_FOLLOWS_READ = 1,
1389     READ_FOLLOWS_CONFLICTING_WRITE = 2,
1390     READ_FOLLOWS_NON_CONFLICTING_WRITE = 3,
1391     READ_FOLLOWS_OTHER = 4,
1392     READ_ALONE_IN_QUEUE = 5,
1393     READ_DEPENDENCY_TYPE_MAX = 6,
1394   };
1395
1396   ReadDependencyType type = READ_FOLLOWS_OTHER;
1397   if (operation.alone_in_queue()) {
1398     type = READ_ALONE_IN_QUEUE;
1399   } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
1400     type = READ_FOLLOWS_READ;
1401   } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
1402     if (executing_operation_->ConflictsWith(operation))
1403       type = READ_FOLLOWS_CONFLICTING_WRITE;
1404     else
1405       type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
1406   }
1407   SIMPLE_CACHE_UMA(ENUMERATION,
1408                    "ReadIsParallelizable", cache_type_,
1409                    type, READ_DEPENDENCY_TYPE_MAX);
1410 }
1411
1412 void SimpleEntryImpl::RecordWriteDependencyType(
1413     const SimpleEntryOperation& operation) const {
1414   if (!executing_operation_)
1415     return;
1416   // Used in histograms, please only add entries at the end.
1417   enum WriteDependencyType {
1418     WRITE_OPTIMISTIC = 0,
1419     WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
1420     WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
1421     WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
1422     WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
1423     WRITE_FOLLOWS_CONFLICTING_READ = 5,
1424     WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
1425     WRITE_FOLLOWS_OTHER = 7,
1426     WRITE_DEPENDENCY_TYPE_MAX = 8,
1427   };
1428
1429   WriteDependencyType type = WRITE_FOLLOWS_OTHER;
1430   if (operation.optimistic()) {
1431     type = WRITE_OPTIMISTIC;
1432   } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
1433              executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
1434     bool conflicting = executing_operation_->ConflictsWith(operation);
1435
1436     if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
1437       type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
1438                          : WRITE_FOLLOWS_NON_CONFLICTING_READ;
1439     } else if (executing_operation_->optimistic()) {
1440       type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
1441                          : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
1442     } else {
1443       type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
1444                          : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
1445     }
1446   }
1447   SIMPLE_CACHE_UMA(ENUMERATION,
1448                    "WriteDependencyType", cache_type_,
1449                    type, WRITE_DEPENDENCY_TYPE_MAX);
1450 }
1451
1452 int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
1453                                      int offset,
1454                                      int buf_len) {
1455   if (buf_len < 0) {
1456     RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1457     return 0;
1458   }
1459   memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
1460   UpdateDataFromEntryStat(
1461       SimpleEntryStat(base::Time::Now(), last_modified_, data_size_,
1462                       sparse_data_size_));
1463   RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1464   return buf_len;
1465 }
1466
1467 int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
1468                                     int offset,
1469                                     int buf_len,
1470                                     bool truncate) {
1471   // Currently, stream 0 is only used for HTTP headers, and they are always
1472   // written with a single, truncating write. Detect these writes and record
1473   // the size changes of the headers. Also support writes to stream 0 that
1474   // have different access patterns, as required by the API contract.
1475   // All other clients of the Simple Cache are encouraged to use stream 1.
1476   have_written_[0] = true;
1477   int data_size = GetDataSize(0);
1478   if (offset == 0 && truncate) {
1479     RecordHeaderSizeChange(cache_type_, data_size, buf_len);
1480     stream_0_data_->SetCapacity(buf_len);
1481     memcpy(stream_0_data_->data(), buf->data(), buf_len);
1482     data_size_[0] = buf_len;
1483   } else {
1484     RecordUnexpectedStream0Write(cache_type_);
1485     const int buffer_size =
1486         truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
1487     stream_0_data_->SetCapacity(buffer_size);
1488     // If |stream_0_data_| was extended, the extension up to |offset| needs
1489     // to be zero-filled.
1490     const int fill_size = offset <= data_size ? 0 : offset - data_size;
1491     if (fill_size > 0)
1492       memset(stream_0_data_->data() + data_size, 0, fill_size);
1493     if (buf)
1494       memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
1495     data_size_[0] = buffer_size;
1496   }
1497   base::Time modification_time = base::Time::Now();
1498   AdvanceCrc(buf, offset, buf_len, 0);
1499   UpdateDataFromEntryStat(
1500       SimpleEntryStat(modification_time, modification_time, data_size_,
1501                       sparse_data_size_));
1502   RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
1503   return buf_len;
1504 }
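// Example: with GetDataSize(0) == 10, a non-truncating write of 5 bytes at
// offset 15 takes the second branch above: the buffer grows to 20 bytes,
// [10, 15) is zero-filled, the new bytes land at [15, 20), and data_size_[0]
// becomes 20.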
1505
1506 void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer,
1507                                  int offset,
1508                                  int length,
1509                                  int stream_index) {
1510   // It is easy to incrementally compute the CRC over [0 .. offset + length)
1511   // if |offset| == 0 or we have already computed the CRC over [0 .. offset).
1512   // We rely on most write operations being sequential, from start to end, to
1513   // compute the CRC of the data. When we write to an entry and close it
1514   // without having done a sequential write, we don't check the CRC on read.
1515   if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
1516     uint32 initial_crc =
1517         (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
1518     if (length > 0) {
1519       crc32s_[stream_index] = crc32(
1520           initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length);
1521     }
1522     crc32s_end_offset_[stream_index] = offset + length;
1523   } else if (offset < crc32s_end_offset_[stream_index]) {
1524     // If a range for which the crc32 was already computed is rewritten, the
1525     // computation of the crc32 needs to start from 0 again.
1526     crc32s_end_offset_[stream_index] = 0;
1527   }
1528 }
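// Example: after sequential writes covering [0, 100) and then [100, 250) on a
// stream, crc32s_end_offset_ is 250 and crc32s_ holds the CRC of the first
// 250 bytes. A later write at offset 300 leaves that checkpoint untouched, so
// it can never reach the final stream size and no checksum is stored at
// close; a rewrite at offset 50 resets the checkpoint to 0.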
1529
1530 }  // namespace disk_cache