1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/simple/simple_entry_impl.h"
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/callback.h"
14 #include "base/location.h"
15 #include "base/logging.h"
16 #include "base/message_loop/message_loop_proxy.h"
17 #include "base/task_runner.h"
18 #include "base/task_runner_util.h"
19 #include "base/time/time.h"
20 #include "net/base/io_buffer.h"
21 #include "net/base/net_errors.h"
22 #include "net/disk_cache/net_log_parameters.h"
23 #include "net/disk_cache/simple/simple_backend_impl.h"
24 #include "net/disk_cache/simple/simple_histogram_macros.h"
25 #include "net/disk_cache/simple/simple_index.h"
26 #include "net/disk_cache/simple/simple_net_log_parameters.h"
27 #include "net/disk_cache/simple/simple_synchronous_entry.h"
28 #include "net/disk_cache/simple/simple_util.h"
29 #include "third_party/zlib/zlib.h"
31 namespace disk_cache {
34 // An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
35 // the maximum cache size.
36 const int64 kMaxSparseDataSizeDivisor = 10;
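// For illustration: with this divisor, a backend whose index reports a
// maximum cache size of 100 MiB would allow roughly 10 MiB of sparse data
// per entry. The limit is applied in WriteSparseDataInternal() below.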
38 // Used in histograms, please only add entries at the end.
40 READ_RESULT_SUCCESS = 0,
41 READ_RESULT_INVALID_ARGUMENT = 1,
42 READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
43 READ_RESULT_BAD_STATE = 3,
44 READ_RESULT_FAST_EMPTY_RETURN = 4,
45 READ_RESULT_SYNC_READ_FAILURE = 5,
46 READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
50 // Used in histograms, please only add entries at the end.
52 WRITE_RESULT_SUCCESS = 0,
53 WRITE_RESULT_INVALID_ARGUMENT = 1,
54 WRITE_RESULT_OVER_MAX_SIZE = 2,
55 WRITE_RESULT_BAD_STATE = 3,
56 WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
57 WRITE_RESULT_FAST_EMPTY_RETURN = 5,
61 // Used in histograms, please only add entries at the end.
62 enum HeaderSizeChange {
63 HEADER_SIZE_CHANGE_INITIAL,
64 HEADER_SIZE_CHANGE_SAME,
65 HEADER_SIZE_CHANGE_INCREASE,
66 HEADER_SIZE_CHANGE_DECREASE,
67 HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
68 HEADER_SIZE_CHANGE_MAX
71 void RecordReadResult(net::CacheType cache_type, ReadResult result) {
72 SIMPLE_CACHE_UMA(ENUMERATION,
73 "ReadResult", cache_type, result, READ_RESULT_MAX);
76 void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
77 SIMPLE_CACHE_UMA(ENUMERATION,
78 "WriteResult2", cache_type, result, WRITE_RESULT_MAX);
81 // TODO(ttuttle): Consider removing this once we have a good handle on header
82 // size changes.
83 void RecordHeaderSizeChange(net::CacheType cache_type,
84 int old_size, int new_size) {
85 HeaderSizeChange size_change;
87 SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size);
90 size_change = HEADER_SIZE_CHANGE_INITIAL;
91 } else if (new_size == old_size) {
92 size_change = HEADER_SIZE_CHANGE_SAME;
93 } else if (new_size > old_size) {
94 int delta = new_size - old_size;
95 SIMPLE_CACHE_UMA(COUNTS_10000,
96 "HeaderSizeIncreaseAbsolute", cache_type, delta);
97 SIMPLE_CACHE_UMA(PERCENTAGE,
98 "HeaderSizeIncreasePercentage", cache_type,
99 delta * 100 / old_size);
100 size_change = HEADER_SIZE_CHANGE_INCREASE;
101 } else { // new_size < old_size
102 int delta = old_size - new_size;
103 SIMPLE_CACHE_UMA(COUNTS_10000,
104 "HeaderSizeDecreaseAbsolute", cache_type, delta);
105 SIMPLE_CACHE_UMA(PERCENTAGE,
106 "HeaderSizeDecreasePercentage", cache_type,
107 delta * 100 / old_size);
108 size_change = HEADER_SIZE_CHANGE_DECREASE;
111 SIMPLE_CACHE_UMA(ENUMERATION,
112 "HeaderSizeChange", cache_type,
113 size_change, HEADER_SIZE_CHANGE_MAX);
116 void RecordUnexpectedStream0Write(net::CacheType cache_type) {
117 SIMPLE_CACHE_UMA(ENUMERATION,
118 "HeaderSizeChange", cache_type,
119 HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, HEADER_SIZE_CHANGE_MAX);
122 int g_open_entry_count = 0;
124 void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) {
125 g_open_entry_count += offset;
126 SIMPLE_CACHE_UMA(COUNTS_10000,
127 "GlobalOpenEntryCount", cache_type, g_open_entry_count);
130 void InvokeCallbackIfBackendIsAlive(
131 const base::WeakPtr<SimpleBackendImpl>& backend,
132 const net::CompletionCallback& completion_callback,
134 DCHECK(!completion_callback.is_null());
137 completion_callback.Run(result);
143 using base::FilePath;
144 using base::MessageLoopProxy;
146 using base::TaskRunner;
148 // A helper class to ensure that RunNextOperationIfNeeded() is called when
149 // exiting the current stack frame.
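// Typical use: an instance is declared on the stack at the top of methods
// such as WriteData() and the *Internal() helpers so that, whichever path
// the method returns through, the destructor picks up the next queued
// operation from |pending_operations_|.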
150 class SimpleEntryImpl::ScopedOperationRunner {
152 explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
155 ~ScopedOperationRunner() {
156 entry_->RunNextOperationIfNeeded();
160 SimpleEntryImpl* const entry_;
163 SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() {}
165 SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
166 const FilePath& path,
167 const uint64 entry_hash,
168 OperationsMode operations_mode,
169 SimpleBackendImpl* backend,
170 net::NetLog* net_log)
171 : backend_(backend->AsWeakPtr()),
172 cache_type_(cache_type),
173 worker_pool_(backend->worker_pool()),
175 entry_hash_(entry_hash),
176 use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
177 last_used_(Time::Now()),
178 last_modified_(last_used_),
179 sparse_data_size_(0),
182 state_(STATE_UNINITIALIZED),
183 synchronous_entry_(NULL),
184 net_log_(net::BoundNetLog::Make(
185 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)),
186 stream_0_data_(new net::GrowableIOBuffer()) {
187 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
188 arrays_should_be_same_size);
189 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
190 arrays_should_be_same_size);
191 COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
192 arrays_should_be_same_size);
193 COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
194 arrays_should_be_same_size);
196 net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
197 CreateNetLogSimpleEntryConstructionCallback(this));
200 void SimpleEntryImpl::SetActiveEntryProxy(
201 scoped_ptr<ActiveEntryProxy> active_entry_proxy) {
202 DCHECK(!active_entry_proxy_);
203 active_entry_proxy_.reset(active_entry_proxy.release());
206 int SimpleEntryImpl::OpenEntry(Entry** out_entry,
207 const CompletionCallback& callback) {
208 DCHECK(backend_.get());
210 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);
212 bool have_index = backend_->index()->initialized();
213 // This enumeration is used in histograms; add entries only at the end.
214 enum OpenEntryIndexEnum {
220 OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
222 if (backend_->index()->Has(entry_hash_))
223 open_entry_index_enum = INDEX_HIT;
225 open_entry_index_enum = INDEX_MISS;
227 SIMPLE_CACHE_UMA(ENUMERATION,
228 "OpenEntryIndexState", cache_type_,
229 open_entry_index_enum, INDEX_MAX);
231 // If the entry is not known to the index, initiate fast failover to the network.
232 if (open_entry_index_enum == INDEX_MISS) {
233 net_log_.AddEventWithNetErrorCode(
234 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
236 return net::ERR_FAILED;
239 pending_operations_.push(SimpleEntryOperation::OpenOperation(
240 this, have_index, callback, out_entry));
241 RunNextOperationIfNeeded();
242 return net::ERR_IO_PENDING;
245 int SimpleEntryImpl::CreateEntry(Entry** out_entry,
246 const CompletionCallback& callback) {
247 DCHECK(backend_.get());
248 DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));
250 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);
252 bool have_index = backend_->index()->initialized();
253 int ret_value = net::ERR_FAILED;
254 if (use_optimistic_operations_ &&
255 state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
256 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
258 ReturnEntryToCaller(out_entry);
259 pending_operations_.push(SimpleEntryOperation::CreateOperation(
260 this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
263 pending_operations_.push(SimpleEntryOperation::CreateOperation(
264 this, have_index, callback, out_entry));
265 ret_value = net::ERR_IO_PENDING;
268 // We insert the entry into the index before creating the entry files in the
269 // SimpleSynchronousEntry. This way, the worst-case scenario is that the entry
270 // is in the index but its files have not been created yet, so we never leak
271 // files. CreationOperationComplete will remove the entry from the index if
272 // the creation fails.
273 backend_->index()->Insert(entry_hash_);
275 RunNextOperationIfNeeded();
279 int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
282 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
283 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);
287 backend_->OnDoomStart(entry_hash_);
288 pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
289 RunNextOperationIfNeeded();
290 return net::ERR_IO_PENDING;
293 void SimpleEntryImpl::SetKey(const std::string& key) {
295 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
296 net::NetLog::StringCallback("key", &key));
299 void SimpleEntryImpl::Doom() {
300 DoomEntry(CompletionCallback());
303 void SimpleEntryImpl::Close() {
304 DCHECK(io_thread_checker_.CalledOnValidThread());
305 DCHECK_LT(0, open_count_);
307 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);
309 if (--open_count_ > 0) {
310 DCHECK(!HasOneRef());
311 Release(); // Balanced in ReturnEntryToCaller().
315 pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
316 DCHECK(!HasOneRef());
317 Release(); // Balanced in ReturnEntryToCaller().
318 RunNextOperationIfNeeded();
321 std::string SimpleEntryImpl::GetKey() const {
322 DCHECK(io_thread_checker_.CalledOnValidThread());
326 Time SimpleEntryImpl::GetLastUsed() const {
327 DCHECK(io_thread_checker_.CalledOnValidThread());
331 Time SimpleEntryImpl::GetLastModified() const {
332 DCHECK(io_thread_checker_.CalledOnValidThread());
333 return last_modified_;
336 int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
337 DCHECK(io_thread_checker_.CalledOnValidThread());
338 DCHECK_LE(0, data_size_[stream_index]);
339 return data_size_[stream_index];
342 int SimpleEntryImpl::ReadData(int stream_index,
346 const CompletionCallback& callback) {
347 DCHECK(io_thread_checker_.CalledOnValidThread());
349 if (net_log_.IsLogging()) {
350 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
351 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
355 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
357 if (net_log_.IsLogging()) {
358 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
359 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
362 RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
363 return net::ERR_INVALID_ARGUMENT;
365 if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
366 offset < 0 || !buf_len)) {
367 if (net_log_.IsLogging()) {
368 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
369 CreateNetLogReadWriteCompleteCallback(0));
372 RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN);
376 // TODO(clamy): return immediately when reading from stream 0.
378 // TODO(felipeg): Optimization: Add support for truly parallel read
379 // operations.
380 bool alone_in_queue =
381 pending_operations_.size() == 0 && state_ == STATE_READY;
382 pending_operations_.push(SimpleEntryOperation::ReadOperation(
383 this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
384 RunNextOperationIfNeeded();
385 return net::ERR_IO_PENDING;
388 int SimpleEntryImpl::WriteData(int stream_index,
392 const CompletionCallback& callback,
394 DCHECK(io_thread_checker_.CalledOnValidThread());
396 if (net_log_.IsLogging()) {
398 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
399 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
403 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
404 offset < 0 || buf_len < 0) {
405 if (net_log_.IsLogging()) {
407 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
408 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
410 RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
411 return net::ERR_INVALID_ARGUMENT;
413 if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
414 if (net_log_.IsLogging()) {
416 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
417 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
419 RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
420 return net::ERR_FAILED;
422 ScopedOperationRunner operation_runner(this);
424 // Stream 0 data is kept in memory, so it can be written immediately if there
425 // are no I/O operations pending.
426 if (stream_index == 0 && state_ == STATE_READY &&
427 pending_operations_.size() == 0)
428 return SetStream0Data(buf, offset, buf_len, truncate);
430 // We can only do an optimistic write if there are no pending operations, so
431 // that we are sure the next call to RunNextOperationIfNeeded will actually
432 // run the write operation that sets the stream size. It also protects
433 // against previous possibly-conflicting writes that could be stacked in
434 // |pending_operations_|. We could optimize this for the case where only read
435 // operations are enqueued.
436 const bool optimistic =
437 (use_optimistic_operations_ && state_ == STATE_READY &&
438 pending_operations_.size() == 0);
439 CompletionCallback op_callback;
440 scoped_refptr<net::IOBuffer> op_buf;
441 int ret_value = net::ERR_FAILED;
444 op_callback = callback;
445 ret_value = net::ERR_IO_PENDING;
447 // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
448 // here to avoid paying the price of the RefCountedThreadSafe atomic
449 // operations.
451 op_buf = new IOBuffer(buf_len);
452 memcpy(op_buf->data(), buf->data(), buf_len);
454 op_callback = CompletionCallback();
456 if (net_log_.IsLogging()) {
458 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
459 CreateNetLogReadWriteCompleteCallback(buf_len));
463 pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
474 int SimpleEntryImpl::ReadSparseData(int64 offset,
477 const CompletionCallback& callback) {
478 DCHECK(io_thread_checker_.CalledOnValidThread());
480 ScopedOperationRunner operation_runner(this);
481 pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
482 this, offset, buf_len, buf, callback));
483 return net::ERR_IO_PENDING;
486 int SimpleEntryImpl::WriteSparseData(int64 offset,
489 const CompletionCallback& callback) {
490 DCHECK(io_thread_checker_.CalledOnValidThread());
492 ScopedOperationRunner operation_runner(this);
493 pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
494 this, offset, buf_len, buf, callback));
495 return net::ERR_IO_PENDING;
498 int SimpleEntryImpl::GetAvailableRange(int64 offset,
501 const CompletionCallback& callback) {
502 DCHECK(io_thread_checker_.CalledOnValidThread());
504 ScopedOperationRunner operation_runner(this);
505 pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
506 this, offset, len, start, callback));
507 return net::ERR_IO_PENDING;
510 bool SimpleEntryImpl::CouldBeSparse() const {
511 DCHECK(io_thread_checker_.CalledOnValidThread());
512 // TODO(ttuttle): Actually check.
516 void SimpleEntryImpl::CancelSparseIO() {
517 DCHECK(io_thread_checker_.CalledOnValidThread());
518 // The Simple Cache does not return distinct objects for the same non-doomed
519 // entry, so there's no need to coordinate which object is performing sparse
520 // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
523 int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
524 DCHECK(io_thread_checker_.CalledOnValidThread());
525 // The Simple Cache does not return distinct objects for the same non-doomed
526 // entry, so there's no need to coordinate which object is performing sparse
527 // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
531 SimpleEntryImpl::~SimpleEntryImpl() {
532 DCHECK(io_thread_checker_.CalledOnValidThread());
533 DCHECK_EQ(0U, pending_operations_.size());
534 DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
535 DCHECK(!synchronous_entry_);
536 net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
539 void SimpleEntryImpl::PostClientCallback(const CompletionCallback& callback,
541 if (callback.is_null())
543 // Note that the callback is posted rather than directly invoked to avoid
544 // reentrancy issues.
545 MessageLoopProxy::current()->PostTask(
547 base::Bind(&InvokeCallbackIfBackendIsAlive, backend_, callback, result));
550 void SimpleEntryImpl::MakeUninitialized() {
551 state_ = STATE_UNINITIALIZED;
552 std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
553 std::memset(crc32s_, 0, sizeof(crc32s_));
554 std::memset(have_written_, 0, sizeof(have_written_));
555 std::memset(data_size_, 0, sizeof(data_size_));
556 for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
557 crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
561 void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
564 AddRef(); // Balanced in Close()
565 if (!backend_.get()) {
566 // This method can be called when an asynchronous operation has completed.
567 // If the backend no longer exists, the callback won't be invoked, and so we
568 // must close ourselves to avoid leaking. As well, there's no guarantee the
569 // client-provided pointer (|out_entry|) hasn't been freed, and no point
570 // dereferencing it, either.
577 void SimpleEntryImpl::MarkAsDoomed() {
581 backend_->index()->Remove(entry_hash_);
582 active_entry_proxy_.reset();
585 void SimpleEntryImpl::RunNextOperationIfNeeded() {
586 DCHECK(io_thread_checker_.CalledOnValidThread());
587 SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
588 "EntryOperationsPending", cache_type_,
589 pending_operations_.size(), 0, 100, 20);
590 if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
591 scoped_ptr<SimpleEntryOperation> operation(
592 new SimpleEntryOperation(pending_operations_.front()));
593 pending_operations_.pop();
594 switch (operation->type()) {
595 case SimpleEntryOperation::TYPE_OPEN:
596 OpenEntryInternal(operation->have_index(),
597 operation->callback(),
598 operation->out_entry());
600 case SimpleEntryOperation::TYPE_CREATE:
601 CreateEntryInternal(operation->have_index(),
602 operation->callback(),
603 operation->out_entry());
605 case SimpleEntryOperation::TYPE_CLOSE:
608 case SimpleEntryOperation::TYPE_READ:
609 RecordReadIsParallelizable(*operation);
610 ReadDataInternal(operation->index(),
614 operation->callback());
616 case SimpleEntryOperation::TYPE_WRITE:
617 RecordWriteDependencyType(*operation);
618 WriteDataInternal(operation->index(),
622 operation->callback(),
623 operation->truncate());
625 case SimpleEntryOperation::TYPE_READ_SPARSE:
626 ReadSparseDataInternal(operation->sparse_offset(),
629 operation->callback());
631 case SimpleEntryOperation::TYPE_WRITE_SPARSE:
632 WriteSparseDataInternal(operation->sparse_offset(),
635 operation->callback());
637 case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
638 GetAvailableRangeInternal(operation->sparse_offset(),
640 operation->out_start(),
641 operation->callback());
643 case SimpleEntryOperation::TYPE_DOOM:
644 DoomEntryInternal(operation->callback());
649 // The operation is kept for histograms. Make sure it does not leak
650 // resources.
651 executing_operation_.swap(operation);
652 executing_operation_->ReleaseReferences();
653 // |this| may have been deleted.
657 void SimpleEntryImpl::OpenEntryInternal(bool have_index,
658 const CompletionCallback& callback,
660 ScopedOperationRunner operation_runner(this);
662 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);
664 if (state_ == STATE_READY) {
665 ReturnEntryToCaller(out_entry);
666 PostClientCallback(callback, net::OK);
668 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
669 CreateNetLogSimpleEntryCreationCallback(this, net::OK));
672 if (state_ == STATE_FAILURE) {
673 PostClientCallback(callback, net::ERR_FAILED);
675 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
676 CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
680 DCHECK_EQ(STATE_UNINITIALIZED, state_);
681 DCHECK(!synchronous_entry_);
682 state_ = STATE_IO_PENDING;
683 const base::TimeTicks start_time = base::TimeTicks::Now();
684 scoped_ptr<SimpleEntryCreationResults> results(
685 new SimpleEntryCreationResults(
686 SimpleEntryStat(last_used_, last_modified_, data_size_,
687 sparse_data_size_)));
688 Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
694 Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
698 base::Passed(&results),
700 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
701 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
704 void SimpleEntryImpl::CreateEntryInternal(bool have_index,
705 const CompletionCallback& callback,
707 ScopedOperationRunner operation_runner(this);
709 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);
711 if (state_ != STATE_UNINITIALIZED) {
712 // There is already an active normal entry.
714 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
715 CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
716 PostClientCallback(callback, net::ERR_FAILED);
719 DCHECK_EQ(STATE_UNINITIALIZED, state_);
720 DCHECK(!synchronous_entry_);
722 state_ = STATE_IO_PENDING;
724 // Since we don't know the correct values for |last_used_| and
725 // |last_modified_| yet, we make this approximation.
726 last_used_ = last_modified_ = base::Time::Now();
728 // If creation succeeds, we should mark all streams to be saved on close.
729 for (int i = 0; i < kSimpleEntryStreamCount; ++i)
730 have_written_[i] = true;
732 const base::TimeTicks start_time = base::TimeTicks::Now();
733 scoped_ptr<SimpleEntryCreationResults> results(
734 new SimpleEntryCreationResults(
735 SimpleEntryStat(last_used_, last_modified_, data_size_,
736 sparse_data_size_)));
737 Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
744 Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
748 base::Passed(&results),
750 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
751 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
754 void SimpleEntryImpl::CloseInternal() {
755 DCHECK(io_thread_checker_.CalledOnValidThread());
756 typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
757 scoped_ptr<std::vector<CRCRecord> >
758 crc32s_to_write(new std::vector<CRCRecord>());
760 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);
762 if (state_ == STATE_READY) {
763 DCHECK(synchronous_entry_);
764 state_ = STATE_IO_PENDING;
765 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
766 if (have_written_[i]) {
767 if (GetDataSize(i) == crc32s_end_offset_[i]) {
768 int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
769 crc32s_to_write->push_back(CRCRecord(i, true, crc));
771 crc32s_to_write->push_back(CRCRecord(i, false, 0));
776 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
779 if (synchronous_entry_) {
781 base::Bind(&SimpleSynchronousEntry::Close,
782 base::Unretained(synchronous_entry_),
783 SimpleEntryStat(last_used_, last_modified_, data_size_,
785 base::Passed(&crc32s_to_write),
787 Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
788 synchronous_entry_ = NULL;
789 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
791 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
792 if (!have_written_[i]) {
793 SIMPLE_CACHE_UMA(ENUMERATION,
794 "CheckCRCResult", cache_type_,
795 crc_check_state_[i], CRC_CHECK_MAX);
799 CloseOperationComplete();
803 void SimpleEntryImpl::ReadDataInternal(int stream_index,
807 const CompletionCallback& callback) {
808 DCHECK(io_thread_checker_.CalledOnValidThread());
809 ScopedOperationRunner operation_runner(this);
811 if (net_log_.IsLogging()) {
813 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
814 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
818 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
819 if (!callback.is_null()) {
820 RecordReadResult(cache_type_, READ_RESULT_BAD_STATE);
821 // Note that the API states that client-provided callbacks for entry-level
822 // (i.e. non-backend) operations (e.g. read, write) are invoked even if
823 // the backend was already destroyed.
824 MessageLoopProxy::current()->PostTask(
825 FROM_HERE, base::Bind(callback, net::ERR_FAILED));
827 if (net_log_.IsLogging()) {
829 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
830 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
834 DCHECK_EQ(STATE_READY, state_);
835 if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
836 RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
837 // If there is nothing to read, we bail out before setting state_ to
838 // STATE_IO_PENDING.
839 if (!callback.is_null())
840 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0));
844 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
846 // Since stream 0 data is kept in memory, it is read immediately.
847 if (stream_index == 0) {
848 int ret_value = ReadStream0Data(buf, offset, buf_len);
849 if (!callback.is_null()) {
850 MessageLoopProxy::current()->PostTask(FROM_HERE,
851 base::Bind(callback, ret_value));
856 state_ = STATE_IO_PENDING;
857 if (!doomed_ && backend_.get())
858 backend_->index()->UseIfExists(entry_hash_);
860 scoped_ptr<uint32> read_crc32(new uint32());
861 scoped_ptr<int> result(new int());
862 scoped_ptr<SimpleEntryStat> entry_stat(
863 new SimpleEntryStat(last_used_, last_modified_, data_size_,
865 Closure task = base::Bind(
866 &SimpleSynchronousEntry::ReadData,
867 base::Unretained(synchronous_entry_),
868 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
869 make_scoped_refptr(buf),
873 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
878 base::Passed(&read_crc32),
879 base::Passed(&entry_stat),
880 base::Passed(&result));
881 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
884 void SimpleEntryImpl::WriteDataInternal(int stream_index,
888 const CompletionCallback& callback,
890 DCHECK(io_thread_checker_.CalledOnValidThread());
891 ScopedOperationRunner operation_runner(this);
893 if (net_log_.IsLogging()) {
895 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
896 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
900 if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
901 RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE);
902 if (net_log_.IsLogging()) {
904 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
905 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
907 if (!callback.is_null()) {
908 MessageLoopProxy::current()->PostTask(
909 FROM_HERE, base::Bind(callback, net::ERR_FAILED));
911 // |this| may be destroyed after return here.
915 DCHECK_EQ(STATE_READY, state_);
917 // Since stream 0 data is kept in memory, it will be written immediately.
918 if (stream_index == 0) {
919 int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
920 if (!callback.is_null()) {
921 MessageLoopProxy::current()->PostTask(FROM_HERE,
922 base::Bind(callback, ret_value));
927 // Ignore zero-length writes that do not change the file size.
929 int32 data_size = data_size_[stream_index];
930 if (truncate ? (offset == data_size) : (offset <= data_size)) {
931 RecordWriteResult(cache_type_, WRITE_RESULT_FAST_EMPTY_RETURN);
932 if (!callback.is_null()) {
933 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
939 state_ = STATE_IO_PENDING;
940 if (!doomed_ && backend_.get())
941 backend_->index()->UseIfExists(entry_hash_);
943 AdvanceCrc(buf, offset, buf_len, stream_index);
945 // |entry_stat| needs to be initialized before modifying |data_size_|.
946 scoped_ptr<SimpleEntryStat> entry_stat(
947 new SimpleEntryStat(last_used_, last_modified_, data_size_,
950 data_size_[stream_index] = offset + buf_len;
952 data_size_[stream_index] = std::max(offset + buf_len,
953 GetDataSize(stream_index));
956 // Since we don't know the correct values for |last_used_| and
957 // |last_modified_| yet, we make this approximation.
958 last_used_ = last_modified_ = base::Time::Now();
960 have_written_[stream_index] = true;
961 // Writing to stream 1 affects the placement of stream 0 in the file, so the
962 // EOF record will have to be rewritten.
963 if (stream_index == 1)
964 have_written_[0] = true;
966 scoped_ptr<int> result(new int());
967 Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
968 base::Unretained(synchronous_entry_),
969 SimpleSynchronousEntry::EntryOperationData(
970 stream_index, offset, buf_len, truncate,
972 make_scoped_refptr(buf),
975 Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
979 base::Passed(&entry_stat),
980 base::Passed(&result));
981 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
984 void SimpleEntryImpl::ReadSparseDataInternal(
988 const CompletionCallback& callback) {
989 DCHECK(io_thread_checker_.CalledOnValidThread());
990 ScopedOperationRunner operation_runner(this);
992 DCHECK_EQ(STATE_READY, state_);
993 state_ = STATE_IO_PENDING;
995 scoped_ptr<int> result(new int());
996 scoped_ptr<base::Time> last_used(new base::Time());
997 Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData,
998 base::Unretained(synchronous_entry_),
999 SimpleSynchronousEntry::EntryOperationData(
1000 sparse_offset, buf_len),
1001 make_scoped_refptr(buf),
1004 Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
1007 base::Passed(&last_used),
1008 base::Passed(&result));
1009 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1012 void SimpleEntryImpl::WriteSparseDataInternal(
1013 int64 sparse_offset,
1016 const CompletionCallback& callback) {
1017 DCHECK(io_thread_checker_.CalledOnValidThread());
1018 ScopedOperationRunner operation_runner(this);
1020 DCHECK_EQ(STATE_READY, state_);
1021 state_ = STATE_IO_PENDING;
1023 int64 max_sparse_data_size = kint64max;
1024 if (backend_.get()) {
1025 int64 max_cache_size = backend_->index()->max_size();
1026 max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
1029 scoped_ptr<SimpleEntryStat> entry_stat(
1030 new SimpleEntryStat(last_used_, last_modified_, data_size_,
1031 sparse_data_size_));
1033 last_used_ = last_modified_ = base::Time::Now();
1035 scoped_ptr<int> result(new int());
1036 Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData,
1037 base::Unretained(synchronous_entry_),
1038 SimpleSynchronousEntry::EntryOperationData(
1039 sparse_offset, buf_len),
1040 make_scoped_refptr(buf),
1041 max_sparse_data_size,
1044 Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
1047 base::Passed(&entry_stat),
1048 base::Passed(&result));
1049 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1052 void SimpleEntryImpl::GetAvailableRangeInternal(
1053 int64 sparse_offset,
1056 const CompletionCallback& callback) {
1057 DCHECK(io_thread_checker_.CalledOnValidThread());
1058 ScopedOperationRunner operation_runner(this);
1060 DCHECK_EQ(STATE_READY, state_);
1061 state_ = STATE_IO_PENDING;
1063 scoped_ptr<int> result(new int());
1064 Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
1065 base::Unretained(synchronous_entry_),
1066 SimpleSynchronousEntry::EntryOperationData(
1067 sparse_offset, len),
1070 Closure reply = base::Bind(
1071 &SimpleEntryImpl::GetAvailableRangeOperationComplete,
1074 base::Passed(&result));
1075 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1078 void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
1079 PostTaskAndReplyWithResult(
1080 worker_pool_, FROM_HERE,
1081 base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, entry_hash_),
1082 base::Bind(&SimpleEntryImpl::DoomOperationComplete, this, callback,
1084 state_ = STATE_IO_PENDING;
1087 void SimpleEntryImpl::CreationOperationComplete(
1088 const CompletionCallback& completion_callback,
1089 const base::TimeTicks& start_time,
1090 scoped_ptr<SimpleEntryCreationResults> in_results,
1092 net::NetLog::EventType end_event_type) {
1093 DCHECK(io_thread_checker_.CalledOnValidThread());
1094 DCHECK_EQ(state_, STATE_IO_PENDING);
1096 ScopedOperationRunner operation_runner(this);
1097 SIMPLE_CACHE_UMA(BOOLEAN,
1098 "EntryCreationResult", cache_type_,
1099 in_results->result == net::OK);
1100 if (in_results->result != net::OK) {
1101 if (in_results->result != net::ERR_FILE_EXISTS)
1104 net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
1105 PostClientCallback(completion_callback, net::ERR_FAILED);
1106 MakeUninitialized();
1109 // If out_entry is NULL, it means we already called ReturnEntryToCaller from
1110 // the optimistic Create case.
1112 ReturnEntryToCaller(out_entry);
1114 state_ = STATE_READY;
1115 synchronous_entry_ = in_results->sync_entry;
1116 if (in_results->stream_0_data) {
1117 stream_0_data_ = in_results->stream_0_data;
1118 // The crc was read in SimpleSynchronousEntry.
1119 crc_check_state_[0] = CRC_CHECK_DONE;
1120 crc32s_[0] = in_results->stream_0_crc32;
1121 crc32s_end_offset_[0] = in_results->entry_stat.data_size(0);
1124 SetKey(synchronous_entry_->key());
1126 // This should only be triggered when creating an entry. The key check in
1127 // the open case is handled in SimpleBackendImpl.
1128 DCHECK_EQ(key_, synchronous_entry_->key());
1130 UpdateDataFromEntryStat(in_results->entry_stat);
1131 SIMPLE_CACHE_UMA(TIMES,
1132 "EntryCreationTime", cache_type_,
1133 (base::TimeTicks::Now() - start_time));
1134 AdjustOpenEntryCountBy(cache_type_, 1);
1136 net_log_.AddEvent(end_event_type);
1137 PostClientCallback(completion_callback, net::OK);
1140 void SimpleEntryImpl::EntryOperationComplete(
1141 const CompletionCallback& completion_callback,
1142 const SimpleEntryStat& entry_stat,
1143 scoped_ptr<int> result) {
1144 DCHECK(io_thread_checker_.CalledOnValidThread());
1145 DCHECK(synchronous_entry_);
1146 DCHECK_EQ(STATE_IO_PENDING, state_);
1149 state_ = STATE_FAILURE;
1152 state_ = STATE_READY;
1153 UpdateDataFromEntryStat(entry_stat);
1156 if (!completion_callback.is_null()) {
1157 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
1158 completion_callback, *result));
1160 RunNextOperationIfNeeded();
1163 void SimpleEntryImpl::ReadOperationComplete(
1166 const CompletionCallback& completion_callback,
1167 scoped_ptr<uint32> read_crc32,
1168 scoped_ptr<SimpleEntryStat> entry_stat,
1169 scoped_ptr<int> result) {
1170 DCHECK(io_thread_checker_.CalledOnValidThread());
1171 DCHECK(synchronous_entry_);
1172 DCHECK_EQ(STATE_IO_PENDING, state_);
1177 crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
1178 crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
1181 if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
1182 uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
1183 : crc32s_[stream_index];
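// zlib's crc32_combine(crc_a, crc_b, len_b) returns the CRC of the
// concatenation of the two byte ranges, so the running CRC covering
// [0, offset) is extended here by the CRC of the |*result| bytes just read.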
1184 crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
1185 crc32s_end_offset_[stream_index] += *result;
1186 if (!have_written_[stream_index] &&
1187 GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
1188 // We have just read a file from start to finish, and so we have
1189 // computed a crc of the entire file. We can check it now. If a cache
1190 // entry has a single reader, the normal pattern is to read from start
1191 // to finish.
1193 // Other cases are possible. In the case of two readers on the same
1194 // entry, one reader can be behind the other. In this case we compute
1195 // the crc as the most advanced reader progresses, and check it for
1196 // both readers as they read the last byte.
1198 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);
1200 scoped_ptr<int> new_result(new int());
1201 Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
1202 base::Unretained(synchronous_entry_),
1205 crc32s_[stream_index],
1207 Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
1208 this, *result, stream_index,
1209 completion_callback,
1210 base::Passed(&new_result));
1211 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1212 crc_check_state_[stream_index] = CRC_CHECK_DONE;
1218 crc32s_end_offset_[stream_index] = 0;
1222 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1224 RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1225 if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
1226 offset + *result == GetDataSize(stream_index)) {
1227 crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
1230 if (net_log_.IsLogging()) {
1232 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
1233 CreateNetLogReadWriteCompleteCallback(*result));
1236 EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1239 void SimpleEntryImpl::WriteOperationComplete(
1241 const CompletionCallback& completion_callback,
1242 scoped_ptr<SimpleEntryStat> entry_stat,
1243 scoped_ptr<int> result) {
1245 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
1247 RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE);
1248 if (net_log_.IsLogging()) {
1249 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
1250 CreateNetLogReadWriteCompleteCallback(*result));
1254 crc32s_end_offset_[stream_index] = 0;
1257 EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1260 void SimpleEntryImpl::ReadSparseOperationComplete(
1261 const CompletionCallback& completion_callback,
1262 scoped_ptr<base::Time> last_used,
1263 scoped_ptr<int> result) {
1264 DCHECK(io_thread_checker_.CalledOnValidThread());
1265 DCHECK(synchronous_entry_);
1268 SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
1270 EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1273 void SimpleEntryImpl::WriteSparseOperationComplete(
1274 const CompletionCallback& completion_callback,
1275 scoped_ptr<SimpleEntryStat> entry_stat,
1276 scoped_ptr<int> result) {
1277 DCHECK(io_thread_checker_.CalledOnValidThread());
1278 DCHECK(synchronous_entry_);
1281 EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
1284 void SimpleEntryImpl::GetAvailableRangeOperationComplete(
1285 const CompletionCallback& completion_callback,
1286 scoped_ptr<int> result) {
1287 DCHECK(io_thread_checker_.CalledOnValidThread());
1288 DCHECK(synchronous_entry_);
1291 SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
1293 EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1296 void SimpleEntryImpl::DoomOperationComplete(
1297 const CompletionCallback& callback,
1298 State state_to_restore,
1300 state_ = state_to_restore;
1301 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_END);
1302 if (!callback.is_null())
1303 callback.Run(result);
1304 RunNextOperationIfNeeded();
1306 backend_->OnDoomComplete(entry_hash_);
1309 void SimpleEntryImpl::ChecksumOperationComplete(
1312 const CompletionCallback& completion_callback,
1313 scoped_ptr<int> result) {
1314 DCHECK(io_thread_checker_.CalledOnValidThread());
1315 DCHECK(synchronous_entry_);
1316 DCHECK_EQ(STATE_IO_PENDING, state_);
1319 if (net_log_.IsLogging()) {
1320 net_log_.AddEventWithNetErrorCode(
1321 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
1325 if (*result == net::OK) {
1326 *result = orig_result;
1327 if (orig_result >= 0)
1328 RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1330 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1332 RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE);
1334 if (net_log_.IsLogging()) {
1335 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
1336 CreateNetLogReadWriteCompleteCallback(*result));
1339 SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
1341 EntryOperationComplete(completion_callback, entry_stat, result.Pass());
1344 void SimpleEntryImpl::CloseOperationComplete() {
1345 DCHECK(!synchronous_entry_);
1346 DCHECK_EQ(0, open_count_);
1347 DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
1348 STATE_UNINITIALIZED == state_);
1349 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
1350 AdjustOpenEntryCountBy(cache_type_, -1);
1351 MakeUninitialized();
1352 RunNextOperationIfNeeded();
1355 void SimpleEntryImpl::UpdateDataFromEntryStat(
1356 const SimpleEntryStat& entry_stat) {
1357 DCHECK(io_thread_checker_.CalledOnValidThread());
1358 DCHECK(synchronous_entry_);
1359 DCHECK_EQ(STATE_READY, state_);
1361 last_used_ = entry_stat.last_used();
1362 last_modified_ = entry_stat.last_modified();
1363 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
1364 data_size_[i] = entry_stat.data_size(i);
1366 sparse_data_size_ = entry_stat.sparse_data_size();
1367 if (!doomed_ && backend_.get())
1368 backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
1371 int64 SimpleEntryImpl::GetDiskUsage() const {
1372 int64 file_size = 0;
1373 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
1375 simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
1377 file_size += sparse_data_size_;
1381 void SimpleEntryImpl::RecordReadIsParallelizable(
1382 const SimpleEntryOperation& operation) const {
1383 if (!executing_operation_)
1385 // Used in histograms, please only add entries at the end.
1386 enum ReadDependencyType {
1387 // READ_STANDALONE = 0, Deprecated.
1388 READ_FOLLOWS_READ = 1,
1389 READ_FOLLOWS_CONFLICTING_WRITE = 2,
1390 READ_FOLLOWS_NON_CONFLICTING_WRITE = 3,
1391 READ_FOLLOWS_OTHER = 4,
1392 READ_ALONE_IN_QUEUE = 5,
1393 READ_DEPENDENCY_TYPE_MAX = 6,
1396 ReadDependencyType type = READ_FOLLOWS_OTHER;
1397 if (operation.alone_in_queue()) {
1398 type = READ_ALONE_IN_QUEUE;
1399 } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
1400 type = READ_FOLLOWS_READ;
1401 } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
1402 if (executing_operation_->ConflictsWith(operation))
1403 type = READ_FOLLOWS_CONFLICTING_WRITE;
1405 type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
1407 SIMPLE_CACHE_UMA(ENUMERATION,
1408 "ReadIsParallelizable", cache_type_,
1409 type, READ_DEPENDENCY_TYPE_MAX);
1412 void SimpleEntryImpl::RecordWriteDependencyType(
1413 const SimpleEntryOperation& operation) const {
1414 if (!executing_operation_)
1416 // Used in histograms, please only add entries at the end.
1417 enum WriteDependencyType {
1418 WRITE_OPTIMISTIC = 0,
1419 WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
1420 WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
1421 WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
1422 WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
1423 WRITE_FOLLOWS_CONFLICTING_READ = 5,
1424 WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
1425 WRITE_FOLLOWS_OTHER = 7,
1426 WRITE_DEPENDENCY_TYPE_MAX = 8,
1429 WriteDependencyType type = WRITE_FOLLOWS_OTHER;
1430 if (operation.optimistic()) {
1431 type = WRITE_OPTIMISTIC;
1432 } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
1433 executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
1434 bool conflicting = executing_operation_->ConflictsWith(operation);
1436 if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
1437 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
1438 : WRITE_FOLLOWS_NON_CONFLICTING_READ;
1439 } else if (executing_operation_->optimistic()) {
1440 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
1441 : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
1443 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
1444 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
1447 SIMPLE_CACHE_UMA(ENUMERATION,
1448 "WriteDependencyType", cache_type_,
1449 type, WRITE_DEPENDENCY_TYPE_MAX);
1452 int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
1456 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1459 memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
1460 UpdateDataFromEntryStat(
1461 SimpleEntryStat(base::Time::Now(), last_modified_, data_size_,
1462 sparse_data_size_));
1463 RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1467 int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
1471 // Currently, stream 0 is only used for HTTP headers, which are always written
1472 // with a single, truncating write. Detect these writes and record the size
1473 // changes of the headers. Also, support writes to stream 0 that have
1474 // different access patterns, as required by the API contract.
1475 // All other clients of the Simple Cache are encouraged to use stream 1.
1476 have_written_[0] = true;
1477 int data_size = GetDataSize(0);
1478 if (offset == 0 && truncate) {
1479 RecordHeaderSizeChange(cache_type_, data_size, buf_len);
1480 stream_0_data_->SetCapacity(buf_len);
1481 memcpy(stream_0_data_->data(), buf->data(), buf_len);
1482 data_size_[0] = buf_len;
1484 RecordUnexpectedStream0Write(cache_type_);
1485 const int buffer_size =
1486 truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
1487 stream_0_data_->SetCapacity(buffer_size);
1488 // If |stream_0_data_| was extended, the extension until offset needs to be
1489 // zeroed.
1490 const int fill_size = offset <= data_size ? 0 : offset - data_size;
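// For example, a write at offset 15 into a stream whose current size is 10
// zero-fills bytes [10, 15) below before copying the new data into place.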
1492 memset(stream_0_data_->data() + data_size, 0, fill_size);
1494 memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
1495 data_size_[0] = buffer_size;
1497 base::Time modification_time = base::Time::Now();
1498 AdvanceCrc(buf, offset, buf_len, 0);
1499 UpdateDataFromEntryStat(
1500 SimpleEntryStat(modification_time, modification_time, data_size_,
1501 sparse_data_size_));
1502 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
1506 void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer,
1510 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
1511 // if |offset == 0| or we have already computed the CRC for [0 .. offset).
1512 // We rely on most write operations being sequential, start to end, to
1513 // compute the crc of the data. When we write to an entry and close without
1514 // having done a sequential write, we don't check the CRC on read.
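// A minimal sketch of the same incremental pattern with zlib (illustrative
// only, not part of this implementation):
//   uint32 crc = crc32(0, Z_NULL, 0);   // CRC of the empty prefix.
//   crc = crc32(crc, data, len);        // Now covers [0, len).
// The branch below does exactly this when the write is contiguous with the
// bytes already covered by |crc32s_end_offset_|.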
1515 if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
1516 uint32 initial_crc =
1517 (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
1519 crc32s_[stream_index] = crc32(
1520 initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length);
1522 crc32s_end_offset_[stream_index] = offset + length;
1523 } else if (offset < crc32s_end_offset_[stream_index]) {
1524 // If a range for which the crc32 was already computed is rewritten, the
1525 // computation of the crc32 needs to start from 0 again.
1526 crc32s_end_offset_[stream_index] = 0;
1530 } // namespace disk_cache