1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/files/file_util.h"
7 #include "base/metrics/field_trial.h"
9 #include "base/strings/string_util.h"
10 #include "base/strings/stringprintf.h"
11 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
12 #include "base/thread_task_runner_handle.h"
13 #include "base/threading/platform_thread.h"
14 #include "base/threading/thread_restrictions.h"
15 #include "net/base/cache_type.h"
16 #include "net/base/io_buffer.h"
17 #include "net/base/net_errors.h"
18 #include "net/base/test_completion_callback.h"
19 #include "net/disk_cache/blockfile/backend_impl.h"
20 #include "net/disk_cache/blockfile/entry_impl.h"
21 #include "net/disk_cache/blockfile/experiments.h"
22 #include "net/disk_cache/blockfile/histogram_macros.h"
23 #include "net/disk_cache/blockfile/mapped_file.h"
24 #include "net/disk_cache/cache_util.h"
25 #include "net/disk_cache/disk_cache_test_base.h"
26 #include "net/disk_cache/disk_cache_test_util.h"
27 #include "net/disk_cache/memory/mem_backend_impl.h"
28 #include "net/disk_cache/simple/simple_backend_impl.h"
29 #include "net/disk_cache/simple/simple_entry_format.h"
30 #include "net/disk_cache/simple/simple_test_util.h"
31 #include "net/disk_cache/simple/simple_util.h"
32 #include "testing/gtest/include/gtest/gtest.h"
35 #include "base/win/scoped_handle.h"
38 // Provide a BackendImpl object to macros from histogram_macros.h.
39 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_
45 const char kExistingEntryKey[] = "existing entry key";
// Creates a blockfile backend over |cache_path| (running its operations on
// |cache_thread|) and creates one entry under kExistingEntryKey, so tests can
// open a pre-existing entry. Returns an empty scoped_ptr if backend
// initialization or the entry creation fails.
// NOTE(review): the tail of this helper (closing the entry / returning the
// backend) is not visible in this excerpt — confirm against the full file.
47 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
48 const base::Thread& cache_thread,
49 base::FilePath& cache_path) {
50 net::TestCompletionCallback cb;
52 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
53 cache_path, cache_thread.message_loop_proxy(), NULL));
54 int rv = cache->Init(cb.callback());
55 if (cb.GetResult(rv) != net::OK)
// Init failed: surface the failure as a null backend.
56 return scoped_ptr<disk_cache::BackendImpl>();
58 disk_cache::Entry* entry = NULL;
59 rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
60 if (cb.GetResult(rv) != net::OK)
61 return scoped_ptr<disk_cache::BackendImpl>();
69 // Tests that can run with different types of caches.
70 class DiskCacheBackendTest : public DiskCacheTestWithCache {
72 // Some utility methods:
74 // Perform IO operations on the cache until there is pending IO.
75 int GeneratePendingIO(net::TestCompletionCallback* cb);
77 // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
78 // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
79 // There are 4 entries after doomed_start and 2 after doomed_end.
80 void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
// Creates a fixed number of entries with random keys; see the definition.
82 bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
// Iterates the backend, checking opened keys against |keys_to_match|.
83 bool EnumerateAndMatchKeys(int max_to_open,
85 std::set<std::string>* keys_to_match,
// Actual test bodies, shared by the per-cache-type TEST_F wrappers below:
91 void BackendShutdownWithPendingFileIO(bool fast);
92 void BackendShutdownWithPendingIO(bool fast);
93 void BackendShutdownWithPendingCreate(bool fast);
94 void BackendSetSize();
97 void BackendValidEntry();
98 void BackendInvalidEntry();
99 void BackendInvalidEntryRead();
100 void BackendInvalidEntryWithLoad();
101 void BackendTrimInvalidEntry();
102 void BackendTrimInvalidEntry2();
103 void BackendEnumerations();
104 void BackendEnumerations2();
105 void BackendInvalidEntryEnumeration();
106 void BackendFixEnumerators();
107 void BackendDoomRecent();
108 void BackendDoomBetween();
109 void BackendTransaction(const std::string& name, int num_entries, bool load);
110 void BackendRecoverInsert();
111 void BackendRecoverRemove();
112 void BackendRecoverWithEviction();
113 void BackendInvalidEntry2();
114 void BackendInvalidEntry3();
115 void BackendInvalidEntry7();
116 void BackendInvalidEntry8();
117 void BackendInvalidEntry9(bool eviction);
118 void BackendInvalidEntry10(bool eviction);
119 void BackendInvalidEntry11(bool eviction);
120 void BackendTrimInvalidEntry12();
121 void BackendDoomAll();
122 void BackendDoomAll2();
123 void BackendInvalidRankings();
124 void BackendInvalidRankings2();
125 void BackendDisable();
126 void BackendDisable2();
127 void BackendDisable3();
128 void BackendDisable4();
// Issues 25000-byte writes at 64 KB offsets until one returns
// net::ERR_IO_PENDING, leaving that operation outstanding for the caller.
// Only valid when the test runs the cache on the current thread
// (use_current_thread_); otherwise returns net::ERR_FAILED immediately.
131 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
132 if (!use_current_thread_) {
134 return net::ERR_FAILED;
137 disk_cache::Entry* entry;
138 int rv = cache_->CreateEntry("some key", &entry, cb->callback());
139 if (cb->GetResult(rv) != net::OK)
140 return net::ERR_CACHE_CREATE_FAILURE;
142 const int kSize = 25000;
143 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
144 CacheTestFillBuffer(buffer->data(), kSize, false);
146 for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
147 // We are using the current thread as the cache thread because we want to
148 // be able to call directly this method to make sure that the OS (instead
149 // of us switching thread) is returning IO pending.
150 if (!simple_cache_mode_) {
// Blockfile cache: call the impl directly to bypass the operation queue.
151 rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
152 0, i, buffer.get(), kSize, cb->callback(), false);
154 rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
157 if (rv == net::ERR_IO_PENDING)
160 rv = net::ERR_FAILED;
163 // Don't call Close() to avoid going through the queue or we'll deadlock
164 // waiting for the operation to finish.
165 if (!simple_cache_mode_)
166 static_cast<disk_cache::EntryImpl*>(entry)->Release();
// Builds the five-sparse-entry fixture ("zeroth".."fourth") used by the
// DoomEntriesSince/DoomEntriesBetween tests, capturing |doomed_start| after
// the first entry and |doomed_end| after the third.
173 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
174 base::Time* doomed_end) {
177 const int kSize = 50;
178 // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
179 const int kOffset = 10 + 1024 * 1024;
181 disk_cache::Entry* entry0 = NULL;
182 disk_cache::Entry* entry1 = NULL;
183 disk_cache::Entry* entry2 = NULL;
185 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
186 CacheTestFillBuffer(buffer->data(), kSize, false);
188 ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
189 ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
// Writing past kOffset forces a second sparse child for the same entry.
191 WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
197 *doomed_start = base::Time::Now();
199 // Order in rankings list:
200 // first_part1, first_part2, second_part1, second_part2
201 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
202 ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
204 WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
207 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
208 ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
210 WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
216 *doomed_end = base::Time::Now();
218 // Order in rankings list:
219 // third_part1, fourth_part1, third_part2, fourth_part2
220 disk_cache::Entry* entry3 = NULL;
221 disk_cache::Entry* entry4 = NULL;
222 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
223 ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
224 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
225 ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
227 WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
229 WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
237 // Creates entries based on random keys. Stores these keys in |key_pool|.
// Returns true only if every key was inserted and the backend's entry count
// matches the number of unique keys created.
238 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
239 std::set<std::string>* key_pool) {
240 const int kNumEntries = 10;
242 for (int i = 0; i < kNumEntries; ++i) {
243 std::string key = GenerateKey(true);
244 disk_cache::Entry* entry;
245 if (CreateEntry(key, &entry) != net::OK)
247 key_pool->insert(key);
250 return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
253 // Performs iteration over the backend and checks that the keys of entries
254 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
255 // will be opened, if it is positive. Otherwise, iteration will continue until
256 // OpenNextEntry stops returning net::OK.
// NOTE(review): |iter| and |count| are parameters declared on lines not
// visible in this excerpt — confirm their exact types against the full file.
257 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
260 std::set<std::string>* keys_to_match,
262 disk_cache::Entry* entry;
266 while (iter->OpenNextEntry(&entry) == net::OK) {
// Each opened key must be present exactly once in the expected set.
269 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
272 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
// Exercises the basic entry lifecycle: open-miss, create, re-open (same
// pointer identity), duplicate-create failure, entry counting, and dooming.
279 void DiskCacheBackendTest::BackendBasics() {
281 disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
// Opening a key that was never created must fail.
282 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
283 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
284 ASSERT_TRUE(NULL != entry1);
288 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
289 ASSERT_TRUE(NULL != entry1);
// Creating an already-existing key must fail; opening it must succeed.
293 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
294 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
295 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
296 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
297 ASSERT_TRUE(NULL != entry1);
298 ASSERT_TRUE(NULL != entry2);
299 EXPECT_EQ(2, cache_->GetEntryCount());
301 disk_cache::Entry* entry3 = NULL;
302 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
303 ASSERT_TRUE(NULL != entry3);
// Opening an already-open entry returns the same in-memory object.
304 EXPECT_TRUE(entry2 == entry3);
305 EXPECT_EQ(2, cache_->GetEntryCount());
// Dooming removes entries from the count one at a time.
307 EXPECT_EQ(net::OK, DoomEntry("some other key"));
308 EXPECT_EQ(1, cache_->GetEntryCount());
313 EXPECT_EQ(net::OK, DoomEntry("the first key"));
314 EXPECT_EQ(0, cache_->GetEntryCount());
316 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
317 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
320 EXPECT_EQ(net::OK, DoomEntry("some other key"));
321 EXPECT_EQ(0, cache_->GetEntryCount());
325 TEST_F(DiskCacheBackendTest, Basics) {
329 TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
334 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
339 TEST_F(DiskCacheBackendTest, AppCacheBasics) {
340 SetCacheType(net::APP_CACHE);
344 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
345 SetCacheType(net::SHADER_CACHE);
// Verifies key handling: keys are case sensitive, key lookup is independent
// of the caller's buffer alignment, and both short (block-file) and long
// (external-file) keys are accepted.
349 void DiskCacheBackendTest::BackendKeying() {
351 const char* kName1 = "the first key";
352 const char* kName2 = "the first Key";
353 disk_cache::Entry *entry1, *entry2;
354 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
356 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
357 EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
// NOTE(review): |buffer| and |buffer2| are declared on lines not visible in
// this excerpt — confirm their sizes against the full file.
// Same key copied at offsets 0, 1 and 3 must resolve to the same entry.
361 base::strlcpy(buffer, kName1, arraysize(buffer));
362 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
363 EXPECT_TRUE(entry1 == entry2);
366 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
367 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
368 EXPECT_TRUE(entry1 == entry2);
371 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
372 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
373 EXPECT_TRUE(entry1 == entry2);
376 // Now verify long keys.
378 memset(buffer2, 's', sizeof(buffer2));
379 buffer2[1023] = '\0';
380 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
384 buffer2[19999] = '\0';
385 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
390 TEST_F(DiskCacheBackendTest, Keying) {
394 TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
399 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
404 TEST_F(DiskCacheBackendTest, AppCacheKeying) {
405 SetCacheType(net::APP_CACHE);
409 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
410 SetCacheType(net::SHADER_CACHE);
414 TEST_F(DiskCacheTest, CreateBackend) {
415 net::TestCompletionCallback cb;
418 ASSERT_TRUE(CleanupCacheDir());
419 base::Thread cache_thread("CacheThread");
420 ASSERT_TRUE(cache_thread.StartWithOptions(
421 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
423 // Test the private factory method(s).
424 scoped_ptr<disk_cache::Backend> cache;
425 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
426 ASSERT_TRUE(cache.get());
429 // Now test the public API.
430 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
431 net::CACHE_BACKEND_DEFAULT,
435 cache_thread.task_runner(),
439 ASSERT_EQ(net::OK, cb.GetResult(rv));
440 ASSERT_TRUE(cache.get());
443 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
444 net::CACHE_BACKEND_DEFAULT,
446 false, NULL, NULL, &cache,
448 ASSERT_EQ(net::OK, cb.GetResult(rv));
449 ASSERT_TRUE(cache.get());
453 base::MessageLoop::current()->RunUntilIdle();
456 // Tests that |BackendImpl| fails to initialize with a missing file.
457 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
// Start from a known-good cache snapshot, then delete one data file.
458 ASSERT_TRUE(CopyTestCache("bad_entry"));
459 base::FilePath filename = cache_path_.AppendASCII("data_1");
460 base::DeleteFile(filename, false);
461 base::Thread cache_thread("CacheThread");
462 ASSERT_TRUE(cache_thread.StartWithOptions(
463 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
464 net::TestCompletionCallback cb;
// Forbid IO on this thread to prove init does its file work on cache_thread.
466 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
467 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
468 cache_path_, cache_thread.task_runner(), NULL));
469 int rv = cache->Init(cb.callback());
470 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
471 base::ThreadRestrictions::SetIOAllowed(prev);
// The cache is known-broken, so skip the teardown integrity verification.
474 DisableIntegrityCheck();
// Tests that an unrelated file living in the cache directory under an
// external-file-style name ("f_000001") is not clobbered by cache writes.
477 TEST_F(DiskCacheBackendTest, ExternalFiles) {
479 // First, let's create a file on the folder.
480 base::FilePath filename = cache_path_.AppendASCII("f_000001");
482 const int kSize = 50;
483 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
484 CacheTestFillBuffer(buffer1->data(), kSize, false);
485 ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize));
487 // Now let's create a file with the cache.
488 disk_cache::Entry* entry;
489 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
// A zero-length write at offset 20000 forces the entry to external storage.
490 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
493 // And verify that the first file is still there.
494 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
495 ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
496 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
499 // Tests that we deal with file-level pending operations at destruction time.
500 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
501 ASSERT_TRUE(CleanupCacheDir());
502 uint32 flags = disk_cache::kNoBuffering;
504 flags |= disk_cache::kNoRandom;
// NULL thread: the backend runs on the current thread (see GeneratePendingIO).
507 CreateBackend(flags, NULL);
509 net::TestCompletionCallback cb;
510 int rv = GeneratePendingIO(&cb);
512 // The cache destructor will see one pending operation here.
515 if (rv == net::ERR_IO_PENDING) {
// In fast/simple modes the pending operation must not have completed yet.
516 if (fast || simple_cache_mode_)
517 EXPECT_FALSE(cb.have_result());
519 EXPECT_TRUE(cb.have_result());
522 base::MessageLoop::current()->RunUntilIdle();
525 // Wait for the actual operation to complete, or we'll keep a file handle that
526 // may cause issues later. Note that on iOS systems even though this test
527 // uses a single thread, the actual IO is posted to a worker thread and the
528 // cache destructor breaks the link to reach cb when the operation completes.
529 rv = cb.GetResult(rv);
533 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
534 BackendShutdownWithPendingFileIO(false);
537 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
538 // builds because they contain a lot of intentional memory leaks.
539 // The wrapper scripts used to run tests under Valgrind Memcheck will also
540 // disable these tests. See:
541 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
542 #if !defined(LEAK_SANITIZER)
543 // We'll be leaking from this test.
544 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
545 // The integrity test sets kNoRandom so there's a version mismatch if we don't
546 // force new eviction.
548 BackendShutdownWithPendingFileIO(true);
552 // See crbug.com/330074
554 // Tests that one cache instance is not affected by another one going away.
555 TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
556 base::ScopedTempDir store;
557 ASSERT_TRUE(store.CreateUniqueTempDir());
559 net::TestCompletionCallback cb;
560 scoped_ptr<disk_cache::Backend> extra_cache;
561 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
562 net::CACHE_BACKEND_DEFAULT,
566 base::ThreadTaskRunnerHandle::Get(),
570 ASSERT_EQ(net::OK, cb.GetResult(rv));
571 ASSERT_TRUE(extra_cache.get() != NULL);
573 ASSERT_TRUE(CleanupCacheDir());
574 SetNewEviction(); // Match the expected behavior for integrity verification.
577 CreateBackend(disk_cache::kNoBuffering, NULL);
578 rv = GeneratePendingIO(&cb);
580 // cache_ has a pending operation, and extra_cache will go away.
583 if (rv == net::ERR_IO_PENDING)
584 EXPECT_FALSE(cb.have_result());
586 base::MessageLoop::current()->RunUntilIdle();
588 // Wait for the actual operation to complete, or we'll keep a file handle that
589 // may cause issues later.
590 rv = cb.GetResult(rv);
594 // Tests that we deal with background-thread pending operations.
595 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
596 net::TestCompletionCallback cb;
599 ASSERT_TRUE(CleanupCacheDir());
600 base::Thread cache_thread("CacheThread");
601 ASSERT_TRUE(cache_thread.StartWithOptions(
602 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
604 uint32 flags = disk_cache::kNoBuffering;
606 flags |= disk_cache::kNoRandom;
// Unlike BackendShutdownWithPendingFileIO, the cache runs on its own thread.
608 CreateBackend(flags, &cache_thread);
610 disk_cache::Entry* entry;
611 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
612 ASSERT_EQ(net::OK, cb.GetResult(rv));
616 // The cache destructor will see one pending operation here.
620 base::MessageLoop::current()->RunUntilIdle();
623 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
624 BackendShutdownWithPendingIO(false);
627 #if !defined(LEAK_SANITIZER)
628 // We'll be leaking from this test.
629 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
630 // The integrity test sets kNoRandom so there's a version mismatch if we don't
631 // force new eviction.
633 BackendShutdownWithPendingIO(true);
637 // Tests that we deal with create-type pending operations.
638 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
639 net::TestCompletionCallback cb;
642 ASSERT_TRUE(CleanupCacheDir());
643 base::Thread cache_thread("CacheThread");
644 ASSERT_TRUE(cache_thread.StartWithOptions(
645 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
// In fast mode use no flags; otherwise kNoRandom keeps versions consistent.
647 disk_cache::BackendFlags flags =
648 fast ? disk_cache::kNone : disk_cache::kNoRandom;
649 CreateBackend(flags, &cache_thread);
651 disk_cache::Entry* entry;
// The create must still be pending when the backend is torn down.
652 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
653 ASSERT_EQ(net::ERR_IO_PENDING, rv);
656 EXPECT_FALSE(cb.have_result());
659 base::MessageLoop::current()->RunUntilIdle();
662 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
663 BackendShutdownWithPendingCreate(false);
666 #if !defined(LEAK_SANITIZER)
667 // We'll be leaking an entry from this test.
668 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
669 // The integrity test sets kNoRandom so there's a version mismatch if we don't
670 // force new eviction.
672 BackendShutdownWithPendingCreate(true);
676 // Disabled on android since this test requires cache creator to create
678 #if !defined(OS_ANDROID)
679 TEST_F(DiskCacheTest, TruncatedIndex) {
680 ASSERT_TRUE(CleanupCacheDir());
681 base::FilePath index = cache_path_.AppendASCII("index");
682 ASSERT_EQ(5, base::WriteFile(index, "hello", 5));
684 base::Thread cache_thread("CacheThread");
685 ASSERT_TRUE(cache_thread.StartWithOptions(
686 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
687 net::TestCompletionCallback cb;
689 scoped_ptr<disk_cache::Backend> backend;
690 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
691 net::CACHE_BACKEND_BLOCKFILE,
695 cache_thread.task_runner(),
699 ASSERT_NE(net::OK, cb.GetResult(rv));
701 ASSERT_FALSE(backend);
// Verifies per-file size limits relative to the total cache size, and that
// trimming on overflow evicts the oldest entry while keeping recent ones.
705 void DiskCacheBackendTest::BackendSetSize() {
706 const int cache_size = 0x10000; // 64 kB
707 SetMaxSize(cache_size);
710 std::string first("some key");
711 std::string second("something else");
712 disk_cache::Entry* entry;
713 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
715 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
716 memset(buffer->data(), 0, cache_size);
// A write of 1/10 of the cache size is within the per-file limit.
717 EXPECT_EQ(cache_size / 10,
718 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
// 1/5 of the cache size exceeds the per-file limit and is rejected.
721 EXPECT_EQ(net::ERR_FAILED,
722 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
723 << "file size above the limit";
725 // By doubling the total size, we make this file cacheable.
726 SetMaxSize(cache_size * 2);
727 EXPECT_EQ(cache_size / 5,
728 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
730 // Let's fill up the cache!
731 SetMaxSize(cache_size * 10);
732 EXPECT_EQ(cache_size * 3 / 4,
733 WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
// Shrink the limit back down so the next writes overflow the cache.
737 SetMaxSize(cache_size);
739 // The cache is 95% full.
741 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
742 EXPECT_EQ(cache_size / 10,
743 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
745 disk_cache::Entry* entry2;
746 ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
747 EXPECT_EQ(cache_size / 10,
748 WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
749 entry2->Close(); // This will trigger the cache trim.
// The oldest entry (|first|) must have been evicted by the trim.
751 EXPECT_NE(net::OK, OpenEntry(first, &entry2));
753 FlushQueueForTest(); // Make sure that we are done trimming the cache.
754 FlushQueueForTest(); // We may have posted two tasks to evict stuff.
// The more recent entry survives with its data intact.
757 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
758 EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
762 TEST_F(DiskCacheBackendTest, SetSize) {
766 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
771 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
// Load test: creates 100 entries with random keys, shuffles them, re-opens
// each by key (expecting pointer identity with the open handle), and finally
// expects an empty cache.
776 void DiskCacheBackendTest::BackendLoad() {
// Time-derived seed; failures will not reproduce with the same keys.
778 int seed = static_cast<int>(Time::Now().ToInternalValue());
781 disk_cache::Entry* entries[100];
782 for (int i = 0; i < 100; i++) {
783 std::string key = GenerateKey(true);
784 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
786 EXPECT_EQ(100, cache_->GetEntryCount());
// Shuffle the open handles so re-open order differs from creation order.
788 for (int i = 0; i < 100; i++) {
789 int source1 = rand() % 100;
790 int source2 = rand() % 100;
791 disk_cache::Entry* temp = entries[source1];
792 entries[source1] = entries[source2];
793 entries[source2] = temp;
796 for (int i = 0; i < 100; i++) {
797 disk_cache::Entry* entry;
798 ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
// Re-opening an entry that is still open yields the same object.
799 EXPECT_TRUE(entry == entries[i]);
805 EXPECT_EQ(0, cache_->GetEntryCount());
808 TEST_F(DiskCacheBackendTest, Load) {
809 // Work with a tiny index table (16 entries)
811 SetMaxSize(0x100000);
815 TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
817 // Work with a tiny index table (16 entries)
819 SetMaxSize(0x100000);
823 TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
824 SetMaxSize(0x100000);
829 TEST_F(DiskCacheBackendTest, AppCacheLoad) {
830 SetCacheType(net::APP_CACHE);
831 // Work with a tiny index table (16 entries)
833 SetMaxSize(0x100000);
837 TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
838 SetCacheType(net::SHADER_CACHE);
839 // Work with a tiny index table (16 entries)
841 SetMaxSize(0x100000);
845 // Tests the chaining of an entry to the current head.
846 void DiskCacheBackendTest::BackendChain() {
847 SetMask(0x1); // 2-entry table.
848 SetMaxSize(0x3000); // 12 kB.
851 disk_cache::Entry* entry;
852 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
854 ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
858 TEST_F(DiskCacheBackendTest, Chain) {
862 TEST_F(DiskCacheBackendTest, NewEvictionChain) {
867 TEST_F(DiskCacheBackendTest, AppCacheChain) {
868 SetCacheType(net::APP_CACHE);
872 TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
873 SetCacheType(net::SHADER_CACHE);
877 TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
881 disk_cache::Entry* entry;
882 for (int i = 0; i < 100; i++) {
883 std::string name(base::StringPrintf("Key %d", i));
884 ASSERT_EQ(net::OK, CreateEntry(name, &entry));
887 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
888 ASSERT_EQ(net::OK, OpenEntry(name, &entry));
893 // The first eviction must come from list 1 (10% limit), the second must come
896 EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
898 EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
900 // Double check that we still have the list tails.
901 ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
903 ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
907 // Before looking for invalid entries, let's check a valid entry.
// Writes a string to an entry, re-opens it, and verifies the read-back
// matches byte for byte.
908 void DiskCacheBackendTest::BackendValidEntry() {
911 std::string key("Some key");
912 disk_cache::Entry* entry;
913 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
915 const int kSize = 50;
916 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
917 memset(buffer1->data(), 0, kSize);
918 base::strlcpy(buffer1->data(), "And the data to save", kSize);
919 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
923 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
925 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
926 memset(buffer2->data(), 0, kSize);
927 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
929 EXPECT_STREQ(buffer1->data(), buffer2->data());
932 TEST_F(DiskCacheBackendTest, ValidEntry) {
936 TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
941 // The same logic of the previous test (ValidEntry), but this time force the
942 // entry to be invalid, simulating a crash in the middle.
943 // We'll be leaking memory from this test.
944 void DiskCacheBackendTest::BackendInvalidEntry() {
947 std::string key("Some key");
948 disk_cache::Entry* entry;
949 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
951 const int kSize = 50;
952 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
953 memset(buffer->data(), 0, kSize);
954 base::strlcpy(buffer->data(), "And the data to save", kSize);
955 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
// After the simulated crash the dirty entry must be dropped, not returned.
958 EXPECT_NE(net::OK, OpenEntry(key, &entry));
959 EXPECT_EQ(0, cache_->GetEntryCount());
962 #if !defined(LEAK_SANITIZER)
963 // We'll be leaking memory from this test.
964 TEST_F(DiskCacheBackendTest, InvalidEntry) {
965 BackendInvalidEntry();
968 // We'll be leaking memory from this test.
969 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
971 BackendInvalidEntry();
974 // We'll be leaking memory from this test.
975 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
976 SetCacheType(net::APP_CACHE);
977 BackendInvalidEntry();
980 // We'll be leaking memory from this test.
981 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
982 SetCacheType(net::SHADER_CACHE);
983 BackendInvalidEntry();
986 // Almost the same test, but this time crash the cache after reading an entry.
987 // We'll be leaking memory from this test.
988 void DiskCacheBackendTest::BackendInvalidEntryRead() {
991 std::string key("Some key");
992 disk_cache::Entry* entry;
993 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
995 const int kSize = 50;
996 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
997 memset(buffer->data(), 0, kSize);
998 base::strlcpy(buffer->data(), "And the data to save", kSize);
999 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1001 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1002 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
// The expected post-crash state depends on the cache type:
1006 if (type_ == net::APP_CACHE) {
1007 // Reading an entry and crashing should not make it dirty.
1008 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1009 EXPECT_EQ(1, cache_->GetEntryCount());
// Other cache types treat a read-then-crash entry as dirty and drop it.
1012 EXPECT_NE(net::OK, OpenEntry(key, &entry));
1013 EXPECT_EQ(0, cache_->GetEntryCount());
1017 // We'll be leaking memory from this test.
1018 TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
1019 BackendInvalidEntryRead();
1022 // We'll be leaking memory from this test.
1023 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
1025 BackendInvalidEntryRead();
1028 // We'll be leaking memory from this test.
1029 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
1030 SetCacheType(net::APP_CACHE);
1031 BackendInvalidEntryRead();
1034 // We'll be leaking memory from this test.
1035 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
1036 SetCacheType(net::SHADER_CACHE);
1037 BackendInvalidEntryRead();
1040 // We'll be leaking memory from this test.
1041 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
1042 // Work with a tiny index table (16 entries)
1044 SetMaxSize(0x100000);
1047 int seed = static_cast<int>(Time::Now().ToInternalValue());
1050 const int kNumEntries = 100;
1051 disk_cache::Entry* entries[kNumEntries];
1052 for (int i = 0; i < kNumEntries; i++) {
1053 std::string key = GenerateKey(true);
1054 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
1056 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1058 for (int i = 0; i < kNumEntries; i++) {
1059 int source1 = rand() % kNumEntries;
1060 int source2 = rand() % kNumEntries;
1061 disk_cache::Entry* temp = entries[source1];
1062 entries[source1] = entries[source2];
1063 entries[source2] = temp;
1066 std::string keys[kNumEntries];
1067 for (int i = 0; i < kNumEntries; i++) {
1068 keys[i] = entries[i]->GetKey();
1069 if (i < kNumEntries / 2)
1070 entries[i]->Close();
1075 for (int i = kNumEntries / 2; i < kNumEntries; i++) {
1076 disk_cache::Entry* entry;
1077 EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
1080 for (int i = 0; i < kNumEntries / 2; i++) {
1081 disk_cache::Entry* entry;
1082 ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
1086 EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
1089 // We'll be leaking memory from this test.
1090 TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
1091 BackendInvalidEntryWithLoad();
1094 // We'll be leaking memory from this test.
1095 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
1097 BackendInvalidEntryWithLoad();
1100 // We'll be leaking memory from this test.
1101 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
1102 SetCacheType(net::APP_CACHE);
1103 BackendInvalidEntryWithLoad();
1106 // We'll be leaking memory from this test.
1107 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
1108 SetCacheType(net::SHADER_CACHE);
1109 BackendInvalidEntryWithLoad();
1112 // We'll be leaking memory from this test.
// Verifies that trimming evicts a dirty (post-crash) entry rather than a
// freshly written valid one.
1113 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1114 const int kSize = 0x3000; // 12 kB
1115 SetMaxSize(kSize * 10);
1118 std::string first("some key");
1119 std::string second("something else");
1120 disk_cache::Entry* entry;
1121 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
1123 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1124 memset(buffer->data(), 0, kSize);
1125 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1127 // Simulate a crash.
1130 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
1131 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1133 EXPECT_EQ(2, cache_->GetEntryCount());
1135 entry->Close(); // Trim the cache.
1136 FlushQueueForTest();
1138 // If we evicted the entry in less than 20mS, we have one entry in the cache;
1139 // if it took more than that, we posted a task and we'll delete the second
1141 base::MessageLoop::current()->RunUntilIdle();
1143 // This may not be thread-safe in general, but for now it's OK so add some
1144 // ThreadSanitizer annotations to ignore data races on cache_.
1145 // See http://crbug.com/55970
1146 ANNOTATE_IGNORE_READS_BEGIN();
1147 EXPECT_GE(1, cache_->GetEntryCount());
1148 ANNOTATE_IGNORE_READS_END();
// The dirty entry (|first|) is the one that must be gone.
1150 EXPECT_NE(net::OK, OpenEntry(first, &entry));
1153 // We'll be leaking memory from this test.
1154 TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
1155 BackendTrimInvalidEntry();
1158 // We'll be leaking memory from this test.
1159 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
1161 BackendTrimInvalidEntry();
1164 // We'll be leaking memory from this test.
// Variant with a tiny (16-entry) hash table so entries chain on collisions.
// Writes 32 open (dirty) entries, simulates a crash, then verifies eviction
// copes with the resulting corrupt chains without hanging or crashing.
// NOTE(review): numbering gaps (1167, 1170-71, 1181, 1184-85, 1187-88, ...)
// show elided lines, including the crash simulation and closing brace.
1165 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1166 SetMask(0xf); // 16-entry table.
1168 const int kSize = 0x3000; // 12 kB
1169 SetMaxSize(kSize * 40);
1172 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1173 memset(buffer->data(), 0, kSize);
1174 disk_cache::Entry* entry;
1176 // Writing 32 entries to this cache chains most of them.
1177 for (int i = 0; i < 32; i++) {
1178 std::string key(base::StringPrintf("some key %d", i));
1179 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1180 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1182 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1183 // Note that we are not closing the entries.
1186 // Simulate a crash.
1189 ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
1190 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1192 FlushQueueForTest();
1193 EXPECT_EQ(33, cache_->GetEntryCount());
1196 // For the new eviction code, all corrupt entries are on the second list so
1197 // they are not going away that easy.
1198 if (new_eviction_) {
1199 EXPECT_EQ(net::OK, DoomAllEntries());
1202 entry->Close(); // Trim the cache.
1203 FlushQueueForTest();
1205 // We may abort the eviction before cleaning up everything.
1206 base::MessageLoop::current()->RunUntilIdle();
1207 FlushQueueForTest();
1208 // If it's not clear enough: we may still have eviction tasks running at this
1209 // time, so the number of entries is changing while we read it.
1210 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1211 EXPECT_GE(30, cache_->GetEntryCount());
1212 ANNOTATE_IGNORE_READS_AND_WRITES_END();
// Wrappers for the chained-collision trim scenario, both eviction modes.
// NOTE(review): line 1222 is elided — presumably SetNewEviction(); confirm.
1215 // We'll be leaking memory from this test.
1216 TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
1217 BackendTrimInvalidEntry2();
1220 // We'll be leaking memory from this test.
1221 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
1223 BackendTrimInvalidEntry2();
1225 #endif // !defined(LEAK_SANITIZER)
// Creates kNumEntries entries, then enumerates the cache twice: the first
// pass records each entry's timestamps and checks they fall inside the
// [initial, final] window; the second pass verifies that enumerating did not
// itself modify the recorded timestamps.
// NOTE(review): numbering gaps (1228, 1230, 1236-37, 1240, 1243, 1253-57,
// 1259, 1261, 1268-71, 1273) show elided lines — the declaration/reset of
// `count`, entry Close() calls and the closing brace are not visible here.
1227 void DiskCacheBackendTest::BackendEnumerations() {
1229 Time initial = Time::Now();
1231 const int kNumEntries = 100;
1232 for (int i = 0; i < kNumEntries; i++) {
1233 std::string key = GenerateKey(true);
1234 disk_cache::Entry* entry;
1235 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1238 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1239 Time final = Time::Now();
1241 disk_cache::Entry* entry;
1242 scoped_ptr<TestIterator> iter = CreateIterator();
1244 Time last_modified[kNumEntries];
1245 Time last_used[kNumEntries];
1246 while (iter->OpenNextEntry(&entry) == net::OK) {
1247 ASSERT_TRUE(NULL != entry);
1248 if (count < kNumEntries) {
1249 last_modified[count] = entry->GetLastModified();
1250 last_used[count] = entry->GetLastUsed();
1251 EXPECT_TRUE(initial <= last_modified[count]);
1252 EXPECT_TRUE(final >= last_modified[count]);
1258 EXPECT_EQ(kNumEntries, count);
1260 iter = CreateIterator();
1262 // The previous enumeration should not have changed the timestamps.
1263 while (iter->OpenNextEntry(&entry) == net::OK) {
1264 ASSERT_TRUE(NULL != entry);
1265 if (count < kNumEntries) {
1266 EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1267 EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1272 EXPECT_EQ(kNumEntries, count);
// Wrappers running BackendEnumerations against each backend flavor.
// NOTE(review): line 1280 is elided — presumably SetNewEviction(); confirm.
1275 TEST_F(DiskCacheBackendTest, Enumerations) {
1276 BackendEnumerations();
1279 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
1281 BackendEnumerations();
1284 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
1285 SetMemoryOnlyMode();
1286 BackendEnumerations();
1289 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
1290 SetCacheType(net::SHADER_CACHE);
1291 BackendEnumerations();
1294 TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
1295 SetCacheType(net::APP_CACHE);
1296 BackendEnumerations();
1299 // Verifies enumerations while entries are open.
// Also checks that touching an entry mid-enumeration reorders (or, for
// APP_CACHE, does not reorder) the iteration, and that an open iterator stays
// valid across entry operations.
// NOTE(review): numbering gaps (1306, 1308, 1310, 1312, 1317, 1319-21, 1325,
// 1327, 1335, 1337-42) show elided lines, including entry Close() calls and
// the closing braces of the if/else and the function.
1300 void DiskCacheBackendTest::BackendEnumerations2() {
1302 const std::string first("first");
1303 const std::string second("second");
1304 disk_cache::Entry *entry1, *entry2;
1305 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1307 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1309 FlushQueueForTest();
1311 // Make sure that the timestamp is not the same.
1313 ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
1314 scoped_ptr<TestIterator> iter = CreateIterator();
1315 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1316 EXPECT_EQ(entry2->GetKey(), second);
1318 // Two entries and the iterator pointing at "first".
1322 // The iterator should still be valid, so we should not crash.
1323 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1324 EXPECT_EQ(entry2->GetKey(), first);
1326 iter = CreateIterator();
1328 // Modify the oldest entry and get the newest element.
1329 ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
1330 EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
1331 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1332 if (type_ == net::APP_CACHE) {
1333 // The list is not updated.
1334 EXPECT_EQ(entry2->GetKey(), second);
1336 EXPECT_EQ(entry2->GetKey(), first);
// Wrappers running BackendEnumerations2 against each backend flavor.
// NOTE(review): line 1348 is elided — presumably SetNewEviction(); confirm.
1343 TEST_F(DiskCacheBackendTest, Enumerations2) {
1344 BackendEnumerations2();
1347 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
1349 BackendEnumerations2();
1352 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
1353 SetMemoryOnlyMode();
1354 BackendEnumerations2();
1357 TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
1358 SetCacheType(net::APP_CACHE);
1359 BackendEnumerations2();
1362 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
1363 SetCacheType(net::SHADER_CACHE);
1364 BackendEnumerations2();
1367 // Verify that ReadData calls do not update the LRU cache
1368 // when using the SHADER_CACHE type.
// Creates two entries, reads back the older one, then checks the iterator
// still returns "second" first — i.e. the read did not promote "first".
// NOTE(review): numbering gaps (1371, 1377, 1382, 1384-85, 1387, 1389-90,
// 1393-94, 1398-1400) show elided lines, including Close() calls and the
// closing brace.
1369 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
1370 SetCacheType(net::SHADER_CACHE);
1372 const std::string first("first");
1373 const std::string second("second");
1374 disk_cache::Entry *entry1, *entry2;
1375 const int kSize = 50;
1376 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1378 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1379 memset(buffer1->data(), 0, kSize);
1380 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1381 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1383 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1386 FlushQueueForTest();
1388 // Make sure that the timestamp is not the same.
1391 // Read from the last item in the LRU.
1392 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1395 scoped_ptr<TestIterator> iter = CreateIterator();
1396 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1397 EXPECT_EQ(entry2->GetKey(), second);
1401 #if !defined(LEAK_SANITIZER)
1402 // Verify handling of invalid entries while doing enumerations.
1403 // We'll be leaking memory from this test.
// Leaves one entry dirty (via an elided crash simulation, per the numbering
// gap at 1424-26), then checks that enumeration skips the invalid entry and
// only returns the healthy one.
1404 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1407 std::string key("Some key");
1408 disk_cache::Entry *entry, *entry1, *entry2;
1409 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
1411 const int kSize = 50;
1412 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1413 memset(buffer1->data(), 0, kSize);
1414 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1415 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1417 ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
1418 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1420 std::string key2("Another key");
1421 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
1423 ASSERT_EQ(2, cache_->GetEntryCount());
1427 scoped_ptr<TestIterator> iter = CreateIterator();
1429 while (iter->OpenNextEntry(&entry) == net::OK) {
1430 ASSERT_TRUE(NULL != entry);
1431 EXPECT_EQ(key2, entry->GetKey());
1435 EXPECT_EQ(1, count);
1436 EXPECT_EQ(1, cache_->GetEntryCount());
// Wrappers for the invalid-entry enumeration scenario, both eviction modes.
// NOTE(review): line 1446 is elided — presumably SetNewEviction(); confirm.
1439 // We'll be leaking memory from this test.
1440 TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
1441 BackendInvalidEntryEnumeration();
1444 // We'll be leaking memory from this test.
1445 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
1447 BackendInvalidEntryEnumeration();
1449 #endif // !defined(LEAK_SANITIZER)
1451 // Tests that if for some reason entries are modified close to existing cache
1452 // iterators, we don't generate fatal errors or reset the cache.
// Walks two iterators to the middle of the entry list, dooms/modifies the
// entry one iterator points at (the "messing up" around the elided lines
// 1484-88), and verifies the other iterator keeps returning valid entries.
// NOTE(review): numbering gaps (1454-55, 1457-58, 1464-65, 1467, 1472-74,
// 1477-78, 1481, 1484-86, 1488, 1491, 1494-96, 1500-02) show elided lines,
// including srand(seed)/Close() calls and the closing brace.
1453 void DiskCacheBackendTest::BackendFixEnumerators() {
1456 int seed = static_cast<int>(Time::Now().ToInternalValue());
1459 const int kNumEntries = 10;
1460 for (int i = 0; i < kNumEntries; i++) {
1461 std::string key = GenerateKey(true);
1462 disk_cache::Entry* entry;
1463 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1466 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1468 disk_cache::Entry *entry1, *entry2;
1469 scoped_ptr<TestIterator> iter1 = CreateIterator(), iter2 = CreateIterator();
1470 ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
1471 ASSERT_TRUE(NULL != entry1);
1475 // Let's go to the middle of the list.
1476 for (int i = 0; i < kNumEntries / 2; i++) {
1479 ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
1480 ASSERT_TRUE(NULL != entry1);
1482 ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
1483 ASSERT_TRUE(NULL != entry2);
1487 // Messing up with entry1 will modify entry2->next.
1489 ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
1490 ASSERT_TRUE(NULL != entry2);
1492 // The link entry2->entry1 should be broken.
1493 EXPECT_NE(entry2->GetKey(), entry1->GetKey());
1497 // And the second iterator should keep working.
1498 ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
1499 ASSERT_TRUE(NULL != entry2);
// Wrappers for the iterator-fixup scenario, both eviction modes.
// NOTE(review): line 1508 is elided — presumably SetNewEviction(); confirm.
1503 TEST_F(DiskCacheBackendTest, FixEnumerators) {
1504 BackendFixEnumerators();
1507 TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
1509 BackendFixEnumerators();
// Creates four entries in two time buckets, then verifies DoomEntriesSince:
// dooming since `final` removes nothing; dooming since `middle` removes the
// two newest entries, leaving "first"/"second" openable.
// NOTE(review): numbering gaps (1513-14, 1517, 1519, 1521-22, 1524, 1526,
// 1528, 1530-31, 1533, 1537, 1540, 1542-44) show elided lines, including
// Close() calls, AddDelay() style waits and the closing brace.
1512 void DiskCacheBackendTest::BackendDoomRecent() {
1515 disk_cache::Entry *entry;
1516 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1518 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1520 FlushQueueForTest();
1523 Time middle = Time::Now();
1525 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1527 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1529 FlushQueueForTest();
1532 Time final = Time::Now();
1534 ASSERT_EQ(4, cache_->GetEntryCount());
1535 EXPECT_EQ(net::OK, DoomEntriesSince(final));
1536 ASSERT_EQ(4, cache_->GetEntryCount());
1538 EXPECT_EQ(net::OK, DoomEntriesSince(middle));
1539 ASSERT_EQ(2, cache_->GetEntryCount());
1541 ASSERT_EQ(net::OK, OpenEntry("second", &entry));
// Wrappers running BackendDoomRecent against each backend flavor.
// NOTE(review): line 1550 is elided — presumably SetNewEviction(); confirm.
1545 TEST_F(DiskCacheBackendTest, DoomRecent) {
1546 BackendDoomRecent();
1549 TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
1551 BackendDoomRecent();
1554 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
1555 SetMemoryOnlyMode();
1556 BackendDoomRecent();
// DoomEntriesSince over a cache populated with sparse entries, for both the
// in-memory and blockfile backends. The declaration of `start` (base::Time)
// is elided from this listing (numbering gaps at 1561/1568).
1559 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
1560 SetMemoryOnlyMode();
1562 InitSparseCache(&start, NULL);
1563 DoomEntriesSince(start);
1564 EXPECT_EQ(1, cache_->GetEntryCount());
1567 TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
1569 InitSparseCache(&start, NULL);
1570 DoomEntriesSince(start);
1571 // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
1572 // MemBackendImpl does not. That's why expected value differs here from
1573 // MemoryOnlyDoomEntriesSinceSparse.
1574 EXPECT_EQ(3, cache_->GetEntryCount());
// DoomAllEntries must clear a sparse-populated cache completely for both the
// in-memory and blockfile backends.
1577 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
1578 SetMemoryOnlyMode();
1579 InitSparseCache(NULL, NULL);
1580 EXPECT_EQ(net::OK, DoomAllEntries());
1581 EXPECT_EQ(0, cache_->GetEntryCount());
1584 TEST_F(DiskCacheBackendTest, DoomAllSparse) {
1585 InitSparseCache(NULL, NULL);
1586 EXPECT_EQ(net::OK, DoomAllEntries());
1587 EXPECT_EQ(0, cache_->GetEntryCount());
// Creates four entries in three time buckets and verifies DoomEntriesBetween:
// [middle_start, middle_end) removes "second"/"third"; [middle_start, final)
// then removes "fourth", leaving only "first".
// NOTE(review): numbering gaps (1591-92, 1595, 1597-98, 1600, 1602, 1604,
// 1606-07, 1609, 1611, 1613, 1615-16, 1618, 1622, 1624-25, 1628, 1630-32)
// show elided lines, including Close() calls, delays and the closing brace.
1590 void DiskCacheBackendTest::BackendDoomBetween() {
1593 disk_cache::Entry *entry;
1594 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1596 FlushQueueForTest();
1599 Time middle_start = Time::Now();
1601 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1603 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1605 FlushQueueForTest();
1608 Time middle_end = Time::Now();
1610 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1612 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1614 FlushQueueForTest();
1617 Time final = Time::Now();
1619 ASSERT_EQ(4, cache_->GetEntryCount());
1620 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
1621 ASSERT_EQ(2, cache_->GetEntryCount());
1623 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1626 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
1627 ASSERT_EQ(1, cache_->GetEntryCount());
1629 ASSERT_EQ(net::OK, OpenEntry("first", &entry));
// Wrappers running BackendDoomBetween against each backend flavor.
// NOTE(review): line 1638 is elided — presumably SetNewEviction(); confirm.
1633 TEST_F(DiskCacheBackendTest, DoomBetween) {
1634 BackendDoomBetween();
1637 TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
1639 BackendDoomBetween();
1642 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
1643 SetMemoryOnlyMode();
1644 BackendDoomBetween();
// DoomEntriesBetween over a sparse-populated cache, in-memory and blockfile
// variants. Entry-count expectations differ because BackendImpl counts sparse
// child entries while MemBackendImpl does not (see note at lines 1571-73).
1647 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
1648 SetMemoryOnlyMode();
1649 base::Time start, end;
1650 InitSparseCache(&start, &end);
1651 DoomEntriesBetween(start, end);
1652 EXPECT_EQ(3, cache_->GetEntryCount());
1655 end = base::Time::Now();
1656 DoomEntriesBetween(start, end);
1657 EXPECT_EQ(1, cache_->GetEntryCount());
1660 TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
1661 base::Time start, end;
1662 InitSparseCache(&start, &end);
1663 DoomEntriesBetween(start, end);
1664 EXPECT_EQ(9, cache_->GetEntryCount());
1667 end = base::Time::Now();
1668 DoomEntriesBetween(start, end);
1669 EXPECT_EQ(3, cache_->GetEntryCount());
// Shared driver for the crash-recovery tests below: copies the golden cache
// `name`, initializes the backend over it, and verifies the interrupted
// transaction was rolled forward/back (entry count and absence of the key
// involved in the transaction), finishing with an integrity check.
// NOTE(review): numbering gaps (1674, 1677-80, 1682, 1684-89, 1691, 1695,
// 1698, 1702-06, 1708-09) show elided lines — cache init, the `success_`
// bookkeeping, `mask` setup and the closing brace are not visible here.
1672 void DiskCacheBackendTest::BackendTransaction(const std::string& name,
1673 int num_entries, bool load) {
1675 ASSERT_TRUE(CopyTestCache(name));
1676 DisableFirstCleanup();
1681 SetMaxSize(0x100000);
1683 // Clear the settings from the previous run.
1690 ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());
1692 std::string key("the first key");
1693 disk_cache::Entry* entry1;
1694 ASSERT_NE(net::OK, OpenEntry(key, &entry1));
1696 int actual = cache_->GetEntryCount();
1697 if (num_entries != actual) {
1699 // If there is a heavy load, inserting an entry will make another entry
1700 // dirty (on the hash bucket) so two entries are removed.
1701 ASSERT_EQ(num_entries - 1, actual);
1707 ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
// Replays a series of golden caches captured mid-insert (empty cache, one
// entry, and one hundred entries) through BackendTransaction, asserting each
// recovery succeeds.
1711 void DiskCacheBackendTest::BackendRecoverInsert() {
1712 // Tests with an empty cache.
1713 BackendTransaction("insert_empty1", 0, false);
1714 ASSERT_TRUE(success_) << "insert_empty1";
1715 BackendTransaction("insert_empty2", 0, false);
1716 ASSERT_TRUE(success_) << "insert_empty2";
1717 BackendTransaction("insert_empty3", 0, false);
1718 ASSERT_TRUE(success_) << "insert_empty3";
1720 // Tests with one entry on the cache.
1721 BackendTransaction("insert_one1", 1, false);
1722 ASSERT_TRUE(success_) << "insert_one1";
1723 BackendTransaction("insert_one2", 1, false);
1724 ASSERT_TRUE(success_) << "insert_one2";
1725 BackendTransaction("insert_one3", 1, false);
1726 ASSERT_TRUE(success_) << "insert_one3";
1728 // Tests with one hundred entries on the cache, tiny index.
1729 BackendTransaction("insert_load1", 100, true);
1730 ASSERT_TRUE(success_) << "insert_load1";
1731 BackendTransaction("insert_load2", 100, true);
1732 ASSERT_TRUE(success_) << "insert_load2";
// Wrappers for the insert-recovery scenario, both eviction modes.
// NOTE(review): line 1740 is elided — presumably SetNewEviction(); confirm.
1735 TEST_F(DiskCacheBackendTest, RecoverInsert) {
1736 BackendRecoverInsert();
1739 TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
1741 BackendRecoverInsert();
// Replays golden caches captured mid-remove (only element, list head, list
// tail, under load, and two non-revertible cases) through BackendTransaction,
// asserting each recovery succeeds.
1744 void DiskCacheBackendTest::BackendRecoverRemove() {
1745 // Removing the only element.
1746 BackendTransaction("remove_one1", 0, false);
1747 ASSERT_TRUE(success_) << "remove_one1";
1748 BackendTransaction("remove_one2", 0, false);
1749 ASSERT_TRUE(success_) << "remove_one2";
1750 BackendTransaction("remove_one3", 0, false);
1751 ASSERT_TRUE(success_) << "remove_one3";
1753 // Removing the head.
1754 BackendTransaction("remove_head1", 1, false);
1755 ASSERT_TRUE(success_) << "remove_head1";
1756 BackendTransaction("remove_head2", 1, false);
1757 ASSERT_TRUE(success_) << "remove_head2";
1758 BackendTransaction("remove_head3", 1, false);
1759 ASSERT_TRUE(success_) << "remove_head3";
1761 // Removing the tail.
1762 BackendTransaction("remove_tail1", 1, false);
1763 ASSERT_TRUE(success_) << "remove_tail1";
1764 BackendTransaction("remove_tail2", 1, false);
1765 ASSERT_TRUE(success_) << "remove_tail2";
1766 BackendTransaction("remove_tail3", 1, false);
1767 ASSERT_TRUE(success_) << "remove_tail3";
1769 // Removing with one hundred entries on the cache, tiny index.
1770 BackendTransaction("remove_load1", 100, true);
1771 ASSERT_TRUE(success_) << "remove_load1";
1772 BackendTransaction("remove_load2", 100, true);
1773 ASSERT_TRUE(success_) << "remove_load2";
1774 BackendTransaction("remove_load3", 100, true);
1775 ASSERT_TRUE(success_) << "remove_load3";
1777 // This case cannot be reverted.
1778 BackendTransaction("remove_one4", 0, false);
1779 ASSERT_TRUE(success_) << "remove_one4";
1780 BackendTransaction("remove_head4", 1, false);
1781 ASSERT_TRUE(success_) << "remove_head4";
// Remove-recovery tests, disabled on some platforms via the MAYBE_ macros.
// NOTE(review): the surrounding #if/#else/#endif lines (1784, 1787, 1789,
// 1793-94, 1797, 1799, 1801) are elided from this listing — only the two
// #define branches are visible; line 1801 presumably held SetNewEviction().
1785 // http://crbug.com/396392
1786 #define MAYBE_RecoverRemove DISABLED_RecoverRemove
1788 #define MAYBE_RecoverRemove RecoverRemove
1790 TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
1791 BackendRecoverRemove();
1795 // http://crbug.com/396392
1796 #define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
1798 #define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
1800 TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
1802 BackendRecoverRemove();
// Initializes over a copied crashed cache with a size limit that forces
// eviction during recovery; the only assertion is that we do not crash, so
// the integrity check is disabled.
// NOTE(review): elided lines (1806, 1809-12, 1814) hide the SetMaxSize/init
// calls referenced by the "should not crash" comment; confirm upstream.
1805 void DiskCacheBackendTest::BackendRecoverWithEviction() {
1807 ASSERT_TRUE(CopyTestCache("insert_load1"));
1808 DisableFirstCleanup();
1813 // We should not crash here.
1815 DisableIntegrityCheck();
1818 TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
1819 BackendRecoverWithEviction();
1822 TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
1824 BackendRecoverWithEviction();
1827 // Tests that the |BackendImpl| fails to start with the wrong cache version.
// Initializes a BackendImpl directly (bypassing the cache creator) over the
// "wrong_version" golden cache and expects Init to report ERR_FAILED.
1828 TEST_F(DiskCacheTest, WrongVersion) {
1829 ASSERT_TRUE(CopyTestCache("wrong_version"));
1830 base::Thread cache_thread("CacheThread");
1831 ASSERT_TRUE(cache_thread.StartWithOptions(
1832 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1833 net::TestCompletionCallback cb;
1835 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1836 cache_path_, cache_thread.task_runner(), NULL));
1837 int rv = cache->Init(cb.callback());
1838 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
// Entropy provider used to force field-trial group assignment in the
// SimpleCacheControl* tests below.
// NOTE(review): elided lines (1842, 1844, 1847-49) hide the access
// specifiers, GetEntropyForTrial's return statement and the closing brace.
1841 class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
1843 virtual ~BadEntropyProvider() {}
1845 virtual double GetEntropyForTrial(const std::string& trial_name,
1846 uint32 randomization_seed) const OVERRIDE {
1851 // Tests that the disk cache successfully joins the control group, dropping the
1852 // existing cache in favour of a new empty cache.
1853 // Disabled on android since this test requires cache creator to create
1854 // blockfile caches.
// NOTE(review): elided lines (1860, 1864-65, 1875-77, 1879-81, 1884-85) hide
// the cache teardown and several CreateCacheBackend arguments (path, size,
// force flag, net log, backend out-param, callback); confirm upstream.
1855 #if !defined(OS_ANDROID)
1856 TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
1857 base::Thread cache_thread("CacheThread");
1858 ASSERT_TRUE(cache_thread.StartWithOptions(
1859 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1861 scoped_ptr<disk_cache::BackendImpl> cache =
1862 CreateExistingEntryCache(cache_thread, cache_path_);
1863 ASSERT_TRUE(cache.get());
1866 // Instantiate the SimpleCacheTrial, forcing this run into the
1867 // ExperimentControl group.
1868 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1869 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1870 "ExperimentControl");
1871 net::TestCompletionCallback cb;
1872 scoped_ptr<disk_cache::Backend> base_cache;
1873 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1874 net::CACHE_BACKEND_BLOCKFILE,
1878 cache_thread.task_runner(),
1882 ASSERT_EQ(net::OK, cb.GetResult(rv));
1883 EXPECT_EQ(0, base_cache->GetEntryCount());
1887 // Tests that the disk cache can restart in the control group preserving
1888 // existing entries.
// Re-creates the BackendImpl five times over the same path while pinned to
// the ExperimentControl group; each incarnation must still contain and open
// the pre-seeded entry (kExistingEntryKey).
// NOTE(review): elided lines (1895, 1899, 1903, 1905, 1913, 1917-21) hide
// entry->Close(), loop/test closing braces and teardown; confirm upstream.
1889 TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
1890 // Instantiate the SimpleCacheTrial, forcing this run into the
1891 // ExperimentControl group.
1892 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1893 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1894 "ExperimentControl");
1896 base::Thread cache_thread("CacheThread");
1897 ASSERT_TRUE(cache_thread.StartWithOptions(
1898 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1900 scoped_ptr<disk_cache::BackendImpl> cache =
1901 CreateExistingEntryCache(cache_thread, cache_path_);
1902 ASSERT_TRUE(cache.get());
1904 net::TestCompletionCallback cb;
1906 const int kRestartCount = 5;
1907 for (int i = 0; i < kRestartCount; ++i) {
1908 cache.reset(new disk_cache::BackendImpl(
1909 cache_path_, cache_thread.message_loop_proxy(), NULL));
1910 int rv = cache->Init(cb.callback());
1911 ASSERT_EQ(net::OK, cb.GetResult(rv));
1912 EXPECT_EQ(1, cache->GetEntryCount());
1914 disk_cache::Entry* entry = NULL;
1915 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1916 EXPECT_EQ(net::OK, cb.GetResult(rv));
1922 // Tests that the disk cache can leave the control group preserving existing
// entries: first creates the cache while in ExperimentControl (inner scope,
// its closing brace is elided), then restarts five times in ExperimentNo and
// verifies the pre-seeded entry survives each restart.
// NOTE(review): elided lines (1923, 1928-29, 1935, 1939-40, 1946, 1954, 1958-62)
// hide scope braces, entry->Close() and teardown; confirm upstream.
1924 TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
1925 base::Thread cache_thread("CacheThread");
1926 ASSERT_TRUE(cache_thread.StartWithOptions(
1927 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1930 // Instantiate the SimpleCacheTrial, forcing this run into the
1931 // ExperimentControl group.
1932 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1933 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1934 "ExperimentControl");
1936 scoped_ptr<disk_cache::BackendImpl> cache =
1937 CreateExistingEntryCache(cache_thread, cache_path_);
1938 ASSERT_TRUE(cache.get());
1941 // Instantiate the SimpleCacheTrial, forcing this run into the
1942 // ExperimentNo group.
1943 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1944 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1945 net::TestCompletionCallback cb;
1947 const int kRestartCount = 5;
1948 for (int i = 0; i < kRestartCount; ++i) {
1949 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1950 cache_path_, cache_thread.message_loop_proxy(), NULL));
1951 int rv = cache->Init(cb.callback());
1952 ASSERT_EQ(net::OK, cb.GetResult(rv));
1953 EXPECT_EQ(1, cache->GetEntryCount());
1955 disk_cache::Entry* entry = NULL;
1956 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1957 EXPECT_EQ(net::OK, cb.GetResult(rv));
1963 // Tests that the cache is properly restarted on recovery error.
1964 // Disabled on android since this test requires cache creator to create
1965 // blockfile caches.
// Creating a backend over a wrong-version cache must delete the old files
// and restart cleanly — all without touching disk from this thread (I/O is
// forbidden via ThreadRestrictions for the duration of the call).
// NOTE(review): elided lines (1969, 1973, 1979-81, 1983-85, 1989, 1991-93)
// hide several CreateCacheBackend arguments and the closing braces.
1966 #if !defined(OS_ANDROID)
1967 TEST_F(DiskCacheBackendTest, DeleteOld) {
1968 ASSERT_TRUE(CopyTestCache("wrong_version"));
1970 base::Thread cache_thread("CacheThread");
1971 ASSERT_TRUE(cache_thread.StartWithOptions(
1972 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1974 net::TestCompletionCallback cb;
1975 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
1976 base::FilePath path(cache_path_);
1977 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1978 net::CACHE_BACKEND_BLOCKFILE,
1982 cache_thread.task_runner(),
1986 path.clear(); // Make sure path was captured by the previous call.
1987 ASSERT_EQ(net::OK, cb.GetResult(rv));
1988 base::ThreadRestrictions::SetIOAllowed(prev);
1990 EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
1994 // We want to be able to deal with messed up entries on disk.
// Opens a cache copied from the "bad_entry" golden files: the good key must
// open, the corrupt one must not, and the integrity check is disabled since
// the on-disk state is known-bad.
// NOTE(review): elided lines (1998-99, 2003-04, 2007-08, 2011-12, 2014) hide
// init, Close() calls, closing braces and presumably SetNewEviction().
1995 void DiskCacheBackendTest::BackendInvalidEntry2() {
1996 ASSERT_TRUE(CopyTestCache("bad_entry"));
1997 DisableFirstCleanup();
2000 disk_cache::Entry *entry1, *entry2;
2001 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
2002 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
2005 // CheckCacheIntegrity will fail at this point.
2006 DisableIntegrityCheck();
2009 TEST_F(DiskCacheBackendTest, InvalidEntry2) {
2010 BackendInvalidEntry2();
2013 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
2015 BackendInvalidEntry2();
2018 // Tests that we don't crash or hang when enumerating this cache.
// Enumerates a dirty cache with a 2-entry table; the loop body (entry
// Close(), elided at 2028-31) simply drains the iterator.
2019 void DiskCacheBackendTest::BackendInvalidEntry3() {
2020 SetMask(0x1); // 2-entry table.
2021 SetMaxSize(0x3000); // 12 kB.
2022 DisableFirstCleanup();
2025 disk_cache::Entry* entry;
2026 scoped_ptr<TestIterator> iter = CreateIterator();
2027 while (iter->OpenNextEntry(&entry) == net::OK) {
// Wrappers using the "dirty_entry3"/"dirty_entry4" golden caches.
// NOTE(review): line 2039 is elided — presumably SetNewEviction(); confirm.
2032 TEST_F(DiskCacheBackendTest, InvalidEntry3) {
2033 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2034 BackendInvalidEntry3();
2037 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
2038 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2040 BackendInvalidEntry3();
2041 DisableIntegrityCheck();
2044 // Test that we handle a dirty entry on the LRU list, already replaced with
2045 // the same key, and with hash collisions.
// NOTE(review): elided lines (2051-55, 2060, 2064-65, 2067-68, 2074-75,
// 2079-83) hide init, TrimForTest-style calls and closing braces for the
// three tests below; confirm upstream.
2046 TEST_F(DiskCacheBackendTest, InvalidEntry4) {
2047 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2048 SetMask(0x1); // 2-entry table.
2049 SetMaxSize(0x3000); // 12 kB.
2050 DisableFirstCleanup();
2056 // Test that we handle a dirty entry on the deleted list, already replaced with
2057 // the same key, and with hash collisions.
2058 TEST_F(DiskCacheBackendTest, InvalidEntry5) {
2059 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2061 SetMask(0x1); // 2-entry table.
2062 SetMaxSize(0x3000); // 12 kB.
2063 DisableFirstCleanup();
2066 TrimDeletedListForTest(false);
2069 TEST_F(DiskCacheBackendTest, InvalidEntry6) {
2070 ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2071 SetMask(0x1); // 2-entry table.
2072 SetMaxSize(0x3000); // 12 kB.
2073 DisableFirstCleanup();
2076 // There is a dirty entry (but marked as clean) at the end, pointing to a
2077 // deleted entry through the hash collision list. We should not re-insert the
2078 // deleted entry into the index table.
2081 // The cache should be clean (as detected by CheckCacheIntegrity).
2084 // Tests that we don't hang when there is a loop on the hash collision list.
2085 // The test cache could be a result of bug 69135.
// Inserting over the looping bucket must resolve the loop: afterwards the new
// key opens and only one entry remains.
// NOTE(review): elided lines (2091-92, 2095, 2098-2101, 2103, 2105-06,
// 2114-15, 2117, 2120-21) hide init, Close() calls and closing braces.
2086 TEST_F(DiskCacheBackendTest, BadNextEntry1) {
2087 ASSERT_TRUE(CopyTestCache("list_loop2"));
2088 SetMask(0x1); // 2-entry table.
2089 SetMaxSize(0x3000); // 12 kB.
2090 DisableFirstCleanup();
2093 // The second entry points at itself, and the first entry is not accessible
2094 // through the index, but it is at the head of the LRU.
2096 disk_cache::Entry* entry;
2097 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
2102 ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
2104 EXPECT_EQ(1, cache_->GetEntryCount());
2107 // Tests that we don't hang when there is a loop on the hash collision list.
2108 // The test cache could be a result of bug 69135.
// Wider loop variant: a lookup for an absent key must terminate (miss).
2109 TEST_F(DiskCacheBackendTest, BadNextEntry2) {
2110 ASSERT_TRUE(CopyTestCache("list_loop3"));
2111 SetMask(0x1); // 2-entry table.
2112 SetMaxSize(0x3000); // 12 kB.
2113 DisableFirstCleanup();
2116 // There is a wide loop of 5 entries.
2118 disk_cache::Entry* entry;
2119 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
// New-eviction variant over "bad_rankings3": removing the dirty second entry
// must not corrupt the list, and the remaining entries stay reachable.
// NOTE(review): elided lines (2125-27, 2132, 2134, 2136-37, 2139-41) hide
// SetNewEviction()/init, the removal under "should not delete the cache",
// Close() calls and the closing brace.
2122 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2123 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2124 DisableFirstCleanup();
2128 // The second entry is dirty, but removing it should not corrupt the list.
2129 disk_cache::Entry* entry;
2130 ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2131 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2133 // This should not delete the cache.
2135 FlushQueueForTest();
2138 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2142 // Tests handling of corrupt entries by keeping the rankings node around, with
// a fatal failure: zeroing rankings()->next makes the list unwalkable, so
// after detecting the bad entry an enumeration wipes the cache (0 entries).
// NOTE(review): elided lines (2143, 2147-48, 2153, 2155, 2159, 2162, 2165,
// 2169, 2175-76) hide the "fatal failure" tail of the comment, init, Close()
// calls and the closing brace.
2144 void DiskCacheBackendTest::BackendInvalidEntry7() {
2145 const int kSize = 0x3000; // 12 kB.
2146 SetMaxSize(kSize * 10);
2149 std::string first("some key");
2150 std::string second("something else");
2151 disk_cache::Entry* entry;
2152 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2154 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2156 // Corrupt this entry.
2157 disk_cache::EntryImpl* entry_impl =
2158 static_cast<disk_cache::EntryImpl*>(entry);
2160 entry_impl->rankings()->Data()->next = 0;
2161 entry_impl->rankings()->Store();
2163 FlushQueueForTest();
2164 EXPECT_EQ(2, cache_->GetEntryCount());
2166 // This should detect the bad entry.
2167 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2168 EXPECT_EQ(1, cache_->GetEntryCount());
2170 // We should delete the cache. The list still has a corrupt node.
2171 scoped_ptr<TestIterator> iter = CreateIterator();
2172 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2173 FlushQueueForTest();
2174 EXPECT_EQ(0, cache_->GetEntryCount());
// Wrappers, both eviction modes (SetNewEviction() at elided line 2182).
2177 TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2178 BackendInvalidEntry7();
2181 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2183 BackendInvalidEntry7();
2186 // Tests handling of corrupt entries by keeping the rankings node around, with
2187 // a non fatal failure.
// Unlike InvalidEntry7, zeroing rankings()->contents is recoverable: after
// the bad entry is dropped, enumeration still returns the surviving entry and
// the cache keeps one entry rather than being wiped.
// NOTE(review): elided lines (2191-92, 2197, 2199, 2203, 2206, 2209, 2213,
// 2217, 2220-21, 2224-25, 2227, 2229-30) hide init, Close() calls, closing
// braces and presumably SetNewEviction() for the second wrapper.
2188 void DiskCacheBackendTest::BackendInvalidEntry8() {
2189 const int kSize = 0x3000; // 12 kB
2190 SetMaxSize(kSize * 10);
2193 std::string first("some key");
2194 std::string second("something else");
2195 disk_cache::Entry* entry;
2196 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2198 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2200 // Corrupt this entry.
2201 disk_cache::EntryImpl* entry_impl =
2202 static_cast<disk_cache::EntryImpl*>(entry);
2204 entry_impl->rankings()->Data()->contents = 0;
2205 entry_impl->rankings()->Store();
2207 FlushQueueForTest();
2208 EXPECT_EQ(2, cache_->GetEntryCount());
2210 // This should detect the bad entry.
2211 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2212 EXPECT_EQ(1, cache_->GetEntryCount());
2214 // We should not delete the cache.
2215 scoped_ptr<TestIterator> iter = CreateIterator();
2216 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2218 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2219 EXPECT_EQ(1, cache_->GetEntryCount());
2222 TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2223 BackendInvalidEntry8();
2226 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2228 BackendInvalidEntry8();
2231 // Tests handling of corrupt entries detected by enumerations. Note that these
2232 // tests (xx9 to xx11) are basically just going through slightly different
2233 // codepaths so they are tightly coupled with the code, but that is better than
2234 // not testing error handling code.
// With |eviction| true the corrupt entry is trimmed away; otherwise the first
// enumeration fails on it without deleting it, a retried enumeration returns
// the surviving entry, and opening the corrupt key finally cleans it up.
// NOTE(review): elided lines (2238-39, 2244, 2246, 2250, 2253, 2256-58, 2260,
// 2262, 2267, 2270, 2272, 2276, 2278-79, 2282, 2285, 2287-88, 2291-92, 2294,
// 2296-97) hide init, the eviction branch structure, Close() calls, closing
// braces and SetNewEviction() in the NewEviction wrappers.
2235 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2236 const int kSize = 0x3000; // 12 kB.
2237 SetMaxSize(kSize * 10);
2240 std::string first("some key");
2241 std::string second("something else");
2242 disk_cache::Entry* entry;
2243 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2245 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2247 // Corrupt this entry.
2248 disk_cache::EntryImpl* entry_impl =
2249 static_cast<disk_cache::EntryImpl*>(entry);
2251 entry_impl->entry()->Data()->state = 0xbad;
2252 entry_impl->entry()->Store();
2254 FlushQueueForTest();
2255 EXPECT_EQ(2, cache_->GetEntryCount());
2259 EXPECT_EQ(1, cache_->GetEntryCount());
2261 EXPECT_EQ(1, cache_->GetEntryCount());
2263 // We should detect the problem through the list, but we should not delete
2264 // the entry, just fail the iteration.
2265 scoped_ptr<TestIterator> iter = CreateIterator();
2266 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2268 // Now a full iteration will work, and return one entry.
2269 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2271 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2273 // This should detect what's left of the bad entry.
2274 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2275 EXPECT_EQ(2, cache_->GetEntryCount());
2277 DisableIntegrityCheck();
2280 TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2281 BackendInvalidEntry9(false);
2284 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2286 BackendInvalidEntry9(false);
2289 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2290 BackendInvalidEntry9(true);
2293 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2295 BackendInvalidEntry9(true);
2298 // Tests handling of corrupt entries detected by enumerations.
// Like BackendInvalidEntry9, but arranges three entries so the corrupt one
// sits in the middle of the rankings list when detection happens.
2299 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2300 const int kSize = 0x3000; // 12 kB.
2301 SetMaxSize(kSize * 10);
2305 std::string first("some key");
2306 std::string second("something else");
2307 disk_cache::Entry* entry;
2308 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
// Re-open and touch the first entry so its list position is updated.
2310 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2311 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2313 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2315 // Corrupt this entry.
2316 disk_cache::EntryImpl* entry_impl =
2317 static_cast<disk_cache::EntryImpl*>(entry);
2319 entry_impl->entry()->Data()->state = 0xbad;
2320 entry_impl->entry()->Store();
2322 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2324 EXPECT_EQ(3, cache_->GetEntryCount());
2327 // List 0: third -> second (bad).
2331 // Detection order: second -> first -> third.
2333 EXPECT_EQ(3, cache_->GetEntryCount());
2335 EXPECT_EQ(2, cache_->GetEntryCount());
2337 EXPECT_EQ(1, cache_->GetEntryCount());
2339 // Detection order: third -> second -> first.
2340 // We should detect the problem through the list, but we should not delete
2342 scoped_ptr<TestIterator> iter = CreateIterator();
2343 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2345 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2346 EXPECT_EQ(first, entry->GetKey());
2348 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
// The cache was deliberately corrupted, so skip the final integrity check.
2350 DisableIntegrityCheck();
2353 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2354 BackendInvalidEntry10(false);
2357 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2358 BackendInvalidEntry10(true);
2361 // Tests handling of corrupt entries detected by enumerations.
// Variant where both pre-existing entries have been re-opened/touched before
// the second one is corrupted, exercising list 1 (see comment below).
2362 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2363 const int kSize = 0x3000; // 12 kB.
2364 SetMaxSize(kSize * 10);
2368 std::string first("some key");
2369 std::string second("something else");
2370 disk_cache::Entry* entry;
2371 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2373 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2374 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2376 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2378 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2379 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2381 // Corrupt this entry.
2382 disk_cache::EntryImpl* entry_impl =
2383 static_cast<disk_cache::EntryImpl*>(entry);
2385 entry_impl->entry()->Data()->state = 0xbad;
2386 entry_impl->entry()->Store();
2388 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2390 FlushQueueForTest();
2391 EXPECT_EQ(3, cache_->GetEntryCount());
2395 // List 1: second (bad) -> first.
2398 // Detection order: third -> first -> second.
2400 EXPECT_EQ(2, cache_->GetEntryCount());
2402 EXPECT_EQ(1, cache_->GetEntryCount());
2404 EXPECT_EQ(1, cache_->GetEntryCount());
2406 // Detection order: third -> second.
2407 // We should detect the problem through the list, but we should not delete
2408 // the entry, just fail the iteration.
2409 scoped_ptr<TestIterator> iter = CreateIterator();
2410 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2412 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2414 // Now a full iteration will work, and return two entries.
2415 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2417 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2419 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
// The cache was deliberately corrupted, so skip the final integrity check.
2421 DisableIntegrityCheck();
2424 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2425 BackendInvalidEntry11(false);
2428 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2429 BackendInvalidEntry11(true);
2432 // Tests handling of corrupt entries in the middle of a long eviction run.
// Corrupts the second of four entries and then relies on further creations
// to trigger eviction; only one entry should survive.
2433 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2434 const int kSize = 0x3000; // 12 kB
2435 SetMaxSize(kSize * 10);
2438 std::string first("some key");
2439 std::string second("something else");
2440 disk_cache::Entry* entry;
2441 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2443 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2445 // Corrupt this entry.
2446 disk_cache::EntryImpl* entry_impl =
2447 static_cast<disk_cache::EntryImpl*>(entry);
2449 entry_impl->entry()->Data()->state = 0xbad;
2450 entry_impl->entry()->Store();
2452 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2454 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
// After eviction runs past the corrupt entry only one entry remains.
2456 EXPECT_EQ(1, cache_->GetEntryCount());
// The cache was deliberately corrupted, so skip the final integrity check.
2458 DisableIntegrityCheck();
2461 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2462 BackendTrimInvalidEntry12();
2465 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2467 BackendTrimInvalidEntry12();
2470 // We want to be able to deal with messed up entries on disk.
// Loads a canned cache with bad rankings data and checks that a damaged entry
// fails to open while an intact one still opens.
2471 void DiskCacheBackendTest::BackendInvalidRankings2() {
2472 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2473 DisableFirstCleanup();
2476 disk_cache::Entry *entry1, *entry2;
2477 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2478 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2481 // CheckCacheIntegrity will fail at this point.
2482 DisableIntegrityCheck();
2485 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2486 BackendInvalidRankings2();
2489 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2491 BackendInvalidRankings2();
2494 // If the LRU is corrupt, we delete the cache.
// Iterating over a cache with a corrupt LRU should fail partway through and
// cause the backend to restart with an empty cache.
2495 void DiskCacheBackendTest::BackendInvalidRankings() {
2496 disk_cache::Entry* entry;
2497 scoped_ptr<TestIterator> iter = CreateIterator();
2498 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2500 EXPECT_EQ(2, cache_->GetEntryCount());
// The second step of the iteration hits the corruption.
2502 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2503 FlushQueueForTest(); // Allow the restart to finish.
2504 EXPECT_EQ(0, cache_->GetEntryCount());
2507 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2508 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2509 DisableFirstCleanup();
2511 BackendInvalidRankings();
2514 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2515 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2516 DisableFirstCleanup();
2519 BackendInvalidRankings();
// The "Failure" variants force cache reinitialization to fail as well.
2522 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2523 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2524 DisableFirstCleanup();
2526 SetTestMode(); // Fail cache reinitialization.
2527 BackendInvalidRankings();
2530 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2531 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2532 DisableFirstCleanup();
2535 SetTestMode(); // Fail cache reinitialization.
2536 BackendInvalidRankings();
2539 // If the LRU is corrupt and we have open entries, we disable the cache.
// With an entry still open, hitting the corrupt LRU disables the cache:
// enumeration fails, the count drops to zero and new creations are rejected.
2540 void DiskCacheBackendTest::BackendDisable() {
2541 disk_cache::Entry *entry1, *entry2;
2542 scoped_ptr<TestIterator> iter = CreateIterator();
2543 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2545 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2546 EXPECT_EQ(0, cache_->GetEntryCount());
2547 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2550 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2551 FlushQueueForTest(); // This one actually allows that task to complete.
2553 EXPECT_EQ(0, cache_->GetEntryCount());
2556 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2557 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2558 DisableFirstCleanup();
2563 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2564 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2565 DisableFirstCleanup();
2571 TEST_F(DiskCacheBackendTest, DisableFailure) {
2572 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2573 DisableFirstCleanup();
2575 SetTestMode(); // Fail cache reinitialization.
2579 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2580 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2581 DisableFirstCleanup();
2584 SetTestMode(); // Fail cache reinitialization.
2588 // This is another type of corruption on the LRU; disable the cache.
// Enumerating a cache whose rankings list loops should terminate (never see
// more entries than exist) and leave the cache disabled and empty.
2589 void DiskCacheBackendTest::BackendDisable2() {
2590 EXPECT_EQ(8, cache_->GetEntryCount());
2592 disk_cache::Entry* entry;
2593 scoped_ptr<TestIterator> iter = CreateIterator();
2595 while (iter->OpenNextEntry(&entry) == net::OK) {
2596 ASSERT_TRUE(NULL != entry);
// A looping list must not produce more than the 8 real entries.
2599 ASSERT_LT(count, 9);
2602 FlushQueueForTest();
2603 EXPECT_EQ(0, cache_->GetEntryCount());
2606 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2607 ASSERT_TRUE(CopyTestCache("list_loop"));
2608 DisableFirstCleanup();
2613 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2614 ASSERT_TRUE(CopyTestCache("list_loop"));
2615 DisableFirstCleanup();
2621 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2622 ASSERT_TRUE(CopyTestCache("list_loop"));
2623 DisableFirstCleanup();
2625 SetTestMode(); // Fail cache reinitialization.
2629 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2630 ASSERT_TRUE(CopyTestCache("list_loop"));
2631 DisableFirstCleanup();
2634 SetTestMode(); // Fail cache reinitialization.
2638 // If the index size changes when we disable the cache, we should not crash.
// Uses the "bad_rankings2" canned cache; after the failed iteration the cache
// restarts and new entries can be created again.
2639 void DiskCacheBackendTest::BackendDisable3() {
2640 disk_cache::Entry *entry1, *entry2;
2641 scoped_ptr<TestIterator> iter = CreateIterator();
2642 EXPECT_EQ(2, cache_->GetEntryCount());
2643 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2646 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2647 FlushQueueForTest();
// The restarted cache accepts new entries.
2649 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2652 EXPECT_EQ(1, cache_->GetEntryCount());
2655 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2656 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2657 DisableFirstCleanup();
2658 SetMaxSize(20 * 1024 * 1024);
2663 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2664 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2665 DisableFirstCleanup();
2666 SetMaxSize(20 * 1024 * 1024);
2672 // If we disable the cache, already open entries should work as far as possible.
// Opens three entries (one small, one with a 20 kB stream), then disables the
// cache via a failed enumeration and verifies the open entries still support
// reads, writes and GetKey().
2673 void DiskCacheBackendTest::BackendDisable4() {
2674 disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2675 scoped_ptr<TestIterator> iter = CreateIterator();
2676 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
// Random keys; NUL-terminate so they are usable as C strings.
2680 CacheTestFillBuffer(key2, sizeof(key2), true);
2681 CacheTestFillBuffer(key3, sizeof(key3), true);
2682 key2[sizeof(key2) - 1] = '\0';
2683 key3[sizeof(key3) - 1] = '\0';
2684 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2685 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2687 const int kBufSize = 20000;
2688 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2689 memset(buf->data(), 0, kBufSize);
2690 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2691 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2693 // This line should disable the cache but not delete it.
2694 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4));
2695 EXPECT_EQ(0, cache_->GetEntryCount());
2697 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
// The already-open entries keep working while the cache is disabled.
2699 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2700 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2701 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2703 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2704 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2705 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2707 std::string key = entry2->GetKey();
2708 EXPECT_EQ(sizeof(key2) - 1, key.size());
2709 key = entry3->GetKey();
2710 EXPECT_EQ(sizeof(key3) - 1, key.size());
2715 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2716 FlushQueueForTest(); // This one actually allows that task to complete.
2718 EXPECT_EQ(0, cache_->GetEntryCount());
2721 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2722 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2723 DisableFirstCleanup();
2728 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2729 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2730 DisableFirstCleanup();
// Verifies that the backend's usage-stats timer gets a chance to run: the
// test just pumps the message loop waiting for a callback that never fires.
2736 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2737 MessageLoopHelper helper;
2739 ASSERT_TRUE(CleanupCacheDir());
2740 scoped_ptr<disk_cache::BackendImpl> cache;
2741 cache.reset(new disk_cache::BackendImpl(
2742 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2743 ASSERT_TRUE(NULL != cache.get());
2744 cache->SetUnitTestMode();
2745 ASSERT_EQ(net::OK, cache->SyncInit());
2747 // Wait for a callback that never comes... about 2 secs :). The message loop
2748 // has to run to allow invocation of the usage timer.
2749 helper.WaitUntilCacheIoFinished(1);
// When initialization fails (wrong on-disk version), the stats timer must
// not be created.
2752 TEST_F(DiskCacheBackendTest, TimerNotCreated) {
2753 ASSERT_TRUE(CopyTestCache("wrong_version"));
2755 scoped_ptr<disk_cache::BackendImpl> cache;
2756 cache.reset(new disk_cache::BackendImpl(
2757 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2758 ASSERT_TRUE(NULL != cache.get());
2759 cache->SetUnitTestMode();
2760 ASSERT_NE(net::OK, cache->SyncInit());
2762 ASSERT_TRUE(NULL == cache->GetTimerForTest());
2764 DisableIntegrityCheck();
// Checks that usage statistics are recorded and that they survive closing and
// re-opening the cache.
2767 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2769 disk_cache::Entry* entry;
2770 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2772 FlushQueueForTest();
2774 disk_cache::StatsItems stats;
2775 cache_->GetStats(&stats);
2776 EXPECT_FALSE(stats.empty());
// The single successful CreateEntry above should be counted exactly once.
2778 disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2779 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2783 // Now open the cache and verify that the stats are still there.
2784 DisableFirstCleanup();
2786 EXPECT_EQ(1, cache_->GetEntryCount());
2789 cache_->GetStats(&stats);
2790 EXPECT_FALSE(stats.empty());
2792 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
// DoomAllEntries() must empty the cache whether or not entries are currently
// open, and dooming an already-doomed entry must be harmless.
2795 void DiskCacheBackendTest::BackendDoomAll() {
2798 disk_cache::Entry *entry1, *entry2;
2799 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2800 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2804 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2805 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2807 ASSERT_EQ(4, cache_->GetEntryCount());
2808 EXPECT_EQ(net::OK, DoomAllEntries());
2809 ASSERT_EQ(0, cache_->GetEntryCount());
2811 // We should stop posting tasks at some point (if we post any).
2812 base::MessageLoop::current()->RunUntilIdle();
2814 disk_cache::Entry *entry3, *entry4;
2815 EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2816 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2817 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
// Doom everything while entry3/entry4 are still open.
2819 EXPECT_EQ(net::OK, DoomAllEntries());
2820 ASSERT_EQ(0, cache_->GetEntryCount());
2824 entry3->Doom(); // The entry should be already doomed, but this must work.
2828 // Now try with all references released.
2829 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2830 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2834 ASSERT_EQ(2, cache_->GetEntryCount());
2835 EXPECT_EQ(net::OK, DoomAllEntries());
2836 ASSERT_EQ(0, cache_->GetEntryCount());
// Dooming an already-empty cache must also succeed.
2838 EXPECT_EQ(net::OK, DoomAllEntries());
2841 TEST_F(DiskCacheBackendTest, DoomAll) {
2845 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2850 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2851 SetMemoryOnlyMode();
2855 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2856 SetCacheType(net::APP_CACHE);
2860 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2861 SetCacheType(net::SHADER_CACHE);
2865 // If the index size changes when we doom the cache, we should not crash.
// Runs against the "bad_rankings2" canned cache; after DoomAllEntries() the
// cache must still accept new entries.
2866 void DiskCacheBackendTest::BackendDoomAll2() {
2867 EXPECT_EQ(2, cache_->GetEntryCount());
2868 EXPECT_EQ(net::OK, DoomAllEntries());
2870 disk_cache::Entry* entry;
2871 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2874 EXPECT_EQ(1, cache_->GetEntryCount());
2877 TEST_F(DiskCacheBackendTest, DoomAll2) {
2878 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2879 DisableFirstCleanup();
2880 SetMaxSize(20 * 1024 * 1024);
2885 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2886 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2887 DisableFirstCleanup();
2888 SetMaxSize(20 * 1024 * 1024);
2894 // We should be able to create the same entry on multiple simultaneous instances
// Two backends (DISK_CACHE and MEDIA_CACHE) in separate temp dirs must each
// be able to create an entry with the same key.
2896 TEST_F(DiskCacheTest, MultipleInstances) {
2897 base::ScopedTempDir store1, store2;
2898 ASSERT_TRUE(store1.CreateUniqueTempDir());
2899 ASSERT_TRUE(store2.CreateUniqueTempDir());
2901 base::Thread cache_thread("CacheThread");
2902 ASSERT_TRUE(cache_thread.StartWithOptions(
2903 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2904 net::TestCompletionCallback cb;
2906 const int kNumberOfCaches = 2;
2907 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2909 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
2910 net::CACHE_BACKEND_DEFAULT,
2914 cache_thread.task_runner(),
2918 ASSERT_EQ(net::OK, cb.GetResult(rv));
2919 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2920 net::CACHE_BACKEND_DEFAULT,
2924 cache_thread.task_runner(),
2928 ASSERT_EQ(net::OK, cb.GetResult(rv));
2930 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2932 std::string key("the first key");
2933 disk_cache::Entry* entry;
// The same key must be creatable independently in each instance.
2934 for (int i = 0; i < kNumberOfCaches; i++) {
2935 rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2936 ASSERT_EQ(net::OK, cb.GetResult(rv));
2941 // Test the six regions of the curve that determines the max cache size.
// PreferredCacheSize() maps available disk space to a cache size through a
// piecewise curve; each region below pins both endpoints of one piece.
2942 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2943 using disk_cache::kDefaultCacheSize;
2944 int64 large_size = kDefaultCacheSize;
2946 // Region 1: expected = available * 0.8
2947 EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
2948 disk_cache::PreferredCacheSize(large_size - 1));
2949 EXPECT_EQ(kDefaultCacheSize * 8 / 10,
2950 disk_cache::PreferredCacheSize(large_size));
2951 EXPECT_EQ(kDefaultCacheSize - 1,
2952 disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
2954 // Region 2: expected = default_size
2955 EXPECT_EQ(kDefaultCacheSize,
2956 disk_cache::PreferredCacheSize(large_size * 10 / 8));
2957 EXPECT_EQ(kDefaultCacheSize,
2958 disk_cache::PreferredCacheSize(large_size * 10 - 1));
2960 // Region 3: expected = available * 0.1
2961 EXPECT_EQ(kDefaultCacheSize,
2962 disk_cache::PreferredCacheSize(large_size * 10));
2963 EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
2964 disk_cache::PreferredCacheSize(large_size * 25 - 1));
2966 // Region 4: expected = default_size * 2.5
2967 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2968 disk_cache::PreferredCacheSize(large_size * 25));
2969 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2970 disk_cache::PreferredCacheSize(large_size * 100 - 1));
2971 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2972 disk_cache::PreferredCacheSize(large_size * 100));
2973 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2974 disk_cache::PreferredCacheSize(large_size * 250 - 1));
2976 // Region 5: expected = available * 0.1
2977 int64 largest_size = kDefaultCacheSize * 4;
2978 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2979 disk_cache::PreferredCacheSize(large_size * 250));
2980 EXPECT_EQ(largest_size - 1,
2981 disk_cache::PreferredCacheSize(largest_size * 100 - 1));
2983 // Region 6: expected = largest possible size
2984 EXPECT_EQ(largest_size,
2985 disk_cache::PreferredCacheSize(largest_size * 100));
2986 EXPECT_EQ(largest_size,
2987 disk_cache::PreferredCacheSize(largest_size * 10000));
2990 // Tests that we can "migrate" a running instance from one experiment group to
// Exercises the CACHE_UMA macro against a live backend for several
// experiment group values.
2992 TEST_F(DiskCacheBackendTest, Histograms) {
2994 disk_cache::BackendImpl* backend_ = cache_impl_; // Needed by the macro.
2996 for (int i = 1; i < 3; i++) {
2997 CACHE_UMA(HOURS, "FillupTime", i, 28);
3001 // Make sure that we keep the total memory used by the internal buffers under
// Repeatedly grows and truncates an entry's streams and checks that the
// backend's internal-buffer accounting returns to zero.
3003 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
3005 std::string key("the first key");
3006 disk_cache::Entry* entry;
3007 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3009 const int kSize = 200;
3010 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3011 CacheTestFillBuffer(buffer->data(), kSize, true);
3013 for (int i = 0; i < 10; i++) {
3015 // Allocate 2MB for this entry.
3016 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
3017 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
3019 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
3021 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
3023 // Delete one of the buffers and truncate the other.
3024 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
3025 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
3027 // Delete the second buffer, writing 10 bytes to disk.
3029 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
// All internal buffers should have been released by now.
3033 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
3036 // This test assumes at least 150MB of system memory.
// Drives IsAllocAllowed()/BufferDeleted() directly to verify the buffer-size
// bookkeeping and the upper allocation limit.
3037 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
3040 const int kOneMB = 1024 * 1024;
3041 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3042 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3044 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3045 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3047 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3048 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
// Releasing a buffer must reduce the tracked total accordingly.
3050 cache_impl_->BufferDeleted(kOneMB);
3051 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3053 // Check the upper limit.
3054 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3056 for (int i = 0; i < 30; i++)
3057 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
// After exhausting the budget, further allocations are rejected.
3059 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
3062 // Tests that sharing of external files works and we are able to delete the
3063 // files when we need to.
3064 TEST_F(DiskCacheBackendTest, FileSharing) {
3067 disk_cache::Addr address(0x80000001);
3068 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3069 base::FilePath name = cache_impl_->GetFileName(address);
3071 scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
// Windows-specific: verify the cache holds the file with delete sharing so
// it can be opened again only when FILE_SHARE_DELETE is requested.
3075 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3076 DWORD access = GENERIC_READ | GENERIC_WRITE;
3077 base::win::ScopedHandle file2(CreateFile(
3078 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3079 EXPECT_FALSE(file2.IsValid());
3081 sharing |= FILE_SHARE_DELETE;
3082 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3083 OPEN_EXISTING, 0, NULL));
3084 EXPECT_TRUE(file2.IsValid());
3087 EXPECT_TRUE(base::DeleteFile(name, false));
3089 // We should be able to use the file.
3090 const int kSize = 200;
3091 char buffer1[kSize];
3092 char buffer2[kSize];
3093 memset(buffer1, 't', kSize);
3094 memset(buffer2, 0, kSize);
// The open handle keeps working even after the name was deleted.
3095 EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3096 EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3097 EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
3099 EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
// OnExternalCacheHit() must refresh an entry's rank so that eviction removes
// the other (less recently used) entry instead.
3102 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3105 disk_cache::Entry* entry;
3107 for (int i = 0; i < 2; ++i) {
3108 std::string key = base::StringPrintf("key%d", i);
3109 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3113 // Ping the oldest entry.
3114 cache_->OnExternalCacheHit("key0");
3118 // Make sure the older key remains.
3119 EXPECT_EQ(1, cache_->GetEntryCount());
3120 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
// Same check for the shader-cache flavor of the backend.
3124 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3125 SetCacheType(net::SHADER_CACHE);
3128 disk_cache::Entry* entry;
3130 for (int i = 0; i < 2; ++i) {
3131 std::string key = base::StringPrintf("key%d", i);
3132 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3136 // Ping the oldest entry.
3137 cache_->OnExternalCacheHit("key0");
3141 // Make sure the older key remains.
3142 EXPECT_EQ(1, cache_->GetEntryCount());
3143 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3147 // The Simple Cache backend requires a few guarantees from the filesystem like
3148 // atomic renaming of recently open files. Those guarantees are not provided in
3149 // general on Windows.
3150 #if defined(OS_POSIX)
// Shutdown behavior with operations still in flight.
3152 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3153 SetCacheType(net::APP_CACHE);
3154 SetSimpleCacheMode();
3155 BackendShutdownWithPendingCreate(false);
3158 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3159 SetCacheType(net::APP_CACHE);
3160 SetSimpleCacheMode();
3161 BackendShutdownWithPendingFileIO(false);
// Re-runs the generic backend scenarios against the Simple Cache backend.
3164 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3165 SetSimpleCacheMode();
3169 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3170 SetCacheType(net::APP_CACHE);
3171 SetSimpleCacheMode();
3175 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3176 SetSimpleCacheMode();
3180 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3181 SetSimpleCacheMode();
3182 SetCacheType(net::APP_CACHE);
3186 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3187 SetSimpleCacheMode();
3191 // MacOS has a default open file limit of 256 files, which is incompatible with
3192 // this simple cache test.
3193 #if defined(OS_MACOSX)
3194 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3196 #define SIMPLE_MAYBE_MACOS(TestName) TestName
// Load tests, disabled on Mac via the macro above.
3199 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3200 SetMaxSize(0x100000);
3201 SetSimpleCacheMode();
3205 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3206 SetCacheType(net::APP_CACHE);
3207 SetSimpleCacheMode();
3208 SetMaxSize(0x100000);
3212 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3213 SetSimpleCacheMode();
3214 BackendDoomRecent();
3217 // crbug.com/330926, crbug.com/370677
3218 TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
3219 SetSimpleCacheMode();
3220 BackendDoomBetween();
3223 TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3224 SetSimpleCacheMode();
3228 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3229 SetCacheType(net::APP_CACHE);
3230 SetSimpleCacheMode();
// Deleting one of an entry's on-disk files must make the open fail and cause
// the backend to clean up the entry's remaining files.
3234 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3235 SetSimpleCacheMode();
3238 const char* key = "the first key";
3239 disk_cache::Entry* entry = NULL;
3241 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3242 ASSERT_TRUE(entry != NULL);
3246 // To make sure the file creation completed we need to call open again so that
3247 // we block until it actually created the files.
3248 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3249 ASSERT_TRUE(entry != NULL);
3253 // Delete one of the files in the entry.
3254 base::FilePath to_delete_file = cache_path_.AppendASCII(
3255 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3256 EXPECT_TRUE(base::PathExists(to_delete_file));
3257 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3259 // Failing to open the entry should delete the rest of these files.
3260 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3262 // Confirm the rest of the files are gone.
3263 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3264 base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3265 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3266 EXPECT_FALSE(base::PathExists(should_be_gone_file));
// Overwriting an entry file's header with a bad magic number must make a
// subsequent open fail.
3270 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3271 SetSimpleCacheMode();
3274 const char* key = "the first key";
3275 disk_cache::Entry* entry = NULL;
3277 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3278 disk_cache::Entry* null = NULL;
3279 ASSERT_NE(null, entry);
3283 // To make sure the file creation completed we need to call open again so that
3284 // we block until it actually created the files.
3285 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3286 ASSERT_NE(null, entry);
3290 // Write an invalid header for stream 0 and stream 1.
3291 base::FilePath entry_file1_path = cache_path_.AppendASCII(
3292 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3294 disk_cache::SimpleFileHeader header;
// Deliberately wrong magic number so the header check fails.
3295 header.initial_magic_number = GG_UINT64_C(0xbadf00d);
3297 implicit_cast<int>(sizeof(header)),
3298 base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3300 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3303 // Tests that the Simple Cache Backend fails to initialize with non-matching
3304 // file structure on disk.
3305 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3306 // Create a cache structure with the |BackendImpl|.
3308 disk_cache::Entry* entry;
3309 const int kSize = 50;
3310 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3311 CacheTestFillBuffer(buffer->data(), kSize, false);
3312 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3313 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3317 // Check that the |SimpleBackendImpl| does not favor this structure.
3318 base::Thread cache_thread("CacheThread");
3319 ASSERT_TRUE(cache_thread.StartWithOptions(
3320 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3321 disk_cache::SimpleBackendImpl* simple_cache =
3322 new disk_cache::SimpleBackendImpl(
3323 cache_path_, 0, net::DISK_CACHE, cache_thread.task_runner(), NULL);
3324 net::TestCompletionCallback cb;
// Init must refuse the blockfile-formatted directory.
3325 int rv = simple_cache->Init(cb.callback());
3326 EXPECT_NE(net::OK, cb.GetResult(rv));
3327 delete simple_cache;
3328 DisableIntegrityCheck();
3331 // Tests that the |BackendImpl| refuses to initialize on top of the files
3332 // generated by the Simple Cache Backend.
3333 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3334 // Create a cache structure with the |SimpleBackendImpl|.
3335 SetSimpleCacheMode();
3337 disk_cache::Entry* entry;
3338 const int kSize = 50;
3339 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3340 CacheTestFillBuffer(buffer->data(), kSize, false);
3341 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3342 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3346 // Check that the |BackendImpl| does not favor this structure.
3347 base::Thread cache_thread("CacheThread");
3348 ASSERT_TRUE(cache_thread.StartWithOptions(
3349 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3350 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3351 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL);
3352 cache->SetUnitTestMode();
3353 net::TestCompletionCallback cb;
// Init must refuse the simple-cache-formatted directory.
3354 int rv = cache->Init(cb.callback());
3355 EXPECT_NE(net::OK, cb.GetResult(rv));
3357 DisableIntegrityCheck();
// Runs the enumerator-invalidation scenario against the Simple Cache.
3360 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3361 SetSimpleCacheMode();
3362 BackendFixEnumerators();
3365 // Tests basic functionality of the SimpleBackend implementation of the
// Enumeration must return every entry exactly once, and holding entries open
// (before or mid-enumeration) must not change that.
3367 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3368 SetSimpleCacheMode();
3370 std::set<std::string> key_pool;
3371 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3373 // Check that enumeration returns all entries.
3374 std::set<std::string> keys_to_match(key_pool);
3375 scoped_ptr<TestIterator> iter = CreateIterator();
3377 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3379 EXPECT_EQ(key_pool.size(), count);
3380 EXPECT_TRUE(keys_to_match.empty());
3382 // Check that opening entries does not affect enumeration.
3383 keys_to_match = key_pool;
3384 iter = CreateIterator();
3386 disk_cache::Entry* entry_opened_before;
3387 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3388 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3393 disk_cache::Entry* entry_opened_middle;
3395 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3396 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3398 entry_opened_before->Close();
3399 entry_opened_middle->Close();
3401 EXPECT_EQ(key_pool.size(), count);
3402 EXPECT_TRUE(keys_to_match.empty());
3405 // Tests that the enumerations are not affected by dooming an entry in the
// Dooms one not-yet-enumerated entry halfway through and expects the
// remaining enumeration to return everything else exactly once.
3407 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3408 SetSimpleCacheMode();
3410 std::set<std::string> key_pool;
3411 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3413 // Check that enumeration returns all entries but the doomed one.
3414 std::set<std::string> keys_to_match(key_pool);
3415 scoped_ptr<TestIterator> iter = CreateIterator();
3417 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3422 std::string key_to_delete = *(keys_to_match.begin());
3423 DoomEntry(key_to_delete);
// Remove the doomed key from both bookkeeping sets before continuing.
3424 keys_to_match.erase(key_to_delete);
3425 key_pool.erase(key_to_delete);
3426 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3429 EXPECT_EQ(key_pool.size(), count);
3430 EXPECT_TRUE(keys_to_match.empty());
3433 // Tests that enumerations are not affected by corrupt files.
3434 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3435 SetSimpleCacheMode();
3437 std::set<std::string> key_pool;
3438 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3440 // Create a corrupt entry. The write/read sequence ensures that the entry will
3441 // have been created before corrupting the platform files, in the case of
3442 // optimistic operations.
3443 const std::string key = "the key";
3444 disk_cache::Entry* corrupted_entry;
3446 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3447 ASSERT_TRUE(corrupted_entry);
3448 const int kSize = 50;
3449 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3450 CacheTestFillBuffer(buffer->data(), kSize, false);
// Write then read back to force the entry onto disk; the assertion macro
// wrapping WriteData begins on a line elided from this excerpt.
3452 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3453 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3454 corrupted_entry->Close();
// Corrupt the entry's on-disk file; arguments continue on elided lines.
3456 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
// The backend still counts the corrupt entry, hence key_pool.size() + 1.
3458 EXPECT_EQ(key_pool.size() + 1,
3459 implicit_cast<size_t>(cache_->GetEntryCount()));
3461 // Check that enumeration returns all entries but the corrupt one.
3462 std::set<std::string> keys_to_match(key_pool);
3463 scoped_ptr<TestIterator> iter = CreateIterator();
// NOTE(review): `count` is declared on a line elided from this excerpt.
3465 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3468 EXPECT_EQ(key_pool.size(), count);
3469 EXPECT_TRUE(keys_to_match.empty());
3472 // Tests that enumerations don't leak memory when the backend is destructed
3474 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
3475 SetSimpleCacheMode();
3477 std::set<std::string> key_pool;
3478 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
// Start an enumeration and keep both the live iterator and one open entry
// around when the test (and thus the backend) is torn down; the leak
// checker on the bots is what actually verifies the property.
3480 scoped_ptr<TestIterator> iter = CreateIterator();
3481 disk_cache::Entry* entry = NULL;
3482 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
// RAII closer guarantees the entry is released even on early ASSERT exit.
3484 disk_cache::ScopedEntryPtr entry_closer(entry);
3487 // This test passes if we don't leak memory.
// Closes a platform-conditional section opened earlier in the file,
// outside this excerpt.
3490 #endif // defined(OS_POSIX)