1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/file_util.h"
7 #include "base/metrics/field_trial.h"
9 #include "base/strings/string_util.h"
10 #include "base/strings/stringprintf.h"
11 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
12 #include "base/threading/platform_thread.h"
13 #include "base/threading/thread_restrictions.h"
14 #include "net/base/cache_type.h"
15 #include "net/base/io_buffer.h"
16 #include "net/base/net_errors.h"
17 #include "net/base/test_completion_callback.h"
18 #include "net/disk_cache/backend_impl.h"
19 #include "net/disk_cache/cache_util.h"
20 #include "net/disk_cache/disk_cache_test_base.h"
21 #include "net/disk_cache/disk_cache_test_util.h"
22 #include "net/disk_cache/entry_impl.h"
23 #include "net/disk_cache/experiments.h"
24 #include "net/disk_cache/histogram_macros.h"
25 #include "net/disk_cache/mapped_file.h"
26 #include "net/disk_cache/mem_backend_impl.h"
27 #include "net/disk_cache/simple/simple_backend_impl.h"
28 #include "net/disk_cache/simple/simple_entry_format.h"
29 #include "net/disk_cache/simple/simple_test_util.h"
30 #include "net/disk_cache/simple/simple_util.h"
31 #include "net/disk_cache/tracing_cache_backend.h"
32 #include "testing/gtest/include/gtest/gtest.h"
35 #include "base/win/scoped_handle.h"
// Key of the single entry pre-created by CreateExistingEntryCache().
42 const char kExistingEntryKey[] = "existing entry key";
// Creates a blockfile BackendImpl over |cache_path| (IO runs on
// |cache_thread|) and creates one entry under kExistingEntryKey.
// Returns an empty scoped_ptr if either Init() or CreateEntry() fails.
// NOTE(review): this excerpt elides lines (content numbering skips 58 -> 66);
// the visible text lacks the tail that closes the entry and returns the
// cache -- confirm against the upstream file before editing.
44 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
45 const base::Thread& cache_thread,
46 base::FilePath& cache_path) {
47 net::TestCompletionCallback cb;
49 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
50 cache_path, cache_thread.message_loop_proxy(), NULL));
51 int rv = cache->Init(cb.callback());
52 if (cb.GetResult(rv) != net::OK)
53 return scoped_ptr<disk_cache::BackendImpl>();
55 disk_cache::Entry* entry = NULL;
56 rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
57 if (cb.GetResult(rv) != net::OK)
58 return scoped_ptr<disk_cache::BackendImpl>();
66 // Tests that can run with different types of caches.
// Test fixture: each BackendXxx() method below is the shared body of a family
// of TEST_Fs that re-run the same scenario against the blockfile, new-eviction,
// memory-only, app-cache, shader-cache and simple backends.
// NOTE(review): the excerpt elides lines inside this declaration (access
// specifiers, several member declarations and the closing brace are not
// visible) -- treat the list below as partial.
67 class DiskCacheBackendTest : public DiskCacheTestWithCache {
69 // Some utility methods:
71 // Perform IO operations on the cache until there is pending IO.
72 int GeneratePendingIO(net::TestCompletionCallback* cb);
74 // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
75 // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
76 // There are 4 entries after doomed_start and 2 after doomed_end.
77 void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
79 bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
80 bool EnumerateAndMatchKeys(int max_to_open,
82 std::set<std::string>* keys_to_match,
// Shared test bodies; each is invoked from one or more TEST_Fs below.
88 void BackendShutdownWithPendingFileIO(bool fast);
89 void BackendShutdownWithPendingIO(bool fast);
90 void BackendShutdownWithPendingCreate(bool fast);
91 void BackendSetSize();
94 void BackendValidEntry();
95 void BackendInvalidEntry();
96 void BackendInvalidEntryRead();
97 void BackendInvalidEntryWithLoad();
98 void BackendTrimInvalidEntry();
99 void BackendTrimInvalidEntry2();
100 void BackendEnumerations();
101 void BackendEnumerations2();
102 void BackendInvalidEntryEnumeration();
103 void BackendFixEnumerators();
104 void BackendDoomRecent();
105 void BackendDoomBetween();
106 void BackendTransaction(const std::string& name, int num_entries, bool load);
107 void BackendRecoverInsert();
108 void BackendRecoverRemove();
109 void BackendRecoverWithEviction();
110 void BackendInvalidEntry2();
111 void BackendInvalidEntry3();
112 void BackendInvalidEntry7();
113 void BackendInvalidEntry8();
114 void BackendInvalidEntry9(bool eviction);
115 void BackendInvalidEntry10(bool eviction);
116 void BackendInvalidEntry11(bool eviction);
117 void BackendTrimInvalidEntry12();
118 void BackendDoomAll();
119 void BackendDoomAll2();
120 void BackendInvalidRankings();
121 void BackendInvalidRankings2();
122 void BackendDisable();
123 void BackendDisable2();
124 void BackendDisable3();
125 void BackendDisable4();
126 void TracingBackendBasics();
// Issues writes against a freshly created entry until the OS reports
// ERR_IO_PENDING, so shutdown-with-pending-IO tests have an in-flight
// operation to race against. Requires use_current_thread_ so the write is
// issued directly (WriteDataImpl) instead of through the cache's queue.
// NOTE(review): the excerpt elides lines here (loop exit, the final return
// and the closing brace are not visible) -- confirm against upstream.
129 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
130 if (!use_current_thread_) {
132 return net::ERR_FAILED;
135 disk_cache::Entry* entry;
136 int rv = cache_->CreateEntry("some key", &entry, cb->callback());
137 if (cb->GetResult(rv) != net::OK)
138 return net::ERR_CACHE_CREATE_FAILURE;
140 const int kSize = 25000;
141 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
142 CacheTestFillBuffer(buffer->data(), kSize, false);
144 for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
145 // We are using the current thread as the cache thread because we want to
146 // be able to call directly this method to make sure that the OS (instead
147 // of us switching thread) is returning IO pending.
148 if (!simple_cache_mode_) {
149 rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
150 0, i, buffer.get(), kSize, cb->callback(), false);
// The elided else-branch writes through the public Entry interface instead.
152 rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
155 if (rv == net::ERR_IO_PENDING)
158 rv = net::ERR_FAILED;
161 // Don't call Close() to avoid going through the queue or we'll deadlock
162 // waiting for the operation to finish.
163 if (!simple_cache_mode_)
164 static_cast<disk_cache::EntryImpl*>(entry)->Release();
// Populates the cache with five sparse entries (two chunks each) and records
// two timestamps: |doomed_start| before the "first"/"second" group and
// |doomed_end| before the "third"/"fourth" group, for use by the
// DoomEntriesSince / DoomEntriesBetween tests.
// NOTE(review): entry Close() calls and the AddDelay()/brace lines are elided
// from this excerpt (numbering skips, e.g. 189 -> 195).
171 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
172 base::Time* doomed_end) {
175 const int kSize = 50;
176 // This must be greater then MemEntryImpl::kMaxSparseEntrySize.
177 const int kOffset = 10 + 1024 * 1024;
179 disk_cache::Entry* entry0 = NULL;
180 disk_cache::Entry* entry1 = NULL;
181 disk_cache::Entry* entry2 = NULL;
183 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
184 CacheTestFillBuffer(buffer->data(), kSize, false);
186 ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
187 ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
189 WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
195 *doomed_start = base::Time::Now();
197 // Order in rankings list:
198 // first_part1, first_part2, second_part1, second_part2
199 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
200 ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
202 WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
205 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
206 ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
208 WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
214 *doomed_end = base::Time::Now();
216 // Order in rankings list:
217 // third_part1, fourth_part1, third_part2, fourth_part2
218 disk_cache::Entry* entry3 = NULL;
219 disk_cache::Entry* entry4 = NULL;
220 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
221 ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
222 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
223 ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
225 WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
227 WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
235 // Creates entries based on random keys. Stores these keys in |key_pool|.
// Returns true only if all entries were created and the backend's entry
// count matches the number of unique keys.
236 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
237 std::set<std::string>* key_pool) {
238 const int kNumEntries = 10;
240 for (int i = 0; i < kNumEntries; ++i) {
241 std::string key = GenerateKey(true);
242 disk_cache::Entry* entry;
243 if (CreateEntry(key, &entry) != net::OK)
// NOTE(review): the early-return on failure and the entry->Close() are
// elided from this excerpt (numbering skips 243 -> 245).
245 key_pool->insert(key);
248 return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
251 // Performs iteration over the backend and checks that the keys of entries
252 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
253 // will be opened, if it is positive. Otherwise, iteration will continue until
254 // OpenNextEntry stops returning net::OK.
255 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
258 std::set<std::string>* keys_to_match,
260 disk_cache::Entry* entry;
262 while (OpenNextEntry(iter, &entry) == net::OK) {
265 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
268 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
// NOTE(review): the loop body tail, final return and closing brace are
// elided from this excerpt -- confirm against upstream.
// Smoke test of the basic backend contract: create/open/doom entries and
// check GetEntryCount() after each mutation. Reused by the Basics,
// NewEvictionBasics, MemoryOnlyBasics, AppCacheBasics and ShaderCacheBasics
// TEST_Fs below (whose one-line bodies are elided from this excerpt).
275 void DiskCacheBackendTest::BackendBasics() {
277 disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
// Opening a key that was never created must fail; creating it must succeed.
278 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
279 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
280 ASSERT_TRUE(NULL != entry1);
284 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
285 ASSERT_TRUE(NULL != entry1);
// Creating an already existing key must fail; re-opening must succeed.
289 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
290 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
291 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
292 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
293 ASSERT_TRUE(NULL != entry1);
294 ASSERT_TRUE(NULL != entry2);
295 EXPECT_EQ(2, cache_->GetEntryCount());
// Opening an already-open entry must yield the same Entry pointer.
297 disk_cache::Entry* entry3 = NULL;
298 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
299 ASSERT_TRUE(NULL != entry3);
300 EXPECT_TRUE(entry2 == entry3);
301 EXPECT_EQ(2, cache_->GetEntryCount());
// Dooming entries must decrement the entry count accordingly.
303 EXPECT_EQ(net::OK, DoomEntry("some other key"));
304 EXPECT_EQ(1, cache_->GetEntryCount());
309 EXPECT_EQ(net::OK, DoomEntry("the first key"));
310 EXPECT_EQ(0, cache_->GetEntryCount());
312 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
313 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
// NOTE(review): a DoomEntry("the first key") between lines 313 and 316 is
// elided in this excerpt; the final count of 0 depends on it.
316 EXPECT_EQ(net::OK, DoomEntry("some other key"));
317 EXPECT_EQ(0, cache_->GetEntryCount());
// TEST_F wrappers; bodies (BackendBasics() call, SetMemoryOnlyMode(), etc.)
// are elided from this excerpt.
321 TEST_F(DiskCacheBackendTest, Basics) {
325 TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
330 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
335 TEST_F(DiskCacheBackendTest, AppCacheBasics) {
336 SetCacheType(net::APP_CACHE);
340 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
341 SetCacheType(net::SHADER_CACHE);
// Verifies key handling: keys are case sensitive, key lookup is independent
// of the buffer the key string lives in (including unaligned buffers), and
// both block-file-resident and external-file-resident (long) keys work.
345 void DiskCacheBackendTest::BackendKeying() {
347 const char* kName1 = "the first key";
348 const char* kName2 = "the first Key";
349 disk_cache::Entry *entry1, *entry2;
350 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
// Same key with different case must create a distinct entry.
352 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
353 EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
// NOTE(review): the declaration of |buffer| (a local char array) is elided
// from this excerpt (numbering skips 353 -> 357).
357 base::strlcpy(buffer, kName1, arraysize(buffer));
358 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
359 EXPECT_TRUE(entry1 == entry2);
// Same key copied at different (unaligned) offsets must still match.
362 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
363 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
364 EXPECT_TRUE(entry1 == entry2);
367 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
368 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
369 EXPECT_TRUE(entry1 == entry2);
372 // Now verify long keys.
374 memset(buffer2, 's', sizeof(buffer2));
375 buffer2[1023] = '\0';
376 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
// A ~20000-char key is too long for the block file and goes to an external
// file (buffer2's declaration/resize is elided from the excerpt).
380 buffer2[19999] = '\0';
381 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
// TEST_F wrappers; bodies are elided from this excerpt.
386 TEST_F(DiskCacheBackendTest, Keying) {
390 TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
395 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
400 TEST_F(DiskCacheBackendTest, AppCacheKeying) {
401 SetCacheType(net::APP_CACHE);
405 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
406 SetCacheType(net::SHADER_CACHE);
// Exercises backend creation via both the private MemBackendImpl factory and
// the public disk_cache::CreateCacheBackend() API, for disk and memory caches.
// NOTE(review): several argument lines of the CreateCacheBackend() calls are
// elided from this excerpt (numbering skips, e.g. 428 -> 432).
410 TEST_F(DiskCacheTest, CreateBackend) {
411 net::TestCompletionCallback cb;
414 ASSERT_TRUE(CleanupCacheDir());
415 base::Thread cache_thread("CacheThread");
416 ASSERT_TRUE(cache_thread.StartWithOptions(
417 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
419 // Test the private factory method(s).
420 scoped_ptr<disk_cache::Backend> cache;
421 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
422 ASSERT_TRUE(cache.get());
425 // Now test the public API.
427 disk_cache::CreateCacheBackend(net::DISK_CACHE,
428 net::CACHE_BACKEND_DEFAULT,
432 cache_thread.message_loop_proxy().get(),
436 ASSERT_EQ(net::OK, cb.GetResult(rv));
437 ASSERT_TRUE(cache.get());
// Memory-only backend does not need a cache thread or path.
440 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
441 net::CACHE_BACKEND_DEFAULT,
443 false, NULL, NULL, &cache,
445 ASSERT_EQ(net::OK, cb.GetResult(rv));
446 ASSERT_TRUE(cache.get());
// Drain any tasks posted during backend destruction.
450 base::MessageLoop::current()->RunUntilIdle();
453 // Tests that |BackendImpl| fails to initialize with a missing file.
// Deletes one of the block files from a copied test cache and expects
// Init() to fail. IO is forbidden on this thread for the duration of Init()
// to prove initialization happens on |cache_thread|.
454 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
455 ASSERT_TRUE(CopyTestCache("bad_entry"));
456 base::FilePath filename = cache_path_.AppendASCII("data_1");
457 base::DeleteFile(filename, false);
458 base::Thread cache_thread("CacheThread");
459 ASSERT_TRUE(cache_thread.StartWithOptions(
460 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
461 net::TestCompletionCallback cb;
463 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
464 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
465 cache_path_, cache_thread.message_loop_proxy().get(), NULL));
466 int rv = cache->Init(cb.callback());
467 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
468 base::ThreadRestrictions::SetIOAllowed(prev);
// The cache is known-broken here, so skip the teardown integrity check.
471 DisableIntegrityCheck();
// Verifies that a stray file named like a cache external file (f_000001) in
// the cache directory is not clobbered when the cache allocates an external
// file for a large (20000-byte offset) write.
474 TEST_F(DiskCacheBackendTest, ExternalFiles) {
476 // First, let's create a file on the folder.
477 base::FilePath filename = cache_path_.AppendASCII("f_000001");
479 const int kSize = 50;
480 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
481 CacheTestFillBuffer(buffer1->data(), kSize, false);
482 ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize));
484 // Now let's create a file with the cache.
485 disk_cache::Entry* entry;
486 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
487 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
// NOTE(review): the entry->Close() between lines 487 and 490 is elided from
// this excerpt.
490 // And verify that the first file is still there.
491 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
492 ASSERT_EQ(kSize, file_util::ReadFile(filename, buffer2->data(), kSize));
493 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
496 // Tests that we deal with file-level pending operations at destruction time.
// |fast| selects the "leaky" fast-shutdown path; in that mode (and for the
// simple backend) the callback must NOT have fired before the backend dies.
// NOTE(review): lines are elided in this excerpt (e.g. the SetSimpleCacheMode
// branch around line 500 and the cache_.reset() near line 510).
497 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
498 ASSERT_TRUE(CleanupCacheDir());
499 uint32 flags = disk_cache::kNoBuffering;
501 flags |= disk_cache::kNoRandom;
504 CreateBackend(flags, NULL);
506 net::TestCompletionCallback cb;
507 int rv = GeneratePendingIO(&cb);
509 // The cache destructor will see one pending operation here.
512 if (rv == net::ERR_IO_PENDING) {
513 if (fast || simple_cache_mode_)
514 EXPECT_FALSE(cb.have_result());
// Elided else-branch: on the slow path the result must already be in.
516 EXPECT_TRUE(cb.have_result());
519 base::MessageLoop::current()->RunUntilIdle();
521 // Wait for the actual operation to complete, or we'll keep a file handle that
522 // may cause issues later.
523 rv = cb.GetResult(rv);
// TEST_F wrappers; bodies are elided from this excerpt.
526 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
527 BackendShutdownWithPendingFileIO(false);
530 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
531 // builds because they contain a lot of intentional memory leaks.
532 // The wrapper scripts used to run tests under Valgrind Memcheck and
533 // Heapchecker will also disable these tests under those tools. See:
534 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
535 // tools/heapcheck/net_unittests.gtest-heapcheck.txt
536 #if !defined(LEAK_SANITIZER)
537 // We'll be leaking from this test.
538 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
539 // The integrity test sets kNoRandom so there's a version mismatch if we don't
540 // force new eviction.
542 BackendShutdownWithPendingFileIO(true);
546 // Tests that one cache instance is not affected by another one going away.
// Creates |extra_cache| in a temp dir, generates pending IO on the main
// cache_, then lets |extra_cache| be destroyed while the IO is in flight.
547 TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
548 base::ScopedTempDir store;
549 ASSERT_TRUE(store.CreateUniqueTempDir());
551 net::TestCompletionCallback cb;
552 scoped_ptr<disk_cache::Backend> extra_cache;
553 int rv = disk_cache::CreateCacheBackend(
554 net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, store.path(), 0,
555 false, base::MessageLoopProxy::current().get(), NULL,
556 &extra_cache, cb.callback());
557 ASSERT_EQ(net::OK, cb.GetResult(rv));
558 ASSERT_TRUE(extra_cache.get() != NULL);
560 ASSERT_TRUE(CleanupCacheDir());
561 SetNewEviction(); // Match the expected behavior for integrity verification.
564 CreateBackend(disk_cache::kNoBuffering, NULL);
565 rv = GeneratePendingIO(&cb);
567 // cache_ has a pending operation, and extra_cache will go away.
// NOTE(review): the extra_cache.reset() between lines 567 and 570 is elided
// from this excerpt.
570 if (rv == net::ERR_IO_PENDING)
571 EXPECT_FALSE(cb.have_result());
573 base::MessageLoop::current()->RunUntilIdle();
575 // Wait for the actual operation to complete, or we'll keep a file handle that
576 // may cause issues later.
577 rv = cb.GetResult(rv);
580 // Tests that we deal with background-thread pending operations.
// Unlike BackendShutdownWithPendingFileIO, this runs the cache on a
// dedicated IO thread; the pending work is the queued entry close/destroy.
// NOTE(review): lines are elided in this excerpt (the fast-mode flag setup
// near line 591, entry->Close() and cache_.reset() around lines 599-605).
581 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
582 net::TestCompletionCallback cb;
585 ASSERT_TRUE(CleanupCacheDir());
586 base::Thread cache_thread("CacheThread");
587 ASSERT_TRUE(cache_thread.StartWithOptions(
588 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
590 uint32 flags = disk_cache::kNoBuffering;
592 flags |= disk_cache::kNoRandom;
594 CreateBackend(flags, &cache_thread);
596 disk_cache::Entry* entry;
597 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
598 ASSERT_EQ(net::OK, cb.GetResult(rv));
602 // The cache destructor will see one pending operation here.
606 base::MessageLoop::current()->RunUntilIdle();
// TEST_F wrappers; bodies are elided from this excerpt.
609 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
610 BackendShutdownWithPendingIO(false);
613 #if !defined(LEAK_SANITIZER)
614 // We'll be leaking from this test.
615 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
616 // The integrity test sets kNoRandom so there's a version mismatch if we don't
617 // force new eviction.
619 BackendShutdownWithPendingIO(true);
623 // Tests that we deal with create-type pending operations.
// CreateEntry() is left pending (ERR_IO_PENDING) when the backend is torn
// down; the callback must never fire.
// NOTE(review): the cache_.reset() between lines 639 and 642 is elided from
// this excerpt, as is the closing brace.
624 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
625 net::TestCompletionCallback cb;
628 ASSERT_TRUE(CleanupCacheDir());
629 base::Thread cache_thread("CacheThread");
630 ASSERT_TRUE(cache_thread.StartWithOptions(
631 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
633 disk_cache::BackendFlags flags =
634 fast ? disk_cache::kNone : disk_cache::kNoRandom;
635 CreateBackend(flags, &cache_thread);
637 disk_cache::Entry* entry;
638 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
639 ASSERT_EQ(net::ERR_IO_PENDING, rv);
642 EXPECT_FALSE(cb.have_result());
645 base::MessageLoop::current()->RunUntilIdle();
// TEST_F wrappers; bodies are elided from this excerpt.
648 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
649 BackendShutdownWithPendingCreate(false);
652 #if !defined(LEAK_SANITIZER)
653 // We'll be leaking an entry from this test.
654 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
655 // The integrity test sets kNoRandom so there's a version mismatch if we don't
656 // force new eviction.
658 BackendShutdownWithPendingCreate(true);
// Writes a bogus 5-byte "index" file and verifies that the blockfile backend
// refuses to initialize over it.
// NOTE(review): several argument lines of CreateCacheBackend() are elided
// from this excerpt (numbering skips 675 -> 679 -> 683).
662 TEST_F(DiskCacheTest, TruncatedIndex) {
663 ASSERT_TRUE(CleanupCacheDir());
664 base::FilePath index = cache_path_.AppendASCII("index");
665 ASSERT_EQ(5, file_util::WriteFile(index, "hello", 5));
667 base::Thread cache_thread("CacheThread");
668 ASSERT_TRUE(cache_thread.StartWithOptions(
669 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
670 net::TestCompletionCallback cb;
672 scoped_ptr<disk_cache::Backend> backend;
674 disk_cache::CreateCacheBackend(net::DISK_CACHE,
675 net::CACHE_BACKEND_BLOCKFILE,
679 cache_thread.message_loop_proxy().get(),
683 ASSERT_NE(net::OK, cb.GetResult(rv));
685 ASSERT_FALSE(backend);
// Verifies max-size enforcement: a single file may not exceed 1/5 of the
// cache size (for streams subject to the limit), raising the max size
// retroactively legalizes a previously rejected write, and shrinking the max
// size triggers eviction of the oldest entry on the next trim.
688 void DiskCacheBackendTest::BackendSetSize() {
689 const int cache_size = 0x10000; // 64 kB
690 SetMaxSize(cache_size);
693 std::string first("some key");
694 std::string second("something else");
695 disk_cache::Entry* entry;
696 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
698 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
699 memset(buffer->data(), 0, cache_size);
700 EXPECT_EQ(cache_size / 10,
701 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
// A write of cache_size/5 to stream 1 exceeds the per-file limit and fails.
704 EXPECT_EQ(net::ERR_FAILED,
705 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
706 << "file size above the limit";
708 // By doubling the total size, we make this file cacheable.
709 SetMaxSize(cache_size * 2);
710 EXPECT_EQ(cache_size / 5,
711 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
713 // Let's fill up the cache!.
714 SetMaxSize(cache_size * 10);
715 EXPECT_EQ(cache_size * 3 / 4,
716 WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
// NOTE(review): an entry->Close() and FlushQueueForTest() around lines
// 717-719 are elided from this excerpt.
720 SetMaxSize(cache_size);
722 // The cache is 95% full.
724 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
725 EXPECT_EQ(cache_size / 10,
726 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
728 disk_cache::Entry* entry2;
729 ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
730 EXPECT_EQ(cache_size / 10,
731 WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
732 entry2->Close(); // This will trigger the cache trim.
// The oldest entry (|first|) must have been evicted by the trim.
734 EXPECT_NE(net::OK, OpenEntry(first, &entry2));
736 FlushQueueForTest(); // Make sure that we are done trimming the cache.
737 FlushQueueForTest(); // We may have posted two tasks to evict stuff.
740 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
741 EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
// TEST_F wrappers; bodies are elided from this excerpt.
745 TEST_F(DiskCacheBackendTest, SetSize) {
749 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
754 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
// Load test: creates 100 entries with random keys, shuffles them, re-opens
// each by key expecting the identical Entry pointer, then dooms everything
// and expects an empty cache.
759 void DiskCacheBackendTest::BackendLoad() {
// Seed the shuffle from the clock; the seed is logged via srand elsewhere
// (the srand(seed) line is elided from this excerpt).
761 int seed = static_cast<int>(Time::Now().ToInternalValue());
764 disk_cache::Entry* entries[100];
765 for (int i = 0; i < 100; i++) {
766 std::string key = GenerateKey(true);
767 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
769 EXPECT_EQ(100, cache_->GetEntryCount());
// Fisher-Yates-style random swap shuffle of the open entries.
771 for (int i = 0; i < 100; i++) {
772 int source1 = rand() % 100;
773 int source2 = rand() % 100;
774 disk_cache::Entry* temp = entries[source1];
775 entries[source1] = entries[source2];
776 entries[source2] = temp;
779 for (int i = 0; i < 100; i++) {
780 disk_cache::Entry* entry;
781 ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
782 EXPECT_TRUE(entry == entries[i]);
// NOTE(review): the Close()/Doom() loop tail around lines 783-787 is elided
// from this excerpt.
788 EXPECT_EQ(0, cache_->GetEntryCount());
// TEST_F wrappers; SetMask()/BackendLoad() calls are elided.
791 TEST_F(DiskCacheBackendTest, Load) {
792 // Work with a tiny index table (16 entries)
794 SetMaxSize(0x100000);
798 TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
800 // Work with a tiny index table (16 entries)
802 SetMaxSize(0x100000);
806 TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
807 SetMaxSize(0x100000);
812 TEST_F(DiskCacheBackendTest, AppCacheLoad) {
813 SetCacheType(net::APP_CACHE);
814 // Work with a tiny index table (16 entries)
816 SetMaxSize(0x100000);
820 TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
821 SetCacheType(net::SHADER_CACHE);
822 // Work with a tiny index table (16 entries)
824 SetMaxSize(0x100000);
828 // Tests the chaining of an entry to the current head.
// With a 2-entry hash table, the second key collides and is chained.
829 void DiskCacheBackendTest::BackendChain() {
830 SetMask(0x1); // 2-entry table.
831 SetMaxSize(0x3000); // 12 kB.
834 disk_cache::Entry* entry;
835 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
837 ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
// NOTE(review): entry->Close() calls and the closing brace are elided from
// this excerpt, as are the bodies of the TEST_F wrappers below.
841 TEST_F(DiskCacheBackendTest, Chain) {
845 TEST_F(DiskCacheBackendTest, NewEvictionChain) {
850 TEST_F(DiskCacheBackendTest, AppCacheChain) {
851 SetCacheType(net::APP_CACHE);
855 TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
856 SetCacheType(net::SHADER_CACHE);
// New-eviction trim test: entries touched recently live on list 1; eviction
// must take the oldest of list 1 first (10% limit), then of list 0, while
// the list tails ("Key 1", "Key 91") survive.
860 TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
864 disk_cache::Entry* entry;
865 for (int i = 0; i < 100; i++) {
866 std::string name(base::StringPrintf("Key %d", i));
867 ASSERT_EQ(net::OK, CreateEntry(name, &entry));
870 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
871 ASSERT_EQ(net::OK, OpenEntry(name, &entry));
876 // The first eviction must come from list 1 (10% limit), the second must come
// (continuation of the comment above is elided: "...from list 0.")
879 EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
881 EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
883 // Double check that we still have the list tails.
884 ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
886 ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
890 // Before looking for invalid entries, let's check a valid entry.
// Writes a string to an entry, reopens it (after an elided
// SimulateCrash/flush step) and verifies the data reads back identically.
891 void DiskCacheBackendTest::BackendValidEntry() {
894 std::string key("Some key");
895 disk_cache::Entry* entry;
896 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
898 const int kSize = 50;
899 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
900 memset(buffer1->data(), 0, kSize);
901 base::strlcpy(buffer1->data(), "And the data to save", kSize);
902 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
// NOTE(review): entry->Close() and a SimulateCrash() between lines 902 and
// 906 are elided from this excerpt.
906 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
908 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
909 memset(buffer2->data(), 0, kSize);
910 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
912 EXPECT_STREQ(buffer1->data(), buffer2->data());
// TEST_F wrappers; bodies are elided from this excerpt.
915 TEST_F(DiskCacheBackendTest, ValidEntry) {
919 TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
924 // The same logic of the previous test (ValidEntry), but this time force the
925 // entry to be invalid, simulating a crash in the middle.
926 // We'll be leaking memory from this test.
// After the (elided) SimulateCrash() the dirty entry must be unopenable and
// the cache must report zero entries.
927 void DiskCacheBackendTest::BackendInvalidEntry() {
930 std::string key("Some key");
931 disk_cache::Entry* entry;
932 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
934 const int kSize = 50;
935 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
936 memset(buffer->data(), 0, kSize);
937 base::strlcpy(buffer->data(), "And the data to save", kSize);
938 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
// NOTE(review): the SimulateCrash() between lines 938 and 941 is elided
// from this excerpt.
941 EXPECT_NE(net::OK, OpenEntry(key, &entry));
942 EXPECT_EQ(0, cache_->GetEntryCount());
945 #if !defined(LEAK_SANITIZER)
946 // We'll be leaking memory from this test.
947 TEST_F(DiskCacheBackendTest, InvalidEntry) {
948 BackendInvalidEntry();
951 // We'll be leaking memory from this test.
952 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
954 BackendInvalidEntry();
957 // We'll be leaking memory from this test.
958 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
959 SetCacheType(net::APP_CACHE);
960 BackendInvalidEntry();
963 // We'll be leaking memory from this test.
964 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
965 SetCacheType(net::SHADER_CACHE);
966 BackendInvalidEntry();
969 // Almost the same test, but this time crash the cache after reading an entry.
970 // We'll be leaking memory from this test.
// APP_CACHE is special-cased: reading must not mark the entry dirty, so it
// survives the crash; every other cache type drops it.
971 void DiskCacheBackendTest::BackendInvalidEntryRead() {
974 std::string key("Some key");
975 disk_cache::Entry* entry;
976 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
978 const int kSize = 50;
979 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
980 memset(buffer->data(), 0, kSize);
981 base::strlcpy(buffer->data(), "And the data to save", kSize);
982 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
// NOTE(review): entry->Close() between lines 982 and 984 is elided.
984 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
985 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
// NOTE(review): the SimulateCrash() between lines 985 and 989 is elided.
989 if (type_ == net::APP_CACHE) {
990 // Reading an entry and crashing should not make it dirty.
991 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
992 EXPECT_EQ(1, cache_->GetEntryCount());
// Elided else-branch: for non-APP_CACHE types the entry is gone.
995 EXPECT_NE(net::OK, OpenEntry(key, &entry));
996 EXPECT_EQ(0, cache_->GetEntryCount());
1000 // We'll be leaking memory from this test.
1001 TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
1002 BackendInvalidEntryRead();
1005 // We'll be leaking memory from this test.
1006 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
1008 BackendInvalidEntryRead();
1011 // We'll be leaking memory from this test.
1012 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
1013 SetCacheType(net::APP_CACHE);
1014 BackendInvalidEntryRead();
1017 // We'll be leaking memory from this test.
1018 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
1019 SetCacheType(net::SHADER_CACHE);
1020 BackendInvalidEntryRead();
1023 // We'll be leaking memory from this test.
// Creates 100 entries, closes half, simulates a crash (elided), then expects
// the still-open (dirty) half to be unopenable and the closed half intact.
1024 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
1025 // Work with a tiny index table (16 entries)
// NOTE(review): the SetMask() call that the comment above refers to is
// elided from this excerpt (numbering skips 1025 -> 1027).
1027 SetMaxSize(0x100000);
1030 int seed = static_cast<int>(Time::Now().ToInternalValue());
1033 const int kNumEntries = 100;
1034 disk_cache::Entry* entries[kNumEntries];
1035 for (int i = 0; i < kNumEntries; i++) {
1036 std::string key = GenerateKey(true);
1037 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
1039 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
// Random swap shuffle so the closed/open split is key-independent.
1041 for (int i = 0; i < kNumEntries; i++) {
1042 int source1 = rand() % kNumEntries;
1043 int source2 = rand() % kNumEntries;
1044 disk_cache::Entry* temp = entries[source1];
1045 entries[source1] = entries[source2];
1046 entries[source2] = temp;
1049 std::string keys[kNumEntries];
1050 for (int i = 0; i < kNumEntries; i++) {
1051 keys[i] = entries[i]->GetKey();
1052 if (i < kNumEntries / 2)
1053 entries[i]->Close();
// NOTE(review): the SimulateCrash() between lines 1053 and 1058 is elided.
1058 for (int i = kNumEntries / 2; i < kNumEntries; i++) {
1059 disk_cache::Entry* entry;
1060 EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
1063 for (int i = 0; i < kNumEntries / 2; i++) {
1064 disk_cache::Entry* entry;
1065 ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
1069 EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
1072 // We'll be leaking memory from this test.
1073 TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
1074 BackendInvalidEntryWithLoad();
1077 // We'll be leaking memory from this test.
1078 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
1080 BackendInvalidEntryWithLoad();
1083 // We'll be leaking memory from this test.
1084 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
1085 SetCacheType(net::APP_CACHE);
1086 BackendInvalidEntryWithLoad();
1089 // We'll be leaking memory from this test.
1090 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
1091 SetCacheType(net::SHADER_CACHE);
1092 BackendInvalidEntryWithLoad();
1095 // We'll be leaking memory from this test.
// Leaves a dirty entry behind (via an elided SimulateCrash), fills the cache
// past its limit and verifies the trim evicts the invalid entry.
1096 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1097 const int kSize = 0x3000; // 12 kB
1098 SetMaxSize(kSize * 10);
1101 std::string first("some key");
1102 std::string second("something else");
1103 disk_cache::Entry* entry;
1104 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
1106 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1107 memset(buffer->data(), 0, kSize);
1108 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1110 // Simulate a crash.
// NOTE(review): the SimulateCrash() call itself is elided (numbering skips
// 1110 -> 1113).
1113 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
1114 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1116 EXPECT_EQ(2, cache_->GetEntryCount());
// NOTE(review): a SetMaxSize() shrinking the cache around line 1117 is
// elided; it is what makes the Close() below trigger a trim.
1118 entry->Close(); // Trim the cache.
1119 FlushQueueForTest();
1121 // If we evicted the entry in less than 20mS, we have one entry in the cache;
1122 // if it took more than that, we posted a task and we'll delete the second
1124 base::MessageLoop::current()->RunUntilIdle();
1126 // This may be not thread-safe in general, but for now it's OK so add some
1127 // ThreadSanitizer annotations to ignore data races on cache_.
1128 // See http://crbug.com/55970
1129 ANNOTATE_IGNORE_READS_BEGIN();
1130 EXPECT_GE(1, cache_->GetEntryCount());
1131 ANNOTATE_IGNORE_READS_END();
1133 EXPECT_NE(net::OK, OpenEntry(first, &entry));
1136 // We'll be leaking memory from this test.
1137 TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
1138 BackendTrimInvalidEntry();
1141 // We'll be leaking memory from this test.
1142 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
1144 BackendTrimInvalidEntry();
// We'll be leaking memory from this test.
// Fills a small (16-bucket) table so entries chain in the hash buckets, then
// simulates a crash and checks that eviction can still trim the mess.
void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 40);

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    // Note that we are not closing the entries.

  // Simulate a crash.
  ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  FlushQueueForTest();
  EXPECT_EQ(33, cache_->GetEntryCount());

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easy.
  if (new_eviction_) {
    EXPECT_EQ(net::OK, DoomAllEntries());

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::MessageLoop::current()->RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_AND_WRITES_END();
// We'll be leaking memory from this test.
// Default blockfile backend, old eviction.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
  BackendTrimInvalidEntry2();
// We'll be leaking memory from this test.
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
  BackendTrimInvalidEntry2();
1208 #endif // !defined(LEAK_SANITIZER)
// Creates kNumEntries entries, enumerates them twice, and checks that the
// first enumeration did not disturb the last-modified/last-used timestamps.
// NOTE(review): declarations of |iter| and |count| (and their resets between
// the two loops) appear to be elided from this listing.
void DiskCacheBackendTest::BackendEnumerations() {
  Time initial = Time::Now();

  const int kNumEntries = 100;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
  Time final = Time::Now();

  disk_cache::Entry* entry;
  Time last_modified[kNumEntries];
  Time last_used[kNumEntries];
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      last_modified[count] = entry->GetLastModified();
      last_used[count] = entry->GetLastUsed();
      EXPECT_TRUE(initial <= last_modified[count]);
      EXPECT_TRUE(final >= last_modified[count]);

  EXPECT_EQ(kNumEntries, count);

  // The previous enumeration should not have changed the timestamps.
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
      EXPECT_TRUE(last_used[count] == entry->GetLastUsed());

  EXPECT_EQ(kNumEntries, count);
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  BackendEnumerations();
// In-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();
// Shader-cache variant of the backend.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();
// App-cache variant of the backend.
TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
// Verifies enumerations while entries are open.
// NOTE(review): the |iter| declaration/reset and several Close() calls appear
// to be elided from this listing.
void DiskCacheBackendTest::BackendEnumerations2() {
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), second);

  // Two entries and the iterator pointing at "first".

  // The iterator should still be valid, so we should not crash.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), first);
  cache_->EndEnumeration(&iter);

  // Modify the oldest entry and get the newest element.
  ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
  EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  if (type_ == net::APP_CACHE) {
    // The list is not updated.
    EXPECT_EQ(entry2->GetKey(), second);
    EXPECT_EQ(entry2->GetKey(), first);

  cache_->EndEnumeration(&iter);
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  BackendEnumerations2();
// In-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
  SetMemoryOnlyMode();
  BackendEnumerations2();
// App-cache variant exercises the "list is not updated" branch of the body.
TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();
// Shader-cache variant of the backend.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
// Verify that ReadData calls do not update the LRU cache
// when using the SHADER_CACHE type: after reading the oldest entry, the
// enumeration still returns |second| first.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));

  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));

  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), second);
  cache_->EndEnumeration(&iter);
1386 #if !defined(LEAK_SANITIZER)
// Verify handling of invalid entries while doing enumerations.
// We'll be leaking memory from this test.
// The first (corrupted) entry is skipped by the enumeration and removed from
// the cache; only |key2| is returned.
void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
  std::string key("Some key");
  disk_cache::Entry *entry, *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));

  std::string key2("Another key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
  ASSERT_EQ(2, cache_->GetEntryCount());

  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    EXPECT_EQ(key2, entry->GetKey());

  EXPECT_EQ(1, count);
  EXPECT_EQ(1, cache_->GetEntryCount());
// We'll be leaking memory from this test.
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
// We'll be leaking memory from this test.
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
1434 #endif // !defined(LEAK_SANITIZER)
// Tests that if for some reason entries are modified close to existing cache
// iterators, we don't generate fatal errors or reset the cache.
// NOTE(review): the |iter1|/|iter2| declarations, srand() seeding and several
// Close()/Doom() calls appear to be elided from this listing.
void DiskCacheBackendTest::BackendFixEnumerators() {
  int seed = static_cast<int>(Time::Now().ToInternalValue());

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
  ASSERT_TRUE(NULL != entry1);

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
    ASSERT_TRUE(NULL != entry1);

  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);

  // Messing up with entry1 will modify entry2->next.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());

  // And the second iterator should keep working.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);

  cache_->EndEnumeration(&iter1);
  cache_->EndEnumeration(&iter2);
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  BackendFixEnumerators();
// Creates four entries in two time buckets, then checks DoomEntriesSince():
// dooming since |final| removes nothing, dooming since |middle| removes the
// two newest entries while "second" survives.
void DiskCacheBackendTest::BackendDoomRecent() {
  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  FlushQueueForTest();

  Time middle = Time::Now();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  FlushQueueForTest();

  Time final = Time::Now();
  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomEntriesSince(final));
  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomEntriesSince(middle));
  ASSERT_EQ(2, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("second", &entry));
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  BackendDoomRecent();
// In-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
// Dooms entries created after |start| in a sparse, memory-only cache.
// NOTE(review): the |start| declaration appears to be elided here.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
// Disk-backed counterpart of MemoryOnlyDoomEntriesSinceSparse.
TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
// DoomAllEntries() empties a sparse, memory-only cache completely.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
// DoomAllEntries() empties a sparse, disk-backed cache completely.
TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
// Creates four entries across three time buckets and checks that
// DoomEntriesBetween() removes exactly the entries inside each interval,
// leaving "fourth" (too new) and then "first" (too old) alive.
void DiskCacheBackendTest::BackendDoomBetween() {
  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  FlushQueueForTest();

  Time middle_start = Time::Now();
  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  FlushQueueForTest();

  Time middle_end = Time::Now();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  FlushQueueForTest();

  Time final = Time::Now();
  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
  ASSERT_EQ(2, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));

  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
  ASSERT_EQ(1, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("first", &entry));
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  BackendDoomBetween();
// In-memory backend.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
// Dooms two time ranges of a sparse, memory-only cache: first the
// [start, end] window, then everything from |start| to now.
TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
// Disk-backed counterpart of MemoryOnlyDoomEntriesBetweenSparse; counts
// differ because BackendImpl includes child entries in GetEntryCount().
TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
// Shared driver for the Recover* tests: loads a pre-built cache snapshot
// |name|, verifies the interrupted transaction is rolled back (the extra
// entry disappears, "the first key" is gone), and checks cache integrity.
// |num_entries| is the expected surviving count; |load| indicates a
// heavy-load snapshot where one extra dirty entry may also be dropped.
// NOTE(review): the |mask| setup and success_ bookkeeping appear to be
// elided from this listing.
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries, bool load) {
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  SetMaxSize(0x100000);

  // Clear the settings from the previous run.
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
// Runs BackendTransaction() over every "interrupted insert" snapshot:
// empty cache, one entry, and one hundred entries (tiny index).
void DiskCacheBackendTest::BackendRecoverInsert() {
  // Tests with an empty cache.
  BackendTransaction("insert_empty1", 0, false);
  ASSERT_TRUE(success_) << "insert_empty1";
  BackendTransaction("insert_empty2", 0, false);
  ASSERT_TRUE(success_) << "insert_empty2";
  BackendTransaction("insert_empty3", 0, false);
  ASSERT_TRUE(success_) << "insert_empty3";

  // Tests with one entry on the cache.
  BackendTransaction("insert_one1", 1, false);
  ASSERT_TRUE(success_) << "insert_one1";
  BackendTransaction("insert_one2", 1, false);
  ASSERT_TRUE(success_) << "insert_one2";
  BackendTransaction("insert_one3", 1, false);
  ASSERT_TRUE(success_) << "insert_one3";

  // Tests with one hundred entries on the cache, tiny index.
  BackendTransaction("insert_load1", 100, true);
  ASSERT_TRUE(success_) << "insert_load1";
  BackendTransaction("insert_load2", 100, true);
  ASSERT_TRUE(success_) << "insert_load2";
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  BackendRecoverInsert();
// Runs BackendTransaction() over every "interrupted remove" snapshot:
// only element, head, tail, heavy load, plus two non-revertible cases.
void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, RecoverRemove) {
  BackendRecoverRemove();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
  BackendRecoverRemove();
// Loads an interrupted-insert snapshot with a max size small enough to force
// eviction during recovery; the only requirement is that we do not crash.
void DiskCacheBackendTest::BackendRecoverWithEviction() {
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  // We should not crash here.
  DisableIntegrityCheck();
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  BackendRecoverWithEviction();
// Tests that the |BackendImpl| fails to start with the wrong cache version.
// Init() must report ERR_FAILED rather than silently using the stale files.
TEST_F(DiskCacheTest, WrongVersion) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path_, cache_thread.message_loop_proxy().get(), NULL));
  int rv = cache->Init(cb.callback());
  ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
// Entropy provider that forces deterministic field-trial group assignment,
// used to pin SimpleCacheTrial tests into a specific experiment group.
// NOTE(review): the access specifier and the method body/return value appear
// to be elided from this listing.
class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
  virtual ~BadEntropyProvider() {}

  virtual double GetEntropyForTrial(const std::string& trial_name,
                                    uint32 randomization_seed) const OVERRIDE {
// Tests that the disk cache successfully joins the control group, dropping the
// existing cache in favour of a new empty cache.
TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  // Seed the on-disk cache with one entry before joining the trial.
  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");
  net::TestCompletionCallback cb;
  scoped_ptr<disk_cache::Backend> base_cache;
  disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                 net::CACHE_BACKEND_BLOCKFILE,
                                 cache_thread.message_loop_proxy().get(),
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  // The pre-existing entry must have been dropped.
  EXPECT_EQ(0, base_cache->GetEntryCount());
// Tests that the disk cache can restart in the control group preserving
// existing entries.
TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());

  net::TestCompletionCallback cb;

  // Re-open the same backend several times; the seeded entry must survive.
  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    cache.reset(new disk_cache::BackendImpl(
        cache_path_, cache_thread.message_loop_proxy(), NULL));
    int rv = cache->Init(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_EQ(1, cache->GetEntryCount());

    disk_cache::Entry* entry = NULL;
    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
// Tests that the disk cache can leave the control group preserving existing
// entries.
TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  // First scope: create the cache while in the ExperimentControl group.
  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");

  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());

  // Second scope: re-open repeatedly while in the ExperimentNo group.
  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentNo group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
  net::TestCompletionCallback cb;

  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
        cache_path_, cache_thread.message_loop_proxy(), NULL));
    int rv = cache->Init(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_EQ(1, cache->GetEntryCount());

    disk_cache::Entry* entry = NULL;
    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
// Tests that the cache is properly restarted on recovery error.
// IO is disallowed on the test thread, so the (blocking) cleanup of the
// old files must happen on the cache thread.
TEST_F(DiskCacheBackendTest, DeleteOld) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  net::TestCompletionCallback cb;
  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  base::FilePath path(cache_path_);
  disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                 net::CACHE_BACKEND_BLOCKFILE,
                                 cache_thread.message_loop_proxy().get(),
  path.clear();  // Make sure path was captured by the previous call.
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);
  EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
// We want to be able to deal with messed up entries on disk.
// Opening the valid entry works; the corrupt one fails without crashing.
void DiskCacheBackendTest::BackendInvalidEntry2() {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  DisableFirstCleanup();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, InvalidEntry2) {
  BackendInvalidEntry2();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
  BackendInvalidEntry2();
// Tests that we don't crash or hang when enumerating this cache.
// The enumeration simply drains whatever entries are reachable.
void DiskCacheBackendTest::BackendInvalidEntry3() {
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();

  disk_cache::Entry* entry;
  while (OpenNextEntry(&iter, &entry) == net::OK) {
// Dirty-entry snapshot, old eviction.
TEST_F(DiskCacheBackendTest, InvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  BackendInvalidEntry3();
// Different snapshot for the new-eviction list layout; integrity check is
// disabled because the corrupt node remains.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  BackendInvalidEntry3();
  DisableIntegrityCheck();
// Test that we handle a dirty entry on the LRU list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
// Test that we handle a dirty entry on the deleted list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();

  TrimDeletedListForTest(false);
// A dirty entry that points to a deleted entry through the hash-collision
// list must not cause the deleted entry to be re-inserted into the index.
TEST_F(DiskCacheBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  // The cache should be clean (as detected by CheckCacheIntegrity).
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));

  ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());
// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry2) {
  ASSERT_TRUE(CopyTestCache("list_loop3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();

  // There is a wide loop of 5 entries.

  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
// A dirty entry in the middle of the new-eviction list: removing it must not
// corrupt the list, and the cache must survive (not be deleted).
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  DisableFirstCleanup();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));

  // This should not delete the cache.
  FlushQueueForTest();

  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
// Tests handling of corrupt entries by keeping the rankings node around, with
// a fatal failure: after the bad entry is detected the whole cache is
// restarted, because the rankings list still holds a corrupt node.
void DiskCacheBackendTest::BackendInvalidEntry7() {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->rankings()->Data()->next = 0;
  entry_impl->rankings()->Store();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should delete the cache. The list still has a corrupt node.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, InvalidEntry7) {
  BackendInvalidEntry7();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
  BackendInvalidEntry7();
// Tests handling of corrupt entries by keeping the rankings node around, with
// a non fatal failure: the bad entry is dropped but the rest of the cache
// (and a later enumeration) keeps working.
void DiskCacheBackendTest::BackendInvalidEntry8() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->rankings()->Data()->contents = 0;
  entry_impl->rankings()->Store();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should not delete the cache.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());
// Default blockfile backend.
TEST_F(DiskCacheBackendTest, InvalidEntry8) {
  BackendInvalidEntry8();
// Same body under the new-eviction policy.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
  BackendInvalidEntry8();
// Tests handling of corrupt entries detected by enumerations. Note that these
// tests (xx9 to xx11) are basically just going through slightly different
// codepaths so they are tightly coupled with the code, but that is better than
// not testing error handling code.
// |eviction| selects the trim path instead of the enumeration path.
void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // Eviction branch: trimming detects and drops the bad entry.
  EXPECT_EQ(1, cache_->GetEntryCount());
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should detect the problem through the list, but we should not delete
  // the entry, just fail the iteration.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

  // Now a full iteration will work, and return one entry.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

  // This should detect what's left of the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(2, cache_->GetEntryCount());

  DisableIntegrityCheck();
// Enumeration path, old eviction.
TEST_F(DiskCacheBackendTest, InvalidEntry9) {
  BackendInvalidEntry9(false);
// Enumeration path, new eviction.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
  BackendInvalidEntry9(false);
// Eviction (trim) path, old eviction.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
  BackendInvalidEntry9(true);
// Eviction (trim) path, new eviction.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
  BackendInvalidEntry9(true);
// Tests handling of corrupt entries detected by enumerations.
// Here the corrupt entry sits on list 0 while a valid entry was promoted,
// exercising detection from both the eviction and enumeration sides.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  EXPECT_EQ(3, cache_->GetEntryCount());

  // List 0: third -> second (bad).

  // Detection order: second -> first -> third.
  EXPECT_EQ(3, cache_->GetEntryCount());
  EXPECT_EQ(2, cache_->GetEntryCount());
  EXPECT_EQ(1, cache_->GetEntryCount());

  // Detection order: third -> second -> first.
  // We should detect the problem through the list, but we should not delete
  // the entry, just fail the iteration.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  EXPECT_EQ(first, entry->GetKey());
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

  DisableIntegrityCheck();
2325 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2326 BackendInvalidEntry10(false);
2329 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2330 BackendInvalidEntry10(true);
2333 // Tests handling of corrupt entries detected by enumerations.
// Same scheme as BackendInvalidEntry10, but the corrupted entry has been
// re-opened (so it sits on list 1), and a full iteration afterwards returns
// two entries instead of one.
// NOTE(review): embedded line numbers are non-contiguous — several
// statements (Close() calls, iterator setup) are elided from this listing.
2334 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2335 const int kSize = 0x3000; // 12 kB.
2336 SetMaxSize(kSize * 10);
2340 std::string first("some key");
2341 std::string second("something else");
2342 disk_cache::Entry* entry;
2343 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2345 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2346 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2348 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2350 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2351 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2353 // Corrupt this entry.
2354 disk_cache::EntryImpl* entry_impl =
2355 static_cast<disk_cache::EntryImpl*>(entry);
2357 entry_impl->entry()->Data()->state = 0xbad;
2358 entry_impl->entry()->Store();
2360 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2362 FlushQueueForTest();
2363 EXPECT_EQ(3, cache_->GetEntryCount());
2367 // List 1: second (bad) -> first.
2370 // Detection order: third -> first -> second.
2372 EXPECT_EQ(2, cache_->GetEntryCount());
2374 EXPECT_EQ(1, cache_->GetEntryCount());
2376 EXPECT_EQ(1, cache_->GetEntryCount());
2378 // Detection order: third -> second.
2379 // We should detect the problem through the list, but we should not delete
2380 // the entry, just fail the iteration.
2382 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2384 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2386 // Now a full iteration will work, and return two entries.
2387 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2389 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2391 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2393 DisableIntegrityCheck();
// Instantiations: plain and trim-triggered detection.
2396 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2397 BackendInvalidEntry11(false);
2400 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2401 BackendInvalidEntry11(true);
2404 // Tests handling of corrupt entries in the middle of a long eviction run.
// Corrupts the second of four entries and expects the eviction run to
// discard everything but one entry without crashing.
// NOTE(review): embedded line numbers are non-contiguous — setup/Close()
// statements appear elided from this listing; confirm against upstream.
2405 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2406 const int kSize = 0x3000; // 12 kB
2407 SetMaxSize(kSize * 10);
2410 std::string first("some key");
2411 std::string second("something else");
2412 disk_cache::Entry* entry;
2413 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2415 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2417 // Corrupt this entry.
2418 disk_cache::EntryImpl* entry_impl =
2419 static_cast<disk_cache::EntryImpl*>(entry);
2421 entry_impl->entry()->Data()->state = 0xbad;
2422 entry_impl->entry()->Store();
2424 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2426 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2428 EXPECT_EQ(1, cache_->GetEntryCount());
2430 DisableIntegrityCheck();
// Instantiations: default and (presumably, after SetNewEviction — elided
// here) new-eviction mode.
2433 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2434 BackendTrimInvalidEntry12();
2437 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2439 BackendTrimInvalidEntry12();
2442 // We want to be able to deal with messed up entries on disk.
// Loads a pre-corrupted "bad_rankings" cache copy: opening the entry whose
// rankings are bad must fail, while a healthy entry still opens.
2443 void DiskCacheBackendTest::BackendInvalidRankings2() {
2444 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2445 DisableFirstCleanup();
2448 disk_cache::Entry *entry1, *entry2;
2449 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2450 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2453 // CheckCacheIntegrity will fail at this point.
2454 DisableIntegrityCheck();
// Instantiations: default and new-eviction mode.
2457 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2458 BackendInvalidRankings2();
2461 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2463 BackendInvalidRankings2();
2466 // If the LRU is corrupt, we delete the cache.
// Iteration over the corrupt LRU fails mid-way; after the queue is flushed
// (allowing the internal restart to finish) the cache has been wiped.
2467 void DiskCacheBackendTest::BackendInvalidRankings() {
2468 disk_cache::Entry* entry;
2470 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2472 EXPECT_EQ(2, cache_->GetEntryCount());
2474 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2475 FlushQueueForTest(); // Allow the restart to finish.
2476 EXPECT_EQ(0, cache_->GetEntryCount());
// Instantiations over the "bad_rankings" fixture: success paths and
// failure paths (SetTestMode makes the cache reinitialization fail).
2479 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2480 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2481 DisableFirstCleanup();
2483 BackendInvalidRankings();
2486 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2487 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2488 DisableFirstCleanup();
2491 BackendInvalidRankings();
2494 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2495 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2496 DisableFirstCleanup();
2498 SetTestMode(); // Fail cache reinitialization.
2499 BackendInvalidRankings();
2502 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2503 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2504 DisableFirstCleanup();
2507 SetTestMode(); // Fail cache reinitialization.
2508 BackendInvalidRankings();
2511 // If the LRU is corrupt and we have open entries, we disable the cache.
// With an entry held open, the failed iteration disables the cache:
// creates are rejected and the count drops to zero after the restart.
2512 void DiskCacheBackendTest::BackendDisable() {
2513 disk_cache::Entry *entry1, *entry2;
2515 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2517 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2518 EXPECT_EQ(0, cache_->GetEntryCount());
2519 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2522 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2523 FlushQueueForTest(); // This one actually allows that task to complete.
2525 EXPECT_EQ(0, cache_->GetEntryCount());
// Instantiations over "bad_rankings": success and reinit-failure paths.
// NOTE(review): the BackendDisable() call lines appear elided from this
// listing (embedded line numbers skip); confirm against upstream.
2528 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2529 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2530 DisableFirstCleanup();
2535 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2536 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2537 DisableFirstCleanup();
2543 TEST_F(DiskCacheBackendTest, DisableFailure) {
2544 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2545 DisableFirstCleanup();
2547 SetTestMode(); // Fail cache reinitialization.
2551 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2552 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2553 DisableFirstCleanup();
2556 SetTestMode(); // Fail cache reinitialization.
2560 // This is another type of corruption on the LRU; disable the cache.
// Uses the "list_loop" fixture (a cyclic rankings list): enumeration must
// terminate before visiting 9 entries, then the cache is wiped.
2561 void DiskCacheBackendTest::BackendDisable2() {
2562 EXPECT_EQ(8, cache_->GetEntryCount());
2564 disk_cache::Entry* entry;
2567 while (OpenNextEntry(&iter, &entry) == net::OK) {
2568 ASSERT_TRUE(NULL != entry);
// Guard against iterating the looped list forever.
2571 ASSERT_LT(count, 9);
2574 FlushQueueForTest();
2575 EXPECT_EQ(0, cache_->GetEntryCount());
// Instantiations over "list_loop": success and reinit-failure paths.
2578 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2579 ASSERT_TRUE(CopyTestCache("list_loop"));
2580 DisableFirstCleanup();
2585 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2586 ASSERT_TRUE(CopyTestCache("list_loop"));
2587 DisableFirstCleanup();
2593 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2594 ASSERT_TRUE(CopyTestCache("list_loop"));
2595 DisableFirstCleanup();
2597 SetTestMode(); // Fail cache reinitialization.
2601 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2602 ASSERT_TRUE(CopyTestCache("list_loop"));
2603 DisableFirstCleanup();
2606 SetTestMode(); // Fail cache reinitialization.
2610 // If the index size changes when we disable the cache, we should not crash.
// "bad_rankings2" has a different index size; after the disable/restart a
// new entry can still be created.
2611 void DiskCacheBackendTest::BackendDisable3() {
2612 disk_cache::Entry *entry1, *entry2;
2614 EXPECT_EQ(2, cache_->GetEntryCount());
2615 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2618 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2619 FlushQueueForTest();
2621 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2624 EXPECT_EQ(1, cache_->GetEntryCount());
// Instantiations over "bad_rankings2" with a 20 MB cap.
2627 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2628 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2629 DisableFirstCleanup();
2630 SetMaxSize(20 * 1024 * 1024);
2635 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2636 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2637 DisableFirstCleanup();
2638 SetMaxSize(20 * 1024 * 1024);
2644 // If we disable the cache, already open entries should work as far as possible.
// Entries opened before the disable keep supporting reads/writes/GetKey;
// new creates are rejected, and the cache ends up empty after restart.
2645 void DiskCacheBackendTest::BackendDisable4() {
2646 disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2648 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
// key2/key3 are random buffers, NUL-terminated to be usable as C strings.
2652 CacheTestFillBuffer(key2, sizeof(key2), true);
2653 CacheTestFillBuffer(key3, sizeof(key3), true);
2654 key2[sizeof(key2) - 1] = '\0';
2655 key3[sizeof(key3) - 1] = '\0';
2656 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2657 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2659 const int kBufSize = 20000;
2660 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2661 memset(buf->data(), 0, kBufSize);
2662 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2663 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2665 // This line should disable the cache but not delete it.
2666 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
2667 EXPECT_EQ(0, cache_->GetEntryCount());
2669 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
// I/O on the already-open entries still succeeds while disabled.
2671 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2672 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2673 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2675 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2676 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2677 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2679 std::string key = entry2->GetKey();
2680 EXPECT_EQ(sizeof(key2) - 1, key.size());
2681 key = entry3->GetKey();
2682 EXPECT_EQ(sizeof(key3) - 1, key.size());
2687 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2688 FlushQueueForTest(); // This one actually allows that task to complete.
2690 EXPECT_EQ(0, cache_->GetEntryCount());
// Instantiations over "bad_rankings".
2693 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2694 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2695 DisableFirstCleanup();
2700 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2701 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2702 DisableFirstCleanup();
// Creates a backend on the current message loop in unit-test mode and spins
// the loop so the internal usage-stats timer gets a chance to fire; the
// "callback" waited on never comes (~2 s timeout by design).
2708 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2709 MessageLoopHelper helper;
2711 ASSERT_TRUE(CleanupCacheDir());
2712 scoped_ptr<disk_cache::BackendImpl> cache;
2713 cache.reset(new disk_cache::BackendImpl(
2714 cache_path_, base::MessageLoopProxy::current().get(), NULL));
2715 ASSERT_TRUE(NULL != cache.get());
2716 cache->SetUnitTestMode();
2717 ASSERT_EQ(net::OK, cache->SyncInit());
2719 // Wait for a callback that never comes... about 2 secs :). The message loop
2720 // has to run to allow invocation of the usage timer.
2721 helper.WaitUntilCacheIoFinished(1);
// Verifies that backend stats ("Create hit" counter) are recorded and that
// they survive a close/reopen of the cache.
2724 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2726 disk_cache::Entry* entry;
2727 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2729 FlushQueueForTest();
2731 disk_cache::StatsItems stats;
2732 cache_->GetStats(&stats);
2733 EXPECT_FALSE(stats.empty());
2735 disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2736 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2740 // Now open the cache and verify that the stats are still there.
2741 DisableFirstCleanup();
2743 EXPECT_EQ(1, cache_->GetEntryCount());
2746 cache_->GetStats(&stats);
2747 EXPECT_FALSE(stats.empty());
2749 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
// DoomAllEntries() must empty the cache whether entries are open or not,
// must be callable repeatedly, and Doom() on an already-doomed entry must
// still work.
2752 void DiskCacheBackendTest::BackendDoomAll() {
2755 disk_cache::Entry *entry1, *entry2;
2756 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2757 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2761 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2762 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2764 ASSERT_EQ(4, cache_->GetEntryCount());
2765 EXPECT_EQ(net::OK, DoomAllEntries());
2766 ASSERT_EQ(0, cache_->GetEntryCount());
2768 // We should stop posting tasks at some point (if we post any).
2769 base::MessageLoop::current()->RunUntilIdle();
2771 disk_cache::Entry *entry3, *entry4;
2772 EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2773 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2774 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2776 EXPECT_EQ(net::OK, DoomAllEntries());
2777 ASSERT_EQ(0, cache_->GetEntryCount());
2781 entry3->Doom(); // The entry should be already doomed, but this must work.
2785 // Now try with all references released.
2786 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2787 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2791 ASSERT_EQ(2, cache_->GetEntryCount());
2792 EXPECT_EQ(net::OK, DoomAllEntries());
2793 ASSERT_EQ(0, cache_->GetEntryCount());
// Dooming an already empty cache must also succeed.
2795 EXPECT_EQ(net::OK, DoomAllEntries());
// Instantiations across backends/cache types.
// NOTE(review): the BackendDoomAll() call lines appear elided from this
// listing (embedded line numbers skip); confirm against upstream.
2798 TEST_F(DiskCacheBackendTest, DoomAll) {
2802 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2807 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2808 SetMemoryOnlyMode();
2812 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2813 SetCacheType(net::APP_CACHE);
2817 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2818 SetCacheType(net::SHADER_CACHE);
2822 // If the index size changes when we doom the cache, we should not crash.
// Runs over the "bad_rankings2" fixture (different index size): DoomAll
// succeeds and a new entry can then be created.
2823 void DiskCacheBackendTest::BackendDoomAll2() {
2824 EXPECT_EQ(2, cache_->GetEntryCount());
2825 EXPECT_EQ(net::OK, DoomAllEntries());
2827 disk_cache::Entry* entry;
2828 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2831 EXPECT_EQ(1, cache_->GetEntryCount());
// Instantiations over "bad_rankings2" with a 20 MB cap.
2834 TEST_F(DiskCacheBackendTest, DoomAll2) {
2835 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2836 DisableFirstCleanup();
2837 SetMaxSize(20 * 1024 * 1024);
2842 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2843 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2844 DisableFirstCleanup();
2845 SetMaxSize(20 * 1024 * 1024);
2851 // We should be able to create the same entry on multiple simultaneous instances
// Two backends (DISK_CACHE and MEDIA_CACHE) in separate temp dirs each
// create an entry under the same key without interfering.
2853 TEST_F(DiskCacheTest, MultipleInstances) {
2854 base::ScopedTempDir store1, store2;
2855 ASSERT_TRUE(store1.CreateUniqueTempDir());
2856 ASSERT_TRUE(store2.CreateUniqueTempDir());
2858 base::Thread cache_thread("CacheThread");
2859 ASSERT_TRUE(cache_thread.StartWithOptions(
2860 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2861 net::TestCompletionCallback cb;
2863 const int kNumberOfCaches = 2;
2864 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
// NOTE(review): several CreateCacheBackend argument lines are elided from
// this listing (embedded line numbers skip); confirm against upstream.
2867 disk_cache::CreateCacheBackend(net::DISK_CACHE,
2868 net::CACHE_BACKEND_DEFAULT,
2872 cache_thread.message_loop_proxy().get(),
2876 ASSERT_EQ(net::OK, cb.GetResult(rv));
2877 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2878 net::CACHE_BACKEND_DEFAULT,
2882 cache_thread.message_loop_proxy().get(),
2886 ASSERT_EQ(net::OK, cb.GetResult(rv));
2888 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2890 std::string key("the first key");
2891 disk_cache::Entry* entry;
2892 for (int i = 0; i < kNumberOfCaches; i++) {
2893 rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2894 ASSERT_EQ(net::OK, cb.GetResult(rv));
2899 // Test the six regions of the curve that determines the max cache size.
// PreferedCacheSize maps available disk space to a cache budget:
//   1) 80% of available, 2) flat default, 3) 10% of available,
//   4) flat 2.5x default, 5) 10% of available again, 6) capped at kint32max.
// Each region is probed at both edges.
2900 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2901 const int kDefaultSize = 80 * 1024 * 1024;
2902 int64 large_size = kDefaultSize;
2903 int64 largest_size = kint32max;
2905 // Region 1: expected = available * 0.8
2906 EXPECT_EQ((kDefaultSize - 1) * 8 / 10,
2907 disk_cache::PreferedCacheSize(large_size - 1));
2908 EXPECT_EQ(kDefaultSize * 8 / 10,
2909 disk_cache::PreferedCacheSize(large_size));
2910 EXPECT_EQ(kDefaultSize - 1,
2911 disk_cache::PreferedCacheSize(large_size * 10 / 8 - 1));
2913 // Region 2: expected = default_size
2914 EXPECT_EQ(kDefaultSize,
2915 disk_cache::PreferedCacheSize(large_size * 10 / 8));
2916 EXPECT_EQ(kDefaultSize,
2917 disk_cache::PreferedCacheSize(large_size * 10 - 1));
2919 // Region 3: expected = available * 0.1
2920 EXPECT_EQ(kDefaultSize,
2921 disk_cache::PreferedCacheSize(large_size * 10));
2922 EXPECT_EQ((kDefaultSize * 25 - 1) / 10,
2923 disk_cache::PreferedCacheSize(large_size * 25 - 1));
2925 // Region 4: expected = default_size * 2.5
2926 EXPECT_EQ(kDefaultSize * 25 / 10,
2927 disk_cache::PreferedCacheSize(large_size * 25));
2928 EXPECT_EQ(kDefaultSize * 25 / 10,
2929 disk_cache::PreferedCacheSize(large_size * 100 - 1));
2930 EXPECT_EQ(kDefaultSize * 25 / 10,
2931 disk_cache::PreferedCacheSize(large_size * 100));
2932 EXPECT_EQ(kDefaultSize * 25 / 10,
2933 disk_cache::PreferedCacheSize(large_size * 250 - 1));
2935 // Region 5: expected = available * 0.1
2936 EXPECT_EQ(kDefaultSize * 25 / 10,
2937 disk_cache::PreferedCacheSize(large_size * 250));
2938 EXPECT_EQ(kint32max - 1,
2939 disk_cache::PreferedCacheSize(largest_size * 100 - 1));
2941 // Region 6: expected = kint32max
2942 EXPECT_EQ(kint32max,
2943 disk_cache::PreferedCacheSize(largest_size * 100));
2944 EXPECT_EQ(kint32max,
2945 disk_cache::PreferedCacheSize(largest_size * 10000));
2948 // Tests that we can "migrate" a running instance from one experiment group to
// Exercises the CACHE_UMA macro with different experiment group indices.
2950 TEST_F(DiskCacheBackendTest, Histograms) {
2952 disk_cache::BackendImpl* backend_ = cache_impl_; // Needed be the macro.
2954 for (int i = 1; i < 3; i++) {
2955 CACHE_UMA(HOURS, "FillupTime", i, 28);
2959 // Make sure that we keep the total memory used by the internal buffers under
// Repeatedly grows and truncates an entry's in-memory buffers; at the end
// the backend must report zero total buffer usage.
2961 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
2963 std::string key("the first key");
2964 disk_cache::Entry* entry;
2965 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2967 const int kSize = 200;
2968 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
2969 CacheTestFillBuffer(buffer->data(), kSize, true);
2971 for (int i = 0; i < 10; i++) {
2973 // Allocate 2MB for this entry.
2974 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
2975 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
2977 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
2979 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
2981 // Delete one of the buffers and truncate the other.
2982 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
2983 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
2985 // Delete the second buffer, writing 10 bytes to disk.
2987 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2991 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
2994 // This test assumes at least 150MB of system memory.
// Directly exercises the buffer-accounting API: IsAllocAllowed grows the
// tracked total, BufferDeleted shrinks it, and a hard upper limit is
// enforced after enough allocations.
2995 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
2998 const int kOneMB = 1024 * 1024;
2999 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3000 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3002 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3003 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3005 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3006 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
3008 cache_impl_->BufferDeleted(kOneMB);
3009 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3011 // Check the upper limit.
3012 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3014 for (int i = 0; i < 30; i++)
3015 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
3017 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
3020 // Tests that sharing of external files works and we are able to delete the
3021 // files when we need to.
// On Windows, a second open must fail without FILE_SHARE_DELETE and succeed
// with it; the file stays usable through our handle even after deletion.
3022 TEST_F(DiskCacheBackendTest, FileSharing) {
3025 disk_cache::Addr address(0x80000001);
3026 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3027 base::FilePath name = cache_impl_->GetFileName(address);
3029 scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
// Windows-only sharing-mode checks (the OS_WIN guard is elided from this
// listing — the CreateFile/ScopedHandle calls below are Win32 API).
3033 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3034 DWORD access = GENERIC_READ | GENERIC_WRITE;
3035 base::win::ScopedHandle file2(CreateFile(
3036 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3037 EXPECT_FALSE(file2.IsValid());
3039 sharing |= FILE_SHARE_DELETE;
3040 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3041 OPEN_EXISTING, 0, NULL));
3042 EXPECT_TRUE(file2.IsValid());
3045 EXPECT_TRUE(base::DeleteFile(name, false));
3047 // We should be able to use the file.
3048 const int kSize = 200;
3049 char buffer1[kSize];
3050 char buffer2[kSize];
3051 memset(buffer1, 't', kSize);
3052 memset(buffer2, 0, kSize);
3053 EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3054 EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3055 EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
3057 EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
// OnExternalCacheHit must refresh an entry's rank so that, after the cache
// is squeezed to one entry, the pinged (older) key survives eviction.
3060 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3063 disk_cache::Entry* entry;
3065 for (int i = 0; i < 2; ++i) {
3066 std::string key = base::StringPrintf("key%d", i);
3067 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3071 // Ping the oldest entry.
3072 cache_->OnExternalCacheHit("key0");
3076 // Make sure the older key remains.
3077 EXPECT_EQ(1, cache_->GetEntryCount());
3078 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
// Same scenario with the shader-cache eviction policy.
3082 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3083 SetCacheType(net::SHADER_CACHE);
3086 disk_cache::Entry* entry;
3088 for (int i = 0; i < 2; ++i) {
3089 std::string key = base::StringPrintf("key%d", i);
3090 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3094 // Ping the oldest entry.
3095 cache_->OnExternalCacheHit("key0");
3099 // Make sure the older key remains.
3100 EXPECT_EQ(1, cache_->GetEntryCount());
3101 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
// Wraps the current backend in a TracingCacheBackend and checks that the
// basic operations (type, count, open-miss, create, open-hit) pass through.
3105 void DiskCacheBackendTest::TracingBackendBasics() {
3107 cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass()));
3109 EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
// GetEntryCount is only meaningful on the non-simple backend here.
3110 if (!simple_cache_mode_) {
3111 EXPECT_EQ(0, cache_->GetEntryCount());
3114 net::TestCompletionCallback cb;
3115 disk_cache::Entry* entry = NULL;
3116 EXPECT_NE(net::OK, OpenEntry("key", &entry));
3117 EXPECT_TRUE(NULL == entry);
3119 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3120 EXPECT_TRUE(NULL != entry);
3122 disk_cache::Entry* same_entry = NULL;
3123 ASSERT_EQ(net::OK, OpenEntry("key", &same_entry));
3124 EXPECT_TRUE(NULL != same_entry);
3126 if (!simple_cache_mode_) {
3127 EXPECT_EQ(1, cache_->GetEntryCount());
3131 same_entry->Close();
// Default-backend instantiation of the helper above.
3135 TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
3136 TracingBackendBasics();
3139 // The Simple Cache backend requires a few guarantees from the filesystem like
3140 // atomic renaming of recently open files. Those guarantees are not provided in
3141 // general on Windows.
3142 #if defined(OS_POSIX)
// Thin wrappers that re-run the generic backend tests against the Simple
// Cache backend (and the APP_CACHE variant where relevant).
// NOTE(review): the shared-helper call line inside several of these tests
// is elided from this listing (embedded line numbers skip); confirm
// against upstream.
3144 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3145 SetCacheType(net::APP_CACHE);
3146 SetSimpleCacheMode();
3147 BackendShutdownWithPendingCreate(false);
3150 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3151 SetCacheType(net::APP_CACHE);
3152 SetSimpleCacheMode();
3153 BackendShutdownWithPendingFileIO(false);
3156 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3157 SetSimpleCacheMode();
3161 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3162 SetCacheType(net::APP_CACHE);
3163 SetSimpleCacheMode();
3167 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3168 SetSimpleCacheMode();
3172 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3173 SetSimpleCacheMode();
3174 SetCacheType(net::APP_CACHE);
3178 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3179 SetSimpleCacheMode();
3183 // MacOS has a default open file limit of 256 files, which is incompatible with
3184 // this simple cache test.
3185 #if defined(OS_MACOSX)
3186 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3188 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3191 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3192 SetMaxSize(0x100000);
3193 SetSimpleCacheMode();
3197 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3198 SetCacheType(net::APP_CACHE);
3199 SetSimpleCacheMode();
3200 SetMaxSize(0x100000);
3204 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3205 SetSimpleCacheMode();
3206 BackendDoomRecent();
3209 TEST_F(DiskCacheBackendTest, SimpleDoomBetween) {
3210 SetSimpleCacheMode();
3211 BackendDoomBetween();
3214 TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3215 SetSimpleCacheMode();
3219 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3220 SetCacheType(net::APP_CACHE);
3221 SetSimpleCacheMode();
3225 TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
3226 SetSimpleCacheMode();
3227 TracingBackendBasics();
3228 // TODO(pasko): implement integrity checking on the Simple Backend.
3229 DisableIntegrityCheck();
// If one of an entry's on-disk files is deleted, opening the entry must
// fail and the backend must clean up the remaining files.
3232 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3233 SetSimpleCacheMode();
3236 const char* key = "the first key";
3237 disk_cache::Entry* entry = NULL;
3239 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3240 ASSERT_TRUE(entry != NULL);
3244 // To make sure the file creation completed we need to call open again so that
3245 // we block until it actually created the files.
3246 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3247 ASSERT_TRUE(entry != NULL);
3251 // Delete one of the files in the entry.
3252 base::FilePath to_delete_file = cache_path_.AppendASCII(
3253 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3254 EXPECT_TRUE(base::PathExists(to_delete_file));
3255 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3257 // Failing to open the entry should delete the rest of these files.
3258 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3260 // Confirm the rest of the files are gone.
3261 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3262 base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3263 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3264 EXPECT_FALSE(base::PathExists(should_be_gone_file));
// An entry file whose header magic is invalid must make OpenEntry fail.
3268 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3269 SetSimpleCacheMode();
3272 const char* key = "the first key";
3273 disk_cache::Entry* entry = NULL;
3275 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3276 disk_cache::Entry* null = NULL;
3277 ASSERT_NE(null, entry);
3281 // To make sure the file creation completed we need to call open again so that
3282 // we block until it actually created the files.
3283 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3284 ASSERT_NE(null, entry);
3288 // Write an invalid header for stream 0 and stream 1.
3289 base::FilePath entry_file1_path = cache_path_.AppendASCII(
3290 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3292 disk_cache::SimpleFileHeader header;
3293 header.initial_magic_number = GG_UINT64_C(0xbadf00d);
// NOTE(review): the WriteFile call below appears garbled in this listing —
// the size argument line precedes the call line (embedded line numbers
// skip); confirm argument order against upstream.
3295 implicit_cast<int>(sizeof(header)),
3296 file_util::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3298 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3301 // Tests that the Simple Cache Backend fails to initialize with non-matching
3302 // file structure on disk.
// Builds a blockfile-backend cache on disk, then expects SimpleBackendImpl
// to refuse to initialize over it.
3303 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3304 // Create a cache structure with the |BackendImpl|.
3306 disk_cache::Entry* entry;
3307 const int kSize = 50;
3308 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3309 CacheTestFillBuffer(buffer->data(), kSize, false);
3310 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3311 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3315 // Check that the |SimpleBackendImpl| does not favor this structure.
3316 base::Thread cache_thread("CacheThread");
3317 ASSERT_TRUE(cache_thread.StartWithOptions(
3318 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3319 disk_cache::SimpleBackendImpl* simple_cache =
3320 new disk_cache::SimpleBackendImpl(cache_path_,
3323 cache_thread.message_loop_proxy().get(),
3325 net::TestCompletionCallback cb;
3326 int rv = simple_cache->Init(cb.callback());
3327 EXPECT_NE(net::OK, cb.GetResult(rv));
3328 delete simple_cache;
3329 DisableIntegrityCheck();
3332 // Tests that the |BackendImpl| refuses to initialize on top of the files
3333 // generated by the Simple Cache Backend.
// Mirror of the previous test: simple-cache files first, then BackendImpl
// initialization must fail.
3334 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3335 // Create a cache structure with the |SimpleBackendImpl|.
3336 SetSimpleCacheMode();
3338 disk_cache::Entry* entry;
3339 const int kSize = 50;
3340 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3341 CacheTestFillBuffer(buffer->data(), kSize, false);
3342 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3343 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3347 // Check that the |BackendImpl| does not favor this structure.
3348 base::Thread cache_thread("CacheThread");
3349 ASSERT_TRUE(cache_thread.StartWithOptions(
3350 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3351 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3352 cache_path_, base::MessageLoopProxy::current().get(), NULL);
3353 cache->SetUnitTestMode();
3354 net::TestCompletionCallback cb;
3355 int rv = cache->Init(cb.callback());
3356 EXPECT_NE(net::OK, cb.GetResult(rv));
3358 DisableIntegrityCheck();
// Re-runs the enumerator-fixing backend test on the Simple Cache.
3361 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3362 SetSimpleCacheMode();
3363 BackendFixEnumerators();
3366 // Tests basic functionality of the SimpleBackend implementation of the
// Enumeration must return every created entry exactly once, and holding
// entries open (before or in the middle of the enumeration) must not
// change the result.
3368 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3369 SetSimpleCacheMode();
3371 std::set<std::string> key_pool;
3372 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3374 // Check that enumeration returns all entries.
3375 std::set<std::string> keys_to_match(key_pool);
3378 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3379 cache_->EndEnumeration(&iter);
3380 EXPECT_EQ(key_pool.size(), count);
3381 EXPECT_TRUE(keys_to_match.empty());
3383 // Check that opening entries does not affect enumeration.
3384 keys_to_match = key_pool;
3387 disk_cache::Entry* entry_opened_before;
3388 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3389 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3394 disk_cache::Entry* entry_opened_middle;
3396 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3397 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3398 cache_->EndEnumeration(&iter);
3399 entry_opened_before->Close();
3400 entry_opened_middle->Close();
3402 EXPECT_EQ(key_pool.size(), count);
3403 EXPECT_TRUE(keys_to_match.empty());
3406 // Tests that the enumerations are not affected by dooming an entry in the
// Dooms one not-yet-enumerated entry mid-enumeration; the remaining keys
// must still all be returned exactly once.
3408 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3409 SetSimpleCacheMode();
3411 std::set<std::string> key_pool;
3412 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3414 // Check that enumeration returns all entries but the doomed one.
3415 std::set<std::string> keys_to_match(key_pool);
3418 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3423 std::string key_to_delete = *(keys_to_match.begin());
3424 DoomEntry(key_to_delete);
3425 keys_to_match.erase(key_to_delete);
3426 key_pool.erase(key_to_delete);
3427 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3428 cache_->EndEnumeration(&iter);
3430 EXPECT_EQ(key_pool.size(), count);
3431 EXPECT_TRUE(keys_to_match.empty());
3434 // Tests that enumerations are not affected by corrupt files.
// One entry's platform file is corrupted after creation; enumeration must
// return exactly the healthy key pool and skip the corrupt entry.
3435 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3436 SetSimpleCacheMode();
3438 std::set<std::string> key_pool;
3439 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3441 // Create a corrupt entry. The write/read sequence ensures that the entry will
3442 // have been created before corrupting the platform files, in the case of
3443 // optimistic operations.
3444 const std::string key = "the key";
3445 disk_cache::Entry* corrupted_entry;
3447 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3448 ASSERT_TRUE(corrupted_entry);
3449 const int kSize = 50;
3450 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3451 CacheTestFillBuffer(buffer->data(), kSize, false);
3453 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3454 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3455 corrupted_entry->Close();
3457 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3459 EXPECT_EQ(key_pool.size() + 1,
3460 implicit_cast<size_t>(cache_->GetEntryCount()));
3462 // Check that enumeration returns all entries but the corrupt one.
3463 std::set<std::string> keys_to_match(key_pool);
3466 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3467 cache_->EndEnumeration(&iter);
3469 EXPECT_EQ(key_pool.size(), count);
3470 EXPECT_TRUE(keys_to_match.empty());
3473 #endif // defined(OS_POSIX)