path: root/net/disk_cache
author    Patrick Scott <phanna@android.com>  2010-02-04 10:37:17 -0500
committer Patrick Scott <phanna@android.com>  2010-02-04 10:39:42 -0500
commit    c7f5f8508d98d5952d42ed7648c2a8f30a4da156 (patch)
tree      dd51dbfbf6670daa61279b3a19e7b1835b301dbf /net/disk_cache
parent    139d8152182f9093f03d9089822b688e49fa7667 (diff)
download  chromium-c7f5f8508d98d5952d42ed7648c2a8f30a4da156.tar.gz
Initial source checkin.
The source files were determined by building net_unittests in Chromium's source tree. Some of the obvious libraries were left out (v8, gmock, gtest). The Android.mk file has all the sources (minus unittests and tools) that were used during net_unittests compilation. Nothing builds yet because of STL, but that is the next task. The .cpp files will most likely not compile anyway because of the LOCAL_CPP_EXTENSION mod. I will have to break this into multiple projects to get around that limitation.
Diffstat (limited to 'net/disk_cache')
-rw-r--r--  net/disk_cache/addr.cc | 29
-rw-r--r--  net/disk_cache/addr.h | 137
-rw-r--r--  net/disk_cache/addr_unittest.cc | 37
-rw-r--r--  net/disk_cache/backend_impl.cc | 1684
-rw-r--r--  net/disk_cache/backend_impl.h | 314
-rw-r--r--  net/disk_cache/backend_unittest.cc | 1581
-rw-r--r--  net/disk_cache/bitmap.cc | 284
-rw-r--r--  net/disk_cache/bitmap.h | 153
-rw-r--r--  net/disk_cache/bitmap_unittest.cc | 293
-rw-r--r--  net/disk_cache/block_files.cc | 488
-rw-r--r--  net/disk_cache/block_files.h | 88
-rw-r--r--  net/disk_cache/block_files_unittest.cc | 206
-rw-r--r--  net/disk_cache/cache_util.h | 29
-rw-r--r--  net/disk_cache/cache_util_posix.cc | 37
-rw-r--r--  net/disk_cache/cache_util_win.cc | 64
-rw-r--r--  net/disk_cache/disk_cache.h | 354
-rw-r--r--  net/disk_cache/disk_cache_perftest.cc | 243
-rw-r--r--  net/disk_cache/disk_cache_test_base.cc | 113
-rw-r--r--  net/disk_cache/disk_cache_test_base.h | 98
-rw-r--r--  net/disk_cache/disk_cache_test_util.cc | 163
-rw-r--r--  net/disk_cache/disk_cache_test_util.h | 101
-rw-r--r--  net/disk_cache/disk_format.h | 266
-rw-r--r--  net/disk_cache/entry_impl.cc | 932
-rw-r--r--  net/disk_cache/entry_impl.h | 197
-rw-r--r--  net/disk_cache/entry_unittest.cc | 1401
-rw-r--r--  net/disk_cache/errors.h | 30
-rw-r--r--  net/disk_cache/eviction.cc | 488
-rw-r--r--  net/disk_cache/eviction.h | 81
-rw-r--r--  net/disk_cache/file.h | 94
-rw-r--r--  net/disk_cache/file_block.h | 31
-rw-r--r--  net/disk_cache/file_lock.cc | 27
-rw-r--r--  net/disk_cache/file_lock.h | 45
-rw-r--r--  net/disk_cache/file_posix.cc | 379
-rw-r--r--  net/disk_cache/file_win.cc | 287
-rw-r--r--  net/disk_cache/hash.cc | 67
-rw-r--r--  net/disk_cache/hash.h | 30
-rw-r--r--  net/disk_cache/histogram_macros.h | 62
-rw-r--r--  net/disk_cache/mapped_file.h | 56
-rw-r--r--  net/disk_cache/mapped_file_posix.cc | 55
-rw-r--r--  net/disk_cache/mapped_file_unittest.cc | 121
-rw-r--r--  net/disk_cache/mapped_file_win.cc | 55
-rw-r--r--  net/disk_cache/mem_backend_impl.cc | 318
-rw-r--r--  net/disk_cache/mem_backend_impl.h | 102
-rw-r--r--  net/disk_cache/mem_entry_impl.cc | 487
-rw-r--r--  net/disk_cache/mem_entry_impl.h | 167
-rw-r--r--  net/disk_cache/mem_rankings.cc | 67
-rw-r--r--  net/disk_cache/mem_rankings.h | 44
-rw-r--r--  net/disk_cache/rankings.cc | 804
-rw-r--r--  net/disk_cache/rankings.h | 205
-rw-r--r--  net/disk_cache/sparse_control.cc | 744
-rw-r--r--  net/disk_cache/sparse_control.h | 177
-rw-r--r--  net/disk_cache/stats.cc | 321
-rw-r--r--  net/disk_cache/stats.h | 94
-rw-r--r--  net/disk_cache/stats_histogram.cc | 86
-rw-r--r--  net/disk_cache/stats_histogram.h | 57
-rw-r--r--  net/disk_cache/storage_block-inl.h | 160
-rw-r--r--  net/disk_cache/storage_block.h | 93
-rw-r--r--  net/disk_cache/storage_block_unittest.cc | 69
-rw-r--r--  net/disk_cache/stress_cache.cc | 207
-rw-r--r--  net/disk_cache/trace.cc | 148
-rw-r--r--  net/disk_cache/trace.h | 47
61 files changed, 15597 insertions, 0 deletions
diff --git a/net/disk_cache/addr.cc b/net/disk_cache/addr.cc
new file mode 100644
index 00000000..aa84c01a
--- /dev/null
+++ b/net/disk_cache/addr.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/addr.h"
+
+#include "base/logging.h"
+
+namespace disk_cache {
+
+int Addr::start_block() const {
+ DCHECK(is_block_file());
+ return value_ & kStartBlockMask;
+}
+
+int Addr::num_blocks() const {
+ DCHECK(is_block_file() || !value_);
+ return ((value_ & kNumBlocksMask) >> kNumBlocksOffset) + 1;
+}
+
+bool Addr::SetFileNumber(int file_number) {
+ DCHECK(is_separate_file());
+ if (file_number & ~kFileNameMask)
+ return false;
+ value_ = kInitializedMask | file_number;
+ return true;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/addr.h b/net/disk_cache/addr.h
new file mode 100644
index 00000000..a504019d
--- /dev/null
+++ b/net/disk_cache/addr.h
@@ -0,0 +1,137 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is an internal class that handles the address of a cache record.
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_ADDR_H_
+#define NET_DISK_CACHE_ADDR_H_
+
+#include "net/disk_cache/disk_format.h"
+
+namespace disk_cache {
+
+enum FileType {
+ EXTERNAL = 0,
+ RANKINGS = 1,
+ BLOCK_256,
+ BLOCK_1K,
+ BLOCK_4K,
+};
+
+const int kMaxBlockSize = 4096 * 4;
+const int kMaxBlockFile = 255;
+const int kMaxNumBlocks = 4;
+const int kFirstAdditionlBlockFile = 4;
+
+// Defines a storage address for a cache record
+//
+// Header:
+// 1000 0000 0000 0000 0000 0000 0000 0000 : initialized bit
+// 0111 0000 0000 0000 0000 0000 0000 0000 : file type
+//
+// File type values:
+// 0 = separate file on disk
+// 1 = rankings block file
+// 2 = 256 byte block file
+// 3 = 1k byte block file
+// 4 = 4k byte block file
+//
+// If separate file:
+// 0000 1111 1111 1111 1111 1111 1111 1111 : file# 0 - 268,435,456 (2^28)
+//
+// If block file:
+// 0000 1100 0000 0000 0000 0000 0000 0000 : reserved bits
+// 0000 0011 0000 0000 0000 0000 0000 0000 : number of contiguous blocks 1-4
+// 0000 0000 1111 1111 0000 0000 0000 0000 : file selector 0 - 255
+// 0000 0000 0000 0000 1111 1111 1111 1111 : block# 0 - 65,535 (2^16)
+class Addr {
+ public:
+ Addr() : value_(0) {}
+ explicit Addr(CacheAddr address) : value_(address) {}
+ Addr(FileType file_type, int max_blocks, int block_file, int index) {
+ value_ = ((file_type << kFileTypeOffset) & kFileTypeMask) |
+ (((max_blocks - 1) << kNumBlocksOffset) & kNumBlocksMask) |
+ ((block_file << kFileSelectorOffset) & kFileSelectorMask) |
+ (index & kStartBlockMask) | kInitializedMask;
+ }
+
+ CacheAddr value() const { return value_; }
+ void set_value(CacheAddr address) {
+ value_ = address;
+ }
+
+ bool is_initialized() const {
+ return (value_ & kInitializedMask) != 0;
+ }
+
+ bool is_separate_file() const {
+ return (value_ & kFileTypeMask) == 0;
+ }
+
+ bool is_block_file() const {
+ return !is_separate_file();
+ }
+
+ FileType file_type() const {
+ return static_cast<FileType>((value_ & kFileTypeMask) >> kFileTypeOffset);
+ }
+
+ int FileNumber() const {
+ if (is_separate_file())
+ return value_ & kFileNameMask;
+ else
+ return ((value_ & kFileSelectorMask) >> kFileSelectorOffset);
+ }
+
+ int start_block() const;
+ int num_blocks() const;
+ bool SetFileNumber(int file_number);
+ int BlockSize() const {
+ return BlockSizeForFileType(file_type());
+ }
+
+ static int BlockSizeForFileType(FileType file_type) {
+ switch (file_type) {
+ case RANKINGS:
+ return 36;
+ case BLOCK_256:
+ return 256;
+ case BLOCK_1K:
+ return 1024;
+ case BLOCK_4K:
+ return 4096;
+ default:
+ return 0;
+ }
+ }
+
+ static FileType RequiredFileType(int size) {
+ if (size < 1024)
+ return BLOCK_256;
+ else if (size < 4096)
+ return BLOCK_1K;
+ else if (size <= 4096 * 4)
+ return BLOCK_4K;
+ else
+ return EXTERNAL;
+ }
+
+ private:
+ static const uint32 kInitializedMask = 0x80000000;
+ static const uint32 kFileTypeMask = 0x70000000;
+ static const uint32 kFileTypeOffset = 28;
+ static const uint32 kNumBlocksMask = 0x03000000;
+ static const uint32 kNumBlocksOffset = 24;
+ static const uint32 kFileSelectorMask = 0x00ff0000;
+ static const uint32 kFileSelectorOffset = 16;
+ static const uint32 kStartBlockMask = 0x0000FFFF;
+ static const uint32 kFileNameMask = 0x0FFFFFFF;
+
+ CacheAddr value_;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ADDR_H_
diff --git a/net/disk_cache/addr_unittest.cc b/net/disk_cache/addr_unittest.cc
new file mode 100644
index 00000000..551310e4
--- /dev/null
+++ b/net/disk_cache/addr_unittest.cc
@@ -0,0 +1,37 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace disk_cache {
+
+TEST_F(DiskCacheTest, CacheAddr_Size) {
+ Addr addr1(0);
+ EXPECT_FALSE(addr1.is_initialized());
+
+ // The object should not be more expensive than the actual address.
+ EXPECT_EQ(sizeof(uint32), sizeof(addr1));
+}
+
+TEST_F(DiskCacheTest, CacheAddr_ValidValues) {
+ Addr addr2(BLOCK_1K, 3, 5, 25);
+ EXPECT_EQ(BLOCK_1K, addr2.file_type());
+ EXPECT_EQ(3, addr2.num_blocks());
+ EXPECT_EQ(5, addr2.FileNumber());
+ EXPECT_EQ(25, addr2.start_block());
+ EXPECT_EQ(1024, addr2.BlockSize());
+}
+
+TEST_F(DiskCacheTest, CacheAddr_InvalidValues) {
+ Addr addr3(BLOCK_4K, 0x44, 0x41508, 0x952536);
+ EXPECT_EQ(BLOCK_4K, addr3.file_type());
+ EXPECT_EQ(4, addr3.num_blocks());
+ EXPECT_EQ(8, addr3.FileNumber());
+ EXPECT_EQ(0x2536, addr3.start_block());
+ EXPECT_EQ(4096, addr3.BlockSize());
+}
+
+} // namespace disk_cache
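
The checks above follow directly from the bit layout documented in addr.h: Addr(BLOCK_4K, 0x44, 0x41508, 0x952536) masks each argument into its field, which on a 32-bit CacheAddr works out to the value 0xC3082536. A minimal standalone sketch of that decoding (an illustration using only the masks from the layout comment, not part of the diff above):

    #include <cstdint>
    #include <iostream>

    int main() {
      // Masks as documented in the layout comment of addr.h.
      const uint32_t kInitializedMask  = 0x80000000;
      const uint32_t kFileTypeMask     = 0x70000000;
      const uint32_t kNumBlocksMask    = 0x03000000;
      const uint32_t kFileSelectorMask = 0x00ff0000;
      const uint32_t kStartBlockMask   = 0x0000ffff;

      // Roughly the value stored by Addr(BLOCK_4K, 0x44, 0x41508, 0x952536) once
      // the oversized arguments are masked down (see CacheAddr_InvalidValues).
      const uint32_t value = 0xC3082536;

      std::cout << "initialized: " << ((value & kInitializedMask) != 0) << "\n"       // 1
                << "file type:   " << ((value & kFileTypeMask) >> 28) << "\n"         // 4 == BLOCK_4K
                << "num blocks:  " << (((value & kNumBlocksMask) >> 24) + 1) << "\n"  // 4
                << "file number: " << ((value & kFileSelectorMask) >> 16) << "\n"     // 8
                << std::hex
                << "start block: 0x" << (value & kStartBlockMask) << "\n";            // 0x2536
      return 0;
    }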
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
new file mode 100644
index 00000000..3b3ca6af
--- /dev/null
+++ b/net/disk_cache/backend_impl.cc
@@ -0,0 +1,1684 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/backend_impl.h"
+
+#include "base/field_trial.h"
+#include "base/file_path.h"
+#include "base/file_util.h"
+#include "base/histogram.h"
+#include "base/message_loop.h"
+#include "base/rand_util.h"
+#include "base/string_util.h"
+#include "base/sys_info.h"
+#include "base/timer.h"
+#include "base/worker_pool.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/errors.h"
+#include "net/disk_cache/hash.h"
+#include "net/disk_cache/file.h"
+
+// This has to be defined before including histogram_macros.h from this file.
+#define NET_DISK_CACHE_BACKEND_IMPL_CC_
+#include "net/disk_cache/histogram_macros.h"
+
+using base::Time;
+using base::TimeDelta;
+
+namespace {
+
+const char* kIndexName = "index";
+const int kMaxOldFolders = 100;
+
+// Seems like ~240 MB corresponds to less than 50k entries for 99% of people.
+const int k64kEntriesStore = 240 * 1000 * 1000;
+const int kBaseTableLen = 64 * 1024;
+const int kDefaultCacheSize = 80 * 1024 * 1024;
+
+int DesiredIndexTableLen(int32 storage_size) {
+ if (storage_size <= k64kEntriesStore)
+ return kBaseTableLen;
+ if (storage_size <= k64kEntriesStore * 2)
+ return kBaseTableLen * 2;
+ if (storage_size <= k64kEntriesStore * 4)
+ return kBaseTableLen * 4;
+ if (storage_size <= k64kEntriesStore * 8)
+ return kBaseTableLen * 8;
+
+ // The biggest storage_size for int32 requires a 4 MB table.
+ return kBaseTableLen * 16;
+}
+
+int MaxStorageSizeForTable(int table_len) {
+ return table_len * (k64kEntriesStore / kBaseTableLen);
+}
+
+size_t GetIndexSize(int table_len) {
+ size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
+ return sizeof(disk_cache::IndexHeader) + table_size;
+}
+
+// ------------------------------------------------------------------------
+
+// Returns a fully qualified name from path and name, using a given name prefix
+// and index number. For instance, if the arguments are "/foo", "bar" and 5, it
+// will return "/foo/old_bar_005".
+FilePath GetPrefixedName(const FilePath& path, const std::string& name,
+ int index) {
+ std::string tmp = StringPrintf("%s%s_%03d", "old_", name.c_str(), index);
+ return path.AppendASCII(tmp);
+}
+
+// This is a simple Task to cleanup old caches.
+class CleanupTask : public Task {
+ public:
+ CleanupTask(const FilePath& path, const std::string& name)
+ : path_(path), name_(name) {}
+
+ virtual void Run();
+
+ private:
+ FilePath path_;
+ std::string name_;
+ DISALLOW_EVIL_CONSTRUCTORS(CleanupTask);
+};
+
+void CleanupTask::Run() {
+ for (int i = 0; i < kMaxOldFolders; i++) {
+ FilePath to_delete = GetPrefixedName(path_, name_, i);
+ disk_cache::DeleteCache(to_delete, true);
+ }
+}
+
+// Returns a full path to rename the current cache, in order to delete it. path
+// is the current folder location, and name is the current folder name.
+FilePath GetTempCacheName(const FilePath& path, const std::string& name) {
+ // We'll attempt to have up to kMaxOldFolders folders for deletion.
+ for (int i = 0; i < kMaxOldFolders; i++) {
+ FilePath to_delete = GetPrefixedName(path, name, i);
+ if (!file_util::PathExists(to_delete))
+ return to_delete;
+ }
+ return FilePath();
+}
+
+// Moves the cache files to a new folder and creates a task to delete them.
+bool DelayedCacheCleanup(const FilePath& full_path) {
+ FilePath current_path = full_path.StripTrailingSeparators();
+
+ FilePath path = current_path.DirName();
+ FilePath name = current_path.BaseName();
+#if defined(OS_POSIX)
+ std::string name_str = name.value();
+#elif defined(OS_WIN)
+ // We created this file so it should only contain ASCII.
+ std::string name_str = WideToASCII(name.value());
+#endif
+
+ FilePath to_delete = GetTempCacheName(path, name_str);
+ if (to_delete.empty()) {
+ LOG(ERROR) << "Unable to get another cache folder";
+ return false;
+ }
+
+ if (!disk_cache::MoveCache(full_path, to_delete)) {
+ LOG(ERROR) << "Unable to rename cache folder";
+ return false;
+ }
+
+#if defined(OS_WIN)
+ WorkerPool::PostTask(FROM_HERE, new CleanupTask(path, name_str), true);
+#elif defined(OS_POSIX)
+ // TODO(rvargas): Use the worker pool.
+ MessageLoop::current()->PostTask(FROM_HERE, new CleanupTask(path, name_str));
+#endif
+ return true;
+}
+
+// Sets |current_group| for the current experiment. Returns false if the files
+// should be discarded.
+bool InitExperiment(int* current_group) {
+ if (*current_group == 3 || *current_group == 4) {
+ // Discard current cache for groups 3 and 4.
+ return false;
+ }
+
+ // There is no experiment.
+ *current_group = 0;
+ return true;
+}
+
+// Initializes the field trial structures to allow performance measurements
+// for the current cache configuration.
+void SetFieldTrialInfo(int size_group) {
+ static bool first = true;
+ if (!first)
+ return;
+
+ // Field trials involve static objects so we have to do this only once.
+ first = false;
+ scoped_refptr<FieldTrial> trial1 = new FieldTrial("CacheSize", 10);
+ std::string group1 = StringPrintf("CacheSizeGroup_%d", size_group);
+ trial1->AppendGroup(group1, FieldTrial::kAllRemainingProbability);
+}
+
+} // namespace
+
+// ------------------------------------------------------------------------
+
+namespace disk_cache {
+
+Backend* CreateCacheBackend(const FilePath& full_path, bool force,
+ int max_bytes, net::CacheType type) {
+ // Create a backend without extra flags.
+ return BackendImpl::CreateBackend(full_path, force, max_bytes, type, kNone);
+}
+
+int PreferedCacheSize(int64 available) {
+ // If there is not enough space to use kDefaultCacheSize, use 80% of the
+ // available space.
+ if (available < kDefaultCacheSize)
+ return static_cast<int32>(available * 8 / 10);
+
+ // Don't use more than 10% of the available space.
+ if (available < 10 * kDefaultCacheSize)
+ return kDefaultCacheSize;
+
+ // Use 10% of the free space until we reach 2.5 * kDefaultCacheSize.
+ if (available < static_cast<int64>(kDefaultCacheSize) * 25)
+ return static_cast<int32>(available / 10);
+
+ // After reaching our target size (2.5 * kDefaultCacheSize), attempt to use
+ // 1% of the available space.
+ if (available < static_cast<int64>(kDefaultCacheSize) * 100)
+ return kDefaultCacheSize * 5 / 2;
+
+ int64 one_percent = available / 100;
+ if (one_percent > kint32max)
+ return kint32max;
+
+ return static_cast<int32>(one_percent);
+}
+
+// ------------------------------------------------------------------------
+
+// If the initialization of the cache fails, and force is true, we will discard
+// the whole cache and create a new one. In order to process a potentially large
+// number of files, we'll rename the cache folder to old_ + original_name +
+// number (located in the same parent folder), and spawn a worker thread to
+// delete all the files in all the stale cache folders. The whole process can
+// still fail if we are not able to rename the cache folder (for instance due to
+// a sharing violation), and in that case a cache for this profile (on the
+// desired path) cannot be created.
+//
+// Static.
+Backend* BackendImpl::CreateBackend(const FilePath& full_path, bool force,
+ int max_bytes, net::CacheType type,
+ BackendFlags flags) {
+ BackendImpl* cache = new BackendImpl(full_path);
+ cache->SetMaxSize(max_bytes);
+ cache->SetType(type);
+ cache->SetFlags(flags);
+ if (cache->Init())
+ return cache;
+
+ delete cache;
+ if (!force)
+ return NULL;
+
+ if (!DelayedCacheCleanup(full_path))
+ return NULL;
+
+ // The worker thread will start deleting files soon, but the original folder
+ // is not there anymore... let's create a new set of files.
+ cache = new BackendImpl(full_path);
+ cache->SetMaxSize(max_bytes);
+ cache->SetType(type);
+ cache->SetFlags(flags);
+ if (cache->Init())
+ return cache;
+
+ delete cache;
+ LOG(ERROR) << "Unable to create cache";
+ return NULL;
+}
+
+bool BackendImpl::Init() {
+ DCHECK(!init_);
+ if (init_)
+ return false;
+
+ bool create_files = false;
+ if (!InitBackingStore(&create_files)) {
+ ReportError(ERR_STORAGE_ERROR);
+ return false;
+ }
+
+ num_refs_ = num_pending_io_ = max_refs_ = 0;
+
+ if (!restarted_) {
+ trace_object_ = TraceObject::GetTraceObject();
+ // Create a recurrent timer of 30 secs.
+ int timer_delay = unit_test_ ? 1000 : 30000;
+ timer_.Start(TimeDelta::FromMilliseconds(timer_delay), this,
+ &BackendImpl::OnStatsTimer);
+ }
+
+ init_ = true;
+
+ if (data_->header.experiment != 0 && cache_type_ != net::DISK_CACHE) {
+ // No experiment for other caches.
+ return false;
+ }
+
+ if (!(user_flags_ & disk_cache::kNoRandom)) {
+ // The unit test controls directly what to test.
+ if (!InitExperiment(&data_->header.experiment))
+ return false;
+
+ new_eviction_ = (cache_type_ == net::DISK_CACHE);
+ }
+
+ if (!CheckIndex()) {
+ ReportError(ERR_INIT_FAILED);
+ return false;
+ }
+
+ // We don't care if the value overflows. The only thing we care about is that
+ // the id cannot be zero, because that value is used as "not dirty".
+ // Increasing the value once per second gives us many years before we start
+ // having collisions.
+ data_->header.this_id++;
+ if (!data_->header.this_id)
+ data_->header.this_id++;
+
+ if (data_->header.crash) {
+ ReportError(ERR_PREVIOUS_CRASH);
+ } else {
+ ReportError(0);
+ data_->header.crash = 1;
+ }
+
+ if (!block_files_.Init(create_files))
+ return false;
+
+ // stats_ and rankings_ may end up calling back to us so we better be enabled.
+ disabled_ = false;
+ if (!stats_.Init(this, &data_->header.stats))
+ return false;
+
+ disabled_ = !rankings_.Init(this, new_eviction_);
+ eviction_.Init(this);
+
+ // Setup load-time data only for the main cache.
+ if (cache_type() == net::DISK_CACHE)
+ SetFieldTrialInfo(GetSizeGroup());
+
+ return !disabled_;
+}
+
+BackendImpl::~BackendImpl() {
+ Trace("Backend destructor");
+ if (!init_)
+ return;
+
+ if (data_)
+ data_->header.crash = 0;
+
+ timer_.Stop();
+
+ File::WaitForPendingIO(&num_pending_io_);
+ DCHECK(!num_refs_);
+}
+
+// ------------------------------------------------------------------------
+
+int32 BackendImpl::GetEntryCount() const {
+ if (!index_)
+ return 0;
+ // num_entries includes entries already evicted.
+ int32 not_deleted = data_->header.num_entries -
+ data_->header.lru.sizes[Rankings::DELETED];
+
+ if (not_deleted < 0) {
+ NOTREACHED();
+ not_deleted = 0;
+ }
+
+ return not_deleted;
+}
+
+bool BackendImpl::OpenEntry(const std::string& key, Entry** entry) {
+ if (disabled_)
+ return false;
+
+ Time start = Time::Now();
+ uint32 hash = Hash(key);
+
+ EntryImpl* cache_entry = MatchEntry(key, hash, false);
+ if (!cache_entry) {
+ stats_.OnEvent(Stats::OPEN_MISS);
+ return false;
+ }
+
+ if (ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
+ // The entry was already evicted.
+ cache_entry->Release();
+ stats_.OnEvent(Stats::OPEN_MISS);
+ return false;
+ }
+
+ eviction_.OnOpenEntry(cache_entry);
+ DCHECK(entry);
+ *entry = cache_entry;
+
+ CACHE_UMA(AGE_MS, "OpenTime", GetSizeGroup(), start);
+ stats_.OnEvent(Stats::OPEN_HIT);
+ return true;
+}
+
+int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback) {
+ if (OpenEntry(key, entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool BackendImpl::CreateEntry(const std::string& key, Entry** entry) {
+ if (disabled_ || key.empty())
+ return false;
+
+ DCHECK(entry);
+ *entry = NULL;
+
+ Time start = Time::Now();
+ uint32 hash = Hash(key);
+
+ scoped_refptr<EntryImpl> parent;
+ Addr entry_address(data_->table[hash & mask_]);
+ if (entry_address.is_initialized()) {
+ // We have an entry already. It could be the one we are looking for, or just
+ // a hash conflict.
+ EntryImpl* old_entry = MatchEntry(key, hash, false);
+ if (old_entry)
+ return ResurrectEntry(old_entry, entry);
+
+ EntryImpl* parent_entry = MatchEntry(key, hash, true);
+ if (!parent_entry) {
+ NOTREACHED();
+ return false;
+ }
+ parent.swap(&parent_entry);
+ }
+
+ int num_blocks;
+ size_t key1_len = sizeof(EntryStore) - offsetof(EntryStore, key);
+ if (key.size() < key1_len ||
+ key.size() > static_cast<size_t>(kMaxInternalKeyLength))
+ num_blocks = 1;
+ else
+ num_blocks = static_cast<int>((key.size() - key1_len) / 256 + 2);
+
+ if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return false;
+ }
+
+ Addr node_address(0);
+ if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
+ block_files_.DeleteBlock(entry_address, false);
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return false;
+ }
+
+ scoped_refptr<EntryImpl> cache_entry(new EntryImpl(this, entry_address));
+ IncreaseNumRefs();
+
+ if (!cache_entry->CreateEntry(node_address, key, hash)) {
+ block_files_.DeleteBlock(entry_address, false);
+ block_files_.DeleteBlock(node_address, false);
+ LOG(ERROR) << "Create entry failed " << key.c_str();
+ stats_.OnEvent(Stats::CREATE_ERROR);
+ return false;
+ }
+
+ // We are not failing the operation; let's add this to the map.
+ open_entries_[entry_address.value()] = cache_entry;
+
+ if (parent.get())
+ parent->SetNextAddress(entry_address);
+
+ block_files_.GetFile(entry_address)->Store(cache_entry->entry());
+ block_files_.GetFile(node_address)->Store(cache_entry->rankings());
+
+ IncreaseNumEntries();
+ eviction_.OnCreateEntry(cache_entry);
+ if (!parent.get())
+ data_->table[hash & mask_] = entry_address.value();
+
+ cache_entry.swap(reinterpret_cast<EntryImpl**>(entry));
+
+ CACHE_UMA(AGE_MS, "CreateTime", GetSizeGroup(), start);
+ stats_.OnEvent(Stats::CREATE_HIT);
+ Trace("create entry hit ");
+ return true;
+}
+
+int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback) {
+ if (CreateEntry(key, entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool BackendImpl::DoomEntry(const std::string& key) {
+ if (disabled_)
+ return false;
+
+ Entry* entry;
+ if (!OpenEntry(key, &entry))
+ return false;
+
+ // Note that you'd think you could just pass &entry_impl to OpenEntry,
+ // but that triggers strict aliasing problems with gcc.
+ EntryImpl* entry_impl = reinterpret_cast<EntryImpl*>(entry);
+ entry_impl->Doom();
+ entry_impl->Release();
+ return true;
+}
+
+int BackendImpl::DoomEntry(const std::string& key,
+ CompletionCallback* callback) {
+ if (DoomEntry(key))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool BackendImpl::DoomAllEntries() {
+ if (!num_refs_) {
+ PrepareForRestart();
+ DeleteCache(path_, false);
+ return Init();
+ } else {
+ if (disabled_)
+ return false;
+
+ eviction_.TrimCache(true);
+ stats_.OnEvent(Stats::DOOM_CACHE);
+ return true;
+ }
+}
+
+int BackendImpl::DoomAllEntries(CompletionCallback* callback) {
+ if (DoomAllEntries())
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool BackendImpl::DoomEntriesBetween(const Time initial_time,
+ const Time end_time) {
+ if (end_time.is_null())
+ return DoomEntriesSince(initial_time);
+
+ DCHECK(end_time >= initial_time);
+
+ if (disabled_)
+ return false;
+
+ Entry* node, *next;
+ void* iter = NULL;
+ if (!OpenNextEntry(&iter, &next))
+ return true;
+
+ while (next) {
+ node = next;
+ if (!OpenNextEntry(&iter, &next))
+ next = NULL;
+
+ if (node->GetLastUsed() >= initial_time &&
+ node->GetLastUsed() < end_time) {
+ node->Doom();
+ } else if (node->GetLastUsed() < initial_time) {
+ if (next)
+ next->Close();
+ next = NULL;
+ EndEnumeration(&iter);
+ }
+
+ node->Close();
+ }
+
+ return true;
+}
+
+int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ CompletionCallback* callback) {
+ if (DoomEntriesBetween(initial_time, end_time))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+// We use OpenNextEntry to retrieve elements from the cache, until we get
+// entries that are too old.
+bool BackendImpl::DoomEntriesSince(const Time initial_time) {
+ if (disabled_)
+ return false;
+
+ for (;;) {
+ Entry* entry;
+ void* iter = NULL;
+ if (!OpenNextEntry(&iter, &entry))
+ return true;
+
+ if (initial_time > entry->GetLastUsed()) {
+ entry->Close();
+ EndEnumeration(&iter);
+ return true;
+ }
+
+ entry->Doom();
+ entry->Close();
+ EndEnumeration(&iter); // Dooming the entry invalidates the iterator.
+ }
+}
+
+int BackendImpl::DoomEntriesSince(const base::Time initial_time,
+ CompletionCallback* callback) {
+ if (DoomEntriesSince(initial_time))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool BackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
+ return OpenFollowingEntry(true, iter, next_entry);
+}
+
+int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
+ CompletionCallback* callback) {
+ if (OpenNextEntry(iter, next_entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+void BackendImpl::EndEnumeration(void** iter) {
+ scoped_ptr<Rankings::Iterator> iterator(
+ reinterpret_cast<Rankings::Iterator*>(*iter));
+ *iter = NULL;
+}
+
+void BackendImpl::GetStats(StatsItems* stats) {
+ if (disabled_)
+ return;
+
+ std::pair<std::string, std::string> item;
+
+ item.first = "Entries";
+ item.second = StringPrintf("%d", data_->header.num_entries);
+ stats->push_back(item);
+
+ item.first = "Pending IO";
+ item.second = StringPrintf("%d", num_pending_io_);
+ stats->push_back(item);
+
+ item.first = "Max size";
+ item.second = StringPrintf("%d", max_size_);
+ stats->push_back(item);
+
+ item.first = "Current size";
+ item.second = StringPrintf("%d", data_->header.num_bytes);
+ stats->push_back(item);
+
+ stats_.GetItems(stats);
+}
+
+// ------------------------------------------------------------------------
+
+bool BackendImpl::SetMaxSize(int max_bytes) {
+ COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
+ if (max_bytes < 0)
+ return false;
+
+ // Zero size means use the default.
+ if (!max_bytes)
+ return true;
+
+ // Avoid a DCHECK later on.
+ if (max_bytes >= kint32max - kint32max / 10)
+ max_bytes = kint32max - kint32max / 10 - 1;
+
+ user_flags_ |= kMaxSize;
+ max_size_ = max_bytes;
+ return true;
+}
+
+void BackendImpl::SetType(net::CacheType type) {
+ DCHECK(type != net::MEMORY_CACHE);
+ cache_type_ = type;
+}
+
+FilePath BackendImpl::GetFileName(Addr address) const {
+ if (!address.is_separate_file() || !address.is_initialized()) {
+ NOTREACHED();
+ return FilePath();
+ }
+
+ std::string tmp = StringPrintf("f_%06x", address.FileNumber());
+ return path_.AppendASCII(tmp);
+}
+
+MappedFile* BackendImpl::File(Addr address) {
+ if (disabled_)
+ return NULL;
+ return block_files_.GetFile(address);
+}
+
+bool BackendImpl::CreateExternalFile(Addr* address) {
+ int file_number = data_->header.last_file + 1;
+ Addr file_address(0);
+ bool success = false;
+ for (int i = 0; i < 0x0fffffff; i++, file_number++) {
+ if (!file_address.SetFileNumber(file_number)) {
+ file_number = 1;
+ continue;
+ }
+ FilePath name = GetFileName(file_address);
+ int flags = base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE |
+ base::PLATFORM_FILE_CREATE |
+ base::PLATFORM_FILE_EXCLUSIVE_WRITE;
+ scoped_refptr<disk_cache::File> file(new disk_cache::File(
+ base::CreatePlatformFile(name, flags, NULL)));
+ if (!file->IsValid())
+ continue;
+
+ success = true;
+ break;
+ }
+
+ DCHECK(success);
+ if (!success)
+ return false;
+
+ data_->header.last_file = file_number;
+ address->set_value(file_address.value());
+ return true;
+}
+
+bool BackendImpl::CreateBlock(FileType block_type, int block_count,
+ Addr* block_address) {
+ return block_files_.CreateBlock(block_type, block_count, block_address);
+}
+
+void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
+ block_files_.DeleteBlock(block_address, deep);
+}
+
+LruData* BackendImpl::GetLruData() {
+ return &data_->header.lru;
+}
+
+void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
+ if (!read_only_) {
+ eviction_.UpdateRank(entry, modified);
+ }
+}
+
+void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
+ Addr address(rankings->Data()->contents);
+ EntryImpl* cache_entry = NULL;
+ bool dirty;
+ if (NewEntry(address, &cache_entry, &dirty))
+ return;
+
+ uint32 hash = cache_entry->GetHash();
+ cache_entry->Release();
+
+ // Anything on the table means that this entry is there.
+ if (data_->table[hash & mask_])
+ return;
+
+ data_->table[hash & mask_] = address.value();
+}
+
+void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
+ uint32 hash = entry->GetHash();
+ std::string key = entry->GetKey();
+ EntryImpl* parent_entry = MatchEntry(key, hash, true);
+ CacheAddr child(entry->GetNextAddress());
+
+ Trace("Doom entry 0x%p", entry);
+
+ eviction_.OnDoomEntry(entry);
+ entry->InternalDoom();
+
+ if (parent_entry) {
+ parent_entry->SetNextAddress(Addr(child));
+ parent_entry->Release();
+ } else {
+ data_->table[hash & mask_] = child;
+ }
+
+ if (!new_eviction_) {
+ DecreaseNumEntries();
+ }
+
+ stats_.OnEvent(Stats::DOOM_ENTRY);
+}
+
+// An entry may be linked on the DELETED list for a while after being doomed.
+// This function is called when we want to remove it.
+void BackendImpl::RemoveEntry(EntryImpl* entry) {
+ if (!new_eviction_)
+ return;
+
+ DCHECK(ENTRY_NORMAL != entry->entry()->Data()->state);
+
+ Trace("Remove entry 0x%p", entry);
+ eviction_.OnDestroyEntry(entry);
+ DecreaseNumEntries();
+}
+
+void BackendImpl::CacheEntryDestroyed(Addr address) {
+ EntriesMap::iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end())
+ open_entries_.erase(it);
+ DecreaseNumRefs();
+}
+
+EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
+ DCHECK(rankings->HasData());
+ EntriesMap::const_iterator it =
+ open_entries_.find(rankings->Data()->contents);
+ if (it != open_entries_.end()) {
+ // We have this entry in memory.
+ return it->second;
+ }
+
+ return NULL;
+}
+
+int32 BackendImpl::GetCurrentEntryId() const {
+ return data_->header.this_id;
+}
+
+int BackendImpl::MaxFileSize() const {
+ return max_size_ / 8;
+}
+
+void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
+ if (disabled_ || old_size == new_size)
+ return;
+ if (old_size > new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
+
+ // Update the usage statistics.
+ stats_.ModifyStorageStats(old_size, new_size);
+}
+
+void BackendImpl::TooMuchStorageRequested(int32 size) {
+ stats_.ModifyStorageStats(0, size);
+}
+
+bool BackendImpl::IsLoaded() const {
+ CACHE_UMA(COUNTS, "PendingIO", GetSizeGroup(), num_pending_io_);
+ if (user_flags_ & kNoLoadProtection)
+ return false;
+
+ return num_pending_io_ > 5;
+}
+
+std::string BackendImpl::HistogramName(const char* name, int experiment) const {
+ if (!experiment)
+ return StringPrintf("DiskCache.%d.%s", cache_type_, name);
+ return StringPrintf("DiskCache.%d.%s_%d", cache_type_, name, experiment);
+}
+
+int BackendImpl::GetSizeGroup() const {
+ if (disabled_)
+ return 0;
+
+ // We want to report times grouped by the current cache size (50 MB groups).
+ int group = data_->header.num_bytes / (50 * 1024 * 1024);
+ if (group > 6)
+ group = 6; // Limit the number of groups, just in case.
+ return group;
+}
+
+// We want to remove biases from some histograms so we only send data once per
+// week.
+bool BackendImpl::ShouldReportAgain() {
+ if (uma_report_)
+ return uma_report_ == 2;
+
+ uma_report_++;
+ int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
+ Time last_time = Time::FromInternalValue(last_report);
+ if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
+ stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
+ uma_report_++;
+ return true;
+ }
+ return false;
+}
+
+void BackendImpl::FirstEviction() {
+ DCHECK(data_->header.create_time);
+
+ Time create_time = Time::FromInternalValue(data_->header.create_time);
+ CACHE_UMA(AGE, "FillupAge", 0, create_time);
+
+ int64 use_hours = stats_.GetCounter(Stats::TIMER) / 120;
+ CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_hours));
+ CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());
+
+ int avg_size = data_->header.num_bytes / GetEntryCount();
+ CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);
+
+ int large_entries_bytes = stats_.GetLargeEntriesSize();
+ int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
+ CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);
+
+ if (new_eviction_) {
+ CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
+ CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
+ data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
+ data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
+ data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
+ }
+
+ stats_.ResetRatios();
+}
+
+void BackendImpl::CriticalError(int error) {
+ LOG(ERROR) << "Critical error found " << error;
+ if (disabled_)
+ return;
+
+ LogStats();
+ ReportError(error);
+
+ // Setting the index table length to an invalid value will force re-creation
+ // of the cache files.
+ data_->header.table_len = 1;
+ disabled_ = true;
+
+ if (!num_refs_)
+ MessageLoop::current()->PostTask(FROM_HERE,
+ factory_.NewRunnableMethod(&BackendImpl::RestartCache));
+}
+
+void BackendImpl::ReportError(int error) {
+ // We transmit positive numbers, instead of direct error codes.
+ DCHECK(error <= 0);
+ CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
+}
+
+void BackendImpl::OnEvent(Stats::Counters an_event) {
+ stats_.OnEvent(an_event);
+}
+
+void BackendImpl::OnStatsTimer() {
+ stats_.OnEvent(Stats::TIMER);
+ int64 time = stats_.GetCounter(Stats::TIMER);
+ int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
+
+ // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
+ // the bias towards 0.
+ if (num_refs_ && (current != num_refs_)) {
+ int64 diff = (num_refs_ - current) / 50;
+ if (!diff)
+ diff = num_refs_ > current ? 1 : -1;
+ current = current + diff;
+ stats_.SetCounter(Stats::OPEN_ENTRIES, current);
+ stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
+ }
+
+ CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);
+
+ if (!data_)
+ first_timer_ = false;
+ if (first_timer_) {
+ first_timer_ = false;
+ if (ShouldReportAgain())
+ ReportStats();
+ }
+
+ // Save stats to disk at 5 min intervals.
+ if (time % 10 == 0)
+ stats_.Store();
+}
+
+void BackendImpl::IncrementIoCount() {
+ num_pending_io_++;
+}
+
+void BackendImpl::DecrementIoCount() {
+ num_pending_io_--;
+}
+
+void BackendImpl::SetUnitTestMode() {
+ user_flags_ |= kUnitTestMode;
+ unit_test_ = true;
+}
+
+void BackendImpl::SetUpgradeMode() {
+ user_flags_ |= kUpgradeMode;
+ read_only_ = true;
+}
+
+void BackendImpl::SetNewEviction() {
+ user_flags_ |= kNewEviction;
+ new_eviction_ = true;
+}
+
+void BackendImpl::SetFlags(uint32 flags) {
+ user_flags_ |= flags;
+}
+
+void BackendImpl::ClearRefCountForTest() {
+ num_refs_ = 0;
+}
+
+int BackendImpl::SelfCheck() {
+ if (!init_) {
+ LOG(ERROR) << "Init failed";
+ return ERR_INIT_FAILED;
+ }
+
+ int num_entries = rankings_.SelfCheck();
+ if (num_entries < 0) {
+ LOG(ERROR) << "Invalid rankings list, error " << num_entries;
+ return num_entries;
+ }
+
+ if (num_entries != data_->header.num_entries) {
+ LOG(ERROR) << "Number of entries mismatch";
+ return ERR_NUM_ENTRIES_MISMATCH;
+ }
+
+ return CheckAllEntries();
+}
+
+bool BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry) {
+ return OpenFollowingEntry(false, iter, prev_entry);
+}
+
+// ------------------------------------------------------------------------
+
+// We just created a new file so we're going to write the header and set the
+// file length to include the hash table (zero filled).
+bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
+ AdjustMaxCacheSize(0);
+
+ IndexHeader header;
+ header.table_len = DesiredIndexTableLen(max_size_);
+
+ // We need file version 2.1 for the new eviction algorithm.
+ if (new_eviction_)
+ header.version = 0x20001;
+
+ header.create_time = Time::Now().ToInternalValue();
+
+ if (!file->Write(&header, sizeof(header), 0))
+ return false;
+
+ return file->SetLength(GetIndexSize(header.table_len));
+}
+
+bool BackendImpl::InitBackingStore(bool* file_created) {
+ file_util::CreateDirectory(path_);
+
+ FilePath index_name = path_.AppendASCII(kIndexName);
+
+ int flags = base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE |
+ base::PLATFORM_FILE_OPEN_ALWAYS |
+ base::PLATFORM_FILE_EXCLUSIVE_WRITE;
+ scoped_refptr<disk_cache::File> file(new disk_cache::File(
+ base::CreatePlatformFile(index_name, flags, file_created)));
+
+ if (!file->IsValid())
+ return false;
+
+ bool ret = true;
+ if (*file_created)
+ ret = CreateBackingStore(file);
+
+ file = NULL;
+ if (!ret)
+ return false;
+
+ index_ = new MappedFile();
+ data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
+ if (!data_) {
+ LOG(ERROR) << "Unable to map Index file";
+ return false;
+ }
+ return true;
+}
+
+// The maximum cache size will be either set explicitly by the caller, or
+// calculated by this code.
+void BackendImpl::AdjustMaxCacheSize(int table_len) {
+ if (max_size_)
+ return;
+
+ // If table_len is provided, the index file exists.
+ DCHECK(!table_len || data_->header.magic);
+
+ // The user is not setting the size, let's figure it out.
+ int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
+ if (available < 0) {
+ max_size_ = kDefaultCacheSize;
+ return;
+ }
+
+ if (table_len)
+ available += data_->header.num_bytes;
+
+ max_size_ = PreferedCacheSize(available);
+
+ // Let's not use more than the default size while we tune up the performance
+ // of bigger caches. TODO(rvargas): remove this limit.
+ if (max_size_ > kDefaultCacheSize * 4)
+ max_size_ = kDefaultCacheSize * 4;
+
+ if (!table_len)
+ return;
+
+ // If we already have a table, adjust the size to it.
+ int current_max_size = MaxStorageSizeForTable(table_len);
+ if (max_size_ > current_max_size)
+ max_size_= current_max_size;
+}
+
+// We always execute this method from the message loop so that we can freely
+// release files, memory pointers etc.
+void BackendImpl::RestartCache() {
+ DCHECK(!num_refs_);
+ DCHECK(!open_entries_.size());
+ PrepareForRestart();
+ DelayedCacheCleanup(path_);
+
+ int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
+
+ // Don't call Init() if directed by the unit test: we are simulating a failure
+ // trying to re-enable the cache.
+ if (unit_test_)
+ init_ = true; // Let the destructor do proper cleanup.
+ else if (Init())
+ stats_.SetCounter(Stats::FATAL_ERROR, errors + 1);
+}
+
+void BackendImpl::PrepareForRestart() {
+ // Reset the mask_ if it was not given by the user.
+ if (!(user_flags_ & kMask))
+ mask_ = 0;
+
+ if (!(user_flags_ & kNewEviction))
+ new_eviction_ = false;
+
+ data_->header.crash = 0;
+ index_ = NULL;
+ data_ = NULL;
+ block_files_.CloseFiles();
+ rankings_.Reset();
+ init_ = false;
+ restarted_ = true;
+}
+
+int BackendImpl::NewEntry(Addr address, EntryImpl** entry, bool* dirty) {
+ EntriesMap::iterator it = open_entries_.find(address.value());
+ if (it != open_entries_.end()) {
+ // Easy job. This entry is already in memory.
+ EntryImpl* this_entry = it->second;
+ this_entry->AddRef();
+ *entry = this_entry;
+ *dirty = false;
+ return 0;
+ }
+
+ scoped_refptr<EntryImpl> cache_entry(new EntryImpl(this, address));
+ IncreaseNumRefs();
+ *entry = NULL;
+
+ if (!address.is_initialized() || address.is_separate_file() ||
+ address.file_type() != BLOCK_256) {
+ LOG(WARNING) << "Wrong entry address.";
+ return ERR_INVALID_ADDRESS;
+ }
+
+ if (!cache_entry->entry()->Load())
+ return ERR_READ_FAILURE;
+
+ if (!cache_entry->SanityCheck()) {
+ LOG(WARNING) << "Messed up entry found.";
+ return ERR_INVALID_ENTRY;
+ }
+
+ if (!cache_entry->LoadNodeAddress())
+ return ERR_READ_FAILURE;
+
+ *dirty = cache_entry->IsDirty(GetCurrentEntryId());
+
+ // Prevent overwriting the dirty flag on the destructor.
+ cache_entry->ClearDirtyFlag();
+
+ if (!rankings_.SanityCheck(cache_entry->rankings(), false))
+ return ERR_INVALID_LINKS;
+
+ // We only add clean entries to the map.
+ if (!*dirty)
+ open_entries_[address.value()] = cache_entry;
+
+ cache_entry.swap(entry);
+ return 0;
+}
+
+EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
+ bool find_parent) {
+ Addr address(data_->table[hash & mask_]);
+ scoped_refptr<EntryImpl> cache_entry, parent_entry;
+ EntryImpl* tmp = NULL;
+ bool found = false;
+
+ for (;;) {
+ if (disabled_)
+ break;
+
+ if (!address.is_initialized()) {
+ if (find_parent)
+ found = true;
+ break;
+ }
+
+ bool dirty;
+ int error = NewEntry(address, &tmp, &dirty);
+ cache_entry.swap(&tmp);
+
+ if (error || dirty) {
+ // This entry is dirty on disk (it was not properly closed): we cannot
+ // trust it.
+ Addr child(0);
+ if (!error)
+ child.set_value(cache_entry->GetNextAddress());
+
+ if (parent_entry) {
+ parent_entry->SetNextAddress(child);
+ parent_entry = NULL;
+ } else {
+ data_->table[hash & mask_] = child.value();
+ }
+
+ if (!error) {
+ // It is important to call DestroyInvalidEntry after removing this
+ // entry from the table.
+ DestroyInvalidEntry(cache_entry);
+ cache_entry = NULL;
+ } else {
+ Trace("NewEntry failed on MatchEntry 0x%x", address.value());
+ }
+
+ // Restart the search.
+ address.set_value(data_->table[hash & mask_]);
+ continue;
+ }
+
+ if (cache_entry->IsSameEntry(key, hash)) {
+ if (!cache_entry->Update())
+ cache_entry = NULL;
+ found = true;
+ break;
+ }
+ if (!cache_entry->Update())
+ cache_entry = NULL;
+ parent_entry = cache_entry;
+ cache_entry = NULL;
+ if (!parent_entry)
+ break;
+
+ address.set_value(parent_entry->GetNextAddress());
+ }
+
+ if (parent_entry && (!find_parent || !found))
+ parent_entry = NULL;
+
+ if (cache_entry && (find_parent || !found))
+ cache_entry = NULL;
+
+ find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
+ return tmp;
+}
+
+// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
+bool BackendImpl::OpenFollowingEntry(bool forward, void** iter,
+ Entry** next_entry) {
+ if (disabled_)
+ return false;
+
+ DCHECK(iter);
+ DCHECK(next_entry);
+ *next_entry = NULL;
+
+ const int kListsToSearch = 3;
+ scoped_refptr<EntryImpl> entries[kListsToSearch];
+ scoped_ptr<Rankings::Iterator> iterator(
+ reinterpret_cast<Rankings::Iterator*>(*iter));
+ *iter = NULL;
+
+ if (!iterator.get()) {
+ iterator.reset(new Rankings::Iterator(&rankings_));
+ bool ret = false;
+
+ // Get an entry from each list.
+ for (int i = 0; i < kListsToSearch; i++) {
+ EntryImpl* temp = NULL;
+ ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
+ &iterator->nodes[i], &temp);
+ entries[i].swap(&temp); // The entry was already addref'd.
+ }
+ if (!ret)
+ return false;
+ } else {
+ // Get the next entry from the last list, and the actual entries for the
+ // elements on the other lists.
+ for (int i = 0; i < kListsToSearch; i++) {
+ EntryImpl* temp = NULL;
+ if (iterator->list == i) {
+ OpenFollowingEntryFromList(forward, iterator->list,
+ &iterator->nodes[i], &temp);
+ } else {
+ temp = GetEnumeratedEntry(iterator->nodes[i], false);
+ }
+
+ entries[i].swap(&temp); // The entry was already addref'd.
+ }
+ }
+
+ int newest = -1;
+ int oldest = -1;
+ Time access_times[kListsToSearch];
+ for (int i = 0; i < kListsToSearch; i++) {
+ if (entries[i].get()) {
+ access_times[i] = entries[i]->GetLastUsed();
+ if (newest < 0) {
+ DCHECK(oldest < 0);
+ newest = oldest = i;
+ continue;
+ }
+ if (access_times[i] > access_times[newest])
+ newest = i;
+ if (access_times[i] < access_times[oldest])
+ oldest = i;
+ }
+ }
+
+ if (newest < 0 || oldest < 0)
+ return false;
+
+ if (forward) {
+ entries[newest].swap(reinterpret_cast<EntryImpl**>(next_entry));
+ iterator->list = static_cast<Rankings::List>(newest);
+ } else {
+ entries[oldest].swap(reinterpret_cast<EntryImpl**>(next_entry));
+ iterator->list = static_cast<Rankings::List>(oldest);
+ }
+
+ *iter = iterator.release();
+ return true;
+}
+
+bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
+ CacheRankingsBlock** from_entry,
+ EntryImpl** next_entry) {
+ if (disabled_)
+ return false;
+
+ if (!new_eviction_ && Rankings::NO_USE != list)
+ return false;
+
+ Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
+ CacheRankingsBlock* next_block = forward ?
+ rankings_.GetNext(rankings.get(), list) :
+ rankings_.GetPrev(rankings.get(), list);
+ Rankings::ScopedRankingsBlock next(&rankings_, next_block);
+ *from_entry = NULL;
+
+ *next_entry = GetEnumeratedEntry(next.get(), false);
+ if (!*next_entry)
+ return false;
+
+ *from_entry = next.release();
+ return true;
+}
+
+EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
+ bool to_evict) {
+ if (!next || disabled_)
+ return NULL;
+
+ EntryImpl* entry;
+ bool dirty;
+ if (NewEntry(Addr(next->Data()->contents), &entry, &dirty))
+ return NULL;
+
+ if (dirty) {
+ // We cannot trust this entry. This code also releases the reference.
+ DestroyInvalidEntryFromEnumeration(entry);
+ return NULL;
+ }
+
+ // There is no need to store the entry to disk if we want to delete it.
+ if (!to_evict && !entry->Update()) {
+ entry->Release();
+ return NULL;
+ }
+
+ return entry;
+}
+
+bool BackendImpl::ResurrectEntry(EntryImpl* deleted_entry, Entry** entry) {
+ if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
+ deleted_entry->Release();
+ stats_.OnEvent(Stats::CREATE_MISS);
+ Trace("create entry miss ");
+ return false;
+ }
+
+ // We are attempting to create an entry and found out that the entry was
+ // previously deleted.
+
+ eviction_.OnCreateEntry(deleted_entry);
+ *entry = deleted_entry;
+
+ stats_.OnEvent(Stats::CREATE_HIT);
+ Trace("Resurrect entry hit ");
+ return true;
+}
+
+void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
+ LOG(WARNING) << "Destroying invalid entry.";
+ Trace("Destroying invalid entry 0x%p", entry);
+
+ entry->SetPointerForInvalidEntry(GetCurrentEntryId());
+
+ eviction_.OnDoomEntry(entry);
+ entry->InternalDoom();
+
+ if (!new_eviction_)
+ DecreaseNumEntries();
+ stats_.OnEvent(Stats::INVALID_ENTRY);
+}
+
+// This is kind of ugly. The entry may or may not be part of the cache index
+// table, and it may even have corrupt fields. If we just doom it, we may end up
+// deleting it twice (if all fields are right, and when looking up the parent of
+// chained entries we see this one... and we delete it because it is dirty). If
+// we ignore it, we may leave it here forever. So we're going to attempt to
+// delete it through the provided object, without touching the index table
+// (because we cannot just call MatchEntry()), and also attempt to delete it from
+// the table through the key: this may find a new entry (too bad), or an entry
+// that was just deleted and consider it a very corrupt entry.
+void BackendImpl::DestroyInvalidEntryFromEnumeration(EntryImpl* entry) {
+ std::string key = entry->GetKey();
+ entry->SetPointerForInvalidEntry(GetCurrentEntryId());
+ CacheAddr next_entry = entry->entry()->Data()->next;
+ if (!next_entry) {
+ DestroyInvalidEntry(entry);
+ entry->Release();
+ }
+ DoomEntry(key);
+
+ if (!next_entry)
+ return;
+
+ // We have a chained entry, so instead of destroying this entry first and then
+ // anything with this key, we just called DoomEntry() first. If that call
+ // deleted everything, |entry| has invalid data. Let's see if there is
+ // something else to do. We started with just a rankings node (we come from
+ // an enumeration), so that one may still be there.
+ CacheRankingsBlock* rankings = entry->rankings();
+ rankings->Load();
+ if (rankings->Data()->contents) {
+ // We still have something. Clean this up.
+ DestroyInvalidEntry(entry);
+ }
+ entry->Release();
+}
+
+void BackendImpl::AddStorageSize(int32 bytes) {
+ data_->header.num_bytes += bytes;
+ DCHECK(data_->header.num_bytes >= 0);
+
+ if (data_->header.num_bytes > max_size_)
+ eviction_.TrimCache(false);
+}
+
+void BackendImpl::SubstractStorageSize(int32 bytes) {
+ data_->header.num_bytes -= bytes;
+ DCHECK(data_->header.num_bytes >= 0);
+}
+
+void BackendImpl::IncreaseNumRefs() {
+ num_refs_++;
+ if (max_refs_ < num_refs_)
+ max_refs_ = num_refs_;
+}
+
+void BackendImpl::DecreaseNumRefs() {
+ DCHECK(num_refs_);
+ num_refs_--;
+
+ if (!num_refs_ && disabled_)
+ MessageLoop::current()->PostTask(FROM_HERE,
+ factory_.NewRunnableMethod(&BackendImpl::RestartCache));
+}
+
+void BackendImpl::IncreaseNumEntries() {
+ data_->header.num_entries++;
+ DCHECK(data_->header.num_entries > 0);
+}
+
+void BackendImpl::DecreaseNumEntries() {
+ data_->header.num_entries--;
+ if (data_->header.num_entries < 0) {
+ NOTREACHED();
+ data_->header.num_entries = 0;
+ }
+}
+
+void BackendImpl::LogStats() {
+ StatsItems stats;
+ GetStats(&stats);
+
+ for (size_t index = 0; index < stats.size(); index++) {
+ LOG(INFO) << stats[index].first << ": " << stats[index].second;
+ }
+}
+
+void BackendImpl::ReportStats() {
+ CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);
+ CACHE_UMA(COUNTS, "Size", 0, data_->header.num_bytes / (1024 * 1024));
+ CACHE_UMA(COUNTS, "MaxSize", 0, max_size_ / (1024 * 1024));
+
+ CACHE_UMA(COUNTS, "AverageOpenEntries", 0,
+ static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
+ CACHE_UMA(COUNTS, "MaxOpenEntries", 0,
+ static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
+ stats_.SetCounter(Stats::MAX_ENTRIES, 0);
+
+ if (!data_->header.create_time || !data_->header.lru.filled)
+ return;
+
+ // This is an up to date client that will report FirstEviction() data. After
+ // that event, start reporting this:
+
+ int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
+ CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
+
+ int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
+ stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));
+
+ // We may see users with no use_hours at this point if this is the first time
+ // we are running this code.
+ if (use_hours)
+ use_hours = total_hours - use_hours;
+
+ if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
+ return;
+
+ CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
+ CACHE_UMA(PERCENTAGE, "HitRatio", 0, stats_.GetHitRatio());
+
+ int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
+ CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));
+
+ int avg_size = data_->header.num_bytes / GetEntryCount();
+ CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
+
+ int large_entries_bytes = stats_.GetLargeEntriesSize();
+ int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
+ CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);
+
+ if (new_eviction_) {
+ CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
+ CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
+ data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
+ data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
+ data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
+ CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
+ data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
+ }
+
+ stats_.ResetRatios();
+ stats_.SetCounter(Stats::TRIM_ENTRY, 0);
+}
+
+void BackendImpl::UpgradeTo2_1() {
+ // 2.1 is basically the same as 2.0, except that new fields are actually
+ // updated by the new eviction algorithm.
+ DCHECK(0x20000 == data_->header.version);
+ data_->header.version = 0x20001;
+ data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
+}
+
+bool BackendImpl::CheckIndex() {
+ DCHECK(data_);
+
+ size_t current_size = index_->GetLength();
+ if (current_size < sizeof(Index)) {
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ if (new_eviction_) {
+ // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion >> 16 != data_->header.version >> 16) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+ if (kCurrentVersion == data_->header.version) {
+ // We need file version 2.1 for the new eviction algorithm.
+ UpgradeTo2_1();
+ }
+ } else {
+ if (kIndexMagic != data_->header.magic ||
+ kCurrentVersion != data_->header.version) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+ }
+
+ if (!data_->header.table_len) {
+ LOG(ERROR) << "Invalid table size";
+ return false;
+ }
+
+ if (current_size < GetIndexSize(data_->header.table_len) ||
+ data_->header.table_len & (kBaseTableLen - 1)) {
+ LOG(ERROR) << "Corrupt Index file";
+ return false;
+ }
+
+ AdjustMaxCacheSize(data_->header.table_len);
+
+ if (data_->header.num_bytes < 0) {
+ LOG(ERROR) << "Invalid cache (current) size";
+ return false;
+ }
+
+ if (data_->header.num_entries < 0) {
+ LOG(ERROR) << "Invalid number of entries";
+ return false;
+ }
+
+ if (!mask_)
+ mask_ = data_->header.table_len - 1;
+
+ return true;
+}
+
+int BackendImpl::CheckAllEntries() {
+ int num_dirty = 0;
+ int num_entries = 0;
+ DCHECK(mask_ < kuint32max);
+ for (int i = 0; i <= static_cast<int>(mask_); i++) {
+ Addr address(data_->table[i]);
+ if (!address.is_initialized())
+ continue;
+ for (;;) {
+ bool dirty;
+ EntryImpl* tmp;
+ int ret = NewEntry(address, &tmp, &dirty);
+ if (ret)
+ return ret;
+ scoped_refptr<EntryImpl> cache_entry;
+ cache_entry.swap(&tmp);
+
+ if (dirty)
+ num_dirty++;
+ else if (CheckEntry(cache_entry.get()))
+ num_entries++;
+ else
+ return ERR_INVALID_ENTRY;
+
+ address.set_value(cache_entry->GetNextAddress());
+ if (!address.is_initialized())
+ break;
+ }
+ }
+
+ if (num_entries + num_dirty != data_->header.num_entries) {
+ LOG(ERROR) << "Number of entries mismatch";
+ return ERR_NUM_ENTRIES_MISMATCH;
+ }
+
+ return num_dirty;
+}
+
+bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
+ RankingsNode* rankings = cache_entry->rankings()->Data();
+ return !rankings->dummy;
+}
+
+} // namespace disk_cache
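
For orientation, PreferedCacheSize() above picks the cache budget in bands of available disk space: 80% of free space below 80 MB free, the 80 MB default up to 800 MB, 10% of free space up to roughly 2 GB, a 200 MB cap up to roughly 8 GB, then 1% of free space (limited to kint32max). A standalone sketch that mirrors those thresholds with hypothetical sample inputs (not part of the diff above):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the banded policy of disk_cache::PreferedCacheSize() with
    // kDefaultCacheSize = 80 MB; the sample inputs below are made up.
    int32_t PreferredSize(int64_t available) {
      const int32_t kDefault = 80 * 1024 * 1024;
      if (available < kDefault)                              // Below 80 MB free.
        return static_cast<int32_t>(available * 8 / 10);
      if (available < static_cast<int64_t>(kDefault) * 10)   // Up to 800 MB free.
        return kDefault;
      if (available < static_cast<int64_t>(kDefault) * 25)   // Up to ~2 GB free.
        return static_cast<int32_t>(available / 10);
      if (available < static_cast<int64_t>(kDefault) * 100)  // Up to ~8 GB free.
        return kDefault * 5 / 2;
      const int64_t one_percent = available / 100;
      return one_percent > INT32_MAX ? INT32_MAX
                                     : static_cast<int32_t>(one_percent);
    }

    int main() {
      const int64_t kMB = 1024 * 1024;
      std::printf("%d\n", PreferredSize(60 * kMB));    // 50331648  (80% of 60 MB)
      std::printf("%d\n", PreferredSize(500 * kMB));   // 83886080  (the 80 MB default)
      std::printf("%d\n", PreferredSize(1500 * kMB));  // 157286400 (10% of free space)
      std::printf("%d\n", PreferredSize(5000 * kMB));  // 209715200 (capped at 200 MB)
      return 0;
    }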
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
new file mode 100644
index 00000000..6267facd
--- /dev/null
+++ b/net/disk_cache/backend_impl.h
@@ -0,0 +1,314 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_BACKEND_IMPL_H_
+#define NET_DISK_CACHE_BACKEND_IMPL_H_
+
+#include "base/file_path.h"
+#include "base/hash_tables.h"
+#include "base/timer.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/eviction.h"
+#include "net/disk_cache/rankings.h"
+#include "net/disk_cache/stats.h"
+#include "net/disk_cache/trace.h"
+
+namespace disk_cache {
+
+enum BackendFlags {
+ kNone = 0,
+ kMask = 1, // A mask (for the index table) was specified.
+ kMaxSize = 1 << 1, // A maximum size was provided.
+ kUnitTestMode = 1 << 2, // We are modifying the behavior for testing.
+ kUpgradeMode = 1 << 3, // This is the upgrade tool (dump).
+ kNewEviction = 1 << 4, // Use of new eviction was specified.
+ kNoRandom = 1 << 5, // Don't add randomness to the behavior.
+ kNoLoadProtection = 1 << 6 // Don't act conservatively under load.
+};
+
+// This class implements the Backend interface. An object of this
+// class handles the operations of the cache for a particular profile.
+class BackendImpl : public Backend {
+ friend class Eviction;
+ public:
+ explicit BackendImpl(const FilePath& path)
+ : path_(path), block_files_(path), mask_(0), max_size_(0),
+ cache_type_(net::DISK_CACHE), uma_report_(0), user_flags_(0),
+ init_(false), restarted_(false), unit_test_(false), read_only_(false),
+ new_eviction_(false), first_timer_(true),
+ ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)) {}
+ // mask can be used to limit the usable size of the hash table, for testing.
+ BackendImpl(const FilePath& path, uint32 mask)
+ : path_(path), block_files_(path), mask_(mask), max_size_(0),
+ cache_type_(net::DISK_CACHE), uma_report_(0), user_flags_(kMask),
+ init_(false), restarted_(false), unit_test_(false), read_only_(false),
+ new_eviction_(false), first_timer_(true),
+ ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)) {}
+ ~BackendImpl();
+
+ // Returns a new backend with the desired flags. See the declaration of
+ // CreateCacheBackend().
+ static Backend* CreateBackend(const FilePath& full_path, bool force,
+ int max_bytes, net::CacheType type,
+ BackendFlags flags);
+
+ // Performs general initialization for this current instance of the cache.
+ bool Init();
+
+ // Backend interface.
+ virtual int32 GetEntryCount() const;
+ virtual bool OpenEntry(const std::string& key, Entry** entry);
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback);
+ virtual bool CreateEntry(const std::string& key, Entry** entry);
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback);
+ virtual bool DoomEntry(const std::string& key);
+ virtual int DoomEntry(const std::string& key, CompletionCallback* callback);
+ virtual bool DoomAllEntries();
+ virtual int DoomAllEntries(CompletionCallback* callback);
+ virtual bool DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time);
+ virtual int DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ CompletionCallback* callback);
+ virtual bool DoomEntriesSince(const base::Time initial_time);
+ virtual int DoomEntriesSince(const base::Time initial_time,
+ CompletionCallback* callback);
+ virtual bool OpenNextEntry(void** iter, Entry** next_entry);
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ CompletionCallback* callback);
+ virtual void EndEnumeration(void** iter);
+ virtual void GetStats(StatsItems* stats);
+
+ // Sets the maximum size for the total amount of data stored by this instance.
+ bool SetMaxSize(int max_bytes);
+
+ // Sets the cache type for this backend.
+ void SetType(net::CacheType type);
+
+ // Returns the full name for an external storage file.
+ FilePath GetFileName(Addr address) const;
+
+ // Returns the actual file used to store a given (non-external) address.
+ MappedFile* File(Addr address);
+
+ // Creates an external storage file.
+ bool CreateExternalFile(Addr* address);
+
+ // Creates a new storage block of size block_count.
+ bool CreateBlock(FileType block_type, int block_count,
+ Addr* block_address);
+
+  // Deletes a given storage block. Setting |deep| to true zero-fills the
+  // related storage in addition to releasing the related block.
+ void DeleteBlock(Addr block_address, bool deep);
+
+ // Retrieves a pointer to the lru-related data.
+ LruData* GetLruData();
+
+ // Updates the ranking information for an entry.
+ void UpdateRank(EntryImpl* entry, bool modified);
+
+  // A node was recovered from a crash; it may not be in the index, so this
+  // method checks it and takes the appropriate action.
+ void RecoveredEntry(CacheRankingsBlock* rankings);
+
+ // Permanently deletes an entry, but still keeps track of it.
+ void InternalDoomEntry(EntryImpl* entry);
+
+ // Removes all references to this entry.
+ void RemoveEntry(EntryImpl* entry);
+
+ // This method must be called whenever an entry is released for the last time.
+ // |address| is the cache address of the entry.
+ void CacheEntryDestroyed(Addr address);
+
+ // If the data stored by the provided |rankings| points to an open entry,
+ // returns a pointer to that entry, otherwise returns NULL. Note that this
+ // method does NOT increase the ref counter for the entry.
+ EntryImpl* GetOpenEntry(CacheRankingsBlock* rankings) const;
+
+ // Returns the id being used on this run of the cache.
+ int32 GetCurrentEntryId() const;
+
+ // Returns the maximum size for a file to reside on the cache.
+ int MaxFileSize() const;
+
+ // A user data block is being created, extended or truncated.
+ void ModifyStorageSize(int32 old_size, int32 new_size);
+
+ // Logs requests that are denied due to being too big.
+ void TooMuchStorageRequested(int32 size);
+
+ // Returns true if this instance seems to be under heavy load.
+ bool IsLoaded() const;
+
+ // Returns the full histogram name, for the given base |name| and experiment,
+  // and the current cache type. The name will be "DiskCache.t.name_e", where t
+  // is the cache type and e is the provided |experiment|.
+ std::string HistogramName(const char* name, int experiment) const;
+
+ net::CacheType cache_type() const {
+ return cache_type_;
+ }
+
+ // Returns the group for this client, based on the current cache size.
+ int GetSizeGroup() const;
+
+ // Returns true if we should send histograms for this user again. The caller
+  // must call this function only once per run (because it always returns the
+  // same thing on a given run).
+ bool ShouldReportAgain();
+
+ // Reports some data when we filled up the cache.
+ void FirstEviction();
+
+ // Reports a critical error (and disables the cache).
+ void CriticalError(int error);
+
+ // Reports an uncommon, recoverable error.
+ void ReportError(int error);
+
+ // Called when an interesting event should be logged (counted).
+ void OnEvent(Stats::Counters an_event);
+
+ // Timer callback to calculate usage statistics.
+ void OnStatsTimer();
+
+ // Handles the pending asynchronous IO count.
+ void IncrementIoCount();
+ void DecrementIoCount();
+
+ // Sets internal parameters to enable unit testing mode.
+ void SetUnitTestMode();
+
+ // Sets internal parameters to enable upgrade mode (for internal tools).
+ void SetUpgradeMode();
+
+ // Sets the eviction algorithm to version 2.
+ void SetNewEviction();
+
+ // Sets an explicit set of BackendFlags.
+ void SetFlags(uint32 flags);
+
+ // Clears the counter of references to test handling of corruptions.
+ void ClearRefCountForTest();
+
+  // Performs a simple self-check, and returns the number of dirty items
+ // or an error code (negative value).
+ int SelfCheck();
+
+  // Same behavior as OpenNextEntry but walks the list from back to front.
+ bool OpenPrevEntry(void** iter, Entry** prev_entry);
+
+ private:
+ typedef base::hash_map<CacheAddr, EntryImpl*> EntriesMap;
+
+ // Creates a new backing file for the cache index.
+ bool CreateBackingStore(disk_cache::File* file);
+ bool InitBackingStore(bool* file_created);
+ void AdjustMaxCacheSize(int table_len);
+
+ // Deletes the cache and starts again.
+ void RestartCache();
+ void PrepareForRestart();
+
+ // Creates a new entry object and checks to see if it is dirty. Returns zero
+ // on success, or a disk_cache error on failure.
+ int NewEntry(Addr address, EntryImpl** entry, bool* dirty);
+
+ // Returns a given entry from the cache. The entry to match is determined by
+  // key and hash, and the returned entry may be the matched one or its parent
+ // on the list of entries with the same hash (or bucket).
+ EntryImpl* MatchEntry(const std::string& key, uint32 hash, bool find_parent);
+
+ // Opens the next or previous entry on a cache iteration.
+ bool OpenFollowingEntry(bool forward, void** iter, Entry** next_entry);
+
+  // Opens the next or previous entry on a single list. If successful,
+ // |from_entry| will be updated to point to the new entry, otherwise it will
+ // be set to NULL; in other words, it is used as an explicit iterator.
+ bool OpenFollowingEntryFromList(bool forward, Rankings::List list,
+ CacheRankingsBlock** from_entry,
+ EntryImpl** next_entry);
+
+  // Returns the entry pointed to by |next|. If we are trimming the cache,
+ // |to_evict| should be true so that we don't perform extra disk writes.
+ EntryImpl* GetEnumeratedEntry(CacheRankingsBlock* next, bool to_evict);
+
+ // Re-opens an entry that was previously deleted.
+ bool ResurrectEntry(EntryImpl* deleted_entry, Entry** entry);
+
+ void DestroyInvalidEntry(EntryImpl* entry);
+ void DestroyInvalidEntryFromEnumeration(EntryImpl* entry);
+
+ // Handles the used storage count.
+ void AddStorageSize(int32 bytes);
+ void SubstractStorageSize(int32 bytes);
+
+ // Update the number of referenced cache entries.
+ void IncreaseNumRefs();
+ void DecreaseNumRefs();
+ void IncreaseNumEntries();
+ void DecreaseNumEntries();
+
+ // Dumps current cache statistics to the log.
+ void LogStats();
+
+ // Send UMA stats.
+ void ReportStats();
+
+ // Upgrades the index file to version 2.1.
+ void UpgradeTo2_1();
+
+ // Performs basic checks on the index file. Returns false on failure.
+ bool CheckIndex();
+
+  // Part of the self test. Returns the number of dirty entries, or an error.
+ int CheckAllEntries();
+
+ // Part of the self test. Returns false if the entry is corrupt.
+ bool CheckEntry(EntryImpl* cache_entry);
+
+ scoped_refptr<MappedFile> index_; // The main cache index.
+ FilePath path_; // Path to the folder used as backing storage.
+ Index* data_; // Pointer to the index data.
+ BlockFiles block_files_; // Set of files used to store all data.
+ Rankings rankings_; // Rankings to be able to trim the cache.
+ uint32 mask_; // Binary mask to map a hash to the hash table.
+ int32 max_size_; // Maximum data size for this instance.
+ Eviction eviction_; // Handler of the eviction algorithm.
+ EntriesMap open_entries_; // Map of open entries.
+ int num_refs_; // Number of referenced cache entries.
+ int max_refs_; // Max number of referenced cache entries.
+ int num_pending_io_; // Number of pending IO operations.
+ net::CacheType cache_type_;
+  int uma_report_;             // Controls transmission of UMA data.
+ uint32 user_flags_; // Flags set by the user.
+  bool init_;                  // Controls the initialization of the system.
+ bool restarted_;
+ bool unit_test_;
+ bool read_only_; // Prevents updates of the rankings data (used by tools).
+ bool disabled_;
+ bool new_eviction_; // What eviction algorithm should be used.
+ bool first_timer_; // True if the timer has not been called.
+
+  Stats stats_;  // Usage statistics.
+ base::RepeatingTimer<BackendImpl> timer_; // Usage timer.
+ scoped_refptr<TraceObject> trace_object_; // Inits internal tracing.
+ ScopedRunnableMethodFactory<BackendImpl> factory_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(BackendImpl);
+};
+
+// Returns the preferred max cache size given the available disk space.
+int PreferedCacheSize(int64 available);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BACKEND_IMPL_H_
diff --git a/net/disk_cache/backend_unittest.cc b/net/disk_cache/backend_unittest.cc
new file mode 100644
index 00000000..9d4194b1
--- /dev/null
+++ b/net/disk_cache/backend_unittest.cc
@@ -0,0 +1,1581 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/file_util.h"
+#include "base/path_service.h"
+#include "base/platform_thread.h"
+#include "base/string_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/mapped_file.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::Time;
+
+namespace {
+
+// Copies a set of cache files from the data folder to the test folder.
+bool CopyTestCache(const std::wstring& name) {
+ FilePath path;
+ PathService::Get(base::DIR_SOURCE_ROOT, &path);
+ path = path.AppendASCII("net");
+ path = path.AppendASCII("data");
+ path = path.AppendASCII("cache_tests");
+ path = path.Append(FilePath::FromWStringHack(name));
+
+ FilePath dest = GetCacheFilePath();
+ if (!DeleteCache(dest))
+ return false;
+ return file_util::CopyDirectory(path, dest, false);
+}
+
+} // namespace
+
+// Tests that can run with different types of caches.
+class DiskCacheBackendTest : public DiskCacheTestWithCache {
+ protected:
+ void BackendBasics();
+ void BackendKeying();
+ void BackendSetSize();
+ void BackendLoad();
+ void BackendValidEntry();
+ void BackendInvalidEntry();
+ void BackendInvalidEntryRead();
+ void BackendInvalidEntryWithLoad();
+ void BackendTrimInvalidEntry();
+ void BackendTrimInvalidEntry2();
+ void BackendEnumerations();
+ void BackendEnumerations2();
+ void BackendInvalidEntryEnumeration();
+ void BackendFixEnumerators();
+ void BackendDoomRecent();
+ void BackendDoomBetween();
+ void BackendTransaction(const std::wstring& name, int num_entries, bool load);
+ void BackendRecoverInsert();
+ void BackendRecoverRemove();
+ void BackendInvalidEntry2();
+ void BackendNotMarkedButDirty(const std::wstring& name);
+ void BackendDoomAll();
+ void BackendDoomAll2();
+ void BackendInvalidRankings();
+ void BackendInvalidRankings2();
+ void BackendDisable();
+ void BackendDisable2();
+ void BackendDisable3();
+ void BackendDisable4();
+};
+
+void DiskCacheBackendTest::BackendBasics() {
+ InitCache();
+ disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
+ EXPECT_FALSE(cache_->OpenEntry("the first key", &entry1));
+ ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+ ASSERT_TRUE(NULL != entry1);
+ entry1->Close();
+ entry1 = NULL;
+
+ ASSERT_TRUE(cache_->OpenEntry("the first key", &entry1));
+ ASSERT_TRUE(NULL != entry1);
+ entry1->Close();
+ entry1 = NULL;
+
+ EXPECT_FALSE(cache_->CreateEntry("the first key", &entry1));
+ ASSERT_TRUE(cache_->OpenEntry("the first key", &entry1));
+ EXPECT_FALSE(cache_->OpenEntry("some other key", &entry2));
+ ASSERT_TRUE(cache_->CreateEntry("some other key", &entry2));
+ ASSERT_TRUE(NULL != entry1);
+ ASSERT_TRUE(NULL != entry2);
+ EXPECT_EQ(2, cache_->GetEntryCount());
+
+ disk_cache::Entry* entry3 = NULL;
+ ASSERT_TRUE(cache_->OpenEntry("some other key", &entry3));
+ ASSERT_TRUE(NULL != entry3);
+ EXPECT_TRUE(entry2 == entry3);
+ EXPECT_EQ(2, cache_->GetEntryCount());
+
+ EXPECT_TRUE(cache_->DoomEntry("some other key"));
+ EXPECT_EQ(1, cache_->GetEntryCount());
+ entry1->Close();
+ entry2->Close();
+ entry3->Close();
+
+ EXPECT_TRUE(cache_->DoomEntry("the first key"));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+
+ ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+ ASSERT_TRUE(cache_->CreateEntry("some other key", &entry2));
+ entry1->Doom();
+ entry1->Close();
+ EXPECT_TRUE(cache_->DoomEntry("some other key"));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ entry2->Close();
+}
+
+TEST_F(DiskCacheBackendTest, Basics) {
+ BackendBasics();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
+ SetNewEviction();
+ BackendBasics();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
+ SetMemoryOnlyMode();
+ BackendBasics();
+}
+
+void DiskCacheBackendTest::BackendKeying() {
+ InitCache();
+ const char* kName1 = "the first key";
+ const char* kName2 = "the first Key";
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_TRUE(cache_->CreateEntry(kName1, &entry1));
+
+ ASSERT_TRUE(cache_->CreateEntry(kName2, &entry2));
+ EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
+ entry2->Close();
+
+ char buffer[30];
+ base::strlcpy(buffer, kName1, arraysize(buffer));
+ ASSERT_TRUE(cache_->OpenEntry(buffer, &entry2));
+ EXPECT_TRUE(entry1 == entry2);
+ entry2->Close();
+
+ base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
+ ASSERT_TRUE(cache_->OpenEntry(buffer + 1, &entry2));
+ EXPECT_TRUE(entry1 == entry2);
+ entry2->Close();
+
+ base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
+ ASSERT_TRUE(cache_->OpenEntry(buffer + 3, &entry2));
+ EXPECT_TRUE(entry1 == entry2);
+ entry2->Close();
+
+ // Now verify long keys.
+ char buffer2[20000];
+ memset(buffer2, 's', sizeof(buffer2));
+ buffer2[1023] = '\0';
+ ASSERT_TRUE(cache_->CreateEntry(buffer2, &entry2)) << "key on block file";
+ entry2->Close();
+
+ buffer2[1023] = 'g';
+ buffer2[19999] = '\0';
+ ASSERT_TRUE(cache_->CreateEntry(buffer2, &entry2)) << "key on external file";
+ entry2->Close();
+ entry1->Close();
+}
+
+TEST_F(DiskCacheBackendTest, Keying) {
+ BackendKeying();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
+ SetNewEviction();
+ BackendKeying();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
+ SetMemoryOnlyMode();
+ BackendKeying();
+}
+
+TEST_F(DiskCacheBackendTest, ExternalFiles) {
+ InitCache();
+  // First, let's create a file in the folder.
+ FilePath filename = GetCacheFilePath().AppendASCII("f_000001");
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buffer1->data(), kSize, false);
+ ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize));
+
+ // Now let's create a file with the cache.
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry("key", &entry));
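+  // Writing at offset 20000 pushes the stream into an external file; the cache
+  // should pick a new file name rather than reuse the existing f_000001.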
+ ASSERT_EQ(0, entry->WriteData(0, 20000, buffer1, 0, NULL, false));
+ entry->Close();
+
+ // And verify that the first file is still there.
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize);
+ ASSERT_EQ(kSize, file_util::ReadFile(filename, buffer2->data(), kSize));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
+}
+
+TEST_F(DiskCacheTest, ShutdownWithPendingIO) {
+ TestCompletionCallback callback;
+
+ {
+ FilePath path = GetCacheFilePath();
+ ASSERT_TRUE(DeleteCache(path));
+
+ disk_cache::Backend* cache =
+ disk_cache::CreateCacheBackend(path, false, 0, net::DISK_CACHE);
+
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache->CreateEntry("some key", &entry));
+
+ const int kSize = 25000;
+ scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buffer->data(), kSize, false);
+
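+    // Keep issuing writes until one of them completes asynchronously, so the
+    // backend still has IO pending when it is destroyed below.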
+ for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
+ int rv = entry->WriteData(0, i, buffer, kSize, &callback, false);
+ if (rv == net::ERR_IO_PENDING)
+ break;
+ EXPECT_EQ(kSize, rv);
+ }
+
+ entry->Close();
+
+ // The cache destructor will see one pending operation here.
+ delete cache;
+ }
+
+ MessageLoop::current()->RunAllPending();
+}
+
+void DiskCacheBackendTest::BackendSetSize() {
+ SetDirectMode();
+ const int cache_size = 0x10000; // 64 kB
+ SetMaxSize(cache_size);
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(first, &entry));
+
+ scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(cache_size);
+ memset(buffer->data(), 0, cache_size);
+ EXPECT_EQ(cache_size / 10, entry->WriteData(0, 0, buffer, cache_size / 10,
+ NULL, false)) << "normal file";
+
+ EXPECT_EQ(net::ERR_FAILED, entry->WriteData(1, 0, buffer, cache_size / 5,
+ NULL, false)) << "file size above the limit";
+
+ // By doubling the total size, we make this file cacheable.
+ SetMaxSize(cache_size * 2);
+ EXPECT_EQ(cache_size / 5, entry->WriteData(1, 0, buffer, cache_size / 5,
+ NULL, false));
+
+  // Let's fill up the cache!
+ SetMaxSize(cache_size * 10);
+ EXPECT_EQ(cache_size * 3 / 4, entry->WriteData(0, 0, buffer,
+ cache_size * 3 / 4, NULL, false));
+ entry->Close();
+
+ SetMaxSize(cache_size);
+
+ // The cache is 95% full.
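+  // (3/4 of the limit on stream 0 plus 1/5 on stream 1.)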
+
+ ASSERT_TRUE(cache_->CreateEntry(second, &entry));
+ EXPECT_EQ(cache_size / 10, entry->WriteData(0, 0, buffer, cache_size / 10,
+ NULL, false)) << "trim the cache";
+ entry->Close();
+
+ EXPECT_FALSE(cache_->OpenEntry(first, &entry));
+ ASSERT_TRUE(cache_->OpenEntry(second, &entry));
+ EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
+ entry->Close();
+}
+
+TEST_F(DiskCacheBackendTest, SetSize) {
+ BackendSetSize();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
+ SetNewEviction();
+ BackendSetSize();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
+ SetMemoryOnlyMode();
+ BackendSetSize();
+}
+
+void DiskCacheBackendTest::BackendLoad() {
+ InitCache();
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ disk_cache::Entry* entries[100];
+ for (int i = 0; i < 100; i++) {
+ std::string key = GenerateKey(true);
+ ASSERT_TRUE(cache_->CreateEntry(key, &entries[i]));
+ }
+ EXPECT_EQ(100, cache_->GetEntryCount());
+
+ for (int i = 0; i < 100; i++) {
+ int source1 = rand() % 100;
+ int source2 = rand() % 100;
+ disk_cache::Entry* temp = entries[source1];
+ entries[source1] = entries[source2];
+ entries[source2] = temp;
+ }
+
+ for (int i = 0; i < 100; i++) {
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->OpenEntry(entries[i]->GetKey(), &entry));
+ EXPECT_TRUE(entry == entries[i]);
+ entry->Close();
+ entries[i]->Doom();
+ entries[i]->Close();
+ }
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, Load) {
+ // Work with a tiny index table (16 entries)
+ SetMask(0xf);
+ SetMaxSize(0x100000);
+ BackendLoad();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
+ SetNewEviction();
+ // Work with a tiny index table (16 entries)
+ SetMask(0xf);
+ SetMaxSize(0x100000);
+ BackendLoad();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
+ // Work with a tiny index table (16 entries)
+ SetMaxSize(0x100000);
+ SetMemoryOnlyMode();
+ BackendLoad();
+}
+
+// Before looking for invalid entries, let's check a valid entry.
+void DiskCacheBackendTest::BackendValidEntry() {
+ SetDirectMode();
+ InitCache();
+
+ std::string key("Some key");
+ disk_cache::Entry* entry1;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry1));
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize);
+ memset(buffer1->data(), 0, kSize);
+ base::strlcpy(buffer1->data(), "And the data to save", kSize);
+ EXPECT_EQ(kSize, entry1->WriteData(0, 0, buffer1, kSize, NULL, false));
+ entry1->Close();
+ SimulateCrash();
+
+ ASSERT_TRUE(cache_->OpenEntry(key, &entry1));
+
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize);
+ memset(buffer2->data(), 0, kSize);
+ EXPECT_EQ(kSize, entry1->ReadData(0, 0, buffer2, kSize, NULL));
+ entry1->Close();
+ EXPECT_STREQ(buffer1->data(), buffer2->data());
+}
+
+TEST_F(DiskCacheBackendTest, ValidEntry) {
+ BackendValidEntry();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
+ SetNewEviction();
+ BackendValidEntry();
+}
+
+// The same logic of the previous test (ValidEntry), but this time force the
+// entry to be invalid, simulating a crash in the middle.
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendInvalidEntry() {
+ // Use the implementation directly... we need to simulate a crash.
+ SetDirectMode();
+ InitCache();
+
+ std::string key("Some key");
+ disk_cache::Entry* entry1;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry1));
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize);
+ memset(buffer1->data(), 0, kSize);
+ base::strlcpy(buffer1->data(), "And the data to save", kSize);
+ EXPECT_EQ(kSize, entry1->WriteData(0, 0, buffer1, kSize, NULL, false));
+ SimulateCrash();
+
+ EXPECT_FALSE(cache_->OpenEntry(key, &entry1));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+// This and the other intentionally leaky tests below are excluded from
+// purify and valgrind runs by naming them in the files
+// net/data/purify/net_unittests.exe.gtest.txt and
+// net/data/valgrind/net_unittests.gtest.txt
+// The scripts tools/{purify,valgrind}/chrome_tests.sh
+// read those files and pass the appropriate --gtest_filter to net_unittests.
+TEST_F(DiskCacheBackendTest, InvalidEntry) {
+ BackendInvalidEntry();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
+ SetNewEviction();
+ BackendInvalidEntry();
+}
+
+// Almost the same test, but this time crash the cache after reading an entry.
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendInvalidEntryRead() {
+ // Use the implementation directly... we need to simulate a crash.
+ SetDirectMode();
+ InitCache();
+
+ std::string key("Some key");
+ disk_cache::Entry* entry1;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry1));
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize);
+ memset(buffer1->data(), 0, kSize);
+ base::strlcpy(buffer1->data(), "And the data to save", kSize);
+ EXPECT_EQ(kSize, entry1->WriteData(0, 0, buffer1, kSize, NULL, false));
+ entry1->Close();
+ ASSERT_TRUE(cache_->OpenEntry(key, &entry1));
+ EXPECT_EQ(kSize, entry1->ReadData(0, 0, buffer1, kSize, NULL));
+
+ SimulateCrash();
+
+ EXPECT_FALSE(cache_->OpenEntry(key, &entry1));
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
+ BackendInvalidEntryRead();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
+ SetNewEviction();
+ BackendInvalidEntryRead();
+}
+
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
+ // Work with a tiny index table (16 entries)
+ SetMask(0xf);
+ SetMaxSize(0x100000);
+ InitCache();
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ const int kNumEntries = 100;
+ disk_cache::Entry* entries[kNumEntries];
+ for (int i = 0; i < kNumEntries; i++) {
+ std::string key = GenerateKey(true);
+ ASSERT_TRUE(cache_->CreateEntry(key, &entries[i]));
+ }
+ EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
+
+ for (int i = 0; i < kNumEntries; i++) {
+ int source1 = rand() % kNumEntries;
+ int source2 = rand() % kNumEntries;
+ disk_cache::Entry* temp = entries[source1];
+ entries[source1] = entries[source2];
+ entries[source2] = temp;
+ }
+
+ std::string keys[kNumEntries];
+ for (int i = 0; i < kNumEntries; i++) {
+ keys[i] = entries[i]->GetKey();
+ if (i < kNumEntries / 2)
+ entries[i]->Close();
+ }
+
+ SimulateCrash();
+
+ for (int i = kNumEntries / 2; i < kNumEntries; i++) {
+ disk_cache::Entry* entry;
+ EXPECT_FALSE(cache_->OpenEntry(keys[i], &entry));
+ }
+
+ for (int i = 0; i < kNumEntries / 2; i++) {
+ disk_cache::Entry* entry;
+ EXPECT_TRUE(cache_->OpenEntry(keys[i], &entry));
+ entry->Close();
+ }
+
+ EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
+ BackendInvalidEntryWithLoad();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
+ SetNewEviction();
+ BackendInvalidEntryWithLoad();
+}
+
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendTrimInvalidEntry() {
+ // Use the implementation directly... we need to simulate a crash.
+ SetDirectMode();
+
+ const int kSize = 0x3000; // 12 kB
+ SetMaxSize(kSize * 10);
+ InitCache();
+
+ std::string first("some key");
+ std::string second("something else");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(first, &entry));
+
+ scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(kSize);
+ memset(buffer->data(), 0, kSize);
+ EXPECT_EQ(kSize, entry->WriteData(0, 0, buffer, kSize, NULL, false));
+
+ // Simulate a crash.
+ SimulateCrash();
+
+ ASSERT_TRUE(cache_->CreateEntry(second, &entry));
+ EXPECT_EQ(kSize, entry->WriteData(0, 0, buffer, kSize, NULL, false));
+
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ SetMaxSize(kSize);
+ entry->Close(); // Trim the cache.
+
+  // If we evicted the entry in less than 20 ms, we have one entry in the cache;
+ // if it took more than that, we posted a task and we'll delete the second
+ // entry too.
+ MessageLoop::current()->RunAllPending();
+ EXPECT_GE(1, cache_->GetEntryCount());
+ EXPECT_FALSE(cache_->OpenEntry(first, &entry));
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
+ BackendTrimInvalidEntry();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
+ SetNewEviction();
+ BackendTrimInvalidEntry();
+}
+
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
+ // Use the implementation directly... we need to simulate a crash.
+ SetDirectMode();
+ SetMask(0xf); // 16-entry table.
+
+ const int kSize = 0x3000; // 12 kB
+ SetMaxSize(kSize * 40);
+ InitCache();
+
+ scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(kSize);
+ memset(buffer->data(), 0, kSize);
+ disk_cache::Entry* entry;
+
+ // Writing 32 entries to this cache chains most of them.
+ for (int i = 0; i < 32; i++) {
+ std::string key(StringPrintf("some key %d", i));
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+ EXPECT_EQ(kSize, entry->WriteData(0, 0, buffer, kSize, NULL, false));
+ entry->Close();
+ ASSERT_TRUE(cache_->OpenEntry(key, &entry));
+ // Note that we are not closing the entries.
+ }
+
+ // Simulate a crash.
+ SimulateCrash();
+
+ ASSERT_TRUE(cache_->CreateEntry("Something else", &entry));
+ EXPECT_EQ(kSize, entry->WriteData(0, 0, buffer, kSize, NULL, false));
+
+ EXPECT_EQ(33, cache_->GetEntryCount());
+ SetMaxSize(kSize);
+
+ // For the new eviction code, all corrupt entries are on the second list so
+  // they are not going away that easily.
+ if (new_eviction_)
+ cache_->DoomAllEntries();
+
+ entry->Close(); // Trim the cache.
+
+ // We may abort the eviction before cleaning up everything.
+ MessageLoop::current()->RunAllPending();
+ EXPECT_GE(30, cache_->GetEntryCount());
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
+ BackendTrimInvalidEntry2();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
+ SetNewEviction();
+ BackendTrimInvalidEntry2();
+}
+
+void DiskCacheBackendTest::BackendEnumerations() {
+ InitCache();
+ Time initial = Time::Now();
+ int seed = static_cast<int>(initial.ToInternalValue());
+ srand(seed);
+
+ const int kNumEntries = 100;
+ for (int i = 0; i < kNumEntries; i++) {
+ std::string key = GenerateKey(true);
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+ entry->Close();
+ }
+ EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
+ Time final = Time::Now();
+
+ disk_cache::Entry* entry;
+ void* iter = NULL;
+ int count = 0;
+ Time last_modified[kNumEntries];
+ Time last_used[kNumEntries];
+ while (cache_->OpenNextEntry(&iter, &entry)) {
+ ASSERT_TRUE(NULL != entry);
+ if (count < kNumEntries) {
+ last_modified[count] = entry->GetLastModified();
+ last_used[count] = entry->GetLastUsed();
+ EXPECT_TRUE(initial <= last_modified[count]);
+ EXPECT_TRUE(final >= last_modified[count]);
+ }
+
+ entry->Close();
+ count++;
+  }
+ EXPECT_EQ(kNumEntries, count);
+
+ iter = NULL;
+ count = 0;
+ // The previous enumeration should not have changed the timestamps.
+ while (cache_->OpenNextEntry(&iter, &entry)) {
+ ASSERT_TRUE(NULL != entry);
+ if (count < kNumEntries) {
+ EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
+ EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
+ }
+ entry->Close();
+ count++;
+  }
+ EXPECT_EQ(kNumEntries, count);
+}
+
+TEST_F(DiskCacheBackendTest, Enumerations) {
+ BackendEnumerations();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
+ SetNewEviction();
+ BackendEnumerations();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
+ SetMemoryOnlyMode();
+ BackendEnumerations();
+}
+
+// Verifies enumerations while entries are open.
+void DiskCacheBackendTest::BackendEnumerations2() {
+ InitCache();
+ const std::string first("first");
+ const std::string second("second");
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_TRUE(cache_->CreateEntry(first, &entry1));
+ entry1->Close();
+ ASSERT_TRUE(cache_->CreateEntry(second, &entry2));
+ entry2->Close();
+
+ // Make sure that the timestamp is not the same.
+ PlatformThread::Sleep(20);
+ ASSERT_TRUE(cache_->OpenEntry(second, &entry1));
+ void* iter = NULL;
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter, &entry2));
+ ASSERT_EQ(entry2->GetKey(), second);
+
+ // Two entries and the iterator pointing at "first".
+ entry1->Close();
+ entry2->Close();
+
+  // The iterator should still be valid, so we should not crash.
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter, &entry2));
+ ASSERT_EQ(entry2->GetKey(), first);
+ entry2->Close();
+ cache_->EndEnumeration(&iter);
+}
+
+TEST_F(DiskCacheBackendTest, Enumerations2) {
+ BackendEnumerations2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
+ SetNewEviction();
+ BackendEnumerations2();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
+ SetMemoryOnlyMode();
+ BackendEnumerations2();
+}
+
+// Verify handling of invalid entries while doing enumerations.
+// We'll be leaking memory from this test.
+void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
+ // Use the implementation directly... we need to simulate a crash.
+ SetDirectMode();
+ InitCache();
+
+ std::string key("Some key");
+ disk_cache::Entry *entry, *entry1, *entry2;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry1));
+
+ const int kSize = 50;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize);
+ memset(buffer1->data(), 0, kSize);
+ base::strlcpy(buffer1->data(), "And the data to save", kSize);
+ EXPECT_EQ(kSize, entry1->WriteData(0, 0, buffer1, kSize, NULL, false));
+ entry1->Close();
+ ASSERT_TRUE(cache_->OpenEntry(key, &entry1));
+ EXPECT_EQ(kSize, entry1->ReadData(0, 0, buffer1, kSize, NULL));
+
+ std::string key2("Another key");
+ ASSERT_TRUE(cache_->CreateEntry(key2, &entry2));
+ entry2->Close();
+ ASSERT_EQ(2, cache_->GetEntryCount());
+
+ SimulateCrash();
+
+ void* iter = NULL;
+ int count = 0;
+ while (cache_->OpenNextEntry(&iter, &entry)) {
+ ASSERT_TRUE(NULL != entry);
+ EXPECT_EQ(key2, entry->GetKey());
+ entry->Close();
+ count++;
+  }
+ EXPECT_EQ(1, count);
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
+ BackendInvalidEntryEnumeration();
+}
+
+// We'll be leaking memory from this test.
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
+ SetNewEviction();
+ BackendInvalidEntryEnumeration();
+}
+
+// Tests that if for some reason entries are modified close to existing cache
+// iterators, we don't generate fatal errors or reset the cache.
+void DiskCacheBackendTest::BackendFixEnumerators() {
+ InitCache();
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ const int kNumEntries = 10;
+ for (int i = 0; i < kNumEntries; i++) {
+ std::string key = GenerateKey(true);
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+ entry->Close();
+ }
+ EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
+
+ disk_cache::Entry *entry1, *entry2;
+ void* iter1 = NULL;
+ void* iter2 = NULL;
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter1, &entry1));
+ ASSERT_TRUE(NULL != entry1);
+ entry1->Close();
+ entry1 = NULL;
+
+ // Let's go to the middle of the list.
+ for (int i = 0; i < kNumEntries / 2; i++) {
+ if (entry1)
+ entry1->Close();
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter1, &entry1));
+ ASSERT_TRUE(NULL != entry1);
+
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter2, &entry2));
+ ASSERT_TRUE(NULL != entry2);
+ entry2->Close();
+ }
+
+  // Messing with entry1 will modify entry2->next.
+ entry1->Doom();
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter2, &entry2));
+ ASSERT_TRUE(NULL != entry2);
+
+ // The link entry2->entry1 should be broken.
+ EXPECT_NE(entry2->GetKey(), entry1->GetKey());
+ entry1->Close();
+ entry2->Close();
+
+ // And the second iterator should keep working.
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter2, &entry2));
+ ASSERT_TRUE(NULL != entry2);
+ entry2->Close();
+
+ cache_->EndEnumeration(&iter1);
+ cache_->EndEnumeration(&iter2);
+}
+
+TEST_F(DiskCacheBackendTest, FixEnumerators) {
+ BackendFixEnumerators();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
+ SetNewEviction();
+ BackendFixEnumerators();
+}
+
+void DiskCacheBackendTest::BackendDoomRecent() {
+ InitCache();
+ Time initial = Time::Now();
+
+ disk_cache::Entry *entry;
+ ASSERT_TRUE(cache_->CreateEntry("first", &entry));
+ entry->Close();
+ ASSERT_TRUE(cache_->CreateEntry("second", &entry));
+ entry->Close();
+
+ PlatformThread::Sleep(20);
+ Time middle = Time::Now();
+
+ ASSERT_TRUE(cache_->CreateEntry("third", &entry));
+ entry->Close();
+ ASSERT_TRUE(cache_->CreateEntry("fourth", &entry));
+ entry->Close();
+
+ PlatformThread::Sleep(20);
+ Time final = Time::Now();
+
+ ASSERT_EQ(4, cache_->GetEntryCount());
+ EXPECT_TRUE(cache_->DoomEntriesSince(final));
+ ASSERT_EQ(4, cache_->GetEntryCount());
+
+ EXPECT_TRUE(cache_->DoomEntriesSince(middle));
+ ASSERT_EQ(2, cache_->GetEntryCount());
+
+ ASSERT_TRUE(cache_->OpenEntry("second", &entry));
+ entry->Close();
+}
+
+TEST_F(DiskCacheBackendTest, DoomRecent) {
+ BackendDoomRecent();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
+ SetNewEviction();
+ BackendDoomRecent();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
+ SetMemoryOnlyMode();
+ BackendDoomRecent();
+}
+
+void DiskCacheBackendTest::BackendDoomBetween() {
+ InitCache();
+ Time initial = Time::Now();
+
+ disk_cache::Entry *entry;
+ ASSERT_TRUE(cache_->CreateEntry("first", &entry));
+ entry->Close();
+
+ PlatformThread::Sleep(20);
+ Time middle_start = Time::Now();
+
+ ASSERT_TRUE(cache_->CreateEntry("second", &entry));
+ entry->Close();
+ ASSERT_TRUE(cache_->CreateEntry("third", &entry));
+ entry->Close();
+
+ PlatformThread::Sleep(20);
+ Time middle_end = Time::Now();
+
+ ASSERT_TRUE(cache_->CreateEntry("fourth", &entry));
+ entry->Close();
+ ASSERT_TRUE(cache_->OpenEntry("fourth", &entry));
+ entry->Close();
+
+ PlatformThread::Sleep(20);
+ Time final = Time::Now();
+
+ ASSERT_EQ(4, cache_->GetEntryCount());
+ EXPECT_TRUE(cache_->DoomEntriesBetween(middle_start, middle_end));
+ ASSERT_EQ(2, cache_->GetEntryCount());
+
+ ASSERT_TRUE(cache_->OpenEntry("fourth", &entry));
+ entry->Close();
+
+ EXPECT_TRUE(cache_->DoomEntriesBetween(middle_start, final));
+ ASSERT_EQ(1, cache_->GetEntryCount());
+
+ ASSERT_TRUE(cache_->OpenEntry("first", &entry));
+ entry->Close();
+}
+
+TEST_F(DiskCacheBackendTest, DoomBetween) {
+ BackendDoomBetween();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
+ SetNewEviction();
+ BackendDoomBetween();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
+ SetMemoryOnlyMode();
+ BackendDoomBetween();
+}
+
+void DiskCacheBackendTest::BackendTransaction(const std::wstring& name,
+ int num_entries, bool load) {
+ success_ = false;
+ ASSERT_TRUE(CopyTestCache(name));
+ DisableFirstCleanup();
+
+ if (load) {
+ SetMask(0xf);
+ SetMaxSize(0x100000);
+ } else {
+ // Clear the settings from the previous run.
+ SetMask(0);
+ SetMaxSize(0);
+ }
+
+ InitCache();
+ ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());
+
+ std::string key("the first key");
+ disk_cache::Entry* entry1;
+ ASSERT_FALSE(cache_->OpenEntry(key, &entry1));
+
+ int actual = cache_->GetEntryCount();
+ if (num_entries != actual) {
+ ASSERT_TRUE(load);
+ // If there is a heavy load, inserting an entry will make another entry
+ // dirty (on the hash bucket) so two entries are removed.
+ ASSERT_EQ(num_entries - 1, actual);
+ }
+
+ delete cache_;
+ cache_ = NULL;
+ cache_impl_ = NULL;
+
+ ASSERT_TRUE(CheckCacheIntegrity(GetCacheFilePath(), new_eviction_));
+ success_ = true;
+}
+
+void DiskCacheBackendTest::BackendRecoverInsert() {
+ // Tests with an empty cache.
+ BackendTransaction(L"insert_empty1", 0, false);
+ ASSERT_TRUE(success_) << "insert_empty1";
+ BackendTransaction(L"insert_empty2", 0, false);
+ ASSERT_TRUE(success_) << "insert_empty2";
+ BackendTransaction(L"insert_empty3", 0, false);
+ ASSERT_TRUE(success_) << "insert_empty3";
+
+ // Tests with one entry on the cache.
+ BackendTransaction(L"insert_one1", 1, false);
+ ASSERT_TRUE(success_) << "insert_one1";
+ BackendTransaction(L"insert_one2", 1, false);
+ ASSERT_TRUE(success_) << "insert_one2";
+ BackendTransaction(L"insert_one3", 1, false);
+ ASSERT_TRUE(success_) << "insert_one3";
+
+ // Tests with one hundred entries on the cache, tiny index.
+ BackendTransaction(L"insert_load1", 100, true);
+ ASSERT_TRUE(success_) << "insert_load1";
+ BackendTransaction(L"insert_load2", 100, true);
+ ASSERT_TRUE(success_) << "insert_load2";
+}
+
+TEST_F(DiskCacheBackendTest, RecoverInsert) {
+ BackendRecoverInsert();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
+ SetNewEviction();
+ BackendRecoverInsert();
+}
+
+void DiskCacheBackendTest::BackendRecoverRemove() {
+ // Removing the only element.
+ BackendTransaction(L"remove_one1", 0, false);
+ ASSERT_TRUE(success_) << "remove_one1";
+ BackendTransaction(L"remove_one2", 0, false);
+ ASSERT_TRUE(success_) << "remove_one2";
+ BackendTransaction(L"remove_one3", 0, false);
+ ASSERT_TRUE(success_) << "remove_one3";
+
+ // Removing the head.
+ BackendTransaction(L"remove_head1", 1, false);
+ ASSERT_TRUE(success_) << "remove_head1";
+ BackendTransaction(L"remove_head2", 1, false);
+ ASSERT_TRUE(success_) << "remove_head2";
+ BackendTransaction(L"remove_head3", 1, false);
+ ASSERT_TRUE(success_) << "remove_head3";
+
+ // Removing the tail.
+ BackendTransaction(L"remove_tail1", 1, false);
+ ASSERT_TRUE(success_) << "remove_tail1";
+ BackendTransaction(L"remove_tail2", 1, false);
+ ASSERT_TRUE(success_) << "remove_tail2";
+ BackendTransaction(L"remove_tail3", 1, false);
+ ASSERT_TRUE(success_) << "remove_tail3";
+
+ // Removing with one hundred entries on the cache, tiny index.
+ BackendTransaction(L"remove_load1", 100, true);
+ ASSERT_TRUE(success_) << "remove_load1";
+ BackendTransaction(L"remove_load2", 100, true);
+ ASSERT_TRUE(success_) << "remove_load2";
+ BackendTransaction(L"remove_load3", 100, true);
+ ASSERT_TRUE(success_) << "remove_load3";
+
+#ifdef NDEBUG
+ // This case cannot be reverted, so it will assert on debug builds.
+ BackendTransaction(L"remove_one4", 0, false);
+ ASSERT_TRUE(success_) << "remove_one4";
+ BackendTransaction(L"remove_head4", 1, false);
+ ASSERT_TRUE(success_) << "remove_head4";
+#endif
+}
+
+TEST_F(DiskCacheBackendTest, RecoverRemove) {
+ BackendRecoverRemove();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
+ SetNewEviction();
+ BackendRecoverRemove();
+}
+
+// Tests dealing with cache files that cannot be recovered.
+TEST_F(DiskCacheTest, Backend_DeleteOld) {
+ ASSERT_TRUE(CopyTestCache(L"wrong_version"));
+ FilePath path = GetCacheFilePath();
+ scoped_ptr<disk_cache::Backend> cache;
+ cache.reset(disk_cache::CreateCacheBackend(path, true, 0, net::DISK_CACHE));
+
+ MessageLoopHelper helper;
+
+ ASSERT_TRUE(NULL != cache.get());
+ ASSERT_EQ(0, cache->GetEntryCount());
+
+ // Wait for a callback that never comes... about 2 secs :). The message loop
+ // has to run to allow destruction of the cleaner thread.
+ helper.WaitUntilCacheIoFinished(1);
+}
+
+// We want to be able to deal with messed up entries on disk.
+void DiskCacheBackendTest::BackendInvalidEntry2() {
+ ASSERT_TRUE(CopyTestCache(L"bad_entry"));
+ DisableFirstCleanup();
+ InitCache();
+
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_TRUE(cache_->OpenEntry("the first key", &entry1));
+ EXPECT_FALSE(cache_->OpenEntry("some other key", &entry2));
+ entry1->Close();
+
+ // CheckCacheIntegrity will fail at this point.
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, InvalidEntry2) {
+ BackendInvalidEntry2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
+ SetNewEviction();
+ BackendInvalidEntry2();
+}
+
+// We want to be able to deal with abnormal dirty entries.
+void DiskCacheBackendTest::BackendNotMarkedButDirty(const std::wstring& name) {
+ ASSERT_TRUE(CopyTestCache(name));
+ DisableFirstCleanup();
+ InitCache();
+
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_TRUE(cache_->OpenEntry("the first key", &entry1));
+ EXPECT_FALSE(cache_->OpenEntry("some other key", &entry2));
+ entry1->Close();
+}
+
+TEST_F(DiskCacheBackendTest, NotMarkedButDirty) {
+ BackendNotMarkedButDirty(L"dirty_entry");
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionNotMarkedButDirty) {
+ SetNewEviction();
+ BackendNotMarkedButDirty(L"dirty_entry");
+}
+
+TEST_F(DiskCacheBackendTest, NotMarkedButDirty2) {
+ BackendNotMarkedButDirty(L"dirty_entry2");
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionNotMarkedButDirty2) {
+ SetNewEviction();
+ BackendNotMarkedButDirty(L"dirty_entry2");
+}
+
+// We want to be able to deal with messed up entries on disk.
+void DiskCacheBackendTest::BackendInvalidRankings2() {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ FilePath path = GetCacheFilePath();
+ DisableFirstCleanup();
+ InitCache();
+
+ disk_cache::Entry *entry1, *entry2;
+ EXPECT_FALSE(cache_->OpenEntry("the first key", &entry1));
+ ASSERT_TRUE(cache_->OpenEntry("some other key", &entry2));
+ entry2->Close();
+
+ // CheckCacheIntegrity will fail at this point.
+ DisableIntegrityCheck();
+}
+
+TEST_F(DiskCacheBackendTest, InvalidRankings2) {
+ BackendInvalidRankings2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
+ SetNewEviction();
+ BackendInvalidRankings2();
+}
+
+// If the LRU is corrupt, we delete the cache.
+void DiskCacheBackendTest::BackendInvalidRankings() {
+ disk_cache::Entry* entry;
+ void* iter = NULL;
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter, &entry));
+ entry->Close();
+ EXPECT_EQ(2, cache_->GetEntryCount());
+
+ EXPECT_FALSE(cache_->OpenNextEntry(&iter, &entry));
+ MessageLoop::current()->RunAllPending();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ InitCache();
+ BackendInvalidRankings();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ SetNewEviction();
+ InitCache();
+ BackendInvalidRankings();
+}
+
+TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendInvalidRankings();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ SetNewEviction();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendInvalidRankings();
+}
+
+// If the LRU is corrupt and we have open entries, we disable the cache.
+void DiskCacheBackendTest::BackendDisable() {
+ disk_cache::Entry *entry1, *entry2;
+ void* iter = NULL;
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter, &entry1));
+
+ EXPECT_FALSE(cache_->OpenNextEntry(&iter, &entry2));
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ EXPECT_FALSE(cache_->CreateEntry("Something new", &entry2));
+
+ entry1->Close();
+ MessageLoop::current()->RunAllPending();
+
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DisableSuccess) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ InitCache();
+ BackendDisable();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ SetNewEviction();
+ InitCache();
+ BackendDisable();
+}
+
+TEST_F(DiskCacheBackendTest, DisableFailure) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendDisable();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ SetNewEviction();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendDisable();
+}
+
+// This is another type of corruption on the LRU; disable the cache.
+void DiskCacheBackendTest::BackendDisable2() {
+ EXPECT_EQ(8, cache_->GetEntryCount());
+
+ disk_cache::Entry* entry;
+ void* iter = NULL;
+ int count = 0;
+ while (cache_->OpenNextEntry(&iter, &entry)) {
+ ASSERT_TRUE(NULL != entry);
+ entry->Close();
+ count++;
+ ASSERT_LT(count, 9);
+  }
+
+ MessageLoop::current()->RunAllPending();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DisableSuccess2) {
+ ASSERT_TRUE(CopyTestCache(L"list_loop"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ InitCache();
+ BackendDisable2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
+ ASSERT_TRUE(CopyTestCache(L"list_loop"));
+ DisableFirstCleanup();
+ SetNewEviction();
+ SetDirectMode();
+ InitCache();
+ BackendDisable2();
+}
+
+TEST_F(DiskCacheBackendTest, DisableFailure2) {
+ ASSERT_TRUE(CopyTestCache(L"list_loop"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendDisable2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
+ ASSERT_TRUE(CopyTestCache(L"list_loop"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ SetNewEviction();
+ InitCache();
+ SetTestMode(); // Fail cache reinitialization.
+ BackendDisable2();
+}
+
+// If the index size changes when we disable the cache, we should not crash.
+void DiskCacheBackendTest::BackendDisable3() {
+ disk_cache::Entry *entry1, *entry2;
+ void* iter = NULL;
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter, &entry1));
+ entry1->Close();
+
+ EXPECT_FALSE(cache_->OpenNextEntry(&iter, &entry2));
+ MessageLoop::current()->RunAllPending();
+
+ ASSERT_TRUE(cache_->CreateEntry("Something new", &entry2));
+ entry2->Close();
+
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DisableSuccess3) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings2"));
+ DisableFirstCleanup();
+ SetMaxSize(20 * 1024 * 1024);
+ InitCache();
+ BackendDisable3();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings2"));
+ DisableFirstCleanup();
+ SetMaxSize(20 * 1024 * 1024);
+ SetNewEviction();
+ InitCache();
+ BackendDisable3();
+}
+
+// If we disable the cache, already open entries should work as far as possible.
+void DiskCacheBackendTest::BackendDisable4() {
+ disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
+ void* iter = NULL;
+ ASSERT_TRUE(cache_->OpenNextEntry(&iter, &entry1));
+
+ char key2[2000];
+ char key3[20000];
+ CacheTestFillBuffer(key2, sizeof(key2), true);
+ CacheTestFillBuffer(key3, sizeof(key3), true);
+ key2[sizeof(key2) - 1] = '\0';
+ key3[sizeof(key3) - 1] = '\0';
+ ASSERT_TRUE(cache_->CreateEntry(key2, &entry2));
+ ASSERT_TRUE(cache_->CreateEntry(key3, &entry3));
+
+ const int kBufSize = 20000;
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kBufSize);
+ memset(buf->data(), 0, kBufSize);
+ EXPECT_EQ(100, entry2->WriteData(0, 0, buf, 100, NULL, false));
+ EXPECT_EQ(kBufSize, entry3->WriteData(0, 0, buf, kBufSize, NULL, false));
+
+ // This line should disable the cache but not delete it.
+ EXPECT_FALSE(cache_->OpenNextEntry(&iter, &entry4));
+ EXPECT_EQ(4, cache_->GetEntryCount());
+
+ EXPECT_FALSE(cache_->CreateEntry("cache is disabled", &entry4));
+
+ EXPECT_EQ(100, entry2->ReadData(0, 0, buf, 100, NULL));
+ EXPECT_EQ(100, entry2->WriteData(0, 0, buf, 100, NULL, false));
+ EXPECT_EQ(100, entry2->WriteData(1, 0, buf, 100, NULL, false));
+
+ EXPECT_EQ(kBufSize, entry3->ReadData(0, 0, buf, kBufSize, NULL));
+ EXPECT_EQ(kBufSize, entry3->WriteData(0, 0, buf, kBufSize, NULL, false));
+ EXPECT_EQ(kBufSize, entry3->WriteData(1, 0, buf, kBufSize, NULL, false));
+
+ std::string key = entry2->GetKey();
+ EXPECT_EQ(sizeof(key2) - 1, key.size());
+ key = entry3->GetKey();
+ EXPECT_EQ(sizeof(key3) - 1, key.size());
+
+ entry1->Close();
+ entry2->Close();
+ entry3->Close();
+ MessageLoop::current()->RunAllPending();
+
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DisableSuccess4) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ InitCache();
+ BackendDisable4();
+}
+
+// Flaky, http://crbug.com/21110.
+// TODO(rvargas): Add more debugging code to help identify the root cause.
+TEST_F(DiskCacheBackendTest, FLAKY_NewEvictionDisableSuccess4) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings"));
+ DisableFirstCleanup();
+ SetDirectMode();
+ SetNewEviction();
+ InitCache();
+ BackendDisable4();
+}
+
+TEST_F(DiskCacheTest, Backend_UsageStats) {
+ MessageLoopHelper helper;
+
+ FilePath path = GetCacheFilePath();
+ ASSERT_TRUE(DeleteCache(path));
+ scoped_ptr<disk_cache::BackendImpl> cache;
+ cache.reset(new disk_cache::BackendImpl(path));
+ ASSERT_TRUE(NULL != cache.get());
+ cache->SetUnitTestMode();
+ ASSERT_TRUE(cache->Init());
+
+ // Wait for a callback that never comes... about 2 secs :). The message loop
+ // has to run to allow invocation of the usage timer.
+ helper.WaitUntilCacheIoFinished(1);
+}
+
+void DiskCacheBackendTest::BackendDoomAll() {
+ InitCache();
+ Time initial = Time::Now();
+
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_TRUE(cache_->CreateEntry("first", &entry1));
+ ASSERT_TRUE(cache_->CreateEntry("second", &entry2));
+ entry1->Close();
+ entry2->Close();
+
+ ASSERT_TRUE(cache_->CreateEntry("third", &entry1));
+ ASSERT_TRUE(cache_->CreateEntry("fourth", &entry2));
+
+ ASSERT_EQ(4, cache_->GetEntryCount());
+ EXPECT_TRUE(cache_->DoomAllEntries());
+ ASSERT_EQ(0, cache_->GetEntryCount());
+
+ disk_cache::Entry *entry3, *entry4;
+ ASSERT_TRUE(cache_->CreateEntry("third", &entry3));
+ ASSERT_TRUE(cache_->CreateEntry("fourth", &entry4));
+
+ EXPECT_TRUE(cache_->DoomAllEntries());
+ ASSERT_EQ(0, cache_->GetEntryCount());
+
+ entry1->Close();
+ entry2->Close();
+ entry3->Doom(); // The entry should be already doomed, but this must work.
+ entry3->Close();
+ entry4->Close();
+
+ // Now try with all references released.
+ ASSERT_TRUE(cache_->CreateEntry("third", &entry1));
+ ASSERT_TRUE(cache_->CreateEntry("fourth", &entry2));
+ entry1->Close();
+ entry2->Close();
+
+ ASSERT_EQ(2, cache_->GetEntryCount());
+ EXPECT_TRUE(cache_->DoomAllEntries());
+ ASSERT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DoomAll) {
+ BackendDoomAll();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
+ SetNewEviction();
+ BackendDoomAll();
+}
+
+TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
+ SetMemoryOnlyMode();
+ BackendDoomAll();
+}
+
+// If the index size changes when we doom the cache, we should not crash.
+void DiskCacheBackendTest::BackendDoomAll2() {
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ EXPECT_TRUE(cache_->DoomAllEntries());
+
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry("Something new", &entry));
+ entry->Close();
+
+ EXPECT_EQ(1, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheBackendTest, DoomAll2) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings2"));
+ DisableFirstCleanup();
+ SetMaxSize(20 * 1024 * 1024);
+ InitCache();
+ BackendDoomAll2();
+}
+
+TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
+ ASSERT_TRUE(CopyTestCache(L"bad_rankings2"));
+ DisableFirstCleanup();
+ SetMaxSize(20 * 1024 * 1024);
+ SetNewEviction();
+ InitCache();
+ BackendDoomAll2();
+}
+
+// We should be able to create the same entry on multiple simultaneous instances
+// of the cache.
+TEST_F(DiskCacheTest, MultipleInstances) {
+ ScopedTestCache store1;
+ ScopedTestCache store2("cache_test2");
+ ScopedTestCache store3("cache_test3");
+
+ const int kNumberOfCaches = 2;
+ scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
+
+ cache[0].reset(disk_cache::CreateCacheBackend(store1.path(), false, 0,
+ net::DISK_CACHE));
+ cache[1].reset(disk_cache::CreateCacheBackend(store2.path(), false, 0,
+ net::MEDIA_CACHE));
+
+ ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
+
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ for (int i = 0; i < kNumberOfCaches; i++) {
+ ASSERT_TRUE(cache[i]->CreateEntry(key, &entry));
+ entry->Close();
+ }
+}
+
+// Test the four regions of the curve that determines the max cache size.
+TEST_F(DiskCacheTest, AutomaticMaxSize) {
+ const int kDefaultSize = 80 * 1024 * 1024;
+ int64 large_size = kDefaultSize;
+
+ EXPECT_EQ(kDefaultSize, disk_cache::PreferedCacheSize(large_size));
+ EXPECT_EQ((kDefaultSize / 2) * 8 / 10,
+ disk_cache::PreferedCacheSize(large_size / 2));
+
+ EXPECT_EQ(kDefaultSize, disk_cache::PreferedCacheSize(large_size * 2));
+ EXPECT_EQ(kDefaultSize, disk_cache::PreferedCacheSize(large_size * 4));
+ EXPECT_EQ(kDefaultSize, disk_cache::PreferedCacheSize(large_size * 10));
+
+ EXPECT_EQ(kDefaultSize * 2, disk_cache::PreferedCacheSize(large_size * 20));
+ EXPECT_EQ(kDefaultSize * 5 / 2,
+ disk_cache::PreferedCacheSize(large_size * 50 / 2));
+
+ EXPECT_EQ(kDefaultSize * 5 / 2,
+ disk_cache::PreferedCacheSize(large_size * 51 / 2));
+ EXPECT_EQ(kDefaultSize * 5 / 2,
+ disk_cache::PreferedCacheSize(large_size * 100 / 2));
+ EXPECT_EQ(kDefaultSize * 5 / 2,
+ disk_cache::PreferedCacheSize(large_size * 500 / 2));
+
+ EXPECT_EQ(kDefaultSize * 6 / 2,
+ disk_cache::PreferedCacheSize(large_size * 600 / 2));
+ EXPECT_EQ(kDefaultSize * 7 / 2,
+ disk_cache::PreferedCacheSize(large_size * 700 / 2));
+}
diff --git a/net/disk_cache/bitmap.cc b/net/disk_cache/bitmap.cc
new file mode 100644
index 00000000..e0250909
--- /dev/null
+++ b/net/disk_cache/bitmap.cc
@@ -0,0 +1,284 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/bitmap.h"
+
+#include "base/logging.h"
+
+namespace {
+
+// Returns the number of trailing zeros.
+int FindLSBSetNonZero(uint32 word) {
+ // Get the LSB, put it in the exponent of a 32-bit float and remove the
+ // mantissa and the bias. This code requires IEEE 32-bit float compliance.
+ float f = static_cast<float>(word & -static_cast<int>(word));
+
+ // We use a union to get around strict-aliasing complaints.
+ union {
+ float ieee_float;
+ uint32 as_uint;
+ } x;
+
+ x.ieee_float = f;
+ return (x.as_uint >> 23) - 0x7f;
+}
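+
+// A worked example of the float trick above (assuming IEEE-754 single
+// precision, as the code requires): for word == 40 (binary 101000),
+// word & -word == 8, and 8.0f is stored with biased exponent 130 (3 + 127),
+// so (bits >> 23) - 0x7f == 3, the number of trailing zeros of 40.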
+
+// Returns the index of the first bit set to |value| from |word|. This code
+// assumes that we'll be able to find that bit.
+int FindLSBNonEmpty(uint32 word, bool value) {
+ // If we are looking for 0, negate |word| and look for 1.
+ if (!value)
+ word = ~word;
+
+ return FindLSBSetNonZero(word);
+}
+
+} // namespace
+
+namespace disk_cache {
+
+void Bitmap::Resize(int num_bits, bool clear_bits) {
+ DCHECK(alloc_ || !map_);
+ const int old_maxsize = num_bits_;
+ const int old_array_size = array_size_;
+ array_size_ = RequiredArraySize(num_bits);
+
+ if (array_size_ != old_array_size) {
+ uint32* new_map = new uint32[array_size_];
+ // Always clear the unused bits in the last word.
+ new_map[array_size_ - 1] = 0;
+ memcpy(new_map, map_,
+ sizeof(*map_) * std::min(array_size_, old_array_size));
+ if (alloc_)
+ delete[] map_; // No need to check for NULL.
+ map_ = new_map;
+ alloc_ = true;
+ }
+
+ num_bits_ = num_bits;
+ if (old_maxsize < num_bits_ && clear_bits) {
+ SetRange(old_maxsize, num_bits_, false);
+ }
+}
+
+void Bitmap::Set(int index, bool value) {
+ DCHECK_LT(index, num_bits_);
+ DCHECK_GE(index, 0);
+ const int i = index & (kIntBits - 1);
+ const int j = index / kIntBits;
+ if (value)
+ map_[j] |= (1 << i);
+ else
+ map_[j] &= ~(1 << i);
+}
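+
+// As an illustration of the index math above (the same mapping is used by
+// Get() and Toggle() below): for index == 37, j == 1 and i == 5, so bit 5 of
+// map_[1] is the bit that gets read or written.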
+
+bool Bitmap::Get(int index) const {
+ DCHECK_LT(index, num_bits_);
+ DCHECK_GE(index, 0);
+ const int i = index & (kIntBits-1);
+ const int j = index / kIntBits;
+ return map_[j] & (1 << i) ? true : false;
+}
+
+void Bitmap::Toggle(int index) {
+ DCHECK_LT(index, num_bits_);
+ DCHECK_GE(index, 0);
+ const int i = index & (kIntBits - 1);
+ const int j = index / kIntBits;
+ map_[j] ^= (1 << i);
+}
+
+void Bitmap::SetMapElement(int array_index, uint32 value) {
+ DCHECK_LT(array_index, array_size_);
+ DCHECK_GE(array_index, 0);
+ map_[array_index] = value;
+}
+
+uint32 Bitmap::GetMapElement(int array_index) const {
+ DCHECK_LT(array_index, array_size_);
+ DCHECK_GE(array_index, 0);
+ return map_[array_index];
+}
+
+void Bitmap::SetMap(const uint32* map, int size) {
+ memcpy(map_, map, std::min(size, array_size_) * sizeof(*map_));
+}
+
+void Bitmap::SetWordBits(int start, int len, bool value) {
+ DCHECK_LT(len, kIntBits);
+ DCHECK_GE(len, 0);
+ if (!len)
+ return;
+
+ int word = start / kIntBits;
+ int offset = start % kIntBits;
+
+ uint32 to_add = 0xffffffff << len;
+ to_add = (~to_add) << offset;
+ if (value) {
+ map_[word] |= to_add;
+ } else {
+ map_[word] &= ~to_add;
+ }
+}
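+
+// A small example of the mask construction above: for start == 35, len == 3
+// and value == true, word == 1 and offset == 3, so to_add == 0x38 (bits 3..5
+// of map_[1]), which covers bits 35..37 of the bitmap.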
+
+void Bitmap::SetRange(int begin, int end, bool value) {
+ DCHECK_LE(begin, end);
+ int start_offset = begin & (kIntBits - 1);
+ if (start_offset) {
+ // Set the bits in the first word.
+ int len = std::min(end - begin, kIntBits - start_offset);
+ SetWordBits(begin, len, value);
+ begin += len;
+ }
+
+ if (begin == end)
+ return;
+
+ // Now set the bits in the last word.
+ int end_offset = end & (kIntBits - 1);
+ end -= end_offset;
+ SetWordBits(end, end_offset, value);
+
+ // Set all the words in the middle.
+ memset(map_ + (begin / kIntBits), (value ? 0xFF : 0x00),
+ ((end / kIntBits) - (begin / kIntBits)) * sizeof(*map_));
+}
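+
+// For example, SetRange(5, 70, true) is handled in three parts: the first
+// SetWordBits() covers bits 5..31, the memset() fills word 1 (bits 32..63),
+// and the second SetWordBits() covers bits 64..69.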
+
+// Returns true if any bit between |begin| inclusive and |end| exclusive
+// matches |value|. 0 <= |begin| <= |end| <= Size() is required.
+bool Bitmap::TestRange(int begin, int end, bool value) const {
+ DCHECK_LT(begin, num_bits_);
+ DCHECK_LE(end, num_bits_);
+ DCHECK_LE(begin, end);
+ DCHECK_GE(begin, 0);
+ DCHECK_GE(end, 0);
+
+ // Return false immediately if the range is empty.
+ if (begin >= end || end <= 0)
+ return false;
+
+ // Calculate the indices of the words containing the first and last bits,
+ // along with the positions of the bits within those words.
+ int word = begin / kIntBits;
+ int offset = begin & (kIntBits - 1);
+ int last_word = (end - 1) / kIntBits;
+ int last_offset = (end - 1) & (kIntBits - 1);
+
+ // If we are looking for zeros, negate the data from the map.
+ uint32 this_word = map_[word];
+ if (!value)
+ this_word = ~this_word;
+
+ // If the range spans multiple words, discard the extraneous bits of the
+ // first word by shifting to the right, and then test the remaining bits.
+ if (word < last_word) {
+ if (this_word >> offset)
+ return true;
+ offset = 0;
+
+ word++;
+ // Test each of the "middle" words that lies completely within the range.
+ while (word < last_word) {
+ this_word = map_[word++];
+ if (!value)
+ this_word = ~this_word;
+ if (this_word)
+ return true;
+ }
+ }
+
+ // Test the portion of the last word that lies within the range. (This logic
+ // also handles the case where the entire range lies within a single word.)
+ const uint32 mask = ((2 << (last_offset - offset)) - 1) << offset;
+
+ this_word = map_[last_word];
+ if (!value)
+ this_word = ~this_word;
+
+ return (this_word & mask) != 0;
+}
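+
+// Mask example for a range within a single word: TestRange(35, 39, value)
+// gives word == last_word == 1, offset == 3 and last_offset == 6, so
+// mask == ((2 << 3) - 1) << 3 == 0x78, i.e. bits 3..6 of map_[1].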
+
+bool Bitmap::FindNextBit(int* index, int limit, bool value) const {
+ DCHECK_LT(*index, num_bits_);
+ DCHECK_LE(limit, num_bits_);
+ DCHECK_LE(*index, limit);
+ DCHECK_GE(*index, 0);
+ DCHECK_GE(limit, 0);
+
+ const int bit_index = *index;
+ if (bit_index >= limit || limit <= 0)
+ return false;
+
+ // From now on limit != 0, since if it was we would have returned false.
+ int word_index = bit_index >> kLogIntBits;
+ uint32 one_word = map_[word_index];
+
+ // Simple optimization: we can immediately return true if the first bit
+ // already matches |value|. This helps for cases where many bits match, and
+ // doesn't hurt too much if not.
+ if (Get(bit_index) == value)
+ return true;
+
+ const int first_bit_offset = bit_index & (kIntBits - 1);
+
+ // First word is special - we need to mask off leading bits.
+ uint32 mask = 0xFFFFFFFF << first_bit_offset;
+ if (value) {
+ one_word &= mask;
+ } else {
+ one_word |= ~mask;
+ }
+
+ uint32 empty_value = value ? 0 : 0xFFFFFFFF;
+
+ // Loop through all but the last word. Note that 'limit' is one
+ // past the last bit we want to check, and we don't want to read
+ // past the end of "words". E.g. if num_bits_ == 32 only words[0] is
+ // valid, so we want to avoid reading words[1] when limit == 32.
+ const int last_word_index = (limit - 1) >> kLogIntBits;
+ while (word_index < last_word_index) {
+ if (one_word != empty_value) {
+ *index = (word_index << kLogIntBits) + FindLSBNonEmpty(one_word, value);
+ return true;
+ }
+ one_word = map_[++word_index];
+ }
+
+ // Last word is special - we may need to mask off trailing bits. Note that
+ // 'limit' is one past the last bit we want to check, and if limit is a
+ // multiple of 32 we want to check all bits in this word.
+ const int last_bit_offset = (limit - 1) & (kIntBits - 1);
+ mask = 0xFFFFFFFE << last_bit_offset;
+ if (value) {
+ one_word &= ~mask;
+ } else {
+ one_word |= mask;
+ }
+ if (one_word != empty_value) {
+ *index = (word_index << kLogIntBits) + FindLSBNonEmpty(one_word, value);
+ return true;
+ }
+ return false;
+}
+
+int Bitmap::FindBits(int* index, int limit, bool value) const {
+ DCHECK_LT(*index, num_bits_);
+ DCHECK_LE(limit, num_bits_);
+ DCHECK_LE(*index, limit);
+ DCHECK_GE(*index, 0);
+ DCHECK_GE(limit, 0);
+
+ if (!FindNextBit(index, limit, value))
+ return false;
+
+ // Now see how many bits have the same value.
+ int end = *index;
+ if (!FindNextBit(&end, limit, !value))
+ return limit - *index;
+
+ return end - *index;
+}
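+
+// For instance, with only bits 5..9 set and *index == 3, FindBits(&index, 20,
+// true) leaves *index == 5 and returns 5, the length of the run of ones.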
+
+} // namespace disk_cache
diff --git a/net/disk_cache/bitmap.h b/net/disk_cache/bitmap.h
new file mode 100644
index 00000000..4d38f83d
--- /dev/null
+++ b/net/disk_cache/bitmap.h
@@ -0,0 +1,153 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_BITMAP_H_
+#define NET_DISK_CACHE_BITMAP_H_
+
+#include <algorithm>
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+// This class provides support for simple maps of bits.
+class Bitmap {
+ public:
+ Bitmap() : map_(NULL), num_bits_(0), array_size_(0), alloc_(false) {}
+
+ // This constructor will allocate on a uint32 boundary. If |clear_bits| is
+ // false, the bitmap bits will not be initialized.
+ Bitmap(int num_bits, bool clear_bits)
+ : num_bits_(num_bits), array_size_(RequiredArraySize(num_bits)),
+ alloc_(true) {
+ map_ = new uint32[array_size_];
+
+ // Initialize all of the bits.
+ if (clear_bits)
+ Clear();
+ }
+
+ // Constructs a Bitmap with the actual storage provided by the caller. |map|
+ // has to be valid until this object's destruction. |num_bits| is the number of
+ // bits in the bitmap, and |num_words| is the size of |map| in 32-bit words.
+ Bitmap(uint32* map, int num_bits, int num_words)
+ : map_(map), num_bits_(num_bits),
+ // If size is larger than necessary, trim because array_size_ is used
+ // as a bound by various methods.
+ array_size_(std::min(RequiredArraySize(num_bits), num_words)),
+ alloc_(false) {}
+
+ ~Bitmap() {
+ if (alloc_)
+ delete[] map_;
+ }
+
+ // Resizes the bitmap.
+ // If |num_bits| < Size(), the extra bits will be discarded.
+ // If |num_bits| > Size(), the extra bits will be filled with zeros if
+ // |clear_bits| is true.
+ // This cannot be used if the object is using caller-provided memory.
+ void Resize(int num_bits, bool clear_bits);
+
+ // Returns the number of bits in the bitmap.
+ int Size() const { return num_bits_; }
+
+ // Returns the number of 32-bit words in the bitmap.
+ int ArraySize() const { return array_size_; }
+
+ // Sets all the bits to true or false.
+ void SetAll(bool value) {
+ memset(map_, (value ? 0xFF : 0x00), array_size_ * sizeof(*map_));
+ }
+
+ // Clears all bits in the bitmap
+ void Clear() { SetAll(false); }
+
+ // Sets the value, gets the value or toggles the value of a given bit.
+ void Set(int index, bool value);
+ bool Get(int index) const;
+ void Toggle(int index);
+
+ // Directly sets an element of the internal map. Requires |array_index| <
+ // ArraySize().
+ void SetMapElement(int array_index, uint32 value);
+
+ // Gets an entry of the internal map. Requires |array_index| <
+ // ArraySize().
+ uint32 GetMapElement(int array_index) const;
+
+ // Directly sets the whole internal map. |size| is the number of 32-bit words
+ // to set from |map|. If |size| > array_size(), it ignores the end of |map|.
+ void SetMap(const uint32* map, int size);
+
+ // Gets a pointer to the internal map.
+ const uint32* GetMap() const { return map_; }
+
+ // Sets a range of bits to |value|.
+ void SetRange(int begin, int end, bool value);
+
+ // Returns true if any bit between |begin| inclusive and |end| exclusive
+ // matches |value|. 0 <= |begin| <= |end| <= Size() is required.
+ bool TestRange(int begin, int end, bool value) const;
+
+ // Scans bits starting at bit *|index|, looking for a bit set to |value|. If
+ // it finds that bit before reaching bit index |limit|, sets *|index| to the
+ // bit index and returns true. Otherwise returns false.
+ // Requires |limit| <= Size().
+ //
+ // Note that to use these methods in a loop you must increment the index
+ // after each use, as in:
+ //
+ // for (int index = 0 ; map.FindNextBit(&index, limit, value) ; ++index) {
+ // DoSomethingWith(index);
+ // }
+ bool FindNextBit(int* index, int limit, bool value) const;
+
+ // Finds the first offset >= *|index| and < |limit| that has its bit set.
+ // See FindNextBit() for more info.
+ bool FindNextSetBitBeforeLimit(int* index, int limit) const {
+ return FindNextBit(index, limit, true);
+ }
+
+ // Finds the first offset >= *|index| that has its bit set.
+ // See FindNextBit() for more info.
+ bool FindNextSetBit(int *index) const {
+ return FindNextSetBitBeforeLimit(index, num_bits_);
+ }
+
+ // Scans bits starting at bit *|index|, looking for a bit set to |value|. If
+ // it finds that bit before reaching bit index |limit|, sets *|index| to the
+ // bit index and then counts the number of consecutive bits set to |value|
+ // (before reaching |limit|), and returns that count. If no bit is found
+ // returns 0. Requires |limit| <= Size().
+ int FindBits(int* index, int limit, bool value) const;
+
+ // Returns number of allocated words required for a bitmap of size |num_bits|.
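+ // For example, 1 through 32 bits need one word and 33 through 64 bits need
+ // two (each word holds kIntBits == 32 bits).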
+ static int RequiredArraySize(int num_bits) {
+ // Force at least one allocated word.
+ if (num_bits <= kIntBits)
+ return 1;
+
+ return (num_bits + kIntBits - 1) >> kLogIntBits;
+ }
+
+ private:
+ static const int kIntBits = sizeof(uint32) * 8;
+ static const int kLogIntBits = 5; // 2^5 == 32 bits per word.
+
+ // Sets |len| bits from |start| to |value|. All the bits to be set should be
+ // stored in the same word, and len < kIntBits.
+ void SetWordBits(int start, int len, bool value);
+
+ uint32* map_; // The bitmap.
+ int num_bits_; // The upper bound of the bitmap.
+ int array_size_; // The physical size (in uint32s) of the bitmap.
+ bool alloc_; // Whether or not we allocated the memory.
+
+ DISALLOW_COPY_AND_ASSIGN(Bitmap);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BITMAP_H_
diff --git a/net/disk_cache/bitmap_unittest.cc b/net/disk_cache/bitmap_unittest.cc
new file mode 100644
index 00000000..d80ea742
--- /dev/null
+++ b/net/disk_cache/bitmap_unittest.cc
@@ -0,0 +1,293 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/bitmap.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(BitmapTest, OverAllocate) {
+ // Test that we don't over allocate on boundaries.
+ disk_cache::Bitmap map32(32, false);
+ EXPECT_EQ(1, map32.ArraySize());
+
+ disk_cache::Bitmap map64(64, false);
+ EXPECT_EQ(2, map64.ArraySize());
+}
+
+TEST(BitmapTest, DefaultConstructor) {
+ // Verify that the default constructor doesn't allocate a bitmap.
+ disk_cache::Bitmap map;
+ EXPECT_EQ(0, map.Size());
+ EXPECT_EQ(0, map.ArraySize());
+ EXPECT_TRUE(NULL == map.GetMap());
+}
+
+TEST(BitmapTest, Basics) {
+ disk_cache::Bitmap bitmap(80, true);
+ const uint32 kValue = 0x74f10060;
+
+ // Test proper allocation size.
+ EXPECT_EQ(80, bitmap.Size());
+ EXPECT_EQ(3, bitmap.ArraySize());
+
+ // Test Set/GetMapElement.
+ EXPECT_EQ(0U, bitmap.GetMapElement(1));
+ bitmap.SetMapElement(1, kValue);
+ EXPECT_EQ(kValue, bitmap.GetMapElement(1));
+
+ // Test Set/Get.
+ EXPECT_TRUE(bitmap.Get(48));
+ EXPECT_FALSE(bitmap.Get(49));
+ EXPECT_FALSE(bitmap.Get(50));
+ bitmap.Set(49, true);
+ EXPECT_TRUE(bitmap.Get(48));
+ EXPECT_TRUE(bitmap.Get(49));
+ EXPECT_FALSE(bitmap.Get(50));
+ bitmap.Set(49, false);
+ EXPECT_TRUE(bitmap.Get(48));
+ EXPECT_FALSE(bitmap.Get(49));
+ EXPECT_FALSE(bitmap.Get(50));
+
+ for (int i = 0; i < 80; i++)
+ bitmap.Set(i, (i % 7) == 0);
+ for (int i = 0; i < 80; i++)
+ EXPECT_EQ(bitmap.Get(i), (i % 7) == 0);
+}
+
+TEST(BitmapTest, Toggle) {
+ static const int kSize = 100;
+ disk_cache::Bitmap map(kSize, true);
+ for (int i = 0; i < 100; i += 3)
+ map.Toggle(i);
+ for (int i = 0; i < 100; i += 9)
+ map.Toggle(i);
+ for (int i = 0; i < 100; ++i)
+ EXPECT_EQ((i % 3 == 0) && (i % 9 != 0), map.Get(i));
+}
+
+TEST(BitmapTest, Resize) {
+ const int kSize1 = 50;
+ const int kSize2 = 100;
+ const int kSize3 = 30;
+ disk_cache::Bitmap map(kSize1, true);
+ map.Resize(kSize1, true);
+ EXPECT_EQ(kSize1, map.Size());
+ EXPECT_FALSE(map.Get(0));
+ EXPECT_FALSE(map.Get(kSize1 - 1));
+
+ map.Resize(kSize2, true);
+ EXPECT_FALSE(map.Get(kSize1 - 1));
+ EXPECT_FALSE(map.Get(kSize1));
+ EXPECT_FALSE(map.Get(kSize2 - 1));
+ EXPECT_EQ(kSize2, map.Size());
+
+ map.Resize(kSize3, true);
+ EXPECT_FALSE(map.Get(kSize3 - 1));
+ EXPECT_EQ(kSize3, map.Size());
+}
+
+TEST(BitmapTest, Map) {
+ // Tests Set/GetMap and the constructor that takes an array.
+ const int kMapSize = 80;
+ char local_map[kMapSize];
+ for (int i = 0; i < kMapSize; i++)
+ local_map[i] = static_cast<char>(i);
+
+ disk_cache::Bitmap bitmap(kMapSize * 8, false);
+ bitmap.SetMap(reinterpret_cast<uint32*>(local_map), kMapSize / 4);
+ for (int i = 0; i < kMapSize; i++) {
+ if (i % 2)
+ EXPECT_TRUE(bitmap.Get(i * 8));
+ else
+ EXPECT_FALSE(bitmap.Get(i * 8));
+ }
+
+ EXPECT_EQ(0, memcmp(local_map, bitmap.GetMap(), kMapSize));
+
+ // Now let's create a bitmap that shares local_map as storage.
+ disk_cache::Bitmap bitmap2(reinterpret_cast<uint32*>(local_map),
+ kMapSize * 8, kMapSize / 4);
+ EXPECT_EQ(0, memcmp(local_map, bitmap2.GetMap(), kMapSize));
+
+ local_map[kMapSize / 2] = 'a';
+ EXPECT_EQ(0, memcmp(local_map, bitmap2.GetMap(), kMapSize));
+ EXPECT_NE(0, memcmp(local_map, bitmap.GetMap(), kMapSize));
+}
+
+TEST(BitmapTest, SetAll) {
+ // Tests SetAll and Clear.
+ const int kMapSize = 80;
+ char ones[kMapSize];
+ char zeros[kMapSize];
+ memset(ones, 0xff, kMapSize);
+ memset(zeros, 0, kMapSize);
+
+ disk_cache::Bitmap map(kMapSize * 8, true);
+ EXPECT_EQ(0, memcmp(zeros, map.GetMap(), kMapSize));
+ map.SetAll(true);
+ EXPECT_EQ(0, memcmp(ones, map.GetMap(), kMapSize));
+ map.SetAll(false);
+ EXPECT_EQ(0, memcmp(zeros, map.GetMap(), kMapSize));
+ map.SetAll(true);
+ map.Clear();
+ EXPECT_EQ(0, memcmp(zeros, map.GetMap(), kMapSize));
+}
+
+TEST(BitmapTest, Range) {
+ // Tests SetRange() and TestRange().
+ disk_cache::Bitmap map(100, true);
+ EXPECT_FALSE(map.TestRange(0, 100, true));
+ map.Set(50, true);
+ EXPECT_TRUE(map.TestRange(0, 100, true));
+
+ map.SetAll(false);
+ EXPECT_FALSE(map.TestRange(0, 1, true));
+ EXPECT_FALSE(map.TestRange(30, 31, true));
+ EXPECT_FALSE(map.TestRange(98, 99, true));
+ EXPECT_FALSE(map.TestRange(99, 100, true));
+ EXPECT_FALSE(map.TestRange(0, 100, true));
+
+ EXPECT_TRUE(map.TestRange(0, 1, false));
+ EXPECT_TRUE(map.TestRange(31, 32, false));
+ EXPECT_TRUE(map.TestRange(32, 33, false));
+ EXPECT_TRUE(map.TestRange(99, 100, false));
+ EXPECT_TRUE(map.TestRange(0, 32, false));
+
+ map.SetRange(11, 21, true);
+ for (int i = 0; i < 100; i++)
+ EXPECT_EQ(map.Get(i), (i >= 11) && (i < 21));
+
+ EXPECT_TRUE(map.TestRange(0, 32, true));
+ EXPECT_TRUE(map.TestRange(0, 100, true));
+ EXPECT_TRUE(map.TestRange(11, 21, true));
+ EXPECT_TRUE(map.TestRange(15, 16, true));
+ EXPECT_TRUE(map.TestRange(5, 12, true));
+ EXPECT_TRUE(map.TestRange(5, 11, false));
+ EXPECT_TRUE(map.TestRange(20, 60, true));
+ EXPECT_TRUE(map.TestRange(21, 60, false));
+
+ map.SetAll(true);
+ EXPECT_FALSE(map.TestRange(0, 100, false));
+
+ map.SetRange(70, 99, false);
+ EXPECT_TRUE(map.TestRange(69, 99, false));
+ EXPECT_TRUE(map.TestRange(70, 100, false));
+ EXPECT_FALSE(map.TestRange(70, 99, true));
+}
+
+TEST(BitmapTest, FindNextSetBitBeforeLimit) {
+ // Test FindNextSetBitBeforeLimit. Only check bits from 111 to 277 (limit
+ // bit == 278). Should find all multiples of 27 in that range.
+ disk_cache::Bitmap map(500, true);
+ for (int i = 0; i < 500; i++)
+ map.Set(i, (i % 27) == 0);
+
+ int find_me = 135; // First one expected.
+ for (int index = 111; map.FindNextSetBitBeforeLimit(&index, 278);
+ ++index) {
+ EXPECT_EQ(index, find_me);
+ find_me += 27;
+ }
+ EXPECT_EQ(find_me, 297); // The next find_me after 278.
+}
+
+TEST(BitmapTest, FindNextSetBitBeforeLimitAligned) {
+ // Test FindNextSetBitBeforeLimit on aligned scans.
+ disk_cache::Bitmap map(256, true);
+ for (int i = 0; i < 256; i++)
+ map.Set(i, (i % 32) == 0);
+ for (int i = 0; i < 256; i += 32) {
+ int index = i + 1;
+ EXPECT_FALSE(map.FindNextSetBitBeforeLimit(&index, i + 32));
+ }
+}
+
+TEST(BitmapTest, FindNextSetBit) {
+ // Test FindNextSetBit. Check all bits in map. Should find multiples
+ // of 7 from 0 to 98.
+ disk_cache::Bitmap map(100, true);
+ for (int i = 0; i < 100; i++)
+ map.Set(i, (i % 7) == 0);
+
+ int find_me = 0; // First one expected.
+ for (int index = 0; map.FindNextSetBit(&index); ++index) {
+ EXPECT_EQ(index, find_me);
+ find_me += 7;
+ }
+ EXPECT_EQ(find_me, 105); // The next find_me after 98.
+}
+
+TEST(BitmapTest, FindNextBit) {
+ // Almost the same test as FindNextSetBit, but find zeros instead of ones.
+ disk_cache::Bitmap map(100, false);
+ map.SetAll(true);
+ for (int i = 0; i < 100; i++)
+ map.Set(i, (i % 7) != 0);
+
+ int find_me = 0; // First one expected.
+ for (int index = 0; map.FindNextBit(&index, 100, false); ++index) {
+ EXPECT_EQ(index, find_me);
+ find_me += 7;
+ }
+ EXPECT_EQ(find_me, 105); // The next find_me after 98.
+}
+
+TEST(BitmapTest, SimpleFindBits) {
+ disk_cache::Bitmap bitmap(64, true);
+ bitmap.SetMapElement(0, 0x7ff10060);
+
+ // Bit at index off.
+ int index = 0;
+ EXPECT_EQ(5, bitmap.FindBits(&index, 63, false));
+ EXPECT_EQ(0, index);
+
+ EXPECT_EQ(2, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(5, index);
+
+ index = 0;
+ EXPECT_EQ(2, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(5, index);
+
+ index = 6;
+ EXPECT_EQ(9, bitmap.FindBits(&index, 63, false));
+ EXPECT_EQ(7, index);
+
+ // Bit at index on.
+ index = 16;
+ EXPECT_EQ(1, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(16, index);
+
+ index = 17;
+ EXPECT_EQ(11, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(20, index);
+
+ index = 31;
+ EXPECT_EQ(0, bitmap.FindBits(&index, 63, true));
+ EXPECT_EQ(31, index);
+
+ // With a limit.
+ index = 8;
+ EXPECT_EQ(0, bitmap.FindBits(&index, 16, true));
+}
+
+TEST(BitmapTest, MultiWordFindBits) {
+ disk_cache::Bitmap bitmap(500, true);
+ bitmap.SetMapElement(10, 0xff00);
+
+ int index = 0;
+ EXPECT_EQ(0, bitmap.FindBits(&index, 300, true));
+
+ EXPECT_EQ(8, bitmap.FindBits(&index, 500, true));
+ EXPECT_EQ(328, index);
+
+ bitmap.SetMapElement(10, 0xff000000);
+ bitmap.SetMapElement(11, 0xff);
+
+ index = 0;
+ EXPECT_EQ(16, bitmap.FindBits(&index, 500, true));
+ EXPECT_EQ(344, index);
+
+ index = 0;
+ EXPECT_EQ(4, bitmap.FindBits(&index, 348, true));
+ EXPECT_EQ(344, index);
+}
diff --git a/net/disk_cache/block_files.cc b/net/disk_cache/block_files.cc
new file mode 100644
index 00000000..fe02f67b
--- /dev/null
+++ b/net/disk_cache/block_files.cc
@@ -0,0 +1,488 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/block_files.h"
+
+#include "base/file_util.h"
+#include "base/histogram.h"
+#include "base/string_util.h"
+#include "base/time.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/file_lock.h"
+
+using base::Time;
+
+namespace {
+
+const char* kBlockName = "data_";
+
+// This array is used to perform a fast lookup of the nibble bit pattern to the
+// type of entry that can be stored there (number of consecutive blocks).
+const char s_types[16] = {4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0};
+
+// Returns the type of block (number of consecutive blocks that can be stored)
+// for a given nibble of the bitmap.
+inline int GetMapBlockType(uint8 value) {
+ value &= 0xf;
+ return s_types[value];
+}
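+
+// One way to read the table above: the type of a nibble is the number of
+// consecutive free blocks at its high end, so 0x0 maps to 4, 0x1 to 3, 0x2
+// and 0x3 to 2, and anything with the top bit set (0x8..0xf) to 0.
+// CreateMapBlock() below allocates runs from the high end of each nibble.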
+
+void FixAllocationCounters(disk_cache::BlockFileHeader* header);
+
+// Creates a new entry on the allocation map, updating the appropriate counters.
+// target is the type of block to use (number of empty blocks), and size is the
+// actual number of blocks to use.
+bool CreateMapBlock(int target, int size, disk_cache::BlockFileHeader* header,
+ int* index) {
+ if (target <= 0 || target > disk_cache::kMaxNumBlocks ||
+ size <= 0 || size > disk_cache::kMaxNumBlocks) {
+ NOTREACHED();
+ return false;
+ }
+
+ Time start = Time::Now();
+ // We are going to process the map on 32-block chunks (32 bits), and on every
+ // chunk, iterate through the 8 nibbles where the new block can be located.
+ int current = header->hints[target - 1];
+ for (int i = 0; i < header->max_entries / 32; i++, current++) {
+ if (current == header->max_entries / 32)
+ current = 0;
+ uint32 map_block = header->allocation_map[current];
+
+ for (int j = 0; j < 8; j++, map_block >>= 4) {
+ if (GetMapBlockType(map_block) != target)
+ continue;
+
+ disk_cache::FileLock lock(header);
+ int index_offset = j * 4 + 4 - target;
+ *index = current * 32 + index_offset;
+ uint32 to_add = ((1 << size) - 1) << index_offset;
+ header->allocation_map[current] |= to_add;
+
+ header->hints[target - 1] = current;
+ header->empty[target - 1]--;
+ DCHECK(header->empty[target - 1] >= 0);
+ header->num_entries++;
+ if (target != size) {
+ header->empty[target - size - 1]++;
+ }
+ HISTOGRAM_TIMES("DiskCache.CreateBlock", Time::Now() - start);
+ return true;
+ }
+ }
+
+ // It is possible to have an undetected corruption (for example when the OS
+ // crashes), fix it here.
+ LOG(ERROR) << "Failing CreateMapBlock";
+ FixAllocationCounters(header);
+ return false;
+}
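+
+// As a rough example of the allocation above: for target == 3 and size == 2,
+// a matching nibble at j == 0 of word |current| gives index_offset == 1, so
+// to_add == 0x6 marks blocks 1 and 2 of that nibble as used, and the leftover
+// one-block run is accounted for by incrementing empty[target - size - 1].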
+
+// Deletes the block pointed to by |index| from the allocation map, and updates
+// the relevant counters on the header.
+void DeleteMapBlock(int index, int size, disk_cache::BlockFileHeader* header) {
+ if (size < 0 || size > disk_cache::kMaxNumBlocks) {
+ NOTREACHED();
+ return;
+ }
+ Time start = Time::Now();
+ int byte_index = index / 8;
+ uint8* byte_map = reinterpret_cast<uint8*>(header->allocation_map);
+ uint8 map_block = byte_map[byte_index];
+
+ if (index % 8 >= 4)
+ map_block >>= 4;
+
+ // See what type of block will be available after we delete this one.
+ int bits_at_end = 4 - size - index % 4;
+ uint8 end_mask = (0xf << (4 - bits_at_end)) & 0xf;
+ bool update_counters = (map_block & end_mask) == 0;
+ uint8 new_value = map_block & ~(((1 << size) - 1) << (index % 4));
+ int new_type = GetMapBlockType(new_value);
+
+ disk_cache::FileLock lock(header);
+ DCHECK((((1 << size) - 1) << (index % 8)) < 0x100);
+ uint8 to_clear = ((1 << size) - 1) << (index % 8);
+ DCHECK((byte_map[byte_index] & to_clear) == to_clear);
+ byte_map[byte_index] &= ~to_clear;
+
+ if (update_counters) {
+ if (bits_at_end) {
+ header->empty[bits_at_end - 1]--;
+ DCHECK(header->empty[bits_at_end - 1] >= 0);
+ }
+ header->empty[new_type - 1]++;
+ }
+ header->num_entries--;
+ DCHECK(header->num_entries >= 0);
+ HISTOGRAM_TIMES("DiskCache.DeleteBlock", Time::Now() - start);
+}
+
+// Restores the "empty counters" and allocation hints.
+void FixAllocationCounters(disk_cache::BlockFileHeader* header) {
+ for (int i = 0; i < disk_cache::kMaxNumBlocks; i++) {
+ header->hints[i] = 0;
+ header->empty[i] = 0;
+ }
+
+ for (int i = 0; i < header->max_entries / 32; i++) {
+ uint32 map_block = header->allocation_map[i];
+
+ for (int j = 0; j < 8; j++, map_block >>= 4) {
+ int type = GetMapBlockType(map_block);
+ if (type)
+ header->empty[type - 1]++;
+ }
+ }
+}
+
+// Returns true if the current block file should not be used as-is to store more
+// records. |block_count| is the number of blocks to allocate.
+bool NeedToGrowBlockFile(const disk_cache::BlockFileHeader* header,
+ int block_count) {
+ bool have_space = false;
+ int empty_blocks = 0;
+ for (int i = 0; i < disk_cache::kMaxNumBlocks; i++) {
+ empty_blocks += header->empty[i] * (i + 1);
+ if (i >= block_count - 1 && header->empty[i])
+ have_space = true;
+ }
+
+ if (header->next_file && (empty_blocks < disk_cache::kMaxBlocks / 10)) {
+ // This file is almost full but we already created another one, don't use
+ // this file yet so that it is easier to find empty blocks when we start
+ // using this file again.
+ return true;
+ }
+ return !have_space;
+}
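+
+// For example, with block_count == 3 only runs recorded in empty[2] and
+// empty[3] can satisfy the request, so have_space stays false even if many
+// one- and two-block runs remain.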
+
+} // namespace
+
+namespace disk_cache {
+
+BlockFiles::~BlockFiles() {
+ if (zero_buffer_)
+ delete[] zero_buffer_;
+ CloseFiles();
+}
+
+bool BlockFiles::Init(bool create_files) {
+ DCHECK(!init_);
+ if (init_)
+ return false;
+
+ block_files_.resize(kFirstAdditionlBlockFile);
+ for (int i = 0; i < kFirstAdditionlBlockFile; i++) {
+ if (create_files)
+ if (!CreateBlockFile(i, static_cast<FileType>(i + 1), true))
+ return false;
+
+ if (!OpenBlockFile(i))
+ return false;
+
+ // Walk this chain of files removing empty ones.
+ RemoveEmptyFile(static_cast<FileType>(i + 1));
+ }
+
+ init_ = true;
+ return true;
+}
+
+void BlockFiles::CloseFiles() {
+ init_ = false;
+ for (unsigned int i = 0; i < block_files_.size(); i++) {
+ if (block_files_[i]) {
+ block_files_[i]->Release();
+ block_files_[i] = NULL;
+ }
+ }
+ block_files_.clear();
+}
+
+FilePath BlockFiles::Name(int index) {
+ // The file format allows for 256 files.
+ DCHECK(index < 256 && index >= 0);
+ std::string tmp = StringPrintf("%s%d", kBlockName, index);
+ return path_.AppendASCII(tmp);
+}
+
+bool BlockFiles::CreateBlockFile(int index, FileType file_type, bool force) {
+ FilePath name = Name(index);
+ int flags =
+ force ? base::PLATFORM_FILE_CREATE_ALWAYS : base::PLATFORM_FILE_CREATE;
+ flags |= base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_EXCLUSIVE_WRITE;
+
+ scoped_refptr<File> file(new File(
+ base::CreatePlatformFile(name, flags, NULL)));
+ if (!file->IsValid())
+ return false;
+
+ BlockFileHeader header;
+ header.entry_size = Addr::BlockSizeForFileType(file_type);
+ header.this_file = static_cast<int16>(index);
+ DCHECK(index <= kint16max && index >= 0);
+
+ return file->Write(&header, sizeof(header), 0);
+}
+
+bool BlockFiles::OpenBlockFile(int index) {
+ if (block_files_.size() - 1 < static_cast<unsigned int>(index)) {
+ DCHECK(index > 0);
+ int to_add = index - static_cast<int>(block_files_.size()) + 1;
+ block_files_.resize(block_files_.size() + to_add);
+ }
+
+ FilePath name = Name(index);
+ scoped_refptr<MappedFile> file(new MappedFile());
+
+ if (!file->Init(name, kBlockHeaderSize)) {
+ LOG(ERROR) << "Failed to open " << name.value();
+ return false;
+ }
+
+ if (file->GetLength() < static_cast<size_t>(kBlockHeaderSize)) {
+ LOG(ERROR) << "File too small " << name.value();
+ return false;
+ }
+
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ if (kBlockMagic != header->magic || kCurrentVersion != header->version) {
+ LOG(ERROR) << "Invalid file version or magic";
+ return false;
+ }
+
+ if (header->updating) {
+ // Last instance was not properly shut down.
+ if (!FixBlockFileHeader(file))
+ return false;
+ }
+
+ DCHECK(!block_files_[index]);
+ file.swap(&block_files_[index]);
+ return true;
+}
+
+MappedFile* BlockFiles::GetFile(Addr address) {
+ DCHECK(block_files_.size() >= 4);
+ DCHECK(address.is_block_file() || !address.is_initialized());
+ if (!address.is_initialized())
+ return NULL;
+
+ int file_index = address.FileNumber();
+ if (static_cast<unsigned int>(file_index) >= block_files_.size() ||
+ !block_files_[file_index]) {
+ // We need to open the file
+ if (!OpenBlockFile(file_index))
+ return NULL;
+ }
+ DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
+ return block_files_[file_index];
+}
+
+bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) {
+ if (kMaxBlocks == header->max_entries)
+ return false;
+
+ DCHECK(!header->empty[3]);
+ int new_size = header->max_entries + 1024;
+ if (new_size > kMaxBlocks)
+ new_size = kMaxBlocks;
+
+ int new_size_bytes = new_size * header->entry_size + sizeof(*header);
+
+ FileLock lock(header);
+ if (!file->SetLength(new_size_bytes)) {
+ // Most likely we are trying to truncate the file, so the header is wrong.
+ if (header->updating < 10 && !FixBlockFileHeader(file)) {
+ // If we can't fix the file, increase the lock guard so we'll pick it up on
+ // the next start and replace it.
+ header->updating = 100;
+ return false;
+ }
+ return (header->max_entries >= new_size);
+ }
+
+ header->empty[3] = (new_size - header->max_entries) / 4; // 4-block entries.
+ header->max_entries = new_size;
+
+ return true;
+}
+
+MappedFile* BlockFiles::FileForNewBlock(FileType block_type, int block_count) {
+ COMPILE_ASSERT(RANKINGS == 1, invalid_file_type);
+ MappedFile* file = block_files_[block_type - 1];
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+ Time start = Time::Now();
+ while (NeedToGrowBlockFile(header, block_count)) {
+ if (kMaxBlocks == header->max_entries) {
+ file = NextFile(file);
+ if (!file)
+ return NULL;
+ header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ continue;
+ }
+
+ if (!GrowBlockFile(file, header))
+ return NULL;
+ break;
+ }
+ HISTOGRAM_TIMES("DiskCache.GetFileForNewBlock", Time::Now() - start);
+ return file;
+}
+
+MappedFile* BlockFiles::NextFile(const MappedFile* file) {
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ int new_file = header->next_file;
+ if (!new_file) {
+ // RANKINGS is not reported as a type for small entries, but we may be
+ // extending the rankings block file.
+ FileType type = Addr::RequiredFileType(header->entry_size);
+ if (header->entry_size == Addr::BlockSizeForFileType(RANKINGS))
+ type = RANKINGS;
+
+ new_file = CreateNextBlockFile(type);
+ if (!new_file)
+ return NULL;
+
+ FileLock lock(header);
+ header->next_file = new_file;
+ }
+
+ // Only the block_file argument is relevant for what we want.
+ Addr address(BLOCK_256, 1, new_file, 0);
+ return GetFile(address);
+}
+
+int BlockFiles::CreateNextBlockFile(FileType block_type) {
+ for (int i = kFirstAdditionlBlockFile; i <= kMaxBlockFile; i++) {
+ if (CreateBlockFile(i, block_type, false))
+ return i;
+ }
+ return 0;
+}
+
+// We walk the list of files for this particular block type, deleting the ones
+// that are empty.
+void BlockFiles::RemoveEmptyFile(FileType block_type) {
+ MappedFile* file = block_files_[block_type - 1];
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+ while (header->next_file) {
+ // Only the block_file argument is relevant for what we want.
+ Addr address(BLOCK_256, 1, header->next_file, 0);
+ MappedFile* next_file = GetFile(address);
+ if (!next_file)
+ return;
+
+ BlockFileHeader* next_header =
+ reinterpret_cast<BlockFileHeader*>(next_file->buffer());
+ if (!next_header->num_entries) {
+ DCHECK_EQ(next_header->entry_size, header->entry_size);
+ // Delete next_file and remove it from the chain.
+ int file_index = header->next_file;
+ header->next_file = next_header->next_file;
+ DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
+ block_files_[file_index]->Release();
+ block_files_[file_index] = NULL;
+
+ FilePath name = Name(file_index);
+ int failure = DeleteCacheFile(name) ? 0 : 1;
+ UMA_HISTOGRAM_COUNTS("DiskCache.DeleteFailed2", failure);
+ if (failure)
+ LOG(ERROR) << "Failed to delete " << name.value() << " from the cache.";
+ continue;
+ }
+
+ header = next_header;
+ file = next_file;
+ }
+}
+
+bool BlockFiles::CreateBlock(FileType block_type, int block_count,
+ Addr* block_address) {
+ if (block_type < RANKINGS || block_type > BLOCK_4K ||
+ block_count < 1 || block_count > 4)
+ return false;
+ if (!init_)
+ return false;
+
+ MappedFile* file = FileForNewBlock(block_type, block_count);
+ if (!file)
+ return false;
+
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+ int target_size = 0;
+ for (int i = block_count; i <= 4; i++) {
+ if (header->empty[i - 1]) {
+ target_size = i;
+ break;
+ }
+ }
+
+ DCHECK(target_size);
+ int index;
+ if (!CreateMapBlock(target_size, block_count, header, &index))
+ return false;
+
+ Addr address(block_type, block_count, header->this_file, index);
+ block_address->set_value(address.value());
+ return true;
+}
+
+void BlockFiles::DeleteBlock(Addr address, bool deep) {
+ if (!address.is_initialized() || address.is_separate_file())
+ return;
+
+ if (!zero_buffer_) {
+ zero_buffer_ = new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4];
+ memset(zero_buffer_, 0, Addr::BlockSizeForFileType(BLOCK_4K) * 4);
+ }
+ MappedFile* file = GetFile(address);
+ if (!file)
+ return;
+
+ size_t size = address.BlockSize() * address.num_blocks();
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ if (deep)
+ file->Write(zero_buffer_, size, offset);
+
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ DeleteMapBlock(address.start_block(), address.num_blocks(), header);
+ if (!header->num_entries) {
+ // This file is now empty. Let's try to delete it.
+ FileType type = Addr::RequiredFileType(header->entry_size);
+ if (Addr::BlockSizeForFileType(RANKINGS) == header->entry_size)
+ type = RANKINGS;
+ RemoveEmptyFile(type);
+ }
+}
+
+bool BlockFiles::FixBlockFileHeader(MappedFile* file) {
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ int file_size = static_cast<int>(file->GetLength());
+ if (file_size < static_cast<int>(sizeof(*header)))
+ return false; // file_size > 2GB is also an error.
+
+ int expected = header->entry_size * header->max_entries + sizeof(*header);
+ if (file_size != expected) {
+ int max_expected = header->entry_size * kMaxBlocks + sizeof(*header);
+ if (file_size < expected || header->empty[3] || file_size > max_expected) {
+ NOTREACHED();
+ LOG(ERROR) << "Unexpected file size";
+ return false;
+ }
+ // We were in the middle of growing the file.
+ int num_entries = (file_size - sizeof(*header)) / header->entry_size;
+ header->max_entries = num_entries;
+ }
+
+ FixAllocationCounters(header);
+ header->updating = 0;
+ return true;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/block_files.h b/net/disk_cache/block_files.h
new file mode 100644
index 00000000..85030622
--- /dev/null
+++ b/net/disk_cache/block_files.h
@@ -0,0 +1,88 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_BLOCK_FILES_H__
+#define NET_DISK_CACHE_BLOCK_FILES_H__
+
+#include <vector>
+
+#include "base/file_path.h"
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/mapped_file.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
+
+namespace disk_cache {
+
+class EntryImpl;
+
+// This class handles the set of block-files open by the disk cache.
+class BlockFiles {
+ public:
+ explicit BlockFiles(const FilePath& path)
+ : init_(false), zero_buffer_(NULL), path_(path) {}
+ ~BlockFiles();
+
+ // Performs the object initialization. create_files indicates if the backing
+ // files should be created or just open.
+ bool Init(bool create_files);
+
+ // Returns the file that stores a given address.
+ MappedFile* GetFile(Addr address);
+
+ // Creates a new entry on a block file. block_type indicates the size of block
+ // to be used (as defined in addr.h), block_count is the number of
+ // blocks to allocate, and block_address is the address of the new entry.
+ bool CreateBlock(FileType block_type, int block_count, Addr* block_address);
+
+ // Removes an entry from the block files. If deep is true, the storage is zero
+ // filled; otherwise the entry is removed but the data is not altered (must be
+ // already zeroed).
+ void DeleteBlock(Addr address, bool deep);
+
+ // Closes all the files and resets the internal state so that it can be
+ // initialized again. The cache is being purged.
+ void CloseFiles();
+
+ private:
+ // Set force to true to overwrite the file if it exists.
+ bool CreateBlockFile(int index, FileType file_type, bool force);
+ bool OpenBlockFile(int index);
+
+ // Attempts to grow this file. Fails if the file cannot be extended anymore.
+ bool GrowBlockFile(MappedFile* file, BlockFileHeader* header);
+
+ // Returns the appropriate file to use for a new block.
+ MappedFile* FileForNewBlock(FileType block_type, int block_count);
+
+ // Returns the next block file on this chain, creating new files if needed.
+ MappedFile* NextFile(const MappedFile* file);
+
+ // Creates an empty block file and returns its index.
+ int CreateNextBlockFile(FileType block_type);
+
+ // Removes a chained block file that is now empty.
+ void RemoveEmptyFile(FileType block_type);
+
+ // Restores the header of a potentially inconsistent file.
+ bool FixBlockFileHeader(MappedFile* file);
+
+ // Returns the filename for a given file index.
+ FilePath Name(int index);
+
+ bool init_;
+ char* zero_buffer_; // Buffer to speed-up cleaning deleted entries.
+ FilePath path_; // Path to the backing folder.
+ std::vector<MappedFile*> block_files_; // The actual files.
+
+ FRIEND_TEST(DiskCacheTest, BlockFiles_ZeroSizeFile);
+ FRIEND_TEST(DiskCacheTest, BlockFiles_InvalidFile);
+
+ DISALLOW_EVIL_CONSTRUCTORS(BlockFiles);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_BLOCK_FILES_H__
diff --git a/net/disk_cache/block_files_unittest.cc b/net/disk_cache/block_files_unittest.cc
new file mode 100644
index 00000000..054641c3
--- /dev/null
+++ b/net/disk_cache/block_files_unittest.cc
@@ -0,0 +1,206 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::Time;
+
+namespace {
+
+// Returns the number of files in this folder.
+int NumberOfFiles(const FilePath& path) {
+ file_util::FileEnumerator iter(path, false, file_util::FileEnumerator::FILES);
+ int count = 0;
+ for (FilePath file = iter.Next(); !file.value().empty(); file = iter.Next()) {
+ count++;
+ }
+ return count;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+TEST_F(DiskCacheTest, BlockFiles_Grow) {
+ FilePath path = GetCacheFilePath();
+ ASSERT_TRUE(DeleteCache(path));
+ ASSERT_TRUE(file_util::CreateDirectory(path));
+
+ BlockFiles files(path);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kMaxSize = 35000;
+ Addr address[kMaxSize];
+
+ // Fill up the 32-byte block file (use three files).
+ for (int i = 0; i < kMaxSize; i++) {
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 4, &address[i]));
+ }
+ EXPECT_EQ(6, NumberOfFiles(path));
+
+ // Make sure we don't keep adding files.
+ for (int i = 0; i < kMaxSize * 4; i += 2) {
+ int target = i % kMaxSize;
+ files.DeleteBlock(address[target], false);
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 4, &address[target]));
+ }
+ EXPECT_EQ(6, NumberOfFiles(path));
+}
+
+// We should be able to delete empty block files.
+TEST_F(DiskCacheTest, BlockFiles_Shrink) {
+ FilePath path = GetCacheFilePath();
+ ASSERT_TRUE(DeleteCache(path));
+ ASSERT_TRUE(file_util::CreateDirectory(path));
+
+ BlockFiles files(path);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kMaxSize = 35000;
+ Addr address[kMaxSize];
+
+ // Fill up the 32-byte block file (use three files).
+ for (int i = 0; i < kMaxSize; i++) {
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, 4, &address[i]));
+ }
+
+ // Now delete all the blocks, so that we can delete the two extra files.
+ for (int i = 0; i < kMaxSize; i++) {
+ files.DeleteBlock(address[i], false);
+ }
+ EXPECT_EQ(4, NumberOfFiles(path));
+}
+
+// Handling of block files not properly closed.
+TEST_F(DiskCacheTest, BlockFiles_Recover) {
+ FilePath path = GetCacheFilePath();
+ ASSERT_TRUE(DeleteCache(path));
+ ASSERT_TRUE(file_util::CreateDirectory(path));
+
+ BlockFiles files(path);
+ ASSERT_TRUE(files.Init(true));
+
+ const int kNumEntries = 2000;
+ CacheAddr entries[kNumEntries];
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+ for (int i = 0; i < kNumEntries; i++) {
+ Addr address(0);
+ int size = (rand() % 4) + 1;
+ EXPECT_TRUE(files.CreateBlock(RANKINGS, size, &address));
+ entries[i] = address.value();
+ }
+
+ for (int i = 0; i < kNumEntries; i++) {
+ int source1 = rand() % kNumEntries;
+ int source2 = rand() % kNumEntries;
+ CacheAddr temp = entries[source1];
+ entries[source1] = entries[source2];
+ entries[source2] = temp;
+ }
+
+ for (int i = 0; i < kNumEntries / 2; i++) {
+ Addr address(entries[i]);
+ files.DeleteBlock(address, false);
+ }
+
+ // At this point, there are kNumEntries / 2 entries in the file, randomly
+ // distributed in both location and size.
+
+ Addr address(entries[kNumEntries / 2]);
+ MappedFile* file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ BlockFileHeader* header =
+ reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ ASSERT_EQ(0, header->updating);
+
+ int max_entries = header->max_entries;
+ int empty_1 = header->empty[0];
+ int empty_2 = header->empty[1];
+ int empty_3 = header->empty[2];
+ int empty_4 = header->empty[3];
+
+ // Corrupt the file.
+ header->max_entries = header->empty[0] = 0;
+ header->empty[1] = header->empty[2] = header->empty[3] = 0;
+ header->updating = -1;
+
+ files.CloseFiles();
+
+ ASSERT_TRUE(files.Init(false));
+
+ // The file must have been fixed.
+ file = files.GetFile(address);
+ ASSERT_TRUE(NULL != file);
+
+ header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ ASSERT_TRUE(NULL != header);
+
+ ASSERT_EQ(0, header->updating);
+
+ EXPECT_EQ(max_entries, header->max_entries);
+ EXPECT_EQ(empty_1, header->empty[0]);
+ EXPECT_EQ(empty_2, header->empty[1]);
+ EXPECT_EQ(empty_3, header->empty[2]);
+ EXPECT_EQ(empty_4, header->empty[3]);
+}
+
+// Handling of truncated files.
+TEST_F(DiskCacheTest, BlockFiles_ZeroSizeFile) {
+ FilePath path = GetCacheFilePath();
+ ASSERT_TRUE(DeleteCache(path));
+ ASSERT_TRUE(file_util::CreateDirectory(path));
+
+ BlockFiles files(path);
+ ASSERT_TRUE(files.Init(true));
+
+ FilePath filename = files.Name(0);
+ files.CloseFiles();
+ // Truncate one of the files.
+ {
+ scoped_refptr<File> file(new File);
+ ASSERT_TRUE(file->Init(filename));
+ EXPECT_TRUE(file->SetLength(0));
+ }
+
+ // Initializing should fail, not crash.
+ ASSERT_FALSE(files.Init(false));
+}
+
+// An invalid file can be detected after init.
+TEST_F(DiskCacheTest, BlockFiles_InvalidFile) {
+ FilePath path = GetCacheFilePath();
+ ASSERT_TRUE(DeleteCache(path));
+ ASSERT_TRUE(file_util::CreateDirectory(path));
+
+ BlockFiles files(path);
+ ASSERT_TRUE(files.Init(true));
+
+ // Let's access block 10 of file 5. (There is no file).
+ Addr addr(BLOCK_256, 1, 5, 10);
+ EXPECT_TRUE(NULL == files.GetFile(addr));
+
+ // Let's create an invalid file.
+ FilePath filename(files.Name(5));
+ char header[kBlockHeaderSize];
+ memset(header, 'a', kBlockHeaderSize);
+ EXPECT_EQ(kBlockHeaderSize,
+ file_util::WriteFile(filename, header, kBlockHeaderSize));
+
+ EXPECT_TRUE(NULL == files.GetFile(addr));
+
+ // The file should not have been cached (it is still invalid).
+ EXPECT_TRUE(NULL == files.GetFile(addr));
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/cache_util.h b/net/disk_cache/cache_util.h
new file mode 100644
index 00000000..cb78f169
--- /dev/null
+++ b/net/disk_cache/cache_util.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_CACHE_UTIL_H_
+#define NET_DISK_CACHE_CACHE_UTIL_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+
+class FilePath;
+
+namespace disk_cache {
+
+// Moves the cache files from the given path to another location.
+// Returns true if successful, false otherwise.
+bool MoveCache(const FilePath& from_path, const FilePath& to_path);
+
+// Deletes the cache files stored on |path|, and optionally also attempts to
+// delete the folder itself.
+void DeleteCache(const FilePath& path, bool remove_folder);
+
+// Deletes a cache file.
+bool DeleteCacheFile(const FilePath& name);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_CACHE_UTIL_H_
diff --git a/net/disk_cache/cache_util_posix.cc b/net/disk_cache/cache_util_posix.cc
new file mode 100644
index 00000000..a272cb8d
--- /dev/null
+++ b/net/disk_cache/cache_util_posix.cc
@@ -0,0 +1,37 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/cache_util.h"
+
+#include "base/file_util.h"
+#include "base/logging.h"
+#include "base/string_util.h"
+
+namespace disk_cache {
+
+bool MoveCache(const FilePath& from_path, const FilePath& to_path) {
+ // Just use the version from base.
+ return file_util::Move(from_path, to_path);
+}
+
+void DeleteCache(const FilePath& path, bool remove_folder) {
+ file_util::FileEnumerator iter(path,
+ /* recursive */ false,
+ file_util::FileEnumerator::FILES);
+ for (FilePath file = iter.Next(); !file.value().empty(); file = iter.Next()) {
+ if (!file_util::Delete(file, /* recursive */ false))
+ NOTREACHED();
+ }
+
+ if (remove_folder) {
+ if (!file_util::Delete(path, /* recursive */ false))
+ NOTREACHED();
+ }
+}
+
+bool DeleteCacheFile(const FilePath& name) {
+ return file_util::Delete(name, false);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/cache_util_win.cc b/net/disk_cache/cache_util_win.cc
new file mode 100644
index 00000000..aefce30d
--- /dev/null
+++ b/net/disk_cache/cache_util_win.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/cache_util.h"
+
+#include <windows.h>
+
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/scoped_handle.h"
+#include "base/file_util.h"
+
+namespace {
+
+// Deletes all the files in |path| that match the |search_name| pattern.
+void DeleteFiles(const wchar_t* path, const wchar_t* search_name) {
+ std::wstring name(path);
+ file_util::AppendToPath(&name, search_name);
+
+ WIN32_FIND_DATA data;
+ ScopedFindFileHandle handle(FindFirstFile(name.c_str(), &data));
+ if (!handle.IsValid())
+ return;
+
+ std::wstring adjusted_path(path);
+ adjusted_path += L'\\';
+ do {
+ if (data.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY ||
+ data.dwFileAttributes == FILE_ATTRIBUTE_REPARSE_POINT)
+ continue;
+ std::wstring current(adjusted_path);
+ current += data.cFileName;
+ DeleteFile(current.c_str());
+ } while (FindNextFile(handle, &data));
+}
+
+} // namespace
+
+namespace disk_cache {
+
+bool MoveCache(const FilePath& from_path, const FilePath& to_path) {
+ // I don't want to use the shell version of move because if something goes
+ // wrong, that version will attempt to move file by file and fail at the end.
+ if (!MoveFileEx(from_path.value().c_str(), to_path.value().c_str(), 0)) {
+ LOG(ERROR) << "Unable to move the cache: " << GetLastError();
+ return false;
+ }
+ return true;
+}
+
+void DeleteCache(const FilePath& path, bool remove_folder) {
+ DeleteFiles(path.value().c_str(), L"*");
+ if (remove_folder)
+ RemoveDirectory(path.value().c_str());
+}
+
+bool DeleteCacheFile(const FilePath& name) {
+ // We do a simple delete, without ever falling back to SHFileOperation the
+ // way the version from base does.
+ return DeleteFile(name.value().c_str()) ? true : false;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/disk_cache.h b/net/disk_cache/disk_cache.h
new file mode 100644
index 00000000..3d2793bb
--- /dev/null
+++ b/net/disk_cache/disk_cache.h
@@ -0,0 +1,354 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines the public interface of the disk cache. For more details see
+// http://dev.chromium.org/developers/design-documents/disk-cache
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_H_
+#define NET_DISK_CACHE_DISK_CACHE_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/time.h"
+#include "net/base/cache_type.h"
+#include "net/base/completion_callback.h"
+
+class FilePath;
+
+namespace net {
+class IOBuffer;
+}
+
+namespace disk_cache {
+
+class Entry;
+class Backend;
+typedef net::CompletionCallback CompletionCallback;
+
+// Returns an instance of the Backend. path points to a folder where
+// the cached data will be stored. This cache instance must be the only object
+// that will be reading or writing files to that folder. The returned object
+// should be deleted when not needed anymore. If force is true, and there is
+// a problem with the cache initialization, the files will be deleted and a
+// new set will be created. max_bytes is the maximum size the cache can grow to.
+// If zero is passed in as max_bytes, the cache will determine the value to use
+// based on the available disk space. The returned pointer can be NULL if a
+// fatal error is found.
+// Note: This function is deprecated.
+Backend* CreateCacheBackend(const FilePath& path, bool force,
+ int max_bytes, net::CacheType type);
+
+// Returns an instance of a Backend implemented only in memory. The returned
+// object should be deleted when not needed anymore. max_bytes is the maximum
+// size the cache can grow to. If zero is passed in as max_bytes, the cache will
+// determine the value to use based on the available memory. The returned
+// pointer can be NULL if a fatal error is found.
+// Note: This function is deprecated.
+Backend* CreateInMemoryCacheBackend(int max_bytes);
+
+// Returns an instance of a Backend of the given |type|. |path| points to a
+// folder where the cached data will be stored (if appropriate). This cache
+// instance must be the only object that will be reading or writing files to
+// that folder. The returned object should be deleted when not needed anymore.
+// If |force| is true, and there is a problem with the cache initialization, the
+// files will be deleted and a new set will be created. |max_bytes| is the
+// maximum size the cache can grow to. If zero is passed in as |max_bytes|, the
+// cache will determine the value to use. The returned pointer can be NULL if a
+// fatal error is found. The actual return value of the function is a net error
+// code. If this function returns ERR_IO_PENDING, the |callback| will be invoked
+// when a backend is available or a fatal error condition is reached. The
+// pointer to receive the |backend| must remain valid until the operation
+// completes.
+int CreateCacheBackend(net::CacheType type, const FilePath& path, int max_bytes,
+ bool force, Backend** backend,
+ CompletionCallback* callback);
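+
+// A minimal usage sketch of the asynchronous form (|cache_path| and
+// |my_callback| are placeholders for a FilePath and a net::CompletionCallback
+// owned by the caller):
+//
+//   disk_cache::Backend* backend = NULL;
+//   int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE, cache_path, 0,
+//                                           false, &backend, &my_callback);
+//   // If rv == net::ERR_IO_PENDING, |backend| will be set before
+//   // |my_callback| runs.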
+
+// The root interface for a disk cache instance.
+class Backend {
+ public:
+ // If the backend is destroyed when there are operations in progress (any
+ // callback that has not been invoked yet), this method cancels said
+ // operations so the callbacks are not invoked, possibly leaving the work
+ // half way (for instance, dooming just a few entries). Note that pending IO
+ // for a given Entry (as opposed to the Backend) will still generate a
+ // callback from within this method.
+ virtual ~Backend() {}
+
+ // Returns the number of entries in the cache.
+ virtual int32 GetEntryCount() const = 0;
+
+ // Opens an existing entry. Upon success, the out param holds a pointer
+ // to a Entry object representing the specified disk cache entry.
+ // When the entry pointer is no longer needed, the Close method
+ // should be called.
+ // Note: This method is deprecated.
+ virtual bool OpenEntry(const std::string& key, Entry** entry) = 0;
+
+ // Opens an existing entry. Upon success, |entry| holds a pointer to an Entry
+ // object representing the specified disk cache entry. When the entry pointer
+ // is no longer needed, its Close method should be called. The return value is
+ // a net error code. If this method returns ERR_IO_PENDING, the |callback|
+ // will be invoked when the entry is available. The pointer to receive the
+ // |entry| must remain valid until the operation completes.
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback) = 0;
+
+ // Creates a new entry. Upon success, the out param holds a pointer
+  // to an Entry object representing the newly created disk cache
+ // entry. When the entry pointer is no longer needed, the Close
+ // method should be called.
+ // Note: This method is deprecated.
+ virtual bool CreateEntry(const std::string& key, Entry** entry) = 0;
+
+ // Creates a new entry. Upon success, the out param holds a pointer to an
+ // Entry object representing the newly created disk cache entry. When the
+ // entry pointer is no longer needed, its Close method should be called. The
+ // return value is a net error code. If this method returns ERR_IO_PENDING,
+ // the |callback| will be invoked when the entry is available. The pointer to
+ // receive the |entry| must remain valid until the operation completes.
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback) = 0;
+
+ // Marks the entry, specified by the given key, for deletion.
+ // Note: This method is deprecated.
+ virtual bool DoomEntry(const std::string& key) = 0;
+
+ // Marks the entry, specified by the given key, for deletion. The return value
+ // is a net error code. If this method returns ERR_IO_PENDING, the |callback|
+ // will be invoked after the entry is doomed.
+ virtual int DoomEntry(const std::string& key,
+ CompletionCallback* callback) = 0;
+
+ // Marks all entries for deletion.
+ // Note: This method is deprecated.
+ virtual bool DoomAllEntries() = 0;
+
+ // Marks all entries for deletion. The return value is a net error code. If
+ // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
+ // operation completes.
+ virtual int DoomAllEntries(CompletionCallback* callback) = 0;
+
+ // Marks a range of entries for deletion. This supports unbounded deletes in
+ // either direction by using null Time values for either argument.
+ // Note: This method is deprecated.
+ virtual bool DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time) = 0;
+
+ // Marks a range of entries for deletion. This supports unbounded deletes in
+ // either direction by using null Time values for either argument. The return
+ // value is a net error code. If this method returns ERR_IO_PENDING, the
+ // |callback| will be invoked when the operation completes.
+ virtual int DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ CompletionCallback* callback) = 0;
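+  //
+  // Illustrative sketch: dooming everything written up to now by passing a
+  // null Time as the lower bound (|my_callback| is a hypothetical callback):
+  //
+  //   int rv = backend->DoomEntriesBetween(base::Time(), base::Time::Now(),
+  //                                        &my_callback);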
+
+ // Marks all entries accessed since initial_time for deletion.
+ // Note: This method is deprecated.
+ virtual bool DoomEntriesSince(const base::Time initial_time) = 0;
+
+ // Marks all entries accessed since |initial_time| for deletion. The return
+ // value is a net error code. If this method returns ERR_IO_PENDING, the
+ // |callback| will be invoked when the operation completes.
+ virtual int DoomEntriesSince(const base::Time initial_time,
+ CompletionCallback* callback) = 0;
+
+  // Enumerates the cache. Initialize iter to NULL before calling this method
+ // the first time. That will cause the enumeration to start at the head of
+ // the cache. For subsequent calls, pass the same iter pointer again without
+ // changing its value. This method returns false when there are no more
+ // entries to enumerate. When the entry pointer is no longer needed, the
+ // Close method should be called.
+ //
+ // NOTE: This method does not modify the last_used field of the entry,
+ // and therefore it does not impact the eviction ranking of the entry.
+ // Note: This method is deprecated.
+ virtual bool OpenNextEntry(void** iter, Entry** next_entry) = 0;
+
+ // Enumerates the cache. Initialize |iter| to NULL before calling this method
+ // the first time. That will cause the enumeration to start at the head of
+ // the cache. For subsequent calls, pass the same |iter| pointer again without
+ // changing its value. This method returns ERR_FAILED when there are no more
+ // entries to enumerate. When the entry pointer is no longer needed, its
+ // Close method should be called. The return value is a net error code. If
+ // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
+ // |next_entry| is available. The pointer to receive the |next_entry| must
+ // remain valid until the operation completes.
+ //
+ // NOTE: This method does not modify the last_used field of the entry, and
+ // therefore it does not impact the eviction ranking of the entry.
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ CompletionCallback* callback) = 0;
+
+ // Releases iter without returning the next entry. Whenever OpenNextEntry()
+ // returns true, but the caller is not interested in continuing the
+ // enumeration by calling OpenNextEntry() again, the enumeration must be
+ // ended by calling this method with iter returned by OpenNextEntry().
+ virtual void EndEnumeration(void** iter) = 0;
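+  //
+  // Illustrative enumeration sketch using the synchronous variants above
+  // (|Process| is a hypothetical helper that returns true to stop early):
+  //
+  //   void* iter = NULL;
+  //   disk_cache::Entry* entry;
+  //   while (backend->OpenNextEntry(&iter, &entry)) {
+  //     bool done = Process(entry);
+  //     entry->Close();
+  //     if (done) {
+  //       backend->EndEnumeration(&iter);  // Stop before reaching the end.
+  //       break;
+  //     }
+  //   }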
+
+  // Returns a list of cache statistics.
+ virtual void GetStats(
+ std::vector<std::pair<std::string, std::string> >* stats) = 0;
+};
+
+// This interface represents an entry in the disk cache.
+class Entry {
+ public:
+ // Marks this cache entry for deletion.
+ virtual void Doom() = 0;
+
+ // Releases this entry. Calling this method does not cancel pending IO
+ // operations on this entry. Even after the last reference to this object has
+ // been released, pending completion callbacks may be invoked.
+ virtual void Close() = 0;
+
+ // Returns the key associated with this cache entry.
+ virtual std::string GetKey() const = 0;
+
+ // Returns the time when this cache entry was last used.
+ virtual base::Time GetLastUsed() const = 0;
+
+ // Returns the time when this cache entry was last modified.
+ virtual base::Time GetLastModified() const = 0;
+
+ // Returns the size of the cache data with the given index.
+ virtual int32 GetDataSize(int index) const = 0;
+
+ // Copies cache data into the given buffer of length |buf_len|. If
+ // completion_callback is null, then this call blocks until the read
+ // operation is complete. Otherwise, completion_callback will be
+ // called on the current thread once the read completes. Returns the
+ // number of bytes read or a network error code. If a completion callback is
+ // provided then it will be called if this function returns ERR_IO_PENDING,
+ // and a reference to |buf| will be retained until the callback is called.
+ // Note that the callback will be invoked in any case, even after Close has
+ // been called; in other words, the caller may close this entry without
+ // having to wait for all the callbacks, and still rely on the cleanup
+ // performed from the callback code.
+ virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ CompletionCallback* completion_callback) = 0;
+
+ // Copies cache data from the given buffer of length |buf_len|. If
+ // completion_callback is null, then this call blocks until the write
+ // operation is complete. Otherwise, completion_callback will be
+ // called on the current thread once the write completes. Returns the
+ // number of bytes written or a network error code. If a completion callback
+ // is provided then it will be called if this function returns ERR_IO_PENDING,
+ // and a reference to |buf| will be retained until the callback is called.
+ // Note that the callback will be invoked in any case, even after Close has
+ // been called; in other words, the caller may close this entry without
+ // having to wait for all the callbacks, and still rely on the cleanup
+ // performed from the callback code.
+ // If truncate is true, this call will truncate the stored data at the end of
+ // what we are writing here.
+ virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ CompletionCallback* completion_callback,
+ bool truncate) = 0;
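+  //
+  // Illustrative sketch: write 100 bytes at the start of stream 1 and drop
+  // anything previously stored past those 100 bytes (|buf| is assumed to be
+  // a net::IOBuffer holding at least 100 bytes, |my_callback| a hypothetical
+  // callback):
+  //
+  //   int rv = entry->WriteData(1, 0, buf, 100, &my_callback,
+  //                             true /* truncate */);
+  //   // rv is 100, net::ERR_IO_PENDING, or another net error code.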
+
+ // Sparse entries support:
+ //
+ // A Backend implementation can support sparse entries, so the cache keeps
+ // track of which parts of the entry have been written before. The backend
+ // will never return data that was not written previously, so reading from
+  // such a region will return 0 bytes read (or actually the number of bytes read
+ // before reaching that region).
+ //
+ // There are only two streams for sparse entries: a regular control stream
+ // (index 0) that must be accessed through the regular API (ReadData and
+  // WriteData), and one sparse stream that must be accessed through the sparse-
+ // aware API that follows. Calling a non-sparse aware method with an index
+ // argument other than 0 is a mistake that results in implementation specific
+ // behavior. Using a sparse-aware method with an entry that was not stored
+ // using the same API, or with a backend that doesn't support sparse entries
+ // will return ERR_CACHE_OPERATION_NOT_SUPPORTED.
+ //
+ // The storage granularity of the implementation should be at least 1 KB. In
+ // other words, storing less than 1 KB may result in an implementation
+ // dropping the data completely, and writing at offsets not aligned with 1 KB,
+ // or with lengths not a multiple of 1 KB may result in the first or last part
+ // of the data being discarded. However, two consecutive writes should not
+ // result in a hole in between the two parts as long as they are sequential
+ // (the second one starts where the first one ended), and there is no other
+ // write between them.
+ //
+ // The Backend implementation is free to evict any range from the cache at any
+ // moment, so in practice, the previously stated granularity of 1 KB is not
+ // as bad as it sounds.
+ //
+ // The sparse methods don't support multiple simultaneous IO operations to the
+ // same physical entry, so in practice a single object should be instantiated
+ // for a given key at any given time. Once an operation has been issued, the
+ // caller should wait until it completes before starting another one. This
+ // requirement includes the case when an entry is closed while some operation
+ // is in progress and another object is instantiated; any IO operation will
+ // fail while the previous operation is still in-flight. In order to deal with
+ // this requirement, the caller could either wait until the operation
+ // completes before closing the entry, or call CancelSparseIO() before closing
+ // the entry, and call ReadyForSparseIO() on the new entry and wait for the
+ // callback before issuing new operations.
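+  //
+  // Illustrative sparse sketch (|buf| is assumed to be a net::IOBuffer of
+  // 1024 bytes, |my_callback| a hypothetical callback): write one aligned
+  // 1 KB block at offset 5 MB and, once the write has completed, ask which
+  // part of that range is stored:
+  //
+  //   entry->WriteSparseData(5 * 1024 * 1024, buf, 1024, &my_callback);
+  //   // (wait for |my_callback| if the write returned net::ERR_IO_PENDING)
+  //   int64 start;
+  //   int available = entry->GetAvailableRange(5 * 1024 * 1024, 1024, &start);
+  //   // On success, |start| is 5 * 1024 * 1024 and |available| is 1024.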
+
+ // Behaves like ReadData() except that this method is used to access sparse
+ // entries.
+ virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ CompletionCallback* completion_callback) = 0;
+
+ // Behaves like WriteData() except that this method is used to access sparse
+ // entries. |truncate| is not part of this interface because a sparse entry
+ // is not expected to be reused with new data. To delete the old data and
+ // start again, or to reduce the total size of the stream data (which implies
+ // that the content has changed), the whole entry should be doomed and
+ // re-created.
+ virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ CompletionCallback* completion_callback) = 0;
+
+ // Returns information about the currently stored portion of a sparse entry.
+ // |offset| and |len| describe a particular range that should be scanned to
+ // find out if it is stored or not. |start| will contain the offset of the
+ // first byte that is stored within this range, and the return value is the
+ // minimum number of consecutive stored bytes. Note that it is possible that
+ // this entry has stored more than the returned value. This method returns a
+ // net error code whenever the request cannot be completed successfully.
+ // Note: This method is deprecated.
+ virtual int GetAvailableRange(int64 offset, int len, int64* start) = 0;
+
+ // Returns information about the currently stored portion of a sparse entry.
+ // |offset| and |len| describe a particular range that should be scanned to
+ // find out if it is stored or not. |start| will contain the offset of the
+ // first byte that is stored within this range, and the return value is the
+ // minimum number of consecutive stored bytes. Note that it is possible that
+ // this entry has stored more than the returned value. This method returns a
+ // net error code whenever the request cannot be completed successfully. If
+ // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
+ // operation completes, and |start| must remain valid until that point.
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ CompletionCallback* callback) = 0;
+
+ // Cancels any pending sparse IO operation (if any). The completion callback
+ // of the operation in question will still be called when the operation
+ // finishes, but the operation will finish sooner when this method is used.
+ virtual void CancelSparseIO() = 0;
+
+ // Returns OK if this entry can be used immediately. If that is not the
+ // case, returns ERR_IO_PENDING and invokes the provided callback when this
+ // entry is ready to use. This method always returns OK for non-sparse
+ // entries, and returns ERR_IO_PENDING when a previous operation was cancelled
+ // (by calling CancelSparseIO), but the cache is still busy with it. If there
+ // is a pending operation that has not been cancelled, this method will return
+ // OK although another IO operation cannot be issued at this time; in this
+ // case the caller should just wait for the regular callback to be invoked
+ // instead of using this method to provide another callback.
+ //
+ // Note that CancelSparseIO may have been called on another instance of this
+ // object that refers to the same physical disk entry.
+ // Note: This method is deprecated.
+ virtual int ReadyForSparseIO(CompletionCallback* completion_callback) = 0;
+
+ protected:
+ virtual ~Entry() {}
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_DISK_CACHE_H_
diff --git a/net/disk_cache/disk_cache_perftest.cc b/net/disk_cache/disk_cache_perftest.cc
new file mode 100644
index 00000000..40643411
--- /dev/null
+++ b/net/disk_cache/disk_cache_perftest.cc
@@ -0,0 +1,243 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/file_path.h"
+#include "base/file_util.h"
+#include "base/perftimer.h"
+#include "base/string_util.h"
+#include "base/test/test_file_util.h"
+#include "base/timer.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/block_files.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/hash.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+using base::Time;
+
+extern volatile int g_cache_tests_received;
+extern volatile bool g_cache_tests_error;
+
+typedef PlatformTest DiskCacheTest;
+
+namespace {
+
+struct TestEntry {
+ std::string key;
+ int data_len;
+};
+typedef std::vector<TestEntry> TestEntries;
+
+const int kMaxSize = 16 * 1024 - 1;
+
+// Creates num_entries entries in the cache, and writes 200 bytes of metadata and up
+// to kMaxSize of data to each entry.
+int TimeWrite(int num_entries, disk_cache::Backend* cache,
+ TestEntries* entries) {
+ const int kSize1 = 200;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kMaxSize);
+
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kMaxSize, false);
+
+ CallbackTest callback(true);
+ g_cache_tests_error = false;
+ g_cache_tests_received = 0;
+ int expected = 0;
+
+ MessageLoopHelper helper;
+
+ PerfTimeLogger timer("Write disk cache entries");
+
+ for (int i = 0; i < num_entries; i++) {
+ TestEntry entry;
+ entry.key = GenerateKey(true);
+ entry.data_len = rand() % kMaxSize;
+ entries->push_back(entry);
+
+ disk_cache::Entry* cache_entry;
+ if (!cache->CreateEntry(entry.key, &cache_entry))
+ break;
+ int ret = cache_entry->WriteData(0, 0, buffer1, kSize1, &callback, false);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (kSize1 != ret)
+ break;
+
+ ret = cache_entry->WriteData(1, 0, buffer2, entry.data_len, &callback,
+ false);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (entry.data_len != ret)
+ break;
+ cache_entry->Close();
+ }
+
+ helper.WaitUntilCacheIoFinished(expected);
+ timer.Done();
+
+ return expected;
+}
+
+// Reads the data and metadata from each entry listed in |entries|.
+int TimeRead(int num_entries, disk_cache::Backend* cache,
+ const TestEntries& entries, bool cold) {
+ const int kSize1 = 200;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kMaxSize);
+
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kMaxSize, false);
+
+ CallbackTest callback(true);
+ g_cache_tests_error = false;
+ g_cache_tests_received = 0;
+ int expected = 0;
+
+ MessageLoopHelper helper;
+
+ const char* message = cold ? "Read disk cache entries (cold)" :
+ "Read disk cache entries (warm)";
+ PerfTimeLogger timer(message);
+
+ for (int i = 0; i < num_entries; i++) {
+ disk_cache::Entry* cache_entry;
+ if (!cache->OpenEntry(entries[i].key, &cache_entry))
+ break;
+ int ret = cache_entry->ReadData(0, 0, buffer1, kSize1, &callback);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (kSize1 != ret)
+ break;
+
+ ret = cache_entry->ReadData(1, 0, buffer2, entries[i].data_len, &callback);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ else if (entries[i].data_len != ret)
+ break;
+ cache_entry->Close();
+ }
+
+ helper.WaitUntilCacheIoFinished(expected);
+ timer.Done();
+
+ return expected;
+}
+
+int BlockSize() {
+  // We can use from 1 to 4 blocks.
+ return (rand() & 0x3) + 1;
+}
+
+} // namespace
+
+TEST_F(DiskCacheTest, Hash) {
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ PerfTimeLogger timer("Hash disk cache keys");
+ for (int i = 0; i < 300000; i++) {
+ std::string key = GenerateKey(true);
+ disk_cache::Hash(key);
+ }
+ timer.Done();
+}
+
+TEST_F(DiskCacheTest, CacheBackendPerformance) {
+ MessageLoopForIO message_loop;
+
+ ScopedTestCache test_cache;
+ disk_cache::Backend* cache =
+ disk_cache::CreateCacheBackend(test_cache.path(), false, 0,
+ net::DISK_CACHE);
+ ASSERT_TRUE(NULL != cache);
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ TestEntries entries;
+ int num_entries = 1000;
+
+ int ret = TimeWrite(num_entries, cache, &entries);
+ EXPECT_EQ(ret, g_cache_tests_received);
+
+ MessageLoop::current()->RunAllPending();
+ delete cache;
+
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ test_cache.path().AppendASCII("index")));
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ test_cache.path().AppendASCII("data_0")));
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ test_cache.path().AppendASCII("data_1")));
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ test_cache.path().AppendASCII("data_2")));
+ ASSERT_TRUE(file_util::EvictFileFromSystemCache(
+ test_cache.path().AppendASCII("data_3")));
+
+ cache = disk_cache::CreateCacheBackend(test_cache.path(), false, 0,
+ net::DISK_CACHE);
+ ASSERT_TRUE(NULL != cache);
+
+ ret = TimeRead(num_entries, cache, entries, true);
+ EXPECT_EQ(ret, g_cache_tests_received);
+
+ ret = TimeRead(num_entries, cache, entries, false);
+ EXPECT_EQ(ret, g_cache_tests_received);
+
+ MessageLoop::current()->RunAllPending();
+ delete cache;
+}
+
+// Creating and deleting "entries" on a block-file is something quite frequent
+// (after all, almost everything is stored on block files). The operation is
+// almost free when the file is empty, but can be expensive if the file gets
+// fragmented, or if we have multiple files. This test measures that scenario,
+// by using multiple, highly fragmented files.
+TEST_F(DiskCacheTest, BlockFilesPerformance) {
+ MessageLoopForIO message_loop;
+
+ ScopedTestCache test_cache;
+
+ disk_cache::BlockFiles files(test_cache.path());
+ ASSERT_TRUE(files.Init(true));
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ const int kNumEntries = 60000;
+ disk_cache::Addr* address = new disk_cache::Addr[kNumEntries];
+
+ PerfTimeLogger timer1("Fill three block-files");
+
+  // Fill up the rankings block file (use three files).
+ for (int i = 0; i < kNumEntries; i++) {
+ EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, BlockSize(),
+ &address[i]));
+ }
+
+ timer1.Done();
+ PerfTimeLogger timer2("Create and delete blocks");
+
+ for (int i = 0; i < 200000; i++) {
+ int entry = rand() * (kNumEntries / RAND_MAX + 1);
+ if (entry >= kNumEntries)
+ entry = 0;
+
+ files.DeleteBlock(address[entry], false);
+ EXPECT_TRUE(files.CreateBlock(disk_cache::RANKINGS, BlockSize(),
+ &address[entry]));
+ }
+
+ timer2.Done();
+ MessageLoop::current()->RunAllPending();
+ delete[] address;
+}
diff --git a/net/disk_cache/disk_cache_test_base.cc b/net/disk_cache/disk_cache_test_base.cc
new file mode 100644
index 00000000..21352a27
--- /dev/null
+++ b/net/disk_cache/disk_cache_test_base.cc
@@ -0,0 +1,113 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/disk_cache_test_base.h"
+
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/mem_backend_impl.h"
+
+void DiskCacheTest::TearDown() {
+ MessageLoop::current()->RunAllPending();
+}
+
+void DiskCacheTestWithCache::SetMaxSize(int size) {
+ size_ = size;
+ if (cache_impl_)
+ EXPECT_TRUE(cache_impl_->SetMaxSize(size));
+
+ if (mem_cache_)
+ EXPECT_TRUE(mem_cache_->SetMaxSize(size));
+}
+
+void DiskCacheTestWithCache::InitCache() {
+ if (mask_ || new_eviction_)
+ implementation_ = true;
+
+ if (memory_only_)
+ InitMemoryCache();
+ else
+ InitDiskCache();
+
+ ASSERT_TRUE(NULL != cache_);
+ if (first_cleanup_)
+ ASSERT_EQ(0, cache_->GetEntryCount());
+}
+
+void DiskCacheTestWithCache::InitMemoryCache() {
+ if (!implementation_) {
+ cache_ = disk_cache::CreateInMemoryCacheBackend(size_);
+ return;
+ }
+
+ mem_cache_ = new disk_cache::MemBackendImpl();
+ cache_ = mem_cache_;
+ ASSERT_TRUE(NULL != cache_);
+
+ if (size_)
+ EXPECT_TRUE(mem_cache_->SetMaxSize(size_));
+
+ ASSERT_TRUE(mem_cache_->Init());
+}
+
+void DiskCacheTestWithCache::InitDiskCache() {
+ FilePath path = GetCacheFilePath();
+ if (first_cleanup_)
+ ASSERT_TRUE(DeleteCache(path));
+
+ if (implementation_)
+ return InitDiskCacheImpl(path);
+
+ cache_ = disk_cache::BackendImpl::CreateBackend(path, force_creation_, size_,
+ net::DISK_CACHE,
+ disk_cache::kNoRandom);
+}
+
+void DiskCacheTestWithCache::InitDiskCacheImpl(const FilePath& path) {
+ if (mask_)
+ cache_impl_ = new disk_cache::BackendImpl(path, mask_);
+ else
+ cache_impl_ = new disk_cache::BackendImpl(path);
+
+ cache_ = cache_impl_;
+ ASSERT_TRUE(NULL != cache_);
+
+ if (size_)
+ EXPECT_TRUE(cache_impl_->SetMaxSize(size_));
+
+ if (new_eviction_)
+ cache_impl_->SetNewEviction();
+
+ cache_impl_->SetFlags(disk_cache::kNoRandom);
+ ASSERT_TRUE(cache_impl_->Init());
+}
+
+void DiskCacheTestWithCache::TearDown() {
+ MessageLoop::current()->RunAllPending();
+ delete cache_;
+
+ if (!memory_only_ && integrity_) {
+ FilePath path = GetCacheFilePath();
+ EXPECT_TRUE(CheckCacheIntegrity(path, new_eviction_));
+ }
+
+ PlatformTest::TearDown();
+}
+
+// We are expected to leak memory when simulating crashes.
+void DiskCacheTestWithCache::SimulateCrash() {
+ ASSERT_TRUE(implementation_ && !memory_only_);
+ cache_impl_->ClearRefCountForTest();
+
+ delete cache_impl_;
+ FilePath path = GetCacheFilePath();
+ EXPECT_TRUE(CheckCacheIntegrity(path, new_eviction_));
+
+ InitDiskCacheImpl(path);
+}
+
+void DiskCacheTestWithCache::SetTestMode() {
+ ASSERT_TRUE(implementation_ && !memory_only_);
+ cache_impl_->SetUnitTestMode();
+}
diff --git a/net/disk_cache/disk_cache_test_base.h b/net/disk_cache/disk_cache_test_base.h
new file mode 100644
index 00000000..c198e229
--- /dev/null
+++ b/net/disk_cache/disk_cache_test_base.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_TEST_BASE_H_
+#define NET_DISK_CACHE_DISK_CACHE_TEST_BASE_H_
+
+#include "base/basictypes.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+class FilePath;
+
+namespace disk_cache {
+
+class Backend;
+class BackendImpl;
+class MemBackendImpl;
+
+} // namespace disk_cache
+
+// These tests can use the path service, which uses autoreleased objects on the
+// Mac, so this needs to be a PlatformTest. Even tests that do not require a
+// cache (and that do not need to be a DiskCacheTestWithCache) are susceptible
+// to this problem; all such tests should use TEST_F(DiskCacheTest, ...).
+class DiskCacheTest : public PlatformTest {
+ virtual void TearDown();
+};
+
+// Provides basic support for cache related tests.
+class DiskCacheTestWithCache : public DiskCacheTest {
+ protected:
+ DiskCacheTestWithCache()
+ : cache_(NULL), cache_impl_(NULL), mem_cache_(NULL), mask_(0), size_(0),
+ memory_only_(false), implementation_(false), force_creation_(false),
+ new_eviction_(false), first_cleanup_(true), integrity_(true) {}
+
+ void InitCache();
+ virtual void TearDown();
+ void SimulateCrash();
+ void SetTestMode();
+
+ void SetMemoryOnlyMode() {
+ memory_only_ = true;
+ }
+
+ // Use the implementation directly instead of the factory provided object.
+ void SetDirectMode() {
+ implementation_ = true;
+ }
+
+ void SetMask(uint32 mask) {
+ mask_ = mask;
+ }
+
+ void SetMaxSize(int size);
+
+ // Deletes and re-creates the files on initialization errors.
+ void SetForceCreation() {
+ force_creation_ = true;
+ }
+
+ void SetNewEviction() {
+ new_eviction_ = true;
+ }
+
+ void DisableFirstCleanup() {
+ first_cleanup_ = false;
+ }
+
+ void DisableIntegrityCheck() {
+ integrity_ = false;
+ }
+
+ // cache_ will always have a valid object, regardless of how the cache was
+ // initialized. The implementation pointers can be NULL.
+ disk_cache::Backend* cache_;
+ disk_cache::BackendImpl* cache_impl_;
+ disk_cache::MemBackendImpl* mem_cache_;
+
+ uint32 mask_;
+ int size_;
+ bool memory_only_;
+ bool implementation_;
+ bool force_creation_;
+ bool new_eviction_;
+ bool first_cleanup_;
+ bool integrity_;
+ // This is intentionally left uninitialized, to be used by any test.
+ bool success_;
+
+ private:
+ void InitMemoryCache();
+ void InitDiskCache();
+ void InitDiskCacheImpl(const FilePath& path);
+};
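+
+// Illustrative fixture usage sketch (the test name and key are made up):
+//
+//   TEST_F(DiskCacheTestWithCache, Example) {
+//     SetMemoryOnlyMode();  // Omit this line to exercise the disk backend.
+//     InitCache();
+//     disk_cache::Entry* entry;
+//     ASSERT_TRUE(cache_->CreateEntry("some key", &entry));
+//     entry->Close();
+//   }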
+
+#endif // NET_DISK_CACHE_DISK_CACHE_TEST_BASE_H_
diff --git a/net/disk_cache/disk_cache_test_util.cc b/net/disk_cache/disk_cache_test_util.cc
new file mode 100644
index 00000000..36d28d24
--- /dev/null
+++ b/net/disk_cache/disk_cache_test_util.cc
@@ -0,0 +1,163 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/disk_cache_test_util.h"
+
+#include "base/logging.h"
+#include "base/file_util.h"
+#include "base/path_service.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/file.h"
+
+using base::Time;
+using base::TimeDelta;
+
+namespace {
+
+FilePath BuildCachePath(const std::string& name) {
+ FilePath path;
+  PathService::Get(base::DIR_TEMP, &path);  // Ignore return value.
+ path = path.AppendASCII(name);
+ if (!file_util::PathExists(path))
+ file_util::CreateDirectory(path);
+
+ return path;
+}
+
+}  // namespace
+
+std::string GenerateKey(bool same_length) {
+ char key[200];
+ CacheTestFillBuffer(key, sizeof(key), same_length);
+
+ key[199] = '\0';
+ return std::string(key);
+}
+
+void CacheTestFillBuffer(char* buffer, size_t len, bool no_nulls) {
+ static bool called = false;
+ if (!called) {
+ called = true;
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+ }
+
+ for (size_t i = 0; i < len; i++) {
+ buffer[i] = static_cast<char>(rand());
+ if (!buffer[i] && no_nulls)
+ buffer[i] = 'g';
+ }
+ if (len && !buffer[0])
+ buffer[0] = 'g';
+}
+
+FilePath GetCacheFilePath() {
+ return BuildCachePath("cache_test");
+}
+
+bool CreateCacheTestFile(const FilePath& name) {
+ int flags = base::PLATFORM_FILE_CREATE_ALWAYS |
+ base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE;
+
+ scoped_refptr<disk_cache::File> file(new disk_cache::File(
+ base::CreatePlatformFile(name, flags, NULL)));
+ if (!file->IsValid())
+ return false;
+
+ file->SetLength(4 * 1024 * 1024);
+ return true;
+}
+
+bool DeleteCache(const FilePath& path) {
+ disk_cache::DeleteCache(path, false);
+ return true;
+}
+
+bool CheckCacheIntegrity(const FilePath& path, bool new_eviction) {
+ scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(path));
+ if (!cache.get())
+ return false;
+ if (new_eviction)
+ cache->SetNewEviction();
+ cache->SetFlags(disk_cache::kNoRandom);
+ if (!cache->Init())
+ return false;
+ return cache->SelfCheck() >= 0;
+}
+
+ScopedTestCache::ScopedTestCache() : path_(GetCacheFilePath()) {
+ bool result = DeleteCache(path_);
+ DCHECK(result);
+}
+
+ScopedTestCache::ScopedTestCache(const std::string& name)
+ : path_(BuildCachePath(name)) {
+ bool result = DeleteCache(path_);
+ DCHECK(result);
+}
+
+ScopedTestCache::~ScopedTestCache() {
+ file_util::Delete(path(), true);
+}
+
+// -----------------------------------------------------------------------
+
+volatile int g_cache_tests_received = 0;
+volatile bool g_cache_tests_error = false;
+
+// On the actual callback, increase the number of tests received and check for
+// errors (an unexpected test received).
+void CallbackTest::RunWithParams(const Tuple1<int>& params) {
+ if (reuse_) {
+ DCHECK(1 == reuse_);
+ if (2 == reuse_)
+ g_cache_tests_error = true;
+ reuse_++;
+ }
+
+ result_ = params.a;
+ g_cache_tests_received++;
+}
+
+// -----------------------------------------------------------------------
+
+MessageLoopHelper::MessageLoopHelper()
+ : num_callbacks_(0),
+ num_iterations_(0),
+ last_(0),
+ completed_(false) {
+  // Create a recurrent timer of 50 ms.
+ timer_.Start(
+ TimeDelta::FromMilliseconds(50), this, &MessageLoopHelper::TimerExpired);
+}
+
+bool MessageLoopHelper::WaitUntilCacheIoFinished(int num_callbacks) {
+ if (num_callbacks == g_cache_tests_received)
+ return true;
+
+ ExpectCallbacks(num_callbacks);
+ MessageLoop::current()->Run();
+ return completed_;
+}
+
+// Quits the message loop when all callbacks are called or we've been waiting
+// too long for them (2 secs without a callback).
+void MessageLoopHelper::TimerExpired() {
+ if (g_cache_tests_received > num_callbacks_) {
+ NOTREACHED();
+ } else if (g_cache_tests_received == num_callbacks_) {
+ completed_ = true;
+ MessageLoop::current()->Quit();
+ } else {
+ // Not finished yet. See if we have to abort.
+ if (last_ == g_cache_tests_received)
+ num_iterations_++;
+ else
+ last_ = g_cache_tests_received;
+ if (40 == num_iterations_)
+ MessageLoop::current()->Quit();
+ }
+}
diff --git a/net/disk_cache/disk_cache_test_util.h b/net/disk_cache/disk_cache_test_util.h
new file mode 100644
index 00000000..623810b9
--- /dev/null
+++ b/net/disk_cache/disk_cache_test_util.h
@@ -0,0 +1,101 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_DISK_CACHE_TEST_UTIL_H_
+#define NET_DISK_CACHE_DISK_CACHE_TEST_UTIL_H_
+
+#include <string>
+
+#include "base/file_path.h"
+#include "base/message_loop.h"
+#include "base/task.h"
+#include "base/timer.h"
+#include "build/build_config.h"
+
+class FilePath;
+
+// Re-creates a given test file inside the cache test folder.
+bool CreateCacheTestFile(const FilePath& name);
+
+// Deletes all files of the cache.
+bool DeleteCache(const FilePath& path);
+
+// Gets the path to the cache test folder.
+FilePath GetCacheFilePath();
+
+// Fills buffer with random values (may contain nulls unless no_nulls is true).
+void CacheTestFillBuffer(char* buffer, size_t len, bool no_nulls);
+
+// Generates a random key of up to 200 bytes.
+std::string GenerateKey(bool same_length);
+
+// Returns true if the cache is not corrupt.
+bool CheckCacheIntegrity(const FilePath& path, bool new_eviction);
+
+// Helper class which ensures that the cache dir returned by GetCacheFilePath
+// exists and is emptied in the ctor, and that it gets deleted in the dtor.
+class ScopedTestCache {
+ public:
+ ScopedTestCache();
+ ScopedTestCache(const std::string& name); // Use a specific folder name.
+ ~ScopedTestCache();
+
+ FilePath path() const { return path_; }
+
+ private:
+ const FilePath path_; // Path to the cache test folder.
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedTestCache);
+};
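+
+// Typical use, mirroring disk_cache_perftest.cc (illustrative sketch):
+//
+//   ScopedTestCache test_cache;  // Empties the test folder now, deletes it
+//                                // when it goes out of scope.
+//   disk_cache::Backend* cache = disk_cache::CreateCacheBackend(
+//       test_cache.path(), false, 0, net::DISK_CACHE);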
+
+// -----------------------------------------------------------------------
+
+// Simple callback to process IO completions from the cache. It allows tests
+// with multiple simultaneous IO operations.
+class CallbackTest : public CallbackRunner< Tuple1<int> > {
+ public:
+ explicit CallbackTest(bool reuse) : result_(-1), reuse_(reuse ? 0 : 1) {}
+ ~CallbackTest() {}
+
+ virtual void RunWithParams(const Tuple1<int>& params);
+ int result() const { return result_; }
+
+ private:
+ int result_;
+ int reuse_;
+ DISALLOW_COPY_AND_ASSIGN(CallbackTest);
+};
+
+// -----------------------------------------------------------------------
+
+// Simple helper to deal with the message loop on a test.
+class MessageLoopHelper {
+ public:
+ MessageLoopHelper();
+
+ // Run the message loop and wait for num_callbacks before returning. Returns
+  // false if we end up waiting too long.
+ bool WaitUntilCacheIoFinished(int num_callbacks);
+
+ private:
+  // Sets the number of callbacks that we expect to receive.
+ void ExpectCallbacks(int num_callbacks) {
+ num_callbacks_ = num_callbacks;
+ num_iterations_ = last_ = 0;
+ completed_ = false;
+ }
+
+ // Called periodically to test if WaitUntilCacheIoFinished should return.
+ void TimerExpired();
+
+ base::RepeatingTimer<MessageLoopHelper> timer_;
+ int num_callbacks_;
+ int num_iterations_;
+ int last_;
+ bool completed_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageLoopHelper);
+};
+
+#endif // NET_DISK_CACHE_DISK_CACHE_TEST_UTIL_H_
diff --git a/net/disk_cache/disk_format.h b/net/disk_cache/disk_format.h
new file mode 100644
index 00000000..619bd893
--- /dev/null
+++ b/net/disk_cache/disk_format.h
@@ -0,0 +1,266 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The cache is stored on disk as a collection of block-files, plus an index
+// file plus a collection of external files.
+//
+// Any data blob bigger than kMaxBlockSize (disk_cache/addr.h) will be stored on a
+// separate file named f_xxx where x is a hexadecimal number. Shorter data will
+// be stored as a series of blocks on a block-file. In any case, CacheAddr
+// represents the address of the data inside the cache.
+//
+// The index file is just a simple hash table that maps a particular entry to
+// a CacheAddr value. Linking for a given hash bucket is handled internally
+// by the cache entry.
+//
+// The last element of the cache is the block-file. A block file is a file
+// designed to store blocks of data of a given size. It is able to store data
+// that spans from one to four consecutive "blocks", and it grows as needed to
+// store up to approximately 65000 blocks. It has a fixed-size header used for
+// bookkeeping, such as tracking which blocks are free. For example, a
+// block-file for 1KB blocks will grow from 8KB when totally empty to about 64MB
+// when completely full. At that point, data blocks of 1KB will be stored on a
+// second block file that will store the next set of 65000 blocks. The first
+// file contains the number of the second file, and the second file contains the
+// number of a third file, created when the second file reaches its limit. It is
+// important to remember that no matter how long the chain of files is, any
+// given block can be located directly by its address, which contains the file
+// number and starting block inside the file.
+//
+// A new cache is initialized with four block files (named data_0 through
+// data_3), each one dedicated to store blocks of a given size. The number at
+// the end of the file name is the block file number (in decimal).
+//
+// There are two "special" types of blocks: an entry and a rankings node. An
+// entry keeps track of all the information related to the same cache entry,
+// such as the key, hash value, data pointers etc. A rankings node keeps track
+// of the information that is updated frequently for a given entry, such as its
+// location on the LRU lists, last access time etc.
+//
+// The files that store internal information for the cache (blocks and index)
+// are at least partially memory mapped. They have a location that is signaled
+// every time the internal structures are modified, so it is possible to detect
+// (most of the time) when the process dies in the middle of an update.
+//
+// In order to prevent dirty data from being used as valid (after a crash), every
+// cache entry has a dirty identifier. Each running instance of the cache keeps
+// a separate identifier (maintained on the "this_id" header field) that is used
+// to mark every entry that is created or modified. When the entry is closed,
+// and all the data can be trusted, the dirty flag is cleared from the entry.
+// When the cache encounters an entry whose identifier is different than the one
+// being currently used, it means that the entry was not properly closed on a
+// previous run, so it is discarded.
+
+#ifndef NET_DISK_CACHE_DISK_FORMAT_H_
+#define NET_DISK_CACHE_DISK_FORMAT_H_
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+typedef uint32 CacheAddr;
+
+const int kIndexTablesize = 0x10000;
+const uint32 kIndexMagic = 0xC103CAC3;
+const uint32 kCurrentVersion = 0x20000; // Version 2.0.
+
+struct LruData {
+ int32 pad1[2];
+ int32 filled; // Flag to tell when we filled the cache.
+ int32 sizes[5];
+ CacheAddr heads[5];
+ CacheAddr tails[5];
+ CacheAddr transaction; // In-flight operation target.
+ int32 operation; // Actual in-flight operation.
+ int32 operation_list; // In-flight operation list.
+ int32 pad2[7];
+};
+
+// Header for the master index file.
+struct IndexHeader {
+ uint32 magic;
+ uint32 version;
+ int32 num_entries; // Number of entries currently stored.
+ int32 num_bytes; // Total size of the stored data.
+ int32 last_file; // Last external file created.
+ int32 this_id; // Id for all entries being changed (dirty flag).
+ CacheAddr stats; // Storage for usage data.
+ int32 table_len; // Actual size of the table (0 == kIndexTablesize).
+ int32 crash; // Signals a previous crash.
+ int32 experiment; // Id of an ongoing test.
+ uint64 create_time; // Creation time for this set of files.
+ int32 pad[52];
+ LruData lru; // Eviction control data.
+ IndexHeader() {
+ memset(this, 0, sizeof(*this));
+ magic = kIndexMagic;
+ version = kCurrentVersion;
+ };
+};
+
+// The structure of the whole index file.
+struct Index {
+ IndexHeader header;
+ CacheAddr table[kIndexTablesize]; // Default size. Actual size controlled
+ // by header.table_len.
+};
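+
+// Illustrative sketch (an assumption about how the backend uses the table,
+// not something this header states): an entry whose full hash is |hash| would
+// hang off the bucket
+//
+//   CacheAddr head = index->table[hash & (table_size - 1)];
+//
+// where table_size is header.table_len, or kIndexTablesize when that field
+// is 0.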
+
+// Main structure for an entry on the backing storage. If the key is longer than
+// what can be stored on this structure, it will be extended on consecutive
+// blocks (adding 256 bytes each time), up to 4 blocks (1024 - 96 - 1 chars).
+// After that point, the whole key will be stored as a data block or external
+// file.
+struct EntryStore {
+ uint32 hash; // Full hash of the key.
+ CacheAddr next; // Next entry with the same hash or bucket.
+ CacheAddr rankings_node; // Rankings node for this entry.
+ int32 reuse_count; // How often is this entry used.
+ int32 refetch_count; // How often is this fetched from the net.
+ int32 state; // Current state.
+ uint64 creation_time;
+ int32 key_len;
+ CacheAddr long_key; // Optional address of a long key.
+ int32 data_size[4]; // We can store up to 4 data streams for each
+ CacheAddr data_addr[4]; // entry.
+ uint32 flags; // Any combination of EntryFlags.
+ int32 pad[5];
+ char key[256 - 24 * 4]; // null terminated
+};
+
+COMPILE_ASSERT(sizeof(EntryStore) == 256, bad_EntryStore);
+const int kMaxInternalKeyLength = 4 * sizeof(EntryStore) -
+ offsetof(EntryStore, key) - 1;
+
+// Possible states for a given entry.
+enum EntryState {
+ ENTRY_NORMAL = 0,
+ ENTRY_EVICTED, // The entry was recently evicted from the cache.
+ ENTRY_DOOMED // The entry was doomed.
+};
+
+// Flags that can be applied to an entry.
+enum EntryFlags {
+ PARENT_ENTRY = 1, // This entry has children (sparse) entries.
+ CHILD_ENTRY = 1 << 1 // Child entry that stores sparse data.
+};
+
+#pragma pack(push, 4)
+// Rankings information for a given entry.
+struct RankingsNode {
+ uint64 last_used; // LRU info.
+ uint64 last_modified; // LRU info.
+ CacheAddr next; // LRU list.
+ CacheAddr prev; // LRU list.
+ CacheAddr contents; // Address of the EntryStore.
+  int32       dirty;            // The entry is being modified.
+ int32 dummy; // Old files may have a pointer here.
+};
+#pragma pack(pop)
+
+COMPILE_ASSERT(sizeof(RankingsNode) == 36, bad_RankingsNode);
+
+const uint32 kBlockMagic = 0xC104CAC3;
+const int kBlockHeaderSize = 8192; // Two pages: almost 64k entries
+const int kMaxBlocks = (kBlockHeaderSize - 80) * 8;
+
+// Bitmap to track used blocks on a block-file.
+typedef uint32 AllocBitmap[kMaxBlocks / 32];
+
+// A block-file is the file used to store information in blocks (could be
+// EntryStore blocks, RankingsNode blocks or user-data blocks).
+// We store entries that can expand for up to 4 consecutive blocks, and keep
+// counters of the number of blocks available for each type of entry. For
+// instance, an entry of 3 blocks is an entry of type 3. We also keep track of
+// where we last found an entry of that type (to avoid searching the bitmap
+// from the beginning every time).
+// This structure is the header of a block-file:
+struct BlockFileHeader {
+ uint32 magic;
+ uint32 version;
+ int16 this_file; // Index of this file.
+ int16 next_file; // Next file when this one is full.
+ int32 entry_size; // Size of the blocks of this file.
+ int32 num_entries; // Number of stored entries.
+ int32 max_entries; // Current maximum number of entries.
+ int32 empty[4]; // Counters of empty entries for each type.
+ int32 hints[4]; // Last used position for each entry type.
+ volatile int32 updating; // Keep track of updates to the header.
+ int32 user[5];
+ AllocBitmap allocation_map;
+ BlockFileHeader() {
+ memset(this, 0, sizeof(BlockFileHeader));
+ magic = kBlockMagic;
+ version = kCurrentVersion;
+ };
+};
+
+COMPILE_ASSERT(sizeof(BlockFileHeader) == kBlockHeaderSize, bad_header);
+
+// Sparse data support:
+// We keep a two level hierarchy to enable sparse data for an entry: the first
+// level consists of using separate "child" entries to store ranges of 1 MB,
+// and the second level stores blocks of 1 KB inside each child entry.
+//
+// Whenever we need to access a particular sparse offset, we first locate the
+// child entry that stores that offset, so we discard the 20 least significant
+// bits of the offset, and end up with the child id. For instance, the child id
+// to store the first megabyte is 0, and the child that should store offset
+// 0x410000 has an id of 4.
+//
+// The child entry is stored the same way as any other entry, so it also has a
+// name (key). The key includes a signature to be able to identify children
+// created for different generations of the same resource. In other words, given
+// that a sparse entry can have a large number of child entries, and the
+// resource can be invalidated and replaced with a new version at any time, it
+// is important to be sure that a given child actually belongs to a certain
+// parent entry.
+//
+// The full name of a child entry is composed of a prefix ("Range_") and two
+// hexadecimal 64-bit numbers at the end, separated by colons. The first
+// number is the signature of the parent key, and the second number is the child
+// id as described previously. The signature itself is also stored internally by
+// the child and the parent entries. For example, a sparse entry with a key of
+// "sparse entry name", and a signature of 0x052AF76, may have a child entry
+// named "Range_sparse entry name:052af76:4", which stores data in the range
+// 0x400000 to 0x4FFFFF.
+//
+// Each child entry keeps track of all the 1 KB blocks that have been written
+// to the entry, but being a regular entry, it will happily return zeros for any
+// read that spans data not written before. The actual sparse data is stored in
+// one of the data streams of the child entry (at index 1), while the control
+// information is stored in another stream (at index 2), both by parents and
+// the children.
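+//
+// For illustration, the child that covers a given sparse |offset| is found by
+// dropping the 20 least significant bits, and its name is built from the
+// parent key, the signature and that id (sketch; values taken from the
+// example above):
+//
+//   int64 child_id = offset >> 20;  // 0x410000 >> 20 == 4
+//   // -> "Range_sparse entry name:052af76:4"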
+
+// This structure contains the control information for parent and child entries.
+// It is stored at offset 0 of the data stream with index 2.
+// It is possible to write to a child entry in a way that causes the last block
+// to be only partially filled. In that case, last_block and last_block_len will
+// keep track of that block.
+struct SparseHeader {
+ int64 signature; // The parent and children signature.
+ uint32 magic; // Structure identifier (equal to kIndexMagic).
+ int32 parent_key_len; // Key length for the parent entry.
+ int32 last_block; // Index of the last written block.
+  int32 last_block_len;   // Length of the last written block.
+ int32 dummy[10];
+};
+
+// The SparseHeader will be followed by a bitmap, as described by this
+// structure.
+struct SparseData {
+ SparseHeader header;
+ uint32 bitmap[32]; // Bitmap representation of known children (if this
+                        // is a parent entry), or used blocks (for child
+                        // entries). The size is fixed for child entries but
+ // not for parents; it can be as small as 4 bytes
+ // and as large as 8 KB.
+};
+
+// The number of blocks stored by a child entry.
+const int kNumSparseBits = 1024;
+COMPILE_ASSERT(sizeof(SparseData) == sizeof(SparseHeader) + kNumSparseBits / 8,
+ Invalid_SparseData_bitmap);
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_DISK_FORMAT_H_
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
new file mode 100644
index 00000000..e3fda2ae
--- /dev/null
+++ b/net/disk_cache/entry_impl.cc
@@ -0,0 +1,932 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/entry_impl.h"
+
+#include "base/histogram.h"
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/bitmap.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/histogram_macros.h"
+#include "net/disk_cache/sparse_control.h"
+
+using base::Time;
+using base::TimeDelta;
+
+namespace {
+
+// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
+const int kKeyFileIndex = 3;
+
+// This class implements FileIOCallback to buffer the callback from a file IO
+// operation from the actual net class.
+class SyncCallback: public disk_cache::FileIOCallback {
+ public:
+ SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
+               net::CompletionCallback* callback)
+ : entry_(entry), callback_(callback), buf_(buffer), start_(Time::Now()) {
+ entry->AddRef();
+ entry->IncrementIoCount();
+ }
+ ~SyncCallback() {}
+
+ virtual void OnFileIOComplete(int bytes_copied);
+ void Discard();
+ private:
+ disk_cache::EntryImpl* entry_;
+ net::CompletionCallback* callback_;
+ scoped_refptr<net::IOBuffer> buf_;
+ Time start_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SyncCallback);
+};
+
+void SyncCallback::OnFileIOComplete(int bytes_copied) {
+ entry_->DecrementIoCount();
+ if (callback_) {
+ entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
+ callback_->Run(bytes_copied);
+ }
+ entry_->Release();
+ delete this;
+}
+
+void SyncCallback::Discard() {
+ callback_ = NULL;
+ buf_ = NULL;
+ OnFileIOComplete(0);
+}
+
+// Clears buffer before offset and after valid_len, knowing that the size of
+// buffer is kMaxBlockSize.
+void ClearInvalidData(char* buffer, int offset, int valid_len) {
+ DCHECK(offset >= 0);
+ DCHECK(valid_len >= 0);
+ DCHECK(disk_cache::kMaxBlockSize >= offset + valid_len);
+ if (offset)
+ memset(buffer, 0, offset);
+ int end = disk_cache::kMaxBlockSize - offset - valid_len;
+ if (end)
+ memset(buffer + offset + valid_len, 0, end);
+}
+
+} // namespace
+
+namespace disk_cache {
+
+EntryImpl::EntryImpl(BackendImpl* backend, Addr address)
+ : entry_(NULL, Addr(0)), node_(NULL, Addr(0)) {
+ entry_.LazyInit(backend->File(address), address);
+ doomed_ = false;
+ backend_ = backend;
+ for (int i = 0; i < kNumStreams; i++) {
+ unreported_size_[i] = 0;
+ }
+ key_file_ = NULL;
+}
+
+// When an entry is deleted from the cache, we clean up all the data associated
+// with it for two reasons: to simplify the reuse of the block (we know that any
+// unused block is filled with zeros), and to simplify the handling of write /
+// read partial information from an entry (don't have to worry about returning
+// data related to a previous cache entry because the range was not fully
+// written before).
+EntryImpl::~EntryImpl() {
+ // Save the sparse info to disk before deleting this entry.
+ sparse_.reset();
+
+ if (doomed_) {
+ DeleteEntryData(true);
+ } else {
+ bool ret = true;
+ for (int index = 0; index < kNumStreams; index++) {
+ if (user_buffers_[index].get()) {
+ if (!(ret = Flush(index, entry_.Data()->data_size[index], false)))
+ LOG(ERROR) << "Failed to save user data";
+ } else if (unreported_size_[index]) {
+ backend_->ModifyStorageSize(
+ entry_.Data()->data_size[index] - unreported_size_[index],
+ entry_.Data()->data_size[index]);
+ }
+ }
+
+ if (!ret) {
+ // There was a failure writing the actual data. Mark the entry as dirty.
+ int current_id = backend_->GetCurrentEntryId();
+ node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
+ node_.Store();
+ } else if (node_.HasData() && node_.Data()->dirty) {
+ node_.Data()->dirty = 0;
+ node_.Store();
+ }
+ }
+
+ backend_->CacheEntryDestroyed(entry_.address());
+}
+
+void EntryImpl::Doom() {
+ if (doomed_)
+ return;
+
+ SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
+ backend_->InternalDoomEntry(this);
+}
+
+void EntryImpl::Close() {
+ Release();
+}
+
+std::string EntryImpl::GetKey() const {
+ CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+ if (entry->Data()->key_len <= kMaxInternalKeyLength)
+ return std::string(entry->Data()->key);
+
+ Addr address(entry->Data()->long_key);
+ DCHECK(address.is_initialized());
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ if (!key_file_) {
+ // We keep a copy of the file needed to access the key so that we can
+ // always return this object's key, even if the backend is disabled.
+ COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
+ key_file_ = const_cast<EntryImpl*>(this)->GetBackingFile(address,
+ kKeyFileIndex);
+ }
+
+ std::string key;
+ if (!key_file_ ||
+ !key_file_->Read(WriteInto(&key, entry->Data()->key_len + 1),
+ entry->Data()->key_len + 1, offset))
+ key.clear();
+ return key;
+}
+
+Time EntryImpl::GetLastUsed() const {
+ CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+ return Time::FromInternalValue(node->Data()->last_used);
+}
+
+Time EntryImpl::GetLastModified() const {
+ CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+ return Time::FromInternalValue(node->Data()->last_modified);
+}
+
+int32 EntryImpl::GetDataSize(int index) const {
+ if (index < 0 || index >= kNumStreams)
+ return 0;
+
+ CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+ return entry->Data()->data_size[index];
+}
+
+int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback) {
+ DCHECK(node_.Data()->dirty);
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = entry_.Data()->data_size[index];
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ Time start = Time::Now();
+
+ if (offset + buf_len > entry_size)
+ buf_len = entry_size - offset;
+
+ UpdateRank(false);
+
+ backend_->OnEvent(Stats::READ_DATA);
+
+ if (user_buffers_[index].get()) {
+ // Complete the operation locally.
+ DCHECK(kMaxBlockSize >= offset + buf_len);
+    memcpy(buf->data(), user_buffers_[index].get() + offset, buf_len);
+ ReportIOTime(kRead, start);
+ return buf_len;
+ }
+
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(address.is_initialized());
+ if (!address.is_initialized())
+ return net::ERR_FAILED;
+
+ File* file = GetBackingFile(address, index);
+ if (!file)
+ return net::ERR_FAILED;
+
+ size_t file_offset = offset;
+ if (address.is_block_file())
+ file_offset += address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+
+ SyncCallback* io_callback = NULL;
+ if (completion_callback)
+ io_callback = new SyncCallback(this, buf, completion_callback);
+
+ bool completed;
+ if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
+ if (io_callback)
+ io_callback->Discard();
+ return net::ERR_FAILED;
+ }
+
+ if (io_callback && completed)
+ io_callback->Discard();
+
+ ReportIOTime(kRead, start);
+ return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING;
+}
+
+int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback,
+ bool truncate) {
+ DCHECK(node_.Data()->dirty);
+ if (index < 0 || index >= kNumStreams)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int max_file_size = backend_->MaxFileSize();
+
+  // offset + buf_len could overflow and end up as a negative value.
+ if (offset > max_file_size || buf_len > max_file_size ||
+ offset + buf_len > max_file_size) {
+ int size = offset + buf_len;
+ if (size <= max_file_size)
+ size = kint32max;
+ backend_->TooMuchStorageRequested(size);
+ return net::ERR_FAILED;
+ }
+
+ Time start = Time::Now();
+
+ // Read the size at this point (it may change inside prepare).
+ int entry_size = entry_.Data()->data_size[index];
+ if (!PrepareTarget(index, offset, buf_len, truncate))
+ return net::ERR_FAILED;
+
+ if (entry_size < offset + buf_len) {
+ unreported_size_[index] += offset + buf_len - entry_size;
+ entry_.Data()->data_size[index] = offset + buf_len;
+ entry_.set_modified();
+ if (!buf_len)
+ truncate = true; // Force file extension.
+ } else if (truncate) {
+ // If the size was modified inside PrepareTarget, we should not do
+ // anything here.
+ if ((entry_size > offset + buf_len) &&
+ (entry_size == entry_.Data()->data_size[index])) {
+ unreported_size_[index] += offset + buf_len - entry_size;
+ entry_.Data()->data_size[index] = offset + buf_len;
+ entry_.set_modified();
+ } else {
+ // Nothing to truncate.
+ truncate = false;
+ }
+ }
+
+ UpdateRank(true);
+
+ backend_->OnEvent(Stats::WRITE_DATA);
+
+ if (user_buffers_[index].get()) {
+ // Complete the operation locally.
+ if (!buf_len)
+ return 0;
+
+ DCHECK(kMaxBlockSize >= offset + buf_len);
+ memcpy(user_buffers_[index].get() + offset, buf->data(), buf_len);
+ ReportIOTime(kWrite, start);
+ return buf_len;
+ }
+
+ Addr address(entry_.Data()->data_addr[index]);
+ File* file = GetBackingFile(address, index);
+ if (!file)
+ return net::ERR_FAILED;
+
+ size_t file_offset = offset;
+ if (address.is_block_file()) {
+ file_offset += address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ } else if (truncate) {
+ if (!file->SetLength(offset + buf_len))
+ return net::ERR_FAILED;
+ }
+
+ if (!buf_len)
+ return 0;
+
+ SyncCallback* io_callback = NULL;
+ if (completion_callback)
+ io_callback = new SyncCallback(this, buf, completion_callback);
+
+ bool completed;
+ if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
+ &completed)) {
+ if (io_callback)
+ io_callback->Discard();
+ return net::ERR_FAILED;
+ }
+
+ if (io_callback && completed)
+ io_callback->Discard();
+
+ ReportIOTime(kWrite, start);
+ return (completed || !completion_callback) ? buf_len : net::ERR_IO_PENDING;
+}
+
+int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback) {
+ DCHECK(node_.Data()->dirty);
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ Time start = Time::Now();
+ result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
+ completion_callback);
+ ReportIOTime(kSparseRead, start);
+ return result;
+}
+
+int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback) {
+ DCHECK(node_.Data()->dirty);
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ Time start = Time::Now();
+ result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
+ buf_len, completion_callback);
+ ReportIOTime(kSparseWrite, start);
+ return result;
+}
+
+int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ return sparse_->GetAvailableRange(offset, len, start);
+}
+
+int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+ CompletionCallback* callback) {
+ return GetAvailableRange(offset, len, start);
+}
+
+void EntryImpl::CancelSparseIO() {
+ if (!sparse_.get())
+ return;
+
+ sparse_->CancelIO();
+}
+
+int EntryImpl::ReadyForSparseIO(net::CompletionCallback* completion_callback) {
+ if (!sparse_.get())
+ return net::OK;
+
+ return sparse_->ReadyToUse(completion_callback);
+}
+
+// ------------------------------------------------------------------------
+
+uint32 EntryImpl::GetHash() {
+ return entry_.Data()->hash;
+}
+
+bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
+ uint32 hash) {
+ Trace("Create entry In");
+ EntryStore* entry_store = entry_.Data();
+ RankingsNode* node = node_.Data();
+ memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
+ memset(node, 0, sizeof(RankingsNode));
+ if (!node_.LazyInit(backend_->File(node_address), node_address))
+ return false;
+
+ entry_store->rankings_node = node_address.value();
+ node->contents = entry_.address().value();
+
+ entry_store->hash = hash;
+ entry_store->creation_time = Time::Now().ToInternalValue();
+ entry_store->key_len = static_cast<int32>(key.size());
+ if (entry_store->key_len > kMaxInternalKeyLength) {
+ Addr address(0);
+ if (!CreateBlock(entry_store->key_len + 1, &address))
+ return false;
+
+ entry_store->long_key = address.value();
+ key_file_ = GetBackingFile(address, kKeyFileIndex);
+
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ if (!key_file_ || !key_file_->Write(key.data(), key.size(), offset)) {
+ DeleteData(address, kKeyFileIndex);
+ return false;
+ }
+
+ if (address.is_separate_file())
+ key_file_->SetLength(key.size() + 1);
+ } else {
+ memcpy(entry_store->key, key.data(), key.size());
+ entry_store->key[key.size()] = '\0';
+ }
+ backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+ node->dirty = backend_->GetCurrentEntryId();
+ Log("Create Entry ");
+ return true;
+}
+
+bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
+ if (entry_.Data()->hash != hash ||
+ static_cast<size_t>(entry_.Data()->key_len) != key.size())
+ return false;
+
+ std::string my_key = GetKey();
+  return key.compare(my_key) == 0;
+}
+
+void EntryImpl::InternalDoom() {
+ DCHECK(node_.HasData());
+ if (!node_.Data()->dirty) {
+ node_.Data()->dirty = backend_->GetCurrentEntryId();
+ node_.Store();
+ }
+ doomed_ = true;
+}
+
+void EntryImpl::DeleteEntryData(bool everything) {
+ DCHECK(doomed_ || !everything);
+
+ if (GetEntryFlags() & PARENT_ENTRY) {
+ // We have some child entries that must go away.
+ SparseControl::DeleteChildren(this);
+ }
+
+ if (GetDataSize(0))
+ CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
+ if (GetDataSize(1))
+ CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
+ for (int index = 0; index < kNumStreams; index++) {
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized()) {
+ DeleteData(address, index);
+ backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+ unreported_size_[index], 0);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Data()->data_size[index] = 0;
+ }
+ }
+
+ if (!everything) {
+ entry_.Store();
+ return;
+ }
+
+ // Remove all traces of this entry.
+ backend_->RemoveEntry(this);
+
+ Addr address(entry_.Data()->long_key);
+ DeleteData(address, kKeyFileIndex);
+ backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
+
+ memset(node_.buffer(), 0, node_.size());
+ memset(entry_.buffer(), 0, entry_.size());
+ node_.Store();
+ entry_.Store();
+
+ backend_->DeleteBlock(node_.address(), false);
+ backend_->DeleteBlock(entry_.address(), false);
+}
+
+CacheAddr EntryImpl::GetNextAddress() {
+ return entry_.Data()->next;
+}
+
+void EntryImpl::SetNextAddress(Addr address) {
+ entry_.Data()->next = address.value();
+ bool success = entry_.Store();
+ DCHECK(success);
+}
+
+bool EntryImpl::LoadNodeAddress() {
+ Addr address(entry_.Data()->rankings_node);
+ if (!node_.LazyInit(backend_->File(address), address))
+ return false;
+ return node_.Load();
+}
+
+bool EntryImpl::Update() {
+ DCHECK(node_.HasData());
+
+ RankingsNode* rankings = node_.Data();
+ if (!rankings->dirty) {
+ rankings->dirty = backend_->GetCurrentEntryId();
+ if (!node_.Store())
+ return false;
+ }
+ return true;
+}
+
+bool EntryImpl::IsDirty(int32 current_id) {
+ DCHECK(node_.HasData());
+ // We are checking if the entry is valid or not. If there is a pointer here,
+ // we should not be checking the entry.
+ if (node_.Data()->dummy)
+ return true;
+
+ return node_.Data()->dirty && current_id != node_.Data()->dirty;
+}
+
+void EntryImpl::ClearDirtyFlag() {
+ node_.Data()->dirty = 0;
+}
+
+void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
+ node_.Data()->dirty = new_id;
+ node_.Data()->dummy = 0;
+ node_.Store();
+}
+
+bool EntryImpl::SanityCheck() {
+ if (!entry_.Data()->rankings_node || !entry_.Data()->key_len)
+ return false;
+
+ Addr rankings_addr(entry_.Data()->rankings_node);
+ if (!rankings_addr.is_initialized() || rankings_addr.is_separate_file() ||
+ rankings_addr.file_type() != RANKINGS)
+ return false;
+
+ Addr next_addr(entry_.Data()->next);
+ if (next_addr.is_initialized() &&
+ (next_addr.is_separate_file() || next_addr.file_type() != BLOCK_256))
+ return false;
+
+ return true;
+}
+
+void EntryImpl::IncrementIoCount() {
+ backend_->IncrementIoCount();
+}
+
+void EntryImpl::DecrementIoCount() {
+ backend_->DecrementIoCount();
+}
+
+void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
+ node_.Data()->last_used = last_used.ToInternalValue();
+ node_.Data()->last_modified = last_modified.ToInternalValue();
+ node_.set_modified();
+}
+
+void EntryImpl::ReportIOTime(Operation op, const base::Time& start) {
+ int group = backend_->GetSizeGroup();
+ switch (op) {
+ case kRead:
+ CACHE_UMA(AGE_MS, "ReadTime", group, start);
+ break;
+ case kWrite:
+ CACHE_UMA(AGE_MS, "WriteTime", group, start);
+ break;
+ case kSparseRead:
+ CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
+ break;
+ case kSparseWrite:
+ CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
+ break;
+ case kAsyncIO:
+ CACHE_UMA(AGE_MS, "AsyncIOTime", group, start);
+ break;
+ default:
+ NOTREACHED();
+ }
+}
+
+// ------------------------------------------------------------------------
+
+bool EntryImpl::CreateDataBlock(int index, int size) {
+ DCHECK(index >= 0 && index < kNumStreams);
+
+ Addr address(entry_.Data()->data_addr[index]);
+ if (!CreateBlock(size, &address))
+ return false;
+
+ entry_.Data()->data_addr[index] = address.value();
+ entry_.Store();
+ return true;
+}
+
+bool EntryImpl::CreateBlock(int size, Addr* address) {
+ DCHECK(!address->is_initialized());
+
+ FileType file_type = Addr::RequiredFileType(size);
+ if (EXTERNAL == file_type) {
+ if (size > backend_->MaxFileSize())
+ return false;
+ if (!backend_->CreateExternalFile(address))
+ return false;
+ } else {
+ int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) /
+ Addr::BlockSizeForFileType(file_type);
+
+ if (!backend_->CreateBlock(file_type, num_blocks, address))
+ return false;
+ }
+ return true;
+}
+
+void EntryImpl::DeleteData(Addr address, int index) {
+ if (!address.is_initialized())
+ return;
+ if (address.is_separate_file()) {
+ if (files_[index])
+ files_[index] = NULL; // Releases the object.
+
+ int failure = DeleteCacheFile(backend_->GetFileName(address)) ? 0 : 1;
+ CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
+ if (failure)
+ LOG(ERROR) << "Failed to delete " <<
+ backend_->GetFileName(address).value() << " from the cache.";
+ } else {
+ backend_->DeleteBlock(address, true);
+ }
+}
+
+void EntryImpl::UpdateRank(bool modified) {
+ if (!doomed_) {
+ // Everything is handled by the backend.
+ backend_->UpdateRank(this, true);
+ return;
+ }
+
+ Time current = Time::Now();
+ node_.Data()->last_used = current.ToInternalValue();
+
+ if (modified)
+ node_.Data()->last_modified = current.ToInternalValue();
+}
+
+File* EntryImpl::GetBackingFile(Addr address, int index) {
+ File* file;
+ if (address.is_separate_file())
+ file = GetExternalFile(address, index);
+ else
+ file = backend_->File(address);
+ return file;
+}
+
+File* EntryImpl::GetExternalFile(Addr address, int index) {
+ DCHECK(index >= 0 && index <= kKeyFileIndex);
+ if (!files_[index].get()) {
+ // For a key file, use mixed mode IO.
+ scoped_refptr<File> file(new File(kKeyFileIndex == index));
+ if (file->Init(backend_->GetFileName(address)))
+ files_[index].swap(file);
+ }
+ return files_[index].get();
+}
+
+bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
+ bool truncate) {
+ Addr address(entry_.Data()->data_addr[index]);
+
+ if (address.is_initialized() || user_buffers_[index].get())
+ return GrowUserBuffer(index, offset, buf_len, truncate);
+
+ if (offset + buf_len > kMaxBlockSize)
+ return CreateDataBlock(index, offset + buf_len);
+
+ user_buffers_[index].reset(new char[kMaxBlockSize]);
+
+ // Overwrite the parts of the buffer that are not going to be written
+ // by the current operation (and yes, let's assume that nothing is going
+ // to fail, and we'll actually write over the part that we are not cleaning
+ // here). The point is to avoid writing random stuff to disk later on.
+ ClearInvalidData(user_buffers_[index].get(), offset, buf_len);
+
+ return true;
+}
+
+// We get to this function with some data already stored. If there is a
+// truncation that results in data stored internally, we handle that case
+// explicitly here.
+bool EntryImpl::GrowUserBuffer(int index, int offset, int buf_len,
+ bool truncate) {
+ Addr address(entry_.Data()->data_addr[index]);
+
+ if (offset + buf_len > kMaxBlockSize) {
+ // The data has to be stored externally.
+ if (address.is_initialized()) {
+ if (address.is_separate_file())
+ return true;
+ if (!MoveToLocalBuffer(index))
+ return false;
+ }
+ return Flush(index, offset + buf_len, true);
+ }
+
+ if (!address.is_initialized()) {
+ DCHECK(user_buffers_[index].get());
+ if (truncate)
+ ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len);
+ return true;
+ }
+ if (address.is_separate_file()) {
+ if (!truncate)
+ return true;
+ return ImportSeparateFile(index, offset, buf_len);
+ }
+
+ // At this point we are dealing with data stored on disk, inside a block file.
+ if (offset + buf_len <= address.BlockSize() * address.num_blocks())
+ return true;
+
+ // ... and the allocated block has to change.
+ if (!MoveToLocalBuffer(index))
+ return false;
+
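+  // Compute where the valid data will end after this write so that everything
+  // past that point in the local buffer can be zeroed (ClearInvalidData wipes
+  // the bytes outside the given range).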
+ int clear_start = entry_.Data()->data_size[index];
+ if (truncate)
+ clear_start = std::min(clear_start, offset + buf_len);
+ else if (offset < clear_start)
+ clear_start = std::max(offset + buf_len, clear_start);
+
+ // Clear the end of the buffer.
+ ClearInvalidData(user_buffers_[index].get(), 0, clear_start);
+ return true;
+}
+
+bool EntryImpl::MoveToLocalBuffer(int index) {
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(!user_buffers_[index].get());
+ DCHECK(address.is_initialized());
+ scoped_array<char> buffer(new char[kMaxBlockSize]);
+
+ File* file = GetBackingFile(address, index);
+ size_t len = entry_.Data()->data_size[index];
+ size_t offset = 0;
+
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ if (!file || !file->Read(buffer.get(), len, offset, NULL, NULL))
+ return false;
+
+ DeleteData(address, index);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Store();
+
+ // If we lose this entry we'll see it as zero sized.
+ backend_->ModifyStorageSize(static_cast<int>(len) - unreported_size_[index],
+ 0);
+ unreported_size_[index] = static_cast<int>(len);
+
+ user_buffers_[index].swap(buffer);
+ return true;
+}
+
+bool EntryImpl::ImportSeparateFile(int index, int offset, int buf_len) {
+ if (entry_.Data()->data_size[index] > offset + buf_len) {
+ unreported_size_[index] += offset + buf_len -
+ entry_.Data()->data_size[index];
+ entry_.Data()->data_size[index] = offset + buf_len;
+ }
+
+ if (!MoveToLocalBuffer(index))
+ return false;
+
+ // Clear the end of the buffer.
+ ClearInvalidData(user_buffers_[index].get(), 0, offset + buf_len);
+ return true;
+}
+
+// The common scenario is that this is called from the destructor of the entry,
+// to write to disk what we have buffered. We don't want to hold the destructor
+// until the actual IO finishes, so we'll send an asynchronous write that will
+// free up the memory containing the data. To be consistent, this method always
+// returns with the buffer freed up (on success).
+bool EntryImpl::Flush(int index, int size, bool async) {
+ Addr address(entry_.Data()->data_addr[index]);
+ DCHECK(user_buffers_[index].get());
+ DCHECK(!address.is_initialized());
+
+ if (!size)
+ return true;
+
+ if (!CreateDataBlock(index, size))
+ return false;
+
+ address.set_value(entry_.Data()->data_addr[index]);
+
+ File* file = GetBackingFile(address, index);
+ size_t len = entry_.Data()->data_size[index];
+ size_t offset = 0;
+ if (address.is_block_file())
+ offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+ // We just told the backend to store len bytes for real.
+ DCHECK(len == static_cast<size_t>(unreported_size_[index]));
+ backend_->ModifyStorageSize(0, static_cast<int>(len));
+ unreported_size_[index] = 0;
+
+ if (!file)
+ return false;
+
+  // TODO(rvargas): figure out whether it's worth re-enabling posting
+  // operations.
+ // Right now it is only used from GrowUserBuffer, not the destructor, and
+ // it is not accounted for from the point of view of the total number of
+ // pending operations of the cache. It is also racing with the actual write
+ // on the GrowUserBuffer path because there is no code to exclude the range
+ // that is going to be written.
+ async = false;
+ if (async) {
+ if (!file->PostWrite(user_buffers_[index].get(), len, offset))
+ return false;
+ } else {
+ if (!file->Write(user_buffers_[index].get(), len, offset, NULL, NULL))
+ return false;
+ user_buffers_[index].reset(NULL);
+ }
+
+ // The buffer is deleted from the PostWrite operation.
+ user_buffers_[index].release();
+
+ return true;
+}
+
+int EntryImpl::InitSparseData() {
+ if (sparse_.get())
+ return net::OK;
+
+ sparse_.reset(new SparseControl(this));
+ int result = sparse_->Init();
+ if (net::OK != result)
+ sparse_.reset();
+ return result;
+}
+
+void EntryImpl::SetEntryFlags(uint32 flags) {
+ entry_.Data()->flags |= flags;
+ entry_.set_modified();
+}
+
+uint32 EntryImpl::GetEntryFlags() {
+ return entry_.Data()->flags;
+}
+
+void EntryImpl::GetData(int index, char** buffer, Addr* address) {
+ if (user_buffers_[index].get()) {
+    // The data is already in memory, just copy it and we're done.
+ int data_len = entry_.Data()->data_size[index];
+ DCHECK(data_len <= kMaxBlockSize);
+ *buffer = new char[data_len];
+ memcpy(*buffer, user_buffers_[index].get(), data_len);
+ return;
+ }
+
+ // Bad news: we'd have to read the info from disk so instead we'll just tell
+ // the caller where to read from.
+ *buffer = NULL;
+ address->set_value(entry_.Data()->data_addr[index]);
+ if (address->is_initialized()) {
+ // Prevent us from deleting the block from the backing store.
+ backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+ unreported_size_[index], 0);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Data()->data_size[index] = 0;
+ }
+}
+
+void EntryImpl::Log(const char* msg) {
+ int dirty = 0;
+ if (node_.HasData()) {
+ dirty = node_.Data()->dirty;
+ }
+
+ Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
+ entry_.address().value(), node_.address().value());
+
+ Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
+ entry_.Data()->data_addr[1], entry_.Data()->long_key);
+
+ Trace(" doomed: %d 0x%x", doomed_, dirty);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
new file mode 100644
index 00000000..76e4965a
--- /dev/null
+++ b/net/disk_cache/entry_impl.h
@@ -0,0 +1,197 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_ENTRY_IMPL_H_
+#define NET_DISK_CACHE_ENTRY_IMPL_H_
+
+#include "base/scoped_ptr.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/storage_block.h"
+#include "net/disk_cache/storage_block-inl.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+class SparseControl;
+
+// This class implements the Entry interface. An object of this
+// class represents a single entry on the cache.
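+//
+// Illustrative usage sketch (this mirrors the patterns in entry_unittest.cc;
+// the |cache| Backend pointer and |buf| are assumed to exist):
+//
+//   disk_cache::Entry* entry;
+//   if (cache->CreateEntry("some key", &entry)) {
+//     entry->WriteData(0, 0, buf, buf_len, NULL, false);
+//     entry->Close();
+//   }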
+class EntryImpl : public Entry, public base::RefCounted<EntryImpl> {
+ friend class base::RefCounted<EntryImpl>;
+ friend class SparseControl;
+ public:
+ enum Operation {
+ kRead,
+ kWrite,
+ kSparseRead,
+ kSparseWrite,
+ kAsyncIO
+ };
+
+ EntryImpl(BackendImpl* backend, Addr address);
+
+ // Entry interface.
+ virtual void Doom();
+ virtual void Close();
+ virtual std::string GetKey() const;
+ virtual base::Time GetLastUsed() const;
+ virtual base::Time GetLastModified() const;
+ virtual int32 GetDataSize(int index) const;
+ virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback);
+ virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback,
+ bool truncate);
+ virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback);
+ virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback);
+ virtual int GetAvailableRange(int64 offset, int len, int64* start);
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ CompletionCallback* callback);
+ virtual void CancelSparseIO();
+ virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
+
+ inline CacheEntryBlock* entry() {
+ return &entry_;
+ }
+
+ inline CacheRankingsBlock* rankings() {
+ return &node_;
+ }
+
+ uint32 GetHash();
+
+  // Performs the initialization of an EntryImpl that will be added to the
+ // cache.
+ bool CreateEntry(Addr node_address, const std::string& key, uint32 hash);
+
+ // Returns true if this entry matches the lookup arguments.
+ bool IsSameEntry(const std::string& key, uint32 hash);
+
+  // Permanently destroys this entry.
+ void InternalDoom();
+
+ // Deletes this entry from disk. If |everything| is false, only the user data
+ // will be removed, leaving the key and control data intact.
+ void DeleteEntryData(bool everything);
+
+ // Returns the address of the next entry on the list of entries with the same
+ // hash.
+ CacheAddr GetNextAddress();
+
+ // Sets the address of the next entry on the list of entries with the same
+ // hash.
+ void SetNextAddress(Addr address);
+
+ // Reloads the rankings node information.
+ bool LoadNodeAddress();
+
+ // Updates the stored data to reflect the run-time information for this entry.
+ // Returns false if the data could not be updated. The purpose of this method
+ // is to be able to detect entries that are currently in use.
+ bool Update();
+
+ // Returns true if this entry is marked as dirty on disk.
+ bool IsDirty(int32 current_id);
+ void ClearDirtyFlag();
+
+ // Fixes this entry so it can be treated as valid (to delete it).
+ void SetPointerForInvalidEntry(int32 new_id);
+
+ // Returns false if the entry is clearly invalid.
+ bool SanityCheck();
+
+ // Handle the pending asynchronous IO count.
+ void IncrementIoCount();
+ void DecrementIoCount();
+
+ // Set the access times for this entry. This method provides support for
+ // the upgrade tool.
+ void SetTimes(base::Time last_used, base::Time last_modified);
+
+ // Generates a histogram for the time spent working on this operation.
+ void ReportIOTime(Operation op, const base::Time& start);
+
+ private:
+ enum {
+ kNumStreams = 3
+ };
+
+ ~EntryImpl();
+
+ // Initializes the storage for an internal or external data block.
+ bool CreateDataBlock(int index, int size);
+
+ // Initializes the storage for an internal or external generic block.
+ bool CreateBlock(int size, Addr* address);
+
+  // Deletes the data pointed to by |address|, possibly backed by files_[index].
+ void DeleteData(Addr address, int index);
+
+ // Updates ranking information.
+ void UpdateRank(bool modified);
+
+ // Returns a pointer to the file that stores the given address.
+ File* GetBackingFile(Addr address, int index);
+
+ // Returns a pointer to the file that stores external data.
+ File* GetExternalFile(Addr address, int index);
+
+ // Prepares the target file or buffer for a write of buf_len bytes at the
+ // given offset.
+ bool PrepareTarget(int index, int offset, int buf_len, bool truncate);
+
+ // Grows the size of the storage used to store user data, if needed.
+ bool GrowUserBuffer(int index, int offset, int buf_len, bool truncate);
+
+ // Reads from a block data file to this object's memory buffer.
+ bool MoveToLocalBuffer(int index);
+
+ // Loads the external file to this object's memory buffer.
+ bool ImportSeparateFile(int index, int offset, int buf_len);
+
+  // Flushes the in-memory data to the backing storage.
+ bool Flush(int index, int size, bool async);
+
+ // Initializes the sparse control object. Returns a net error code.
+ int InitSparseData();
+
+ // Adds the provided |flags| to the current EntryFlags for this entry.
+ void SetEntryFlags(uint32 flags);
+
+ // Returns the current EntryFlags for this entry.
+ uint32 GetEntryFlags();
+
+ // Gets the data stored at the given index. If the information is in memory,
+ // a buffer will be allocated and the data will be copied to it (the caller
+ // can find out the size of the buffer before making this call). Otherwise,
+ // the cache address of the data will be returned, and that address will be
+  // removed from the regular bookkeeping of this entry so the caller is
+ // responsible for deleting the block (or file) from the backing store at some
+ // point; there is no need to report any storage-size change, only to do the
+ // actual cleanup.
+ void GetData(int index, char** buffer, Addr* address);
+
+ // Logs this entry to the internal trace buffer.
+ void Log(const char* msg);
+
+ CacheEntryBlock entry_; // Key related information for this entry.
+ CacheRankingsBlock node_; // Rankings related information for this entry.
+ BackendImpl* backend_; // Back pointer to the cache.
+ scoped_array<char> user_buffers_[kNumStreams]; // Store user data.
+ // Files to store external user data and key.
+ scoped_refptr<File> files_[kNumStreams + 1];
+ // Copy of the file used to store the key. We don't own this object.
+ mutable File* key_file_;
+ int unreported_size_[kNumStreams]; // Bytes not reported yet to the backend.
+ bool doomed_; // True if this entry was removed from the cache.
+ scoped_ptr<SparseControl> sparse_; // Support for sparse entries.
+
+ DISALLOW_EVIL_CONSTRUCTORS(EntryImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ENTRY_IMPL_H_
diff --git a/net/disk_cache/entry_unittest.cc b/net/disk_cache/entry_unittest.cc
new file mode 100644
index 00000000..e60a3da9
--- /dev/null
+++ b/net/disk_cache/entry_unittest.cc
@@ -0,0 +1,1401 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/platform_thread.h"
+#include "base/timer.h"
+#include "base/string_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/mem_entry_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::Time;
+
+extern volatile int g_cache_tests_received;
+extern volatile bool g_cache_tests_error;
+
+// Tests that can run with different types of caches.
+class DiskCacheEntryTest : public DiskCacheTestWithCache {
+ protected:
+ void InternalSyncIO();
+ void InternalAsyncIO();
+ void ExternalSyncIO();
+ void ExternalAsyncIO();
+ void StreamAccess();
+ void GetKey();
+ void GrowData();
+ void TruncateData();
+ void ZeroLengthIO();
+ void ReuseEntry(int size);
+ void InvalidData();
+ void DoomEntry();
+ void DoomedEntry();
+ void BasicSparseIO(bool async);
+ void HugeSparseIO(bool async);
+ void GetAvailableRange();
+ void DoomSparseEntry();
+ void PartialSparseEntry();
+};
+
+void DiskCacheEntryTest::InternalSyncIO() {
+ disk_cache::Entry *entry1 = NULL;
+ ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+ ASSERT_TRUE(NULL != entry1);
+
+ const int kSize1 = 10;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ EXPECT_EQ(0, entry1->ReadData(0, 0, buffer1, kSize1, NULL));
+ base::strlcpy(buffer1->data(), "the data", kSize1);
+ EXPECT_EQ(10, entry1->WriteData(0, 0, buffer1, kSize1, NULL, false));
+ memset(buffer1->data(), 0, kSize1);
+ EXPECT_EQ(10, entry1->ReadData(0, 0, buffer1, kSize1, NULL));
+ EXPECT_STREQ("the data", buffer1->data());
+
+ const int kSize2 = 5000;
+ const int kSize3 = 10000;
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
+ scoped_refptr<net::IOBuffer> buffer3 = new net::IOBuffer(kSize3);
+ memset(buffer3->data(), 0, kSize3);
+ CacheTestFillBuffer(buffer2->data(), kSize2, false);
+ base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
+ EXPECT_EQ(5000, entry1->WriteData(1, 1500, buffer2, kSize2, NULL, false));
+ memset(buffer2->data(), 0, kSize2);
+ EXPECT_EQ(4989, entry1->ReadData(1, 1511, buffer2, kSize2, NULL));
+ EXPECT_STREQ("big data goes here", buffer2->data());
+ EXPECT_EQ(5000, entry1->ReadData(1, 0, buffer2, kSize2, NULL));
+ EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
+ EXPECT_EQ(1500, entry1->ReadData(1, 5000, buffer2, kSize2, NULL));
+
+ EXPECT_EQ(0, entry1->ReadData(1, 6500, buffer2, kSize2, NULL));
+ EXPECT_EQ(6500, entry1->ReadData(1, 0, buffer3, kSize3, NULL));
+ EXPECT_EQ(8192, entry1->WriteData(1, 0, buffer3, 8192, NULL, false));
+ EXPECT_EQ(8192, entry1->ReadData(1, 0, buffer3, kSize3, NULL));
+ EXPECT_EQ(8192, entry1->GetDataSize(1));
+
+ entry1->Doom();
+ entry1->Close();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, InternalSyncIO) {
+ InitCache();
+ InternalSyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ InternalSyncIO();
+}
+
+void DiskCacheEntryTest::InternalAsyncIO() {
+ disk_cache::Entry *entry1 = NULL;
+ ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+ ASSERT_TRUE(NULL != entry1);
+
+ // Avoid using internal buffers for the test. We have to write something to
+ // the entry and close it so that we flush the internal buffer to disk. After
+ // that, IO operations will be really hitting the disk. We don't care about
+ // the content, so just extending the entry is enough (all extensions zero-
+ // fill any holes).
+ EXPECT_EQ(0, entry1->WriteData(0, 15 * 1024, NULL, 0, NULL, false));
+ EXPECT_EQ(0, entry1->WriteData(1, 15 * 1024, NULL, 0, NULL, false));
+ entry1->Close();
+ ASSERT_TRUE(cache_->OpenEntry("the first key", &entry1));
+
+ // Let's verify that each IO goes to the right callback object.
+ CallbackTest callback1(false);
+ CallbackTest callback2(false);
+ CallbackTest callback3(false);
+ CallbackTest callback4(false);
+ CallbackTest callback5(false);
+ CallbackTest callback6(false);
+ CallbackTest callback7(false);
+ CallbackTest callback8(false);
+ CallbackTest callback9(false);
+ CallbackTest callback10(false);
+ CallbackTest callback11(false);
+ CallbackTest callback12(false);
+ CallbackTest callback13(false);
+
+ g_cache_tests_error = false;
+ g_cache_tests_received = 0;
+
+ MessageLoopHelper helper;
+
+ const int kSize1 = 10;
+ const int kSize2 = 5000;
+ const int kSize3 = 10000;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
+ scoped_refptr<net::IOBuffer> buffer3 = new net::IOBuffer(kSize3);
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kSize2, false);
+ CacheTestFillBuffer(buffer3->data(), kSize3, false);
+
+ EXPECT_EQ(0, entry1->ReadData(0, 15 * 1024, buffer1, kSize1, &callback1));
+ base::strlcpy(buffer1->data(), "the data", kSize1);
+ int expected = 0;
+ int ret = entry1->WriteData(0, 0, buffer1, kSize1, &callback2, false);
+ EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ memset(buffer2->data(), 0, kSize2);
+ ret = entry1->ReadData(0, 0, buffer2, kSize1, &callback3);
+ EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_STREQ("the data", buffer2->data());
+
+ base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
+ ret = entry1->WriteData(1, 1500, buffer2, kSize2, &callback4, true);
+ EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ memset(buffer3->data(), 0, kSize3);
+ ret = entry1->ReadData(1, 1511, buffer3, kSize2, &callback5);
+ EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_STREQ("big data goes here", buffer3->data());
+ ret = entry1->ReadData(1, 0, buffer2, kSize2, &callback6);
+ EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ memset(buffer3->data(), 0, kSize3);
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
+ ret = entry1->ReadData(1, 5000, buffer2, kSize2, &callback7);
+ EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ ret = entry1->ReadData(1, 0, buffer3, kSize3, &callback9);
+ EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ ret = entry1->WriteData(1, 0, buffer3, 8192, &callback10, true);
+ EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ ret = entry1->ReadData(1, 0, buffer3, kSize3, &callback11);
+ EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_EQ(8192, entry1->GetDataSize(1));
+
+ ret = entry1->ReadData(0, 0, buffer1, kSize1, &callback12);
+ EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ ret = entry1->ReadData(1, 0, buffer2, kSize2, &callback13);
+ EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+ EXPECT_FALSE(g_cache_tests_error);
+ EXPECT_EQ(expected, g_cache_tests_received);
+
+ entry1->Doom();
+ entry1->Close();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
+ InitCache();
+ InternalAsyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ InternalAsyncIO();
+}
+
+void DiskCacheEntryTest::ExternalSyncIO() {
+ disk_cache::Entry *entry1;
+ ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+
+ const int kSize1 = 17000;
+ const int kSize2 = 25000;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kSize2, false);
+ base::strlcpy(buffer1->data(), "the data", kSize1);
+ EXPECT_EQ(17000, entry1->WriteData(0, 0, buffer1, kSize1, NULL, false));
+ memset(buffer1->data(), 0, kSize1);
+ EXPECT_EQ(17000, entry1->ReadData(0, 0, buffer1, kSize1, NULL));
+ EXPECT_STREQ("the data", buffer1->data());
+
+ base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
+ EXPECT_EQ(25000, entry1->WriteData(1, 10000, buffer2, kSize2, NULL, false));
+ memset(buffer2->data(), 0, kSize2);
+ EXPECT_EQ(24989, entry1->ReadData(1, 10011, buffer2, kSize2, NULL));
+ EXPECT_STREQ("big data goes here", buffer2->data());
+ EXPECT_EQ(25000, entry1->ReadData(1, 0, buffer2, kSize2, NULL));
+ EXPECT_EQ(0, memcmp(buffer2->data(), buffer2->data(), 10000));
+ EXPECT_EQ(5000, entry1->ReadData(1, 30000, buffer2, kSize2, NULL));
+
+ EXPECT_EQ(0, entry1->ReadData(1, 35000, buffer2, kSize2, NULL));
+ EXPECT_EQ(17000, entry1->ReadData(1, 0, buffer1, kSize1, NULL));
+ EXPECT_EQ(17000, entry1->WriteData(1, 20000, buffer1, kSize1, NULL, false));
+ EXPECT_EQ(37000, entry1->GetDataSize(1));
+
+ entry1->Doom();
+ entry1->Close();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
+ InitCache();
+ ExternalSyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ ExternalSyncIO();
+}
+
+void DiskCacheEntryTest::ExternalAsyncIO() {
+ disk_cache::Entry *entry1;
+ ASSERT_TRUE(cache_->CreateEntry("the first key", &entry1));
+
+ // Let's verify that each IO goes to the right callback object.
+ CallbackTest callback1(false);
+ CallbackTest callback2(false);
+ CallbackTest callback3(false);
+ CallbackTest callback4(false);
+ CallbackTest callback5(false);
+ CallbackTest callback6(false);
+ CallbackTest callback7(false);
+ CallbackTest callback8(false);
+ CallbackTest callback9(false);
+
+ g_cache_tests_error = false;
+ g_cache_tests_received = 0;
+ int expected = 0;
+
+ MessageLoopHelper helper;
+
+ const int kSize1 = 17000;
+ const int kSize2 = 25000;
+ const int kSize3 = 25000;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
+ scoped_refptr<net::IOBuffer> buffer3 = new net::IOBuffer(kSize3);
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ CacheTestFillBuffer(buffer2->data(), kSize2, false);
+ CacheTestFillBuffer(buffer3->data(), kSize3, false);
+ base::strlcpy(buffer1->data(), "the data", kSize1);
+ int ret = entry1->WriteData(0, 0, buffer1, kSize1, &callback1, false);
+ EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+ memset(buffer2->data(), 0, kSize1);
+ ret = entry1->ReadData(0, 0, buffer2, kSize1, &callback2);
+ EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_STREQ("the data", buffer1->data());
+
+ base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
+ ret = entry1->WriteData(1, 10000, buffer2, kSize2, &callback3, false);
+ EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+ memset(buffer3->data(), 0, kSize3);
+ ret = entry1->ReadData(1, 10011, buffer3, kSize3, &callback4);
+ EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_STREQ("big data goes here", buffer3->data());
+ ret = entry1->ReadData(1, 0, buffer2, kSize2, &callback5);
+ EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+ EXPECT_EQ(0, memcmp(buffer2->data(), buffer2->data(), 10000));
+ ret = entry1->ReadData(1, 30000, buffer2, kSize2, &callback6);
+ EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+
+ EXPECT_EQ(0, entry1->ReadData(1, 35000, buffer2, kSize2, &callback7));
+ ret = entry1->ReadData(1, 0, buffer1, kSize1, &callback8);
+ EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ ret = entry1->WriteData(1, 20000, buffer1, kSize1, &callback9, false);
+ EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
+ if (net::ERR_IO_PENDING == ret)
+ expected++;
+ EXPECT_EQ(37000, entry1->GetDataSize(1));
+
+ EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
+
+ EXPECT_FALSE(g_cache_tests_error);
+ EXPECT_EQ(expected, g_cache_tests_received);
+
+ entry1->Doom();
+ entry1->Close();
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
+ InitCache();
+ ExternalAsyncIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ ExternalAsyncIO();
+}
+
+void DiskCacheEntryTest::StreamAccess() {
+ disk_cache::Entry *entry = NULL;
+ ASSERT_TRUE(cache_->CreateEntry("the first key", &entry));
+ ASSERT_TRUE(NULL != entry);
+
+ const int kBufferSize = 1024;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kBufferSize);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kBufferSize);
+
+ const int kNumStreams = 3;
+ for (int i = 0; i < kNumStreams; i++) {
+ CacheTestFillBuffer(buffer1->data(), kBufferSize, false);
+ EXPECT_EQ(kBufferSize, entry->WriteData(i, 0, buffer1, kBufferSize, NULL,
+ false));
+ memset(buffer2->data(), 0, kBufferSize);
+ EXPECT_EQ(kBufferSize, entry->ReadData(i, 0, buffer2, kBufferSize, NULL));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kBufferSize));
+ }
+
+ EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
+ entry->ReadData(kNumStreams, 0, buffer1, kBufferSize, NULL));
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, StreamAccess) {
+ InitCache();
+ StreamAccess();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
+ SetMemoryOnlyMode();
+ InitCache();
+ StreamAccess();
+}
+
+void DiskCacheEntryTest::GetKey() {
+ std::string key1("the first key");
+ disk_cache::Entry *entry1;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+ EXPECT_EQ(key1, entry1->GetKey()) << "short key";
+ entry1->Close();
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+ char key_buffer[20000];
+
+ CacheTestFillBuffer(key_buffer, 3000, true);
+ key_buffer[1000] = '\0';
+
+ key1 = key_buffer;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+ EXPECT_TRUE(key1 == entry1->GetKey()) << "1000 bytes key";
+ entry1->Close();
+
+ key_buffer[1000] = 'p';
+ key_buffer[3000] = '\0';
+ key1 = key_buffer;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+ EXPECT_TRUE(key1 == entry1->GetKey()) << "medium size key";
+ entry1->Close();
+
+ CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
+ key_buffer[19999] = '\0';
+
+ key1 = key_buffer;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+ EXPECT_TRUE(key1 == entry1->GetKey()) << "long key";
+ entry1->Close();
+}
+
+TEST_F(DiskCacheEntryTest, GetKey) {
+ InitCache();
+ GetKey();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
+ SetMemoryOnlyMode();
+ InitCache();
+ GetKey();
+}
+
+void DiskCacheEntryTest::GrowData() {
+ std::string key1("the first key");
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+
+ const int kSize = 20000;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buffer1->data(), kSize, false);
+ memset(buffer2->data(), 0, kSize);
+
+ base::strlcpy(buffer1->data(), "the data", kSize);
+ EXPECT_EQ(10, entry1->WriteData(0, 0, buffer1, 10, NULL, false));
+ EXPECT_EQ(10, entry1->ReadData(0, 0, buffer2, 10, NULL));
+ EXPECT_STREQ("the data", buffer2->data());
+ EXPECT_EQ(10, entry1->GetDataSize(0));
+
+ EXPECT_EQ(2000, entry1->WriteData(0, 0, buffer1, 2000, NULL, false));
+ EXPECT_EQ(2000, entry1->GetDataSize(0));
+ EXPECT_EQ(2000, entry1->ReadData(0, 0, buffer2, 2000, NULL));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
+
+ EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer1, kSize, NULL, false));
+ EXPECT_EQ(20000, entry1->GetDataSize(0));
+ EXPECT_EQ(20000, entry1->ReadData(0, 0, buffer2, kSize, NULL));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
+ entry1->Close();
+
+ memset(buffer2->data(), 0, kSize);
+ ASSERT_TRUE(cache_->CreateEntry("Second key", &entry2));
+ EXPECT_EQ(10, entry2->WriteData(0, 0, buffer1, 10, NULL, false));
+ EXPECT_EQ(10, entry2->GetDataSize(0));
+ entry2->Close();
+
+ // Go from an internal address to a bigger block size.
+ ASSERT_TRUE(cache_->OpenEntry("Second key", &entry2));
+ EXPECT_EQ(2000, entry2->WriteData(0, 0, buffer1, 2000, NULL, false));
+ EXPECT_EQ(2000, entry2->GetDataSize(0));
+ EXPECT_EQ(2000, entry2->ReadData(0, 0, buffer2, 2000, NULL));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
+ entry2->Close();
+ memset(buffer2->data(), 0, kSize);
+
+ // Go from an internal address to an external one.
+ ASSERT_TRUE(cache_->OpenEntry("Second key", &entry2));
+ EXPECT_EQ(20000, entry2->WriteData(0, 0, buffer1, kSize, NULL, false));
+ EXPECT_EQ(20000, entry2->GetDataSize(0));
+ EXPECT_EQ(20000, entry2->ReadData(0, 0, buffer2, kSize, NULL));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
+ entry2->Close();
+}
+
+TEST_F(DiskCacheEntryTest, GrowData) {
+ InitCache();
+ GrowData();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
+ SetMemoryOnlyMode();
+ InitCache();
+ GrowData();
+}
+
+void DiskCacheEntryTest::TruncateData() {
+ std::string key1("the first key");
+ disk_cache::Entry *entry1;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+
+ const int kSize1 = 20000;
+ const int kSize2 = 20000;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
+
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ memset(buffer2->data(), 0, kSize2);
+
+ // Simple truncation:
+ EXPECT_EQ(200, entry1->WriteData(0, 0, buffer1, 200, NULL, false));
+ EXPECT_EQ(200, entry1->GetDataSize(0));
+ EXPECT_EQ(100, entry1->WriteData(0, 0, buffer1, 100, NULL, false));
+ EXPECT_EQ(200, entry1->GetDataSize(0));
+ EXPECT_EQ(100, entry1->WriteData(0, 0, buffer1, 100, NULL, true));
+ EXPECT_EQ(100, entry1->GetDataSize(0));
+ EXPECT_EQ(0, entry1->WriteData(0, 50, buffer1, 0, NULL, true));
+ EXPECT_EQ(50, entry1->GetDataSize(0));
+ EXPECT_EQ(0, entry1->WriteData(0, 0, buffer1, 0, NULL, true));
+ EXPECT_EQ(0, entry1->GetDataSize(0));
+ entry1->Close();
+ ASSERT_TRUE(cache_->OpenEntry(key1, &entry1));
+
+ // Go to an external file.
+ EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer1, 20000, NULL, true));
+ EXPECT_EQ(20000, entry1->GetDataSize(0));
+ EXPECT_EQ(20000, entry1->ReadData(0, 0, buffer2, 20000, NULL));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
+ memset(buffer2->data(), 0, kSize2);
+
+ // External file truncation
+ EXPECT_EQ(18000, entry1->WriteData(0, 0, buffer1, 18000, NULL, false));
+ EXPECT_EQ(20000, entry1->GetDataSize(0));
+ EXPECT_EQ(18000, entry1->WriteData(0, 0, buffer1, 18000, NULL, true));
+ EXPECT_EQ(18000, entry1->GetDataSize(0));
+ EXPECT_EQ(0, entry1->WriteData(0, 17500, buffer1, 0, NULL, true));
+ EXPECT_EQ(17500, entry1->GetDataSize(0));
+
+ // And back to an internal block.
+ EXPECT_EQ(600, entry1->WriteData(0, 1000, buffer1, 600, NULL, true));
+ EXPECT_EQ(1600, entry1->GetDataSize(0));
+ EXPECT_EQ(600, entry1->ReadData(0, 1000, buffer2, 600, NULL));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
+ EXPECT_EQ(1000, entry1->ReadData(0, 0, buffer2, 1000, NULL));
+ EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000)) <<
+ "Preserves previous data";
+
+ // Go from external file to zero length.
+ EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer1, 20000, NULL, true));
+ EXPECT_EQ(20000, entry1->GetDataSize(0));
+ EXPECT_EQ(0, entry1->WriteData(0, 0, buffer1, 0, NULL, true));
+ EXPECT_EQ(0, entry1->GetDataSize(0));
+
+ entry1->Close();
+}
+
+TEST_F(DiskCacheEntryTest, TruncateData) {
+ InitCache();
+ TruncateData();
+
+ // We generate asynchronous IO that is not really tracked until completion
+ // so we just wait here before running the next test.
+ MessageLoopHelper helper;
+ helper.WaitUntilCacheIoFinished(1);
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
+ SetMemoryOnlyMode();
+ InitCache();
+ TruncateData();
+}
+
+void DiskCacheEntryTest::ZeroLengthIO() {
+ std::string key1("the first key");
+ disk_cache::Entry *entry1;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+
+ EXPECT_EQ(0, entry1->ReadData(0, 0, NULL, 0, NULL));
+ EXPECT_EQ(0, entry1->WriteData(0, 0, NULL, 0, NULL, false));
+
+ // This write should extend the entry.
+ EXPECT_EQ(0, entry1->WriteData(0, 1000, NULL, 0, NULL, false));
+ EXPECT_EQ(0, entry1->ReadData(0, 500, NULL, 0, NULL));
+ EXPECT_EQ(0, entry1->ReadData(0, 2000, NULL, 0, NULL));
+ EXPECT_EQ(1000, entry1->GetDataSize(0));
+ entry1->Close();
+}
+
+TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
+ InitCache();
+ ZeroLengthIO();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ ZeroLengthIO();
+}
+
+// Write more than the total cache capacity but to a single entry. |size| is
+// the number of bytes to write each time.
+void DiskCacheEntryTest::ReuseEntry(int size) {
+ std::string key1("the first key");
+ disk_cache::Entry *entry;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry));
+
+ entry->Close();
+ std::string key2("the second key");
+ ASSERT_TRUE(cache_->CreateEntry(key2, &entry));
+
+ scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(size);
+ CacheTestFillBuffer(buffer->data(), size, false);
+
+ for (int i = 0; i < 15; i++) {
+ EXPECT_EQ(0, entry->WriteData(0, 0, buffer, 0, NULL, true));
+ EXPECT_EQ(size, entry->WriteData(0, 0, buffer, size, NULL, false));
+ entry->Close();
+ ASSERT_TRUE(cache_->OpenEntry(key2, &entry));
+ }
+
+ entry->Close();
+  ASSERT_TRUE(cache_->OpenEntry(key1, &entry))
+      << "should not have evicted this entry";
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
+ SetDirectMode();
+ SetMaxSize(200 * 1024);
+ InitCache();
+ ReuseEntry(20 * 1024);
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
+ SetDirectMode();
+ SetMemoryOnlyMode();
+ SetMaxSize(200 * 1024);
+ InitCache();
+ ReuseEntry(20 * 1024);
+}
+
+TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
+ SetDirectMode();
+ SetMaxSize(100 * 1024);
+ InitCache();
+ ReuseEntry(10 * 1024);
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
+ SetDirectMode();
+ SetMemoryOnlyMode();
+ SetMaxSize(100 * 1024);
+ InitCache();
+ ReuseEntry(10 * 1024);
+}
+
+// Reading somewhere that was not written should return zeros.
+void DiskCacheEntryTest::InvalidData() {
+ std::string key1("the first key");
+ disk_cache::Entry *entry1;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+
+ const int kSize1 = 20000;
+ const int kSize2 = 20000;
+ const int kSize3 = 20000;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
+ scoped_refptr<net::IOBuffer> buffer3 = new net::IOBuffer(kSize3);
+
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ memset(buffer2->data(), 0, kSize2);
+
+ // Simple data grow:
+ EXPECT_EQ(200, entry1->WriteData(0, 400, buffer1, 200, NULL, false));
+ EXPECT_EQ(600, entry1->GetDataSize(0));
+ EXPECT_EQ(100, entry1->ReadData(0, 300, buffer3, 100, NULL));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
+ entry1->Close();
+ ASSERT_TRUE(cache_->OpenEntry(key1, &entry1));
+
+ // The entry is now on disk. Load it and extend it.
+ EXPECT_EQ(200, entry1->WriteData(0, 800, buffer1, 200, NULL, false));
+ EXPECT_EQ(1000, entry1->GetDataSize(0));
+ EXPECT_EQ(100, entry1->ReadData(0, 700, buffer3, 100, NULL));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
+ entry1->Close();
+ ASSERT_TRUE(cache_->OpenEntry(key1, &entry1));
+
+ // This time using truncate.
+ EXPECT_EQ(200, entry1->WriteData(0, 1800, buffer1, 200, NULL, true));
+ EXPECT_EQ(2000, entry1->GetDataSize(0));
+ EXPECT_EQ(100, entry1->ReadData(0, 1500, buffer3, 100, NULL));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
+
+ // Go to an external file.
+ EXPECT_EQ(200, entry1->WriteData(0, 19800, buffer1, 200, NULL, false));
+ EXPECT_EQ(20000, entry1->GetDataSize(0));
+ EXPECT_EQ(4000, entry1->ReadData(0, 14000, buffer3, 4000, NULL));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));
+
+ // And back to an internal block.
+ EXPECT_EQ(600, entry1->WriteData(0, 1000, buffer1, 600, NULL, true));
+ EXPECT_EQ(1600, entry1->GetDataSize(0));
+ EXPECT_EQ(600, entry1->ReadData(0, 1000, buffer3, 600, NULL));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));
+
+ // Extend it again.
+ EXPECT_EQ(600, entry1->WriteData(0, 2000, buffer1, 600, NULL, false));
+ EXPECT_EQ(2600, entry1->GetDataSize(0));
+ EXPECT_EQ(200, entry1->ReadData(0, 1800, buffer3, 200, NULL));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
+
+ // And again (with truncation flag).
+ EXPECT_EQ(600, entry1->WriteData(0, 3000, buffer1, 600, NULL, true));
+ EXPECT_EQ(3600, entry1->GetDataSize(0));
+ EXPECT_EQ(200, entry1->ReadData(0, 2800, buffer3, 200, NULL));
+ EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
+
+ entry1->Close();
+}
+
+TEST_F(DiskCacheEntryTest, InvalidData) {
+ InitCache();
+ InvalidData();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
+ SetMemoryOnlyMode();
+ InitCache();
+ InvalidData();
+}
+
+void DiskCacheEntryTest::DoomEntry() {
+ std::string key1("the first key");
+ disk_cache::Entry *entry1;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+ entry1->Doom();
+ entry1->Close();
+
+ const int kSize = 20000;
+ scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buffer->data(), kSize, true);
+ buffer->data()[19999] = '\0';
+
+ key1 = buffer->data();
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+ EXPECT_EQ(20000, entry1->WriteData(0, 0, buffer, kSize, NULL, false));
+ EXPECT_EQ(20000, entry1->WriteData(1, 0, buffer, kSize, NULL, false));
+ entry1->Doom();
+ entry1->Close();
+
+ EXPECT_EQ(0, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, DoomEntry) {
+ InitCache();
+ DoomEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
+ SetMemoryOnlyMode();
+ InitCache();
+ DoomEntry();
+}
+
+// Verify that basic operations work as expected with doomed entries.
+void DiskCacheEntryTest::DoomedEntry() {
+ std::string key("the first key");
+ disk_cache::Entry *entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+ entry->Doom();
+
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ Time initial = Time::Now();
+ PlatformThread::Sleep(20);
+
+ const int kSize1 = 2000;
+ const int kSize2 = 2000;
+ scoped_refptr<net::IOBuffer> buffer1 = new net::IOBuffer(kSize1);
+ scoped_refptr<net::IOBuffer> buffer2 = new net::IOBuffer(kSize2);
+ CacheTestFillBuffer(buffer1->data(), kSize1, false);
+ memset(buffer2->data(), 0, kSize2);
+
+ EXPECT_EQ(2000, entry->WriteData(0, 0, buffer1, 2000, NULL, false));
+ EXPECT_EQ(2000, entry->ReadData(0, 0, buffer2, 2000, NULL));
+ EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
+ EXPECT_EQ(key, entry->GetKey());
+ EXPECT_TRUE(initial < entry->GetLastModified());
+ EXPECT_TRUE(initial < entry->GetLastUsed());
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, DoomedEntry) {
+ InitCache();
+  DoomedEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
+ SetMemoryOnlyMode();
+ InitCache();
+  DoomedEntry();
+}
+
+// Test that child entries in a memory cache backend are not visible from
+// enumerations.
+TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
+ SetMemoryOnlyMode();
+ InitCache();
+
+ const int kSize = 4096;
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ std::string key("the first key");
+ disk_cache::Entry* parent_entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &parent_entry));
+
+ // Writes to the parent entry.
+ EXPECT_EQ(kSize, parent_entry->WriteSparseData(0, buf, kSize, NULL));
+
+ // This write creates a child entry and writes to it.
+ EXPECT_EQ(kSize, parent_entry->WriteSparseData(8192, buf, kSize, NULL));
+
+ parent_entry->Close();
+
+ // Perform the enumerations.
+ void* iter = NULL;
+ disk_cache::Entry* entry = NULL;
+ int count = 0;
+ while (cache_->OpenNextEntry(&iter, &entry)) {
+ ASSERT_TRUE(entry != NULL);
+ ++count;
+ disk_cache::MemEntryImpl* mem_entry =
+ reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
+ EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
+ mem_entry->Close();
+ }
+ EXPECT_EQ(1, count);
+}
+
+// Writes |buf_1| to offset and reads it back as |buf_2|.
+void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
+ net::IOBuffer* buf_1, int size, bool async,
+ net::IOBuffer* buf_2) {
+ TestCompletionCallback callback;
+ TestCompletionCallback* cb = async ? &callback : NULL;
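+  // With a NULL callback the entry performs the IO synchronously; in that
+  // case GetResult() just returns the value it was given.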
+
+ memset(buf_2->data(), 0, size);
+ int ret = entry->ReadSparseData(offset, buf_2, size, cb);
+ ret = callback.GetResult(ret);
+ EXPECT_EQ(0, ret);
+
+ ret = entry->WriteSparseData(offset, buf_1, size, cb);
+ ret = callback.GetResult(ret);
+ EXPECT_EQ(size, ret);
+
+ ret = entry->ReadSparseData(offset, buf_2, size, cb);
+ ret = callback.GetResult(ret);
+ EXPECT_EQ(size, ret);
+
+ EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
+}
+
+// Reads |size| bytes from |entry| at |offset| and verifies that they are the
+// same as the content of the provided |buffer|.
+void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
+ int size, bool async) {
+ TestCompletionCallback callback;
+ TestCompletionCallback* cb = async ? &callback : NULL;
+
+ scoped_refptr<net::IOBuffer> buf_1 = new net::IOBuffer(size);
+ memset(buf_1->data(), 0, size);
+ int ret = entry->ReadSparseData(offset, buf_1, size, cb);
+ ret = callback.GetResult(ret);
+ EXPECT_EQ(size, ret);
+
+ EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
+}
+
+void DiskCacheEntryTest::BasicSparseIO(bool async) {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+ const int kSize = 2048;
+ scoped_refptr<net::IOBuffer> buf_1 = new net::IOBuffer(kSize);
+ scoped_refptr<net::IOBuffer> buf_2 = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ // Write at offset 0.
+ VerifySparseIO(entry, 0, buf_1, kSize, async, buf_2);
+
+ // Write at offset 0x400000 (4 MB).
+ VerifySparseIO(entry, 0x400000, buf_1, kSize, async, buf_2);
+
+ // Write at offset 0x800000000 (32 GB).
+ VerifySparseIO(entry, 0x800000000LL, buf_1, kSize, async, buf_2);
+
+ entry->Close();
+
+ // Check everything again.
+ ASSERT_TRUE(cache_->OpenEntry(key, &entry));
+ VerifyContentSparseIO(entry, 0, buf_1->data(), kSize, async);
+ VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize, async);
+ VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize, async);
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, BasicSparseSyncIO) {
+ InitCache();
+ BasicSparseIO(false);
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseSyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ BasicSparseIO(false);
+}
+
+TEST_F(DiskCacheEntryTest, BasicSparseAsyncIO) {
+ InitCache();
+ BasicSparseIO(true);
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseAsyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ BasicSparseIO(true);
+}
+
+void DiskCacheEntryTest::HugeSparseIO(bool async) {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+ // Write 1.2 MB so that we cover multiple entries.
+ const int kSize = 1200 * 1024;
+ scoped_refptr<net::IOBuffer> buf_1 = new net::IOBuffer(kSize);
+ scoped_refptr<net::IOBuffer> buf_2 = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ // Write at offset 0x20F0000 (33 MB - 64 KB).
+ VerifySparseIO(entry, 0x20F0000, buf_1, kSize, async, buf_2);
+ entry->Close();
+
+ // Check it again.
+ ASSERT_TRUE(cache_->OpenEntry(key, &entry));
+ VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize, async);
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, HugeSparseSyncIO) {
+ InitCache();
+ HugeSparseIO(false);
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseSyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ HugeSparseIO(false);
+}
+
+TEST_F(DiskCacheEntryTest, HugeSparseAsyncIO) {
+ InitCache();
+ HugeSparseIO(true);
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseAsyncIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+ HugeSparseIO(true);
+}
+
+void DiskCacheEntryTest::GetAvailableRange() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+ const int kSize = 16 * 1024;
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
+ EXPECT_EQ(kSize, entry->WriteSparseData(0x20F0000, buf, kSize, NULL));
+ EXPECT_EQ(kSize, entry->WriteSparseData(0x20F4400, buf, kSize, NULL));
+
+ // We stop at the first empty block.
+ int64 start;
+ EXPECT_EQ(kSize, entry->GetAvailableRange(0x20F0000, kSize * 2, &start));
+ EXPECT_EQ(0x20F0000, start);
+
+ start = 0;
+ EXPECT_EQ(0, entry->GetAvailableRange(0, kSize, &start));
+ EXPECT_EQ(0, entry->GetAvailableRange(0x20F0000 - kSize, kSize, &start));
+ EXPECT_EQ(kSize, entry->GetAvailableRange(0, 0x2100000, &start));
+ EXPECT_EQ(0x20F0000, start);
+
+ // We should be able to Read based on the results of GetAvailableRange.
+ start = -1;
+ EXPECT_EQ(0, entry->GetAvailableRange(0x2100000, kSize, &start));
+ EXPECT_EQ(0, entry->ReadSparseData(start, buf, kSize, NULL));
+
+ start = 0;
+ EXPECT_EQ(0x2000, entry->GetAvailableRange(0x20F2000, kSize, &start));
+ EXPECT_EQ(0x20F2000, start);
+ EXPECT_EQ(0x2000, entry->ReadSparseData(start, buf, kSize, NULL));
+
+ // Make sure that we respect the |len| argument.
+ start = 0;
+ EXPECT_EQ(1, entry->GetAvailableRange(0x20F0001 - kSize, kSize, &start));
+ EXPECT_EQ(0x20F0000, start);
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, GetAvailableRange) {
+ InitCache();
+ GetAvailableRange();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
+ SetMemoryOnlyMode();
+ InitCache();
+ GetAvailableRange();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
+ SetMemoryOnlyMode();
+ InitCache();
+
+ const int kSize = 8192;
+ scoped_refptr<net::IOBuffer> buf_1 = new net::IOBuffer(kSize);
+ scoped_refptr<net::IOBuffer> buf_2 = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf_1->data(), kSize, false);
+
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+  // This loop writes back-to-back chunks, starting at offsets 0 and 9000.
+ for (int i = 0; i < kSize; i += 1024) {
+ scoped_refptr<net::WrappedIOBuffer> buf_3 =
+ new net::WrappedIOBuffer(buf_1->data() + i);
+ VerifySparseIO(entry, i, buf_3, 1024, false, buf_2);
+ VerifySparseIO(entry, 9000 + i, buf_3, 1024, false, buf_2);
+ }
+
+ // Make sure we have data written.
+ VerifyContentSparseIO(entry, 0, buf_1->data(), kSize, false);
+ VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize, false);
+
+ // This tests a large write that spans 3 entries from a misaligned offset.
+ VerifySparseIO(entry, 20481, buf_1, 8192, false, buf_2);
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
+ SetMemoryOnlyMode();
+ InitCache();
+
+ const int kSize = 8192;
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ disk_cache::Entry* entry;
+ std::string key("the first key");
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+ // Writes in the middle of an entry.
+ EXPECT_EQ(1024, entry->WriteSparseData(0, buf, 1024, NULL));
+ EXPECT_EQ(1024, entry->WriteSparseData(5120, buf, 1024, NULL));
+ EXPECT_EQ(1024, entry->WriteSparseData(10000, buf, 1024, NULL));
+
+ // Writes in the middle of an entry and spans 2 child entries.
+ EXPECT_EQ(8192, entry->WriteSparseData(50000, buf, 8192, NULL));
+
+ int64 start;
+ // Test that we stop at a discontinuous child at the second block.
+ EXPECT_EQ(1024, entry->GetAvailableRange(0, 10000, &start));
+ EXPECT_EQ(0, start);
+
+  // Test that the number of bytes is reported correctly when we start from
+  // the middle of a filled region.
+ EXPECT_EQ(512, entry->GetAvailableRange(512, 10000, &start));
+ EXPECT_EQ(512, start);
+
+  // Test that we find bytes in the child of the next block.
+ EXPECT_EQ(1024, entry->GetAvailableRange(1024, 10000, &start));
+ EXPECT_EQ(5120, start);
+
+ // Test that the desired length is respected. It starts within a filled
+ // region.
+ EXPECT_EQ(512, entry->GetAvailableRange(5500, 512, &start));
+ EXPECT_EQ(5500, start);
+
+ // Test that the desired length is respected. It starts before a filled
+ // region.
+ EXPECT_EQ(500, entry->GetAvailableRange(5000, 620, &start));
+ EXPECT_EQ(5120, start);
+
+ // Test that multiple blocks are scanned.
+ EXPECT_EQ(8192, entry->GetAvailableRange(40000, 20000, &start));
+ EXPECT_EQ(50000, start);
+
+ entry->Close();
+}
+
+void DiskCacheEntryTest::DoomSparseEntry() {
+ std::string key1("the first key");
+ std::string key2("the second key");
+ disk_cache::Entry *entry1, *entry2;
+ ASSERT_TRUE(cache_->CreateEntry(key1, &entry1));
+ ASSERT_TRUE(cache_->CreateEntry(key2, &entry2));
+
+ const int kSize = 4 * 1024;
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ int64 offset = 1024;
+ // Write to a bunch of ranges.
+ for (int i = 0; i < 12; i++) {
+ EXPECT_EQ(kSize, entry1->WriteSparseData(offset, buf, kSize, NULL));
+ // Keep the second map under the default size.
+ if (i < 9)
+ EXPECT_EQ(kSize, entry2->WriteSparseData(offset, buf, kSize, NULL));
+ offset *= 4;
+ }
+
+ if (memory_only_)
+ EXPECT_EQ(2, cache_->GetEntryCount());
+ else
+ EXPECT_EQ(15, cache_->GetEntryCount());
+
+ // Doom the first entry while it's still open.
+ entry1->Doom();
+ entry1->Close();
+ entry2->Close();
+
+ // Doom the second entry after it's fully saved.
+ EXPECT_TRUE(cache_->DoomEntry(key2));
+
+  // Make sure we do all the needed work. This may fail for entry2 if, between
+  // Close and DoomEntry, the system decides to remove all traces of the file
+  // from the system cache, so we don't see that there is pending IO.
+ MessageLoop::current()->RunAllPending();
+
+ if (memory_only_) {
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ } else {
+ if (5 == cache_->GetEntryCount()) {
+ // Most likely we are waiting for the result of reading the sparse info
+ // (it's always async on Posix so it is easy to miss). Unfortunately we
+ // don't have any signal to watch for so we can only wait.
+ PlatformThread::Sleep(500);
+ MessageLoop::current()->RunAllPending();
+ }
+ EXPECT_EQ(0, cache_->GetEntryCount());
+ }
+}
+
+TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
+ InitCache();
+ DoomSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
+ SetMemoryOnlyMode();
+ InitCache();
+ DoomSparseEntry();
+}
+
+void DiskCacheEntryTest::PartialSparseEntry() {
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+ // We should be able to deal with IO that is not aligned to the block size
+ // of a sparse entry, at least to write a big range without leaving holes.
+ const int kSize = 4 * 1024;
+ const int kSmallSize = 128;
+ scoped_refptr<net::IOBuffer> buf1 = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf1->data(), kSize, false);
+
+  // The first write is just to extend the entry. The third write occupies
+  // a 1KB block only partially, so it may not be written internally, depending
+  // on the implementation.
+ EXPECT_EQ(kSize, entry->WriteSparseData(20000, buf1, kSize, NULL));
+ EXPECT_EQ(kSize, entry->WriteSparseData(500, buf1, kSize, NULL));
+ EXPECT_EQ(kSmallSize,
+ entry->WriteSparseData(1080321, buf1, kSmallSize, NULL));
+ entry->Close();
+ ASSERT_TRUE(cache_->OpenEntry(key, &entry));
+
+ scoped_refptr<net::IOBuffer> buf2 = new net::IOBuffer(kSize);
+ memset(buf2->data(), 0, kSize);
+ EXPECT_EQ(0, entry->ReadSparseData(8000, buf2, kSize, NULL));
+
+ EXPECT_EQ(500, entry->ReadSparseData(kSize, buf2, kSize, NULL));
+ EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
+ EXPECT_EQ(0, entry->ReadSparseData(0, buf2, kSize, NULL));
+
+ // This read should not change anything.
+ EXPECT_EQ(96, entry->ReadSparseData(24000, buf2, kSize, NULL));
+ EXPECT_EQ(500, entry->ReadSparseData(kSize, buf2, kSize, NULL));
+ EXPECT_EQ(0, entry->ReadSparseData(499, buf2, kSize, NULL));
+
+ int64 start;
+ if (memory_only_) {
+ EXPECT_EQ(100, entry->GetAvailableRange(0, 600, &start));
+ EXPECT_EQ(500, start);
+ } else {
+ EXPECT_EQ(1024, entry->GetAvailableRange(0, 2048, &start));
+ EXPECT_EQ(1024, start);
+ }
+ EXPECT_EQ(500, entry->GetAvailableRange(kSize, kSize, &start));
+ EXPECT_EQ(kSize, start);
+ EXPECT_EQ(3616, entry->GetAvailableRange(20 * 1024, 10000, &start));
+ EXPECT_EQ(20 * 1024, start);
+
+ // 1. Query before a filled 1KB block.
+ // 2. Query within a filled 1KB block.
+ // 3. Query beyond a filled 1KB block.
+ if (memory_only_) {
+ EXPECT_EQ(3496, entry->GetAvailableRange(19400, kSize, &start));
+ EXPECT_EQ(20000, start);
+ } else {
+ EXPECT_EQ(3016, entry->GetAvailableRange(19400, kSize, &start));
+ EXPECT_EQ(20480, start);
+ }
+ EXPECT_EQ(1523, entry->GetAvailableRange(3073, kSize, &start));
+ EXPECT_EQ(3073, start);
+ EXPECT_EQ(0, entry->GetAvailableRange(4600, kSize, &start));
+ EXPECT_EQ(4600, start);
+
+ // Now make another write and verify that there is no hole in between.
+ EXPECT_EQ(kSize, entry->WriteSparseData(500 + kSize, buf1, kSize, NULL));
+ EXPECT_EQ(7 * 1024 + 500, entry->GetAvailableRange(1024, 10000, &start));
+ EXPECT_EQ(1024, start);
+ EXPECT_EQ(kSize, entry->ReadSparseData(kSize, buf2, kSize, NULL));
+ EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
+ EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));
+
+ entry->Close();
+}
+
+TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
+ InitCache();
+ PartialSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
+ SetMemoryOnlyMode();
+ InitCache();
+ PartialSparseEntry();
+}
+
+TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+ // Corrupt sparse children should be removed automatically.
+ const int kSize = 4 * 1024;
+ scoped_refptr<net::IOBuffer> buf1 = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf1->data(), kSize, false);
+
+ const int k1Meg = 1024 * 1024;
+ EXPECT_EQ(kSize, entry->WriteSparseData(8192, buf1, kSize, NULL));
+ EXPECT_EQ(kSize, entry->WriteSparseData(k1Meg + 8192, buf1, kSize, NULL));
+ EXPECT_EQ(kSize, entry->WriteSparseData(2 * k1Meg + 8192, buf1, kSize, NULL));
+ entry->Close();
+ EXPECT_EQ(4, cache_->GetEntryCount());
+
+ void* iter = NULL;
+ int count = 0;
+ std::string child_key[2];
+ while (cache_->OpenNextEntry(&iter, &entry)) {
+ ASSERT_TRUE(entry != NULL);
+ // Writing to an entry will alter the LRU list and invalidate the iterator.
+ if (entry->GetKey() != key && count < 2)
+ child_key[count++] = entry->GetKey();
+ entry->Close();
+ }
+ for (int i = 0; i < 2; i++) {
+ ASSERT_TRUE(cache_->OpenEntry(child_key[i], &entry));
+ // Overwrite the header's magic and signature.
+ EXPECT_EQ(12, entry->WriteData(2, 0, buf1, 12, NULL, false));
+ entry->Close();
+ }
+
+ EXPECT_EQ(4, cache_->GetEntryCount());
+ ASSERT_TRUE(cache_->OpenEntry(key, &entry));
+
+  // Two children should be gone: one while reading and one while writing.
+ EXPECT_EQ(0, entry->ReadSparseData(2 * k1Meg + 8192, buf1, kSize, NULL));
+ EXPECT_EQ(kSize, entry->WriteSparseData(k1Meg + 16384, buf1, kSize, NULL));
+ EXPECT_EQ(0, entry->ReadSparseData(k1Meg + 8192, buf1, kSize, NULL));
+
+ // We never touched this one.
+ EXPECT_EQ(kSize, entry->ReadSparseData(8192, buf1, kSize, NULL));
+ entry->Close();
+
+ // We re-created one of the corrupt children.
+ EXPECT_EQ(3, cache_->GetEntryCount());
+}
+
+TEST_F(DiskCacheEntryTest, CancelSparseIO) {
+ InitCache();
+ std::string key("the first key");
+ disk_cache::Entry* entry;
+ ASSERT_TRUE(cache_->CreateEntry(key, &entry));
+
+ const int kSize = 40 * 1024;
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(kSize);
+ CacheTestFillBuffer(buf->data(), kSize, false);
+
+ TestCompletionCallback cb1, cb2, cb3, cb4;
+ int64 offset = 0;
+ int tries = 0;
+ const int maxtries = 100; // Avoid hang on infinitely fast disks
+ for (int ret = 0; ret != net::ERR_IO_PENDING; offset += kSize * 4) {
+ ret = entry->WriteSparseData(offset, buf, kSize, &cb1);
+ if (++tries > maxtries) {
+ LOG(ERROR) << "Data writes never come back PENDING; skipping test";
+ entry->Close();
+ return;
+ }
+ }
+
+ // Cannot use the entry at this point.
+ offset = 0;
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->GetAvailableRange(offset, kSize, &offset));
+ EXPECT_EQ(net::OK, entry->ReadyForSparseIO(&cb2));
+
+ // We cancel the pending operation, and register multiple notifications.
+ entry->CancelSparseIO();
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb2));
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb3));
+ entry->CancelSparseIO(); // Should be a no op at this point.
+ EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(&cb4));
+
+ offset = 0;
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->GetAvailableRange(offset, kSize, &offset));
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->ReadSparseData(offset, buf, kSize, NULL));
+ EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
+ entry->WriteSparseData(offset, buf, kSize, NULL));
+
+ // Now see if we receive all notifications.
+ EXPECT_EQ(kSize, cb1.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(net::OK, cb2.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(net::OK, cb3.GetResult(net::ERR_IO_PENDING));
+ EXPECT_EQ(net::OK, cb4.GetResult(net::ERR_IO_PENDING));
+
+ EXPECT_EQ(kSize, entry->GetAvailableRange(offset, kSize, &offset));
+ EXPECT_EQ(net::OK, entry->ReadyForSparseIO(&cb2));
+ entry->Close();
+}
diff --git a/net/disk_cache/errors.h b/net/disk_cache/errors.h
new file mode 100644
index 00000000..4bf6f72f
--- /dev/null
+++ b/net/disk_cache/errors.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Error codes reported by self tests or to UMA.
+
+#ifndef NET_DISK_CACHE_ERRORS_H__
+#define NET_DISK_CACHE_ERRORS_H__
+
+namespace disk_cache {
+
+enum {
+ ERR_INIT_FAILED = -1,
+ ERR_INVALID_TAIL = -2,
+ ERR_INVALID_HEAD = -3,
+ ERR_INVALID_PREV = -4,
+ ERR_INVALID_NEXT = -5,
+ ERR_INVALID_ENTRY = -6,
+ ERR_INVALID_ADDRESS = -7,
+ ERR_INVALID_LINKS = -8,
+ ERR_NUM_ENTRIES_MISMATCH = -9,
+ ERR_READ_FAILURE = -10,
+ ERR_PREVIOUS_CRASH = -11,
+ ERR_STORAGE_ERROR = -12,
+ ERR_INVALID_MASK = -13
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_ERRORS_H__
diff --git a/net/disk_cache/eviction.cc b/net/disk_cache/eviction.cc
new file mode 100644
index 00000000..bdcef6b4
--- /dev/null
+++ b/net/disk_cache/eviction.cc
@@ -0,0 +1,488 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The eviction policy is a very simple pure LRU, so the elements at the end of
+// the list are evicted until kCleanUpMargin free space is available. There is
+// only one list in use (Rankings::NO_USE), and elements are sent to the front
+// of the list whenever they are accessed.
+
+// The new (in-development) eviction policy adds reuse as a factor when
+// evicting an entry. The story so far:
+
+// Entries are linked on separate lists depending on how often they are used.
+// When we see an element for the first time, it goes to the NO_USE list; if
+// the object is reused later on, we move it to the LOW_USE list, until it is
+// used kHighUse times, at which point it is moved to the HIGH_USE list.
+// Whenever an element is evicted, we move it to the DELETED list so that if the
+// element is accessed again, we remember the fact that it was already stored
+// and may avoid evicting it again in the future.
+
+// When we have to evict an element, first we try to use the last element from
+// the NO_USE list, then we move on to the LOW_USE list, and only then do we
+// evict an entry from the HIGH_USE list. We attempt to keep entries in the
+// cache for at least kTargetTime hours (with frequently accessed items stored
+// for longer periods), but if we cannot do that, we fall back to keeping each
+// list roughly the same size, so that we have a chance to see an element again
+// and move it to another list.
+
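+// As a concrete sketch, list selection for a live entry follows reuse_count
+// (see GetListForEntryV2() below):
+//
+//   reuse_count == 0       -> Rankings::NO_USE
+//   0 < reuse_count < 10   -> Rankings::LOW_USE   (kHighUse is 10)
+//   reuse_count >= 10      -> Rankings::HIGH_USE
+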
+#include "net/disk_cache/eviction.h"
+
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "base/time.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/histogram_macros.h"
+#include "net/disk_cache/trace.h"
+
+using base::Time;
+
+namespace {
+
+const int kCleanUpMargin = 1024 * 1024;
+const int kHighUse = 10; // Reuse count to be on the HIGH_USE list.
+const int kTargetTime = 24 * 7; // Time to be evicted (hours since last use).
+
+int LowWaterAdjust(int high_water) {
+ if (high_water < kCleanUpMargin)
+ return 0;
+
+ return high_water - kCleanUpMargin;
+}
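+// For example, with a 20 MB cache limit, LowWaterAdjust() yields a 19 MB
+// target, so TrimCache() keeps evicting until usage falls about 1 MB
+// (kCleanUpMargin) below the limit.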
+
+} // namespace
+
+namespace disk_cache {
+
+void Eviction::Init(BackendImpl* backend) {
+ // We grab a bunch of info from the backend to make the code a little cleaner
+ // when we're actually doing work.
+ backend_ = backend;
+ rankings_ = &backend->rankings_;
+ header_ = &backend_->data_->header;
+ max_size_ = LowWaterAdjust(backend_->max_size_);
+ new_eviction_ = backend->new_eviction_;
+ first_trim_ = true;
+ trimming_ = false;
+ delay_trim_ = false;
+}
+
+void Eviction::TrimCache(bool empty) {
+ if (new_eviction_)
+ return TrimCacheV2(empty);
+
+ if (backend_->disabled_ || trimming_)
+ return;
+
+ if (!empty && backend_->IsLoaded())
+ return PostDelayedTrim();
+
+ Trace("*** Trim Cache ***");
+ trimming_ = true;
+ Time start = Time::Now();
+ Rankings::ScopedRankingsBlock node(rankings_);
+ Rankings::ScopedRankingsBlock next(rankings_,
+ rankings_->GetPrev(node.get(), Rankings::NO_USE));
+ int target_size = empty ? 0 : max_size_;
+ while (header_->num_bytes > target_size && next.get()) {
+ // The iterator could be invalidated within EvictEntry().
+ if (!next->HasData())
+ break;
+ node.reset(next.release());
+ next.reset(rankings_->GetPrev(node.get(), Rankings::NO_USE));
+ if (node->Data()->dirty != backend_->GetCurrentEntryId() || empty) {
+ // This entry is not being used by anybody.
+ // Do NOT use node as an iterator after this point.
+ rankings_->TrackRankingsBlock(node.get(), false);
+ if (!EvictEntry(node.get(), empty))
+ continue;
+
+ if (!empty) {
+ backend_->OnEvent(Stats::TRIM_ENTRY);
+
+ if ((Time::Now() - start).InMilliseconds() > 20) {
+ MessageLoop::current()->PostTask(FROM_HERE,
+ factory_.NewRunnableMethod(&Eviction::TrimCache, false));
+ break;
+ }
+ }
+ }
+ }
+
+ CACHE_UMA(AGE_MS, "TotalTrimTime", backend_->GetSizeGroup(), start);
+ trimming_ = false;
+ Trace("*** Trim Cache end ***");
+ return;
+}
+
+void Eviction::UpdateRank(EntryImpl* entry, bool modified) {
+ if (new_eviction_)
+ return UpdateRankV2(entry, modified);
+
+ rankings_->UpdateRank(entry->rankings(), modified, GetListForEntry(entry));
+}
+
+void Eviction::OnOpenEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnOpenEntryV2(entry);
+}
+
+void Eviction::OnCreateEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnCreateEntryV2(entry);
+
+ rankings_->Insert(entry->rankings(), true, GetListForEntry(entry));
+}
+
+void Eviction::OnDoomEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnDoomEntryV2(entry);
+
+ rankings_->Remove(entry->rankings(), GetListForEntry(entry));
+}
+
+void Eviction::OnDestroyEntry(EntryImpl* entry) {
+ if (new_eviction_)
+ return OnDestroyEntryV2(entry);
+}
+
+void Eviction::PostDelayedTrim() {
+ // Prevent posting multiple tasks.
+ if (delay_trim_)
+ return;
+ delay_trim_ = true;
+ MessageLoop::current()->PostDelayedTask(FROM_HERE,
+ factory_.NewRunnableMethod(&Eviction::DelayedTrim), 1000);
+}
+
+void Eviction::DelayedTrim() {
+ delay_trim_ = false;
+ TrimCache(false);
+}
+
+void Eviction::ReportTrimTimes(EntryImpl* entry) {
+ if (first_trim_) {
+ first_trim_ = false;
+ if (backend_->ShouldReportAgain()) {
+ CACHE_UMA(AGE, "TrimAge", 0, entry->GetLastUsed());
+ ReportListStats();
+ }
+
+ if (header_->lru.filled)
+ return;
+
+ header_->lru.filled = 1;
+
+ if (header_->create_time) {
+      // This is the first entry that we have to evict; generate some noise.
+ backend_->FirstEviction();
+ } else {
+      // This is an old file, but we may want more reports from this user, so
+      // let's save some create_time.
+ Time::Exploded old = {0};
+ old.year = 2009;
+ old.month = 3;
+ old.day_of_month = 1;
+ header_->create_time = Time::FromLocalExploded(old).ToInternalValue();
+ }
+ }
+}
+
+Rankings::List Eviction::GetListForEntry(EntryImpl* entry) {
+ return Rankings::NO_USE;
+}
+
+bool Eviction::EvictEntry(CacheRankingsBlock* node, bool empty) {
+ EntryImpl* entry = backend_->GetEnumeratedEntry(node, true);
+ if (!entry) {
+ Trace("NewEntry failed on Trim 0x%x", node->address().value());
+ return false;
+ }
+
+ ReportTrimTimes(entry);
+ if (empty || !new_eviction_) {
+ entry->Doom();
+ } else {
+ entry->DeleteEntryData(false);
+ EntryStore* info = entry->entry()->Data();
+ DCHECK(ENTRY_NORMAL == info->state);
+
+ rankings_->Remove(entry->rankings(), GetListForEntryV2(entry));
+ info->state = ENTRY_EVICTED;
+ entry->entry()->Store();
+ rankings_->Insert(entry->rankings(), true, Rankings::DELETED);
+ backend_->OnEvent(Stats::TRIM_ENTRY);
+ }
+ entry->Release();
+
+ return true;
+}
+
+// -----------------------------------------------------------------------
+
+void Eviction::TrimCacheV2(bool empty) {
+ if (backend_->disabled_ || trimming_)
+ return;
+
+ if (!empty && backend_->IsLoaded())
+ return PostDelayedTrim();
+
+ Trace("*** Trim Cache ***");
+ trimming_ = true;
+ Time start = Time::Now();
+
+ const int kListsToSearch = 3;
+ Rankings::ScopedRankingsBlock next[kListsToSearch];
+ int list = Rankings::LAST_ELEMENT;
+
+ // Get a node from each list.
+  bool done = false;
+  for (int i = 0; i < kListsToSearch; i++) {
+    next[i].set_rankings(rankings_);
+    if (done)
+      continue;
+ next[i].reset(rankings_->GetPrev(NULL, static_cast<Rankings::List>(i)));
+ if (!empty && NodeIsOldEnough(next[i].get(), i)) {
+ list = static_cast<Rankings::List>(i);
+ done = true;
+ }
+ }
+
+  // If we are not meeting the time targets, let's move on to list length.
+ if (!empty && Rankings::LAST_ELEMENT == list) {
+ list = SelectListByLenght();
+ // Make sure that frequently used items are kept for a minimum time; we know
+ // that this entry is not older than its current target, but it must be at
+ // least older than the target for list 0 (kTargetTime).
+ if ((Rankings::HIGH_USE == list || Rankings::LOW_USE == list) &&
+ !NodeIsOldEnough(next[list].get(), 0))
+ list = 0;
+ }
+
+ if (empty)
+ list = 0;
+
+ Rankings::ScopedRankingsBlock node(rankings_);
+
+ int target_size = empty ? 0 : max_size_;
+ for (; list < kListsToSearch; list++) {
+ while (header_->num_bytes > target_size && next[list].get()) {
+ // The iterator could be invalidated within EvictEntry().
+ if (!next[list]->HasData())
+ break;
+ node.reset(next[list].release());
+ next[list].reset(rankings_->GetPrev(node.get(),
+ static_cast<Rankings::List>(list)));
+ if (node->Data()->dirty != backend_->GetCurrentEntryId() || empty) {
+ // This entry is not being used by anybody.
+ // Do NOT use node as an iterator after this point.
+ rankings_->TrackRankingsBlock(node.get(), false);
+ if (!EvictEntry(node.get(), empty))
+ continue;
+
+ if (!empty && (Time::Now() - start).InMilliseconds() > 20) {
+ MessageLoop::current()->PostTask(FROM_HERE,
+ factory_.NewRunnableMethod(&Eviction::TrimCache, false));
+ break;
+ }
+ }
+ }
+ if (!empty)
+ list = kListsToSearch;
+ }
+
+ if (empty) {
+ TrimDeleted(true);
+ } else if (header_->lru.sizes[Rankings::DELETED] > header_->num_entries / 4) {
+ MessageLoop::current()->PostTask(FROM_HERE,
+ factory_.NewRunnableMethod(&Eviction::TrimDeleted, empty));
+ }
+
+ CACHE_UMA(AGE_MS, "TotalTrimTime", backend_->GetSizeGroup(), start);
+ Trace("*** Trim Cache end ***");
+ trimming_ = false;
+ return;
+}
+
+void Eviction::UpdateRankV2(EntryImpl* entry, bool modified) {
+ rankings_->UpdateRank(entry->rankings(), modified, GetListForEntryV2(entry));
+}
+
+void Eviction::OnOpenEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ DCHECK(ENTRY_NORMAL == info->state);
+
+ if (info->reuse_count < kint32max) {
+ info->reuse_count++;
+ entry->entry()->set_modified();
+
+ // We may need to move this to a new list.
+ if (1 == info->reuse_count) {
+ rankings_->Remove(entry->rankings(), Rankings::NO_USE);
+ rankings_->Insert(entry->rankings(), false, Rankings::LOW_USE);
+ entry->entry()->Store();
+ } else if (kHighUse == info->reuse_count) {
+ rankings_->Remove(entry->rankings(), Rankings::LOW_USE);
+ rankings_->Insert(entry->rankings(), false, Rankings::HIGH_USE);
+ entry->entry()->Store();
+ }
+ }
+}
+
+void Eviction::OnCreateEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ switch (info->state) {
+ case ENTRY_NORMAL: {
+ DCHECK(!info->reuse_count);
+ DCHECK(!info->refetch_count);
+ break;
+ };
+ case ENTRY_EVICTED: {
+ if (info->refetch_count < kint32max)
+ info->refetch_count++;
+
+ if (info->refetch_count > kHighUse && info->reuse_count < kHighUse) {
+ info->reuse_count = kHighUse;
+ } else {
+ info->reuse_count++;
+ }
+ info->state = ENTRY_NORMAL;
+ entry->entry()->Store();
+ rankings_->Remove(entry->rankings(), Rankings::DELETED);
+ break;
+ };
+ default:
+ NOTREACHED();
+ }
+
+ rankings_->Insert(entry->rankings(), true, GetListForEntryV2(entry));
+}
+
+void Eviction::OnDoomEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ if (ENTRY_NORMAL != info->state)
+ return;
+
+ rankings_->Remove(entry->rankings(), GetListForEntryV2(entry));
+
+ info->state = ENTRY_DOOMED;
+ entry->entry()->Store();
+ rankings_->Insert(entry->rankings(), true, Rankings::DELETED);
+}
+
+void Eviction::OnDestroyEntryV2(EntryImpl* entry) {
+ rankings_->Remove(entry->rankings(), Rankings::DELETED);
+}
+
+Rankings::List Eviction::GetListForEntryV2(EntryImpl* entry) {
+ EntryStore* info = entry->entry()->Data();
+ DCHECK(ENTRY_NORMAL == info->state);
+
+ if (!info->reuse_count)
+ return Rankings::NO_USE;
+
+ if (info->reuse_count < kHighUse)
+ return Rankings::LOW_USE;
+
+ return Rankings::HIGH_USE;
+}
+
+// This is a minimal implementation that just discards the oldest nodes.
+// TODO(rvargas): Do something better here.
+void Eviction::TrimDeleted(bool empty) {
+ Trace("*** Trim Deleted ***");
+ if (backend_->disabled_)
+ return;
+
+ Time start = Time::Now();
+ Rankings::ScopedRankingsBlock node(rankings_);
+ Rankings::ScopedRankingsBlock next(rankings_,
+ rankings_->GetPrev(node.get(), Rankings::DELETED));
+ for (int i = 0; (i < 4 || empty) && next.get(); i++) {
+ node.reset(next.release());
+ next.reset(rankings_->GetPrev(node.get(), Rankings::DELETED));
+ RemoveDeletedNode(node.get());
+ }
+
+ if (header_->lru.sizes[Rankings::DELETED] > header_->num_entries / 4)
+ MessageLoop::current()->PostTask(FROM_HERE,
+ factory_.NewRunnableMethod(&Eviction::TrimDeleted, false));
+
+ CACHE_UMA(AGE_MS, "TotalTrimDeletedTime", 0, start);
+ Trace("*** Trim Deleted end ***");
+ return;
+}
+
+bool Eviction::RemoveDeletedNode(CacheRankingsBlock* node) {
+ EntryImpl* entry;
+ bool dirty;
+ if (backend_->NewEntry(Addr(node->Data()->contents), &entry, &dirty)) {
+ Trace("NewEntry failed on Trim 0x%x", node->address().value());
+ return false;
+ }
+
+ // TODO(rvargas): figure out how to deal with corruption at this point (dirty
+ // entries that live in this list).
+ if (node->Data()->dirty) {
+ // We ignore the failure; we're removing the entry anyway.
+ entry->Update();
+ }
+ entry->entry()->Data()->state = ENTRY_DOOMED;
+ entry->Doom();
+ entry->Release();
+ return true;
+}
+
+bool Eviction::NodeIsOldEnough(CacheRankingsBlock* node, int list) {
+ if (!node)
+ return false;
+
+  // If possible, we want to keep entries on each list for at least kTargetTime
+ // hours. Each successive list on the enumeration has 2x the target time of
+ // the previous list.
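+  // With kTargetTime at 24 * 7 hours, that means older than 168 hours for
+  // list 0, 336 hours for list 1, and 672 hours for list 2.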
+ Time used = Time::FromInternalValue(node->Data()->last_used);
+ int multiplier = 1 << list;
+ return (Time::Now() - used).InHours() > kTargetTime * multiplier;
+}
+
+int Eviction::SelectListByLenght() {
+ int data_entries = header_->num_entries -
+ header_->lru.sizes[Rankings::DELETED];
+  // Start by keeping each list roughly the same size.
+ if (header_->lru.sizes[0] > data_entries / 3)
+ return 0;
+ if (header_->lru.sizes[1] > data_entries / 3)
+ return 1;
+ return 2;
+}
+
+void Eviction::ReportListStats() {
+ if (!new_eviction_)
+ return;
+
+ Rankings::ScopedRankingsBlock last1(rankings_,
+ rankings_->GetPrev(NULL, Rankings::NO_USE));
+ Rankings::ScopedRankingsBlock last2(rankings_,
+ rankings_->GetPrev(NULL, Rankings::LOW_USE));
+ Rankings::ScopedRankingsBlock last3(rankings_,
+ rankings_->GetPrev(NULL, Rankings::HIGH_USE));
+ Rankings::ScopedRankingsBlock last4(rankings_,
+ rankings_->GetPrev(NULL, Rankings::DELETED));
+
+ if (last1.get())
+ CACHE_UMA(AGE, "NoUseAge", 0,
+ Time::FromInternalValue(last1.get()->Data()->last_used));
+ if (last2.get())
+ CACHE_UMA(AGE, "LowUseAge", 0,
+ Time::FromInternalValue(last2.get()->Data()->last_used));
+ if (last3.get())
+ CACHE_UMA(AGE, "HighUseAge", 0,
+ Time::FromInternalValue(last3.get()->Data()->last_used));
+ if (last4.get())
+ CACHE_UMA(AGE, "DeletedAge", 0,
+ Time::FromInternalValue(last4.get()->Data()->last_used));
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/eviction.h b/net/disk_cache/eviction.h
new file mode 100644
index 00000000..e3c0a729
--- /dev/null
+++ b/net/disk_cache/eviction.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_EVICTION_H_
+#define NET_DISK_CACHE_EVICTION_H_
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/task.h"
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/rankings.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+class EntryImpl;
+
+// This class implements the eviction algorithm for the cache and it is tightly
+// integrated with BackendImpl.
+class Eviction {
+ public:
+ Eviction() : backend_(NULL), ALLOW_THIS_IN_INITIALIZER_LIST(factory_(this)) {}
+ ~Eviction() {}
+
+ void Init(BackendImpl* backend);
+
+  // Deletes entries from the cache until the current size is below the limit.
+  // If empty is true, the whole cache will be trimmed, regardless of whether
+  // entries are in use.
+ void TrimCache(bool empty);
+
+ // Updates the ranking information for an entry.
+ void UpdateRank(EntryImpl* entry, bool modified);
+
+ // Notifications of interesting events for a given entry.
+ void OnOpenEntry(EntryImpl* entry);
+ void OnCreateEntry(EntryImpl* entry);
+ void OnDoomEntry(EntryImpl* entry);
+ void OnDestroyEntry(EntryImpl* entry);
+
+ private:
+ void PostDelayedTrim();
+ void DelayedTrim();
+ void ReportTrimTimes(EntryImpl* entry);
+ Rankings::List GetListForEntry(EntryImpl* entry);
+ bool EvictEntry(CacheRankingsBlock* node, bool empty);
+
+  // For a while, we'll keep a separate set of methods that implement the new
+  // eviction algorithm. This code will replace the original methods when
+  // finished.
+ void TrimCacheV2(bool empty);
+ void UpdateRankV2(EntryImpl* entry, bool modified);
+ void OnOpenEntryV2(EntryImpl* entry);
+ void OnCreateEntryV2(EntryImpl* entry);
+ void OnDoomEntryV2(EntryImpl* entry);
+ void OnDestroyEntryV2(EntryImpl* entry);
+ Rankings::List GetListForEntryV2(EntryImpl* entry);
+ void TrimDeleted(bool empty);
+ bool RemoveDeletedNode(CacheRankingsBlock* node);
+
+ bool NodeIsOldEnough(CacheRankingsBlock* node, int list);
+ int SelectListByLenght();
+ void ReportListStats();
+
+ BackendImpl* backend_;
+ Rankings* rankings_;
+ IndexHeader* header_;
+ int max_size_;
+ bool new_eviction_;
+ bool first_trim_;
+ bool trimming_;
+ bool delay_trim_;
+ ScopedRunnableMethodFactory<Eviction> factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(Eviction);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_EVICTION_H_
diff --git a/net/disk_cache/file.h b/net/disk_cache/file.h
new file mode 100644
index 00000000..76c2f794
--- /dev/null
+++ b/net/disk_cache/file.h
@@ -0,0 +1,94 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_FILE_H_
+#define NET_DISK_CACHE_FILE_H_
+
+#include <string>
+
+#include "base/platform_file.h"
+#include "base/ref_counted.h"
+
+class FilePath;
+
+namespace disk_cache {
+
+// This interface is used to support asynchronous ReadData and WriteData calls.
+class FileIOCallback {
+ public:
+ // Notified of the actual number of bytes read or written. This value is
+ // negative if an error occurred.
+ virtual void OnFileIOComplete(int bytes_copied) = 0;
+
+ virtual ~FileIOCallback() {}
+};
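+
+// A hypothetical implementation, shown only as a sketch:
+//
+//   class MyCallback : public FileIOCallback {
+//    public:
+//     virtual void OnFileIOComplete(int bytes_copied) {
+//       // A negative |bytes_copied| indicates an error.
+//     }
+//   };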
+
+// Simple wrapper around a file that allows asynchronous operations.
+class File : public base::RefCounted<File> {
+ friend class base::RefCounted<File>;
+ public:
+ File() : init_(false), mixed_(false) {}
+  // Setting mixed_mode to true enables regular synchronous operations for the
+  // file.
+ explicit File(bool mixed_mode) : init_(false), mixed_(mixed_mode) {}
+
+  // Initializes the object to use the passed-in file instead of opening it
+  // with the Init() call. No asynchronous operations can be performed with
+  // this object.
+ explicit File(base::PlatformFile file);
+
+  // Initializes the object to point to a given file. The file must already
+  // exist on disk, and allow shared read and write.
+ bool Init(const FilePath& name);
+
+ // Returns the handle or file descriptor.
+ base::PlatformFile platform_file() const;
+
+ // Returns true if the file was opened properly.
+ bool IsValid() const;
+
+ // Performs synchronous IO.
+ bool Read(void* buffer, size_t buffer_len, size_t offset);
+ bool Write(const void* buffer, size_t buffer_len, size_t offset);
+
+  // Performs asynchronous IO. The callback will be called when the IO
+  // completes, as an APC on the thread that queued the operation.
+ bool Read(void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed);
+ bool Write(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed);
+
+  // Performs an asynchronous write without notifying when it completes. The
+  // buffer is deleted automatically when the write is done.
+ bool PostWrite(const void* buffer, size_t buffer_len, size_t offset);
+
+ // Sets the file's length. The file is truncated or extended with zeros to
+ // the new length.
+ bool SetLength(size_t length);
+ size_t GetLength();
+
+ // Blocks until |num_pending_io| IO operations complete.
+ static void WaitForPendingIO(int* num_pending_io);
+
+ protected:
+ virtual ~File();
+
+ // Performs the actual asynchronous write. If notify is set and there is no
+ // callback, the call will be re-synchronized.
+ bool AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+ bool notify, FileIOCallback* callback, bool* completed);
+
+ private:
+ bool init_;
+ bool mixed_;
+ base::PlatformFile platform_file_; // Regular, asynchronous IO handle.
+ base::PlatformFile sync_platform_file_; // Synchronous IO handle.
+
+ DISALLOW_COPY_AND_ASSIGN(File);
+};
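+
+// A minimal usage sketch (using the hypothetical MyCallback above; error
+// handling and buffer management omitted):
+//
+//   scoped_refptr<File> file(new File);
+//   if (file->Init(path) && file->IsValid()) {
+//     bool completed;
+//     file->Read(buffer, buffer_len, 0, &my_callback, &completed);
+//     // my_callback.OnFileIOComplete(bytes_read) is invoked later on the
+//     // thread that queued the operation.
+//   }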
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FILE_H_
diff --git a/net/disk_cache/file_block.h b/net/disk_cache/file_block.h
new file mode 100644
index 00000000..25709207
--- /dev/null
+++ b/net/disk_cache/file_block.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_FILE_BLOCK_H__
+#define NET_DISK_CACHE_FILE_BLOCK_H__
+
+namespace disk_cache {
+
+// This interface exposes common functionality for a single block of data
+// stored on a file-block, regardless of the real type or size of the block.
+// Used to simplify loading / storing the block from disk.
+class FileBlock {
+ public:
+ virtual ~FileBlock() {}
+
+ // Returns a pointer to the actual data.
+ virtual void* buffer() const = 0;
+
+  // Returns the size of the block.
+ virtual size_t size() const = 0;
+
+ // Returns the file offset of this block.
+ virtual int offset() const = 0;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FILE_BLOCK_H__
diff --git a/net/disk_cache/file_lock.cc b/net/disk_cache/file_lock.cc
new file mode 100644
index 00000000..949c645d
--- /dev/null
+++ b/net/disk_cache/file_lock.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/file_lock.h"
+
+namespace disk_cache {
+
+FileLock::FileLock(BlockFileHeader* header) {
+ updating_ = &header->updating;
+ (*updating_)++;
+ acquired_ = true;
+}
+
+void FileLock::Lock() {
+ if (acquired_)
+ return;
+ (*updating_)++;
+}
+
+void FileLock::Unlock() {
+ if (!acquired_)
+ return;
+ (*updating_)--;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/file_lock.h b/net/disk_cache/file_lock.h
new file mode 100644
index 00000000..3ebed76d
--- /dev/null
+++ b/net/disk_cache/file_lock.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_FILE_LOCK_H__
+#define NET_DISK_CACHE_FILE_LOCK_H__
+
+#include "net/disk_cache/disk_format.h"
+
+namespace disk_cache {
+
+// This class implements a file lock that lives on the header of a memory
+// mapped file. This is NOT a thread-related lock; it is a lock used to detect
+// corruption of the file when the process crashes in the middle of an update.
+// The lock is acquired on the constructor and released on the destructor.
+// The typical use of the class is:
+// {
+// BlockFileHeader* header = GetFileHeader();
+// FileLock lock(header);
+// header->max_entries = num_entries;
+// // At this point the destructor is going to release the lock.
+// }
+// It is important to perform Lock() and Unlock() operations in the right order,
+// because otherwise the desired effect of the "lock" will not be achieved. If
+// the operations are inlined / optimized, the "locked" operations can happen
+// outside the lock.
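+// For example, a single FileLock can cover more than one update (a sketch):
+//   FileLock lock(header);
+//   header->max_entries = num_entries;
+//   lock.Unlock();           // The first update is marked as complete.
+//   ...                      // Code that runs outside the "lock".
+//   lock.Lock();             // A second update starts; the destructor
+//                            // releases the lock at the end of the scope.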
+class FileLock {
+ public:
+ explicit FileLock(BlockFileHeader* header);
+ virtual ~FileLock() {
+ Unlock();
+ }
+ // Virtual to make sure the compiler never inlines the calls.
+ virtual void Lock();
+ virtual void Unlock();
+ private:
+ bool acquired_;
+ volatile int32* updating_;
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_FILE_LOCK_H__
diff --git a/net/disk_cache/file_posix.cc b/net/disk_cache/file_posix.cc
new file mode 100644
index 00000000..bfaad599
--- /dev/null
+++ b/net/disk_cache/file_posix.cc
@@ -0,0 +1,379 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/file.h"
+
+#include <fcntl.h>
+
+#include <set>
+
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/singleton.h"
+#include "base/waitable_event.h"
+#include "base/worker_pool.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace {
+
+class InFlightIO;
+
+// This class represents a single asynchronous IO operation while it is being
+// bounced between threads.
+class BackgroundIO : public base::RefCountedThreadSafe<BackgroundIO> {
+ public:
+ // Other than the actual parameters for the IO operation (including the
+ // |callback| that must be notified at the end), we need the controller that
+ // is keeping track of all operations. When done, we notify the controller
+  // (we do NOT invoke the callback), in the worker thread that completed the
+ // operation.
+ BackgroundIO(disk_cache::File* file, const void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback* callback,
+ InFlightIO* controller)
+ : io_completed_(true, false), callback_(callback), file_(file), buf_(buf),
+ buf_len_(buf_len), offset_(offset), controller_(controller),
+ bytes_(0) {}
+
+ // Read and Write are the operations that can be performed asynchronously.
+  // The actual parameters for the operation are set up in the constructor of
+  // the object, with the exception of |delete_buffer|, which allows a write
+ // without a callback. Both methods should be called from a worker thread, by
+ // posting a task to the WorkerPool (they are RunnableMethods). When finished,
+ // controller->OnIOComplete() is called.
+ void Read();
+ void Write(bool delete_buffer);
+
+ // This method signals the controller that this operation is finished, in the
+ // original thread (presumably the IO-Thread). In practice, this is a
+  // RunnableMethod that allows cancellation.
+ void OnIOSignalled();
+
+ // Allows the cancellation of the task to notify the controller (step number 7
+ // in the diagram below). In practice, if the controller waits for the
+  // operation to finish, it doesn't have to wait for the final task to be
+  // processed by the message loop, so calling this method prevents its
+  // delivery.
+ // Note that this method is not intended to cancel the actual IO operation or
+ // to prevent the first notification to take place (OnIOComplete).
+ void Cancel();
+
+  // Retrieves the number of bytes transferred.
+ int Result();
+
+ base::WaitableEvent* io_completed() {
+ return &io_completed_;
+ }
+
+ disk_cache::FileIOCallback* callback() {
+ return callback_;
+ }
+
+ disk_cache::File* file() {
+ return file_;
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<BackgroundIO>;
+ ~BackgroundIO() {}
+
+ // An event to signal when the operation completes, and the user callback that
+ // has to be invoked. These members are accessed directly by the controller.
+ base::WaitableEvent io_completed_;
+ disk_cache::FileIOCallback* callback_;
+
+ disk_cache::File* file_;
+ const void* buf_;
+ size_t buf_len_;
+ size_t offset_;
+ InFlightIO* controller_; // The controller that tracks all operations.
+ int bytes_; // Final operation result.
+
+ DISALLOW_COPY_AND_ASSIGN(BackgroundIO);
+};
+
+// This class keeps track of every asynchronous IO operation. A single instance
+// of this class is meant to be used to start an asynchronous operation (using
+// PostRead/PostWrite). This class will post the operation to a worker thread,
+// handle the notification when the operation finishes, and perform the
+// callback on the same thread that was used to start the operation.
+//
+// The regular sequence of calls is:
+// Thread_1 Worker_thread
+// 1. InFlightIO::PostRead()
+// 2. -> PostTask ->
+// 3. BackgroundIO::Read()
+// 4. IO operation completes
+// 5. InFlightIO::OnIOComplete()
+// 6. <- PostTask <-
+// 7. BackgroundIO::OnIOSignalled()
+// 8. InFlightIO::InvokeCallback()
+// 9. invoke callback
+//
+// Shutdown is a special case that is handled through WaitForPendingIO() instead
+// of just waiting for step 7.
+class InFlightIO {
+ public:
+ InFlightIO() : callback_thread_(MessageLoop::current()) {}
+ ~InFlightIO() {}
+
+ // These methods start an asynchronous operation. The arguments have the same
+  // semantics as the File asynchronous operations, with the exception that the
+ // operation never finishes synchronously.
+ void PostRead(disk_cache::File* file, void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback* callback);
+ void PostWrite(disk_cache::File* file, const void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback* callback,
+ bool delete_buffer);
+
+ // Blocks the current thread until all IO operations tracked by this object
+ // complete.
+ void WaitForPendingIO();
+
+ // Called on a worker thread when |operation| completes.
+ void OnIOComplete(BackgroundIO* operation);
+
+  // Invokes the user's completion callback at the end of the IO operation.
+ // |cancel_task| is true if the actual task posted to the thread is still
+ // queued (because we are inside WaitForPendingIO), and false if said task is
+ // the one performing the call.
+ void InvokeCallback(BackgroundIO* operation, bool cancel_task);
+
+ private:
+ typedef std::set<scoped_refptr<BackgroundIO> > IOList;
+
+ IOList io_list_; // List of pending io operations.
+ MessageLoop* callback_thread_;
+};
+
+// ---------------------------------------------------------------------------
+
+// Runs on a worker thread.
+void BackgroundIO::Read() {
+ if (file_->Read(const_cast<void*>(buf_), buf_len_, offset_)) {
+ bytes_ = static_cast<int>(buf_len_);
+ } else {
+ bytes_ = -1;
+ }
+ controller_->OnIOComplete(this);
+}
+
+int BackgroundIO::Result() {
+ return bytes_;
+}
+
+void BackgroundIO::Cancel() {
+ DCHECK(controller_);
+ controller_ = NULL;
+}
+
+// Runs on a worker thread.
+void BackgroundIO::Write(bool delete_buffer) {
+ bool rv = file_->Write(buf_, buf_len_, offset_);
+ if (delete_buffer) {
+ // TODO(rvargas): remove or update this code.
+ delete[] reinterpret_cast<const char*>(buf_);
+ }
+
+ bytes_ = rv ? static_cast<int>(buf_len_) : -1;
+ controller_->OnIOComplete(this);
+}
+
+// Runs on the IO thread.
+void BackgroundIO::OnIOSignalled() {
+ if (controller_)
+ controller_->InvokeCallback(this, false);
+}
+
+// ---------------------------------------------------------------------------
+
+void InFlightIO::PostRead(disk_cache::File *file, void* buf, size_t buf_len,
+ size_t offset, disk_cache::FileIOCallback *callback) {
+ scoped_refptr<BackgroundIO> operation =
+ new BackgroundIO(file, buf, buf_len, offset, callback, this);
+ io_list_.insert(operation.get());
+ file->AddRef(); // Balanced on InvokeCallback()
+
+ WorkerPool::PostTask(FROM_HERE,
+ NewRunnableMethod(operation.get(), &BackgroundIO::Read),
+ true);
+}
+
+void InFlightIO::PostWrite(disk_cache::File* file, const void* buf,
+ size_t buf_len, size_t offset,
+ disk_cache::FileIOCallback* callback,
+ bool delete_buffer) {
+ scoped_refptr<BackgroundIO> operation =
+ new BackgroundIO(file, buf, buf_len, offset, callback, this);
+ io_list_.insert(operation.get());
+ file->AddRef(); // Balanced on InvokeCallback()
+
+ WorkerPool::PostTask(FROM_HERE,
+ NewRunnableMethod(operation.get(), &BackgroundIO::Write,
+ delete_buffer),
+ true);
+}
+
+void InFlightIO::WaitForPendingIO() {
+ while (!io_list_.empty()) {
+ // Block the current thread until all pending IO completes.
+ IOList::iterator it = io_list_.begin();
+ InvokeCallback(*it, true);
+ }
+}
+
+// Runs on a worker thread.
+void InFlightIO::OnIOComplete(BackgroundIO* operation) {
+ callback_thread_->PostTask(FROM_HERE,
+ NewRunnableMethod(operation,
+ &BackgroundIO::OnIOSignalled));
+ operation->io_completed()->Signal();
+}
+
+// Runs on the IO thread.
+void InFlightIO::InvokeCallback(BackgroundIO* operation, bool cancel_task) {
+ operation->io_completed()->Wait();
+
+ if (cancel_task)
+ operation->Cancel();
+
+ disk_cache::FileIOCallback* callback = operation->callback();
+ int bytes = operation->Result();
+
+ // Release the references acquired in PostRead / PostWrite.
+ operation->file()->Release();
+ io_list_.erase(operation);
+ callback->OnFileIOComplete(bytes);
+}
+
+} // namespace
+
+namespace disk_cache {
+
+File::File(base::PlatformFile file)
+ : init_(true), mixed_(true), platform_file_(file) {
+}
+
+bool File::Init(const FilePath& name) {
+ if (init_)
+ return false;
+
+ int flags = base::PLATFORM_FILE_OPEN |
+ base::PLATFORM_FILE_READ |
+ base::PLATFORM_FILE_WRITE;
+ platform_file_ = base::CreatePlatformFile(name, flags, NULL);
+ if (platform_file_ < 0) {
+ platform_file_ = 0;
+ return false;
+ }
+
+ init_ = true;
+ return true;
+}
+
+File::~File() {
+ if (platform_file_)
+ close(platform_file_);
+}
+
+base::PlatformFile File::platform_file() const {
+ return platform_file_;
+}
+
+bool File::IsValid() const {
+ if (!init_)
+ return false;
+ return (base::kInvalidPlatformFileValue != platform_file_);
+}
+
+bool File::Read(void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > LONG_MAX)
+ return false;
+
+ int ret = pread(platform_file_, buffer, buffer_len, offset);
+ return (static_cast<size_t>(ret) == buffer_len);
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ int ret = pwrite(platform_file_, buffer, buffer_len, offset);
+ return (static_cast<size_t>(ret) == buffer_len);
+}
+
+// We have to increase the ref counter of the file before performing the IO, to
+// prevent the completion from happening with an invalid handle (if the file is
+// closed while the IO is in flight).
+bool File::Read(void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Read(buffer, buffer_len, offset);
+ }
+
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ InFlightIO* io_operations = Singleton<InFlightIO>::get();
+ io_operations->PostRead(this, buffer, buffer_len, offset, callback);
+
+ *completed = false;
+ return true;
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Write(buffer, buffer_len, offset);
+ }
+
+ return AsyncWrite(buffer, buffer_len, offset, true, callback, completed);
+}
+
+bool File::PostWrite(const void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ return AsyncWrite(buffer, buffer_len, offset, false, NULL, NULL);
+}
+
+bool File::AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+ bool notify, FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ InFlightIO* io_operations = Singleton<InFlightIO>::get();
+ io_operations->PostWrite(this, buffer, buffer_len, offset, callback, !notify);
+
+ if (completed)
+ *completed = false;
+ return true;
+}
+
+bool File::SetLength(size_t length) {
+ DCHECK(init_);
+ if (length > ULONG_MAX)
+ return false;
+
+ return 0 == ftruncate(platform_file_, length);
+}
+
+size_t File::GetLength() {
+ DCHECK(init_);
+ size_t ret = lseek(platform_file_, 0, SEEK_END);
+ return ret;
+}
+
+// Static.
+void File::WaitForPendingIO(int* num_pending_io) {
+ if (*num_pending_io)
+ Singleton<InFlightIO>::get()->WaitForPendingIO();
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/file_win.cc b/net/disk_cache/file_win.cc
new file mode 100644
index 00000000..2b1f20bd
--- /dev/null
+++ b/net/disk_cache/file_win.cc
@@ -0,0 +1,287 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/file.h"
+
+#include "base/file_path.h"
+#include "base/message_loop.h"
+#include "base/singleton.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace {
+
+// Structure used for asynchronous operations.
+struct MyOverlapped {
+ MyOverlapped(disk_cache::File* file, size_t offset,
+ disk_cache::FileIOCallback* callback);
+ ~MyOverlapped();
+ OVERLAPPED* overlapped() {
+ return &context_.overlapped;
+ }
+
+ MessageLoopForIO::IOContext context_;
+ scoped_refptr<disk_cache::File> file_;
+ disk_cache::FileIOCallback* callback_;
+ const void* buffer_;
+ bool delete_buffer_; // Delete the user buffer at completion.
+};
+
+COMPILE_ASSERT(!offsetof(MyOverlapped, context_), starts_with_overlapped);
+
+// Helper class to handle the IO completion notifications from the message loop.
+class CompletionHandler : public MessageLoopForIO::IOHandler {
+ virtual void OnIOCompleted(MessageLoopForIO::IOContext* context,
+ DWORD actual_bytes, DWORD error);
+};
+
+void CompletionHandler::OnIOCompleted(MessageLoopForIO::IOContext* context,
+ DWORD actual_bytes, DWORD error) {
+ MyOverlapped* data = reinterpret_cast<MyOverlapped*>(context);
+
+ if (error) {
+ DCHECK(!actual_bytes);
+ actual_bytes = static_cast<DWORD>(-1);
+ NOTREACHED();
+ }
+
+ if (data->callback_)
+ data->callback_->OnFileIOComplete(static_cast<int>(actual_bytes));
+
+ delete data;
+}
+
+MyOverlapped::MyOverlapped(disk_cache::File* file, size_t offset,
+ disk_cache::FileIOCallback* callback) {
+ memset(this, 0, sizeof(*this));
+ context_.handler = Singleton<CompletionHandler>::get();
+ context_.overlapped.Offset = static_cast<DWORD>(offset);
+ file_ = file;
+ callback_ = callback;
+}
+
+MyOverlapped::~MyOverlapped() {
+ if (delete_buffer_) {
+ DCHECK(!callback_);
+ // This whole thing could be updated to use IOBuffer, but PostWrite is not
+ // used at the moment. TODO(rvargas): remove or update this code.
+ delete[] reinterpret_cast<const char*>(buffer_);
+ }
+}
+
+} // namespace
+
+namespace disk_cache {
+
+File::File(base::PlatformFile file)
+ : init_(true), mixed_(true), platform_file_(INVALID_HANDLE_VALUE),
+ sync_platform_file_(file) {
+}
+
+bool File::Init(const FilePath& name) {
+ DCHECK(!init_);
+ if (init_)
+ return false;
+
+ platform_file_ = CreateFile(name.value().c_str(),
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
+ OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL);
+
+ if (INVALID_HANDLE_VALUE == platform_file_)
+ return false;
+
+ MessageLoopForIO::current()->RegisterIOHandler(
+ platform_file_, Singleton<CompletionHandler>::get());
+
+ init_ = true;
+ sync_platform_file_ = CreateFile(name.value().c_str(),
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
+ OPEN_EXISTING, 0, NULL);
+
+ if (INVALID_HANDLE_VALUE == sync_platform_file_)
+ return false;
+
+ return true;
+}
+
+File::~File() {
+ if (!init_)
+ return;
+
+ if (INVALID_HANDLE_VALUE != platform_file_)
+ CloseHandle(platform_file_);
+ if (INVALID_HANDLE_VALUE != sync_platform_file_)
+ CloseHandle(sync_platform_file_);
+}
+
+base::PlatformFile File::platform_file() const {
+ DCHECK(init_);
+ return (INVALID_HANDLE_VALUE == platform_file_) ? sync_platform_file_ :
+ platform_file_;
+}
+
+bool File::IsValid() const {
+ if (!init_)
+ return false;
+ return (INVALID_HANDLE_VALUE != platform_file_ ||
+ INVALID_HANDLE_VALUE != sync_platform_file_);
+}
+
+bool File::Read(void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > LONG_MAX)
+ return false;
+
+ DWORD ret = SetFilePointer(sync_platform_file_, static_cast<LONG>(offset),
+ NULL, FILE_BEGIN);
+ if (INVALID_SET_FILE_POINTER == ret)
+ return false;
+
+ DWORD actual;
+ DWORD size = static_cast<DWORD>(buffer_len);
+ if (!ReadFile(sync_platform_file_, buffer, size, &actual, NULL))
+ return false;
+ return actual == size;
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ DWORD ret = SetFilePointer(sync_platform_file_, static_cast<LONG>(offset),
+ NULL, FILE_BEGIN);
+ if (INVALID_SET_FILE_POINTER == ret)
+ return false;
+
+ DWORD actual;
+ DWORD size = static_cast<DWORD>(buffer_len);
+ if (!WriteFile(sync_platform_file_, buffer, size, &actual, NULL))
+ return false;
+ return actual == size;
+}
+
+// We have to increase the ref counter of the file before performing the IO to
+// prevent the completion from happening with an invalid handle (if the file
+// is closed while the IO is in flight).
+bool File::Read(void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Read(buffer, buffer_len, offset);
+ }
+
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ MyOverlapped* data = new MyOverlapped(this, offset, callback);
+ DWORD size = static_cast<DWORD>(buffer_len);
+
+ DWORD actual;
+ if (!ReadFile(platform_file_, buffer, size, &actual, data->overlapped())) {
+ *completed = false;
+ if (GetLastError() == ERROR_IO_PENDING)
+ return true;
+ delete data;
+ return false;
+ }
+
+ // The operation completed already. We'll be called back anyway.
+ *completed = (actual == size);
+ DCHECK(actual == size);
+ data->callback_ = NULL;
+ data->file_ = NULL; // There is no reason to hold on to this anymore.
+ return *completed;
+}
+
+bool File::Write(const void* buffer, size_t buffer_len, size_t offset,
+ FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (!callback) {
+ if (completed)
+ *completed = true;
+ return Write(buffer, buffer_len, offset);
+ }
+
+ return AsyncWrite(buffer, buffer_len, offset, true, callback, completed);
+}
+
+bool File::PostWrite(const void* buffer, size_t buffer_len, size_t offset) {
+ DCHECK(init_);
+ return AsyncWrite(buffer, buffer_len, offset, false, NULL, NULL);
+}
+
+bool File::AsyncWrite(const void* buffer, size_t buffer_len, size_t offset,
+ bool notify, FileIOCallback* callback, bool* completed) {
+ DCHECK(init_);
+ if (buffer_len > ULONG_MAX || offset > ULONG_MAX)
+ return false;
+
+ MyOverlapped* data = new MyOverlapped(this, offset, callback);
+ bool dummy_completed;
+ if (!callback) {
+ DCHECK(!notify);
+ data->delete_buffer_ = true;
+ data->buffer_ = buffer;
+ completed = &dummy_completed;
+ }
+
+ DWORD size = static_cast<DWORD>(buffer_len);
+
+ DWORD actual;
+ if (!WriteFile(platform_file_, buffer, size, &actual, data->overlapped())) {
+ *completed = false;
+ if (GetLastError() == ERROR_IO_PENDING)
+ return true;
+ delete data;
+ return false;
+ }
+
+ // The operation completed already. We'll be called back anyway.
+ *completed = (actual == size);
+ DCHECK(actual == size);
+ data->callback_ = NULL;
+ data->file_ = NULL; // There is no reason to hold on to this anymore.
+ return *completed;
+}
+
+bool File::SetLength(size_t length) {
+ DCHECK(init_);
+ if (length > ULONG_MAX)
+ return false;
+
+ DWORD size = static_cast<DWORD>(length);
+ HANDLE file = platform_file();
+ if (INVALID_SET_FILE_POINTER == SetFilePointer(file, size, NULL, FILE_BEGIN))
+ return false;
+
+ return TRUE == SetEndOfFile(file);
+}
+
+size_t File::GetLength() {
+ DCHECK(init_);
+ LARGE_INTEGER size;
+ HANDLE file = platform_file();
+ if (!GetFileSizeEx(file, &size))
+ return 0;
+ if (size.HighPart)
+ return ULONG_MAX;
+
+ return static_cast<size_t>(size.LowPart);
+}
+
+// Static.
+void File::WaitForPendingIO(int* num_pending_io) {
+ while (*num_pending_io) {
+ // Asynchronous IO operations may be in flight and the completion may end
+ // up calling us back so let's wait for them.
+ MessageLoopForIO::IOHandler* handler = Singleton<CompletionHandler>::get();
+ MessageLoopForIO::current()->WaitForIOCompletion(100, handler);
+ }
+}
+
+} // namespace disk_cache
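A minimal sketch of how a caller can drive the asynchronous Read() path implemented above. The File::Read() overload and the FileIOCallback interface are the ones declared in net/disk_cache/file.h; the callback class, buffer, and offset here are illustrative only, and a MessageLoopForIO is assumed to be running on the calling thread so the completion handler can fire.

  // Illustrative only: issue one overlapped read and log its completion.
  class ReadLogger : public disk_cache::FileIOCallback {
   public:
    virtual void OnFileIOComplete(int bytes_copied) {
      LOG(INFO) << "async read finished, bytes: " << bytes_copied;
    }
  };

  void SketchAsyncRead(disk_cache::File* file) {
    static char buffer[4096];  // Must outlive the pending operation.
    static ReadLogger logger;  // So must the callback object.
    bool completed = false;
    // Returns true if the read was queued (or finished synchronously); the
    // completion handler in this file invokes OnFileIOComplete() later.
    if (!file->Read(buffer, sizeof(buffer), 0, &logger, &completed))
      LOG(ERROR) << "read failed";
  }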
diff --git a/net/disk_cache/hash.cc b/net/disk_cache/hash.cc
new file mode 100644
index 00000000..1e83913c
--- /dev/null
+++ b/net/disk_cache/hash.cc
@@ -0,0 +1,67 @@
+// From http://www.azillionmonkeys.com/qed/hash.html
+
+#include "net/disk_cache/hash.h"
+
+typedef uint32 uint32_t;
+typedef uint16 uint16_t;
+
+namespace disk_cache {
+
+#undef get16bits
+#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
+ || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
+#define get16bits(d) (*((const uint16_t *) (d)))
+#endif
+
+#if !defined (get16bits)
+#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
+ +(uint32_t)(((const uint8_t *)(d))[0]) )
+#endif
+
+uint32 SuperFastHash(const char * data, int len) {
+ uint32_t hash = len, tmp;
+ int rem;
+
+ if (len <= 0 || data == NULL)
+ return 0;
+
+ rem = len & 3;
+ len >>= 2;
+
+ /* Main loop */
+ for (;len > 0; len--) {
+ hash += get16bits(data);
+ tmp = (get16bits(data + 2) << 11) ^ hash;
+ hash = (hash << 16) ^ tmp;
+ data += 2 * sizeof(uint16_t);
+ hash += hash >> 11;
+ }
+
+ /* Handle end cases */
+ switch (rem) {
+ case 3: hash += get16bits(data);
+ hash ^= hash << 16;
+ hash ^= data[sizeof(uint16_t)] << 18;
+ hash += hash >> 11;
+ break;
+ case 2: hash += get16bits(data);
+ hash ^= hash << 11;
+ hash += hash >> 17;
+ break;
+ case 1: hash += *data;
+ hash ^= hash << 10;
+ hash += hash >> 1;
+ }
+
+ /* Force "avalanching" of final 127 bits */
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+
+ return hash;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/hash.h b/net/disk_cache/hash.h
new file mode 100644
index 00000000..bf35c268
--- /dev/null
+++ b/net/disk_cache/hash.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_HASH_H__
+#define NET_DISK_CACHE_HASH_H__
+
+#include <string>
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+// From http://www.azillionmonkeys.com/qed/hash.html
+// This is the hash used on WebCore/platform/stringhash
+uint32 SuperFastHash(const char * data, int len);
+
+inline uint32 Hash(const char* key, size_t length) {
+ return SuperFastHash(key, static_cast<int>(length));
+}
+
+inline uint32 Hash(const std::string& key) {
+ if (key.empty())
+ return 0;
+ return SuperFastHash(key.data(), static_cast<int>(key.size()));
+}
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_HASH_H__
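As a quick usage sketch (hypothetical call site): the backend hashes entry keys, typically URLs, and masks the result to pick a bucket in its index table, which is assumed here to have a power-of-two size.

  #include "net/disk_cache/hash.h"

  // Illustrative only: map a cache key to an index-table bucket.
  uint32 BucketFor(const std::string& key, uint32 table_size) {
    // table_size is assumed to be a power of two.
    return disk_cache::Hash(key) & (table_size - 1);
  }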
diff --git a/net/disk_cache/histogram_macros.h b/net/disk_cache/histogram_macros.h
new file mode 100644
index 00000000..17cd345a
--- /dev/null
+++ b/net/disk_cache/histogram_macros.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains macros to simplify histogram reporting from the disk
+// cache. The main issue is that we want to have separate histograms for each
+// type of cache (regular vs. media, etc), without adding the complexity of
+// keeping track of a potentially large number of histogram objects that have to
+// survive the backend object that created them.
+
+#ifndef NET_DISK_CACHE_HISTOGRAM_MACROS_H_
+#define NET_DISK_CACHE_HISTOGRAM_MACROS_H_
+
+// HISTOGRAM_HOURS will collect time related data with a granularity of hours
+// and normal values of a few months.
+#define UMA_HISTOGRAM_HOURS UMA_HISTOGRAM_COUNTS_10000
+
+// HISTOGRAM_AGE will collect time elapsed since |initial_time|, with a
+// granularity of hours and normal values of a few months.
+#define UMA_HISTOGRAM_AGE(name, initial_time)\
+ UMA_HISTOGRAM_COUNTS_10000(name, (Time::Now() - initial_time).InHours())
+
+// HISTOGRAM_AGE_MS will collect time elapsed since |initial_time|, with the
+// normal resolution of the UMA_HISTOGRAM_TIMES.
+#define UMA_HISTOGRAM_AGE_MS(name, initial_time)\
+ UMA_HISTOGRAM_TIMES(name, Time::Now() - initial_time)
+
+#define UMA_HISTOGRAM_CACHE_ERROR(name, sample) \
+ UMA_HISTOGRAM_ENUMERATION(name, sample, 50)
+
+#ifdef NET_DISK_CACHE_BACKEND_IMPL_CC_
+#define BACKEND_OBJ this
+#else
+#define BACKEND_OBJ backend_
+#endif
+
+// Generates a UMA histogram of the given type, generating the proper name for
+// it (asking backend_->HistogramName), and adding the provided sample.
+// For example, to generate a regular UMA_HISTOGRAM_COUNTS, this macro would
+// be used as:
+// CACHE_UMA(COUNTS, "MyName", 0, 20);
+// CACHE_UMA(COUNTS, "MyExperiment", 530, 55);
+// which roughly translates to:
+// UMA_HISTOGRAM_COUNTS("DiskCache.2.MyName", 20); // "2" is the CacheType.
+// UMA_HISTOGRAM_COUNTS("DiskCache.2.MyExperiment_530", 55);
+//
+#define CACHE_UMA(type, name, experiment, sample) {\
+ const std::string my_name = BACKEND_OBJ->HistogramName(name, experiment);\
+ switch (BACKEND_OBJ->cache_type()) {\
+ case net::DISK_CACHE:\
+ UMA_HISTOGRAM_##type(my_name.data(), sample);\
+ break;\
+ case net::MEDIA_CACHE:\
+ UMA_HISTOGRAM_##type(my_name.data(), sample);\
+ break;\
+ default:\
+ NOTREACHED();\
+ break;\
+ }\
+ }
+
+#endif // NET_DISK_CACHE_HISTOGRAM_MACROS_H_
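For reference, a sketch of a call site, mirroring the CACHE_UMA(AGE_MS, ...) usage that appears in rankings.cc later in this change; the method name is made up, and BACKEND_OBJ must resolve to a valid backend pointer at the point of expansion.

  // Illustrative only: inside a backend (or rankings) method.
  void ReportLookupTime(base::Time start) {
    // Expands to UMA_HISTOGRAM_AGE_MS("DiskCache.<type>.MyLookup", start).
    CACHE_UMA(AGE_MS, "MyLookup", 0, start);
  }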
diff --git a/net/disk_cache/mapped_file.h b/net/disk_cache/mapped_file.h
new file mode 100644
index 00000000..5b341412
--- /dev/null
+++ b/net/disk_cache/mapped_file.h
@@ -0,0 +1,56 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_MAPPED_FILE_H_
+#define NET_DISK_CACHE_MAPPED_FILE_H_
+
+#include <string>
+
+#include "net/disk_cache/disk_format.h"
+#include "net/disk_cache/file.h"
+#include "net/disk_cache/file_block.h"
+
+class FilePath;
+
+namespace disk_cache {
+
+// This class implements a memory mapped file used to access block-files. The
+// idea is that the header and bitmap will be memory mapped all the time, and
+// the actual data for the blocks will be accessed asynchronously (most of the
+// time).
+class MappedFile : public File {
+ public:
+ MappedFile() : File(true), init_(false) {}
+
+ // Performs object initialization. name is the file to use, and size is the
+  // amount of data to memory map from the file. If size is 0, the whole file
+ // will be mapped in memory.
+ void* Init(const FilePath& name, size_t size);
+
+ void* buffer() const {
+ return buffer_;
+ }
+
+ // Loads or stores a given block from the backing file (synchronously).
+ bool Load(const FileBlock* block);
+ bool Store(const FileBlock* block);
+
+ private:
+ virtual ~MappedFile();
+
+ bool init_;
+#if defined(OS_WIN)
+ HANDLE section_;
+#endif
+ void* buffer_; // Address of the memory mapped buffer.
+ size_t view_size_; // Size of the memory pointed by buffer_.
+
+ DISALLOW_COPY_AND_ASSIGN(MappedFile);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MAPPED_FILE_H_
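A hedged usage sketch of the interface above; block_files.cc is the real consumer, and the file name, mapped size, and |cache_path| parameter below are hypothetical.

  // Illustrative only: map the first 8 KB of a block-file and peek at it.
  void SketchMapHeader(const FilePath& cache_path) {
    scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
    void* header = file->Init(cache_path.AppendASCII("data_0"), 8192);
    if (!header)
      return;
    // The first 8 KB are directly addressable through the returned pointer;
    // data past the mapped view is reached with Load()/Store() on FileBlocks.
    uint32 first_word = *static_cast<uint32*>(header);
    LOG(INFO) << "first word of mapped region: " << first_word;
  }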
diff --git a/net/disk_cache/mapped_file_posix.cc b/net/disk_cache/mapped_file_posix.cc
new file mode 100644
index 00000000..f9a361b7
--- /dev/null
+++ b/net/disk_cache/mapped_file_posix.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mapped_file.h"
+
+#include <errno.h>
+#include <sys/mman.h>
+
+#include "base/file_path.h"
+#include "base/logging.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+void* MappedFile::Init(const FilePath& name, size_t size) {
+ DCHECK(!init_);
+ if (init_ || !File::Init(name))
+ return NULL;
+
+ if (!size)
+ size = GetLength();
+
+ buffer_ = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ platform_file(), 0);
+ init_ = true;
+ DCHECK(reinterpret_cast<intptr_t>(buffer_) != -1);
+ if (reinterpret_cast<intptr_t>(buffer_) == -1)
+ buffer_ = 0;
+
+ view_size_ = size;
+ return buffer_;
+}
+
+MappedFile::~MappedFile() {
+ if (!init_)
+ return;
+
+ if (buffer_) {
+ int ret = munmap(buffer_, view_size_);
+ DCHECK(0 == ret);
+ }
+}
+
+bool MappedFile::Load(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Read(block->buffer(), block->size(), offset);
+}
+
+bool MappedFile::Store(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Write(block->buffer(), block->size(), offset);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/mapped_file_unittest.cc b/net/disk_cache/mapped_file_unittest.cc
new file mode 100644
index 00000000..ea28f32c
--- /dev/null
+++ b/net/disk_cache/mapped_file_unittest.cc
@@ -0,0 +1,121 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/file_util.h"
+#include "base/string_util.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "net/disk_cache/mapped_file.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+int g_cache_tests_max_id;
+volatile int g_cache_tests_received;
+volatile bool g_cache_tests_error;
+
+// Implementation of FileIOCallback for the tests.
+class FileCallbackTest: public disk_cache::FileIOCallback {
+ public:
+ explicit FileCallbackTest(int id) : id_(id), reuse_(0) {}
+ explicit FileCallbackTest(int id, bool reuse)
+      : id_(id), reuse_(reuse ? 0 : 1) {}
+ ~FileCallbackTest() {}
+
+ virtual void OnFileIOComplete(int bytes_copied);
+ private:
+ int id_;
+ int reuse_;
+};
+
+void FileCallbackTest::OnFileIOComplete(int bytes_copied) {
+ if (id_ > g_cache_tests_max_id) {
+ NOTREACHED();
+ g_cache_tests_error = true;
+ } else if (reuse_) {
+ DCHECK(1 == reuse_);
+ if (2 == reuse_)
+ g_cache_tests_error = true;
+ reuse_++;
+ }
+
+ g_cache_tests_received++;
+}
+
+// Wait up to 2 secs without callbacks, or until we receive expected callbacks.
+void WaitForCallbacks(int expected) {
+ if (!expected)
+ return;
+
+#if defined(OS_WIN)
+ int iterations = 0;
+ int last = 0;
+ while (iterations < 40) {
+ SleepEx(50, TRUE);
+ if (expected == g_cache_tests_received)
+ return;
+    if (last == g_cache_tests_received) {
+      iterations++;
+    } else {
+      iterations = 0;
+      last = g_cache_tests_received;
+    }
+ }
+#elif defined(OS_POSIX)
+ // TODO(rvargas): Do something when async IO is implemented.
+#endif
+}
+
+} // namespace
+
+TEST_F(DiskCacheTest, MappedFile_SyncIO) {
+ FilePath filename = GetCacheFilePath().AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ char buffer1[20];
+ char buffer2[20];
+ CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+ base::strlcpy(buffer1, "the data", arraysize(buffer1));
+ EXPECT_TRUE(file->Write(buffer1, sizeof(buffer1), 8192));
+ EXPECT_TRUE(file->Read(buffer2, sizeof(buffer2), 8192));
+ EXPECT_STREQ(buffer1, buffer2);
+}
+
+TEST_F(DiskCacheTest, MappedFile_AsyncIO) {
+ FilePath filename = GetCacheFilePath().AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ FileCallbackTest callback(1);
+ g_cache_tests_error = false;
+ g_cache_tests_max_id = 0;
+ g_cache_tests_received = 0;
+
+ MessageLoopHelper helper;
+
+ char buffer1[20];
+ char buffer2[20];
+ CacheTestFillBuffer(buffer1, sizeof(buffer1), false);
+ base::strlcpy(buffer1, "the data", arraysize(buffer1));
+ bool completed;
+ EXPECT_TRUE(file->Write(buffer1, sizeof(buffer1), 1024 * 1024, &callback,
+ &completed));
+ int expected = completed ? 0 : 1;
+
+ g_cache_tests_max_id = 1;
+ helper.WaitUntilCacheIoFinished(expected);
+
+ EXPECT_TRUE(file->Read(buffer2, sizeof(buffer2), 1024 * 1024, &callback,
+ &completed));
+ if (!completed)
+ expected++;
+
+ helper.WaitUntilCacheIoFinished(expected);
+
+ EXPECT_EQ(expected, g_cache_tests_received);
+ EXPECT_FALSE(g_cache_tests_error);
+ EXPECT_STREQ(buffer1, buffer2);
+}
diff --git a/net/disk_cache/mapped_file_win.cc b/net/disk_cache/mapped_file_win.cc
new file mode 100644
index 00000000..69c13a44
--- /dev/null
+++ b/net/disk_cache/mapped_file_win.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mapped_file.h"
+
+#include "base/file_path.h"
+#include "base/logging.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace disk_cache {
+
+void* MappedFile::Init(const FilePath& name, size_t size) {
+ DCHECK(!init_);
+ if (init_ || !File::Init(name))
+ return NULL;
+
+ buffer_ = NULL;
+ init_ = true;
+ section_ = CreateFileMapping(platform_file(), NULL, PAGE_READWRITE, 0,
+ static_cast<DWORD>(size), NULL);
+ if (!section_)
+ return NULL;
+
+ buffer_ = MapViewOfFile(section_, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, size);
+ DCHECK(buffer_);
+ view_size_ = size;
+
+ return buffer_;
+}
+
+MappedFile::~MappedFile() {
+ if (!init_)
+ return;
+
+ if (buffer_) {
+ BOOL ret = UnmapViewOfFile(buffer_);
+ DCHECK(ret);
+ }
+
+ if (section_)
+ CloseHandle(section_);
+}
+
+bool MappedFile::Load(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Read(block->buffer(), block->size(), offset);
+}
+
+bool MappedFile::Store(const FileBlock* block) {
+ size_t offset = block->offset() + view_size_;
+ return Write(block->buffer(), block->size(), offset);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/mem_backend_impl.cc b/net/disk_cache/mem_backend_impl.cc
new file mode 100644
index 00000000..875efdc9
--- /dev/null
+++ b/net/disk_cache/mem_backend_impl.cc
@@ -0,0 +1,318 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mem_backend_impl.h"
+
+#include "base/logging.h"
+#include "base/sys_info.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/cache_util.h"
+#include "net/disk_cache/mem_entry_impl.h"
+
+using base::Time;
+
+namespace {
+
+const int kDefaultCacheSize = 10 * 1024 * 1024;
+const int kCleanUpMargin = 1024 * 1024;
+
+int LowWaterAdjust(int high_water) {
+ if (high_water < kCleanUpMargin)
+ return 0;
+
+ return high_water - kCleanUpMargin;
+}
+
+} // namespace
+
+namespace disk_cache {
+
+Backend* CreateInMemoryCacheBackend(int max_bytes) {
+ MemBackendImpl* cache = new MemBackendImpl();
+ cache->SetMaxSize(max_bytes);
+ if (cache->Init())
+ return cache;
+
+ delete cache;
+ LOG(ERROR) << "Unable to create cache";
+ return NULL;
+}
+
+// ------------------------------------------------------------------------
+
+bool MemBackendImpl::Init() {
+ if (max_size_)
+ return true;
+
+ int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
+
+ if (total_memory <= 0) {
+ max_size_ = kDefaultCacheSize;
+ return true;
+ }
+
+ // We want to use up to 2% of the computer's memory, with a limit of 50 MB,
+  // reached on systems with more than 2.5 GB of RAM.
+ total_memory = total_memory * 2 / 100;
+ if (total_memory > kDefaultCacheSize * 5)
+ max_size_ = kDefaultCacheSize * 5;
+ else
+ max_size_ = static_cast<int32>(total_memory);
+
+ return true;
+}
+
+MemBackendImpl::~MemBackendImpl() {
+ EntryMap::iterator it = entries_.begin();
+ while (it != entries_.end()) {
+ it->second->Doom();
+ it = entries_.begin();
+ }
+ DCHECK(!current_size_);
+}
+
+bool MemBackendImpl::SetMaxSize(int max_bytes) {
+ COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
+ if (max_bytes < 0)
+ return false;
+
+ // Zero size means use the default.
+ if (!max_bytes)
+ return true;
+
+ max_size_ = max_bytes;
+ return true;
+}
+
+int32 MemBackendImpl::GetEntryCount() const {
+ return static_cast<int32>(entries_.size());
+}
+
+bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it == entries_.end())
+ return false;
+
+ it->second->Open();
+
+ *entry = it->second;
+ return true;
+}
+
+int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback) {
+ if (OpenEntry(key, entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
+ EntryMap::iterator it = entries_.find(key);
+ if (it != entries_.end())
+ return false;
+
+ MemEntryImpl* cache_entry = new MemEntryImpl(this);
+ if (!cache_entry->CreateEntry(key)) {
+    cache_entry->InternalDoom();  // Deletes the unreferenced entry.
+ return false;
+ }
+
+ rankings_.Insert(cache_entry);
+ entries_[key] = cache_entry;
+
+ *entry = cache_entry;
+ return true;
+}
+
+int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback) {
+ if (CreateEntry(key, entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool MemBackendImpl::DoomEntry(const std::string& key) {
+ Entry* entry;
+ if (!OpenEntry(key, &entry))
+ return false;
+
+ entry->Doom();
+ entry->Close();
+ return true;
+}
+
+int MemBackendImpl::DoomEntry(const std::string& key,
+ CompletionCallback* callback) {
+ if (DoomEntry(key))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
+ // Only parent entries can be passed into this method.
+ DCHECK(entry->type() == MemEntryImpl::kParentEntry);
+
+ rankings_.Remove(entry);
+ EntryMap::iterator it = entries_.find(entry->GetKey());
+ if (it != entries_.end())
+ entries_.erase(it);
+ else
+ NOTREACHED();
+
+ entry->InternalDoom();
+}
+
+bool MemBackendImpl::DoomAllEntries() {
+ TrimCache(true);
+ return true;
+}
+
+int MemBackendImpl::DoomAllEntries(CompletionCallback* callback) {
+ if (DoomAllEntries())
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
+ const Time end_time) {
+ if (end_time.is_null())
+ return DoomEntriesSince(initial_time);
+
+ DCHECK(end_time >= initial_time);
+
+ MemEntryImpl* next = rankings_.GetNext(NULL);
+
+  // rankings_ is ordered by last-used time; this will descend through the
+  // cache, dooming items last used before |end_time|, and will stop once it
+  // reaches an item last used before |initial_time|.
+ while (next) {
+ MemEntryImpl* node = next;
+ next = rankings_.GetNext(next);
+
+ if (node->GetLastUsed() < initial_time)
+ break;
+
+ if (node->GetLastUsed() < end_time)
+ node->Doom();
+ }
+
+ return true;
+}
+
+int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ CompletionCallback* callback) {
+ if (DoomEntriesBetween(initial_time, end_time))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
+ for (;;) {
+ // Get the entry in the front.
+ Entry* entry = rankings_.GetNext(NULL);
+
+ // Break the loop when there are no more entries or the entry is too old.
+ if (!entry || entry->GetLastUsed() < initial_time)
+ return true;
+ entry->Doom();
+ }
+}
+
+int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
+ CompletionCallback* callback) {
+ if (DoomEntriesSince(initial_time))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+bool MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
+ MemEntryImpl* current = reinterpret_cast<MemEntryImpl*>(*iter);
+ MemEntryImpl* node = rankings_.GetNext(current);
+ // We should never return a child entry so iterate until we hit a parent
+ // entry.
+ while (node && node->type() != MemEntryImpl::kParentEntry) {
+ node = rankings_.GetNext(node);
+ }
+ *next_entry = node;
+ *iter = node;
+
+ if (node)
+ node->Open();
+
+ return NULL != node;
+}
+
+int MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
+ CompletionCallback* callback) {
+ if (OpenNextEntry(iter, next_entry))
+ return net::OK;
+
+ return net::ERR_FAILED;
+}
+
+void MemBackendImpl::EndEnumeration(void** iter) {
+ *iter = NULL;
+}
+
+void MemBackendImpl::TrimCache(bool empty) {
+ MemEntryImpl* next = rankings_.GetPrev(NULL);
+
+ DCHECK(next);
+
+ int target_size = empty ? 0 : LowWaterAdjust(max_size_);
+ while (current_size_ > target_size && next) {
+ MemEntryImpl* node = next;
+ next = rankings_.GetPrev(next);
+ if (!node->InUse() || empty) {
+ node->Doom();
+ }
+ }
+
+ return;
+}
+
+void MemBackendImpl::AddStorageSize(int32 bytes) {
+ current_size_ += bytes;
+ DCHECK(current_size_ >= 0);
+
+ if (current_size_ > max_size_)
+ TrimCache(false);
+}
+
+void MemBackendImpl::SubstractStorageSize(int32 bytes) {
+ current_size_ -= bytes;
+ DCHECK(current_size_ >= 0);
+}
+
+void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
+ if (old_size >= new_size)
+ SubstractStorageSize(old_size - new_size);
+ else
+ AddStorageSize(new_size - old_size);
+}
+
+void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
+ rankings_.UpdateRank(node);
+}
+
+int MemBackendImpl::MaxFileSize() const {
+ return max_size_ / 8;
+}
+
+void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
+ rankings_.Insert(entry);
+}
+
+void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
+ rankings_.Remove(entry);
+}
+
+} // namespace disk_cache
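A short usage sketch for the factory above; the key and size are made up and error handling is minimal.

  // Illustrative only: create a 1 MB in-memory cache and add one entry.
  void SketchMemCacheUse() {
    disk_cache::Backend* cache =
        disk_cache::CreateInMemoryCacheBackend(1024 * 1024);
    if (!cache)
      return;
    disk_cache::Entry* entry = NULL;
    if (cache->CreateEntry("http://example.com/favicon.ico", &entry))
      entry->Close();  // The entry stays cached until evicted or doomed.
    delete cache;      // The destructor dooms any remaining entries.
  }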
diff --git a/net/disk_cache/mem_backend_impl.h b/net/disk_cache/mem_backend_impl.h
new file mode 100644
index 00000000..48d196cc
--- /dev/null
+++ b/net/disk_cache/mem_backend_impl.h
@@ -0,0 +1,102 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface of the cache.
+
+#ifndef NET_DISK_CACHE_MEM_BACKEND_IMPL_H__
+#define NET_DISK_CACHE_MEM_BACKEND_IMPL_H__
+
+#include "base/hash_tables.h"
+
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/mem_rankings.h"
+
+namespace disk_cache {
+
+class MemEntryImpl;
+
+// This class implements the Backend interface. An object of this class handles
+// the operations of the cache without writing to disk.
+class MemBackendImpl : public Backend {
+ public:
+ MemBackendImpl() : max_size_(0), current_size_(0) {}
+ ~MemBackendImpl();
+
+ // Performs general initialization for this current instance of the cache.
+ bool Init();
+
+ // Backend interface.
+ virtual int32 GetEntryCount() const;
+ virtual bool OpenEntry(const std::string& key, Entry** entry);
+ virtual int OpenEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback);
+ virtual bool CreateEntry(const std::string& key, Entry** entry);
+ virtual int CreateEntry(const std::string& key, Entry** entry,
+ CompletionCallback* callback);
+ virtual bool DoomEntry(const std::string& key);
+ virtual int DoomEntry(const std::string& key, CompletionCallback* callback);
+ virtual bool DoomAllEntries();
+ virtual int DoomAllEntries(CompletionCallback* callback);
+ virtual bool DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time);
+ virtual int DoomEntriesBetween(const base::Time initial_time,
+ const base::Time end_time,
+ CompletionCallback* callback);
+ virtual bool DoomEntriesSince(const base::Time initial_time);
+ virtual int DoomEntriesSince(const base::Time initial_time,
+ CompletionCallback* callback);
+ virtual bool OpenNextEntry(void** iter, Entry** next_entry);
+ virtual int OpenNextEntry(void** iter, Entry** next_entry,
+ CompletionCallback* callback);
+ virtual void EndEnumeration(void** iter);
+ virtual void GetStats(
+ std::vector<std::pair<std::string, std::string> >* stats) {}
+
+ // Sets the maximum size for the total amount of data stored by this instance.
+ bool SetMaxSize(int max_bytes);
+
+ // Permanently deletes an entry.
+ void InternalDoomEntry(MemEntryImpl* entry);
+
+ // Updates the ranking information for an entry.
+ void UpdateRank(MemEntryImpl* node);
+
+ // A user data block is being created, extended or truncated.
+ void ModifyStorageSize(int32 old_size, int32 new_size);
+
+ // Returns the maximum size for a file to reside on the cache.
+ int MaxFileSize() const;
+
+  // Inserts a MemEntryImpl into the ranking list. This method is only called
+ // from MemEntryImpl to insert child entries. The reference can be removed
+ // by calling RemoveFromRankingList(|entry|).
+ void InsertIntoRankingList(MemEntryImpl* entry);
+
+  // Removes |entry| from the ranking list. This method is only called from
+ // MemEntryImpl to remove a child entry from the ranking list.
+ void RemoveFromRankingList(MemEntryImpl* entry);
+
+ private:
+ // Deletes entries from the cache until the current size is below the limit.
+ // If empty is true, the whole cache will be trimmed, regardless of being in
+ // use.
+ void TrimCache(bool empty);
+
+ // Handles the used storage count.
+ void AddStorageSize(int32 bytes);
+ void SubstractStorageSize(int32 bytes);
+
+ typedef base::hash_map<std::string, MemEntryImpl*> EntryMap;
+
+ EntryMap entries_;
+ MemRankings rankings_; // Rankings to be able to trim the cache.
+ int32 max_size_; // Maximum data size for this instance.
+ int32 current_size_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(MemBackendImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEM_BACKEND_IMPL_H__
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc
new file mode 100644
index 00000000..002b5143
--- /dev/null
+++ b/net/disk_cache/mem_entry_impl.cc
@@ -0,0 +1,487 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mem_entry_impl.h"
+
+#include "base/logging.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/mem_backend_impl.h"
+
+using base::Time;
+
+namespace {
+
+const int kSparseData = 1;
+
+// Maximum size of a sparse entry is 2 to the power of this number.
+const int kMaxSparseEntryBits = 12;
+
+// Each sparse child entry has a maximum size of 4 KB.
+const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits;
+
+// Convert global offset to child index.
+inline int ToChildIndex(int64 offset) {
+ return static_cast<int>(offset >> kMaxSparseEntryBits);
+}
+
+// Convert global offset to offset in child entry.
+inline int ToChildOffset(int64 offset) {
+ return static_cast<int>(offset & (kMaxSparseEntrySize - 1));
+}
+
+} // namespace
+
+namespace disk_cache {
+
+MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
+ doomed_ = false;
+ backend_ = backend;
+ ref_count_ = 0;
+ parent_ = NULL;
+ child_id_ = 0;
+ child_first_pos_ = 0;
+ next_ = NULL;
+ prev_ = NULL;
+ for (int i = 0; i < NUM_STREAMS; i++)
+ data_size_[i] = 0;
+}
+
+MemEntryImpl::~MemEntryImpl() {
+ for (int i = 0; i < NUM_STREAMS; i++)
+ backend_->ModifyStorageSize(data_size_[i], 0);
+ backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
+}
+
+void MemEntryImpl::Doom() {
+ if (doomed_)
+ return;
+ if (type() == kParentEntry) {
+ // Perform internal doom from the backend if this is a parent entry.
+ backend_->InternalDoomEntry(this);
+ } else {
+ // Manually detach from the backend and perform internal doom.
+ backend_->RemoveFromRankingList(this);
+ InternalDoom();
+ }
+}
+
+void MemEntryImpl::Close() {
+ // Only a parent entry can be closed.
+ DCHECK(type() == kParentEntry);
+ ref_count_--;
+ DCHECK(ref_count_ >= 0);
+ if (!ref_count_ && doomed_)
+ InternalDoom();
+}
+
+std::string MemEntryImpl::GetKey() const {
+ // A child entry doesn't have key so this method should not be called.
+ DCHECK(type() == kParentEntry);
+ return key_;
+}
+
+Time MemEntryImpl::GetLastUsed() const {
+ return last_used_;
+}
+
+Time MemEntryImpl::GetLastModified() const {
+ return last_modified_;
+}
+
+int32 MemEntryImpl::GetDataSize(int index) const {
+ if (index < 0 || index >= NUM_STREAMS)
+ return 0;
+ return data_size_[index];
+}
+
+int MemEntryImpl::ReadData(int index, int offset, net::IOBuffer* buf,
+ int buf_len, net::CompletionCallback* completion_callback) {
+ DCHECK(type() == kParentEntry || index == kSparseData);
+
+ if (index < 0 || index >= NUM_STREAMS)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int entry_size = GetDataSize(index);
+ if (offset >= entry_size || offset < 0 || !buf_len)
+ return 0;
+
+ if (buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset + buf_len > entry_size)
+ buf_len = entry_size - offset;
+
+ UpdateRank(false);
+
+  memcpy(buf->data(), &(data_[index])[offset], buf_len);
+ return buf_len;
+}
+
+int MemEntryImpl::WriteData(int index, int offset, net::IOBuffer* buf,
+ int buf_len, net::CompletionCallback* completion_callback, bool truncate) {
+ DCHECK(type() == kParentEntry || index == kSparseData);
+
+ if (index < 0 || index >= NUM_STREAMS)
+ return net::ERR_INVALID_ARGUMENT;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ int max_file_size = backend_->MaxFileSize();
+
+  // offset or buf_len could be negative numbers.
+ if (offset > max_file_size || buf_len > max_file_size ||
+ offset + buf_len > max_file_size) {
+ return net::ERR_FAILED;
+ }
+
+ // Read the size at this point.
+ int entry_size = GetDataSize(index);
+
+ PrepareTarget(index, offset, buf_len);
+
+ if (entry_size < offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ } else if (truncate) {
+ if (entry_size > offset + buf_len) {
+ backend_->ModifyStorageSize(entry_size, offset + buf_len);
+ data_size_[index] = offset + buf_len;
+ }
+ }
+
+ UpdateRank(true);
+
+ if (!buf_len)
+ return 0;
+
+ memcpy(&(data_[index])[offset], buf->data(), buf_len);
+ return buf_len;
+}
+
+int MemEntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback) {
+ DCHECK(type() == kParentEntry);
+
+ if (!InitSparseInfo())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ // We will keep using this buffer and adjust the offset in this buffer.
+ scoped_refptr<net::DrainableIOBuffer> io_buf =
+ new net::DrainableIOBuffer(buf, buf_len);
+
+ // Iterate until we have read enough.
+ while (io_buf->BytesRemaining()) {
+ MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), false);
+
+ // No child present for that offset.
+ if (!child)
+ break;
+
+ // We then need to prepare the child offset and len.
+ int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());
+
+    // If we are trying to read from a position where the child entry has no
+    // data, we should stop.
+ if (child_offset < child->child_first_pos_)
+ break;
+ int ret = child->ReadData(kSparseData, child_offset, io_buf,
+ io_buf->BytesRemaining(), NULL);
+
+ // If we encounter an error in one entry, return immediately.
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ break;
+
+ // Increment the counter by number of bytes read in the child entry.
+ io_buf->DidConsume(ret);
+ }
+
+ UpdateRank(false);
+
+ return io_buf->BytesConsumed();
+}
+
+int MemEntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback) {
+ DCHECK(type() == kParentEntry);
+
+ if (!InitSparseInfo())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ scoped_refptr<net::DrainableIOBuffer> io_buf =
+ new net::DrainableIOBuffer(buf, buf_len);
+
+ // This loop walks through child entries continuously starting from |offset|
+ // and writes blocks of data (of maximum size kMaxSparseEntrySize) into each
+ // child entry until all |buf_len| bytes are written. The write operation can
+ // start in the middle of an entry.
+ while (io_buf->BytesRemaining()) {
+ MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), true);
+ int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());
+
+ // Find the right amount to write, this evaluates the remaining bytes to
+ // write and remaining capacity of this child entry.
+ int write_len = std::min(static_cast<int>(io_buf->BytesRemaining()),
+ kMaxSparseEntrySize - child_offset);
+
+ // Keep a record of the last byte position (exclusive) in the child.
+ int data_size = child->GetDataSize(kSparseData);
+
+ // Always writes to the child entry. This operation may overwrite data
+ // previously written.
+ // TODO(hclam): if there is data in the entry and this write is not
+ // continuous we may want to discard this write.
+ int ret = child->WriteData(kSparseData, child_offset, io_buf, write_len,
+ NULL, true);
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ break;
+
+    // Keep a record of the first byte position in the child if the write was
+    // neither aligned nor continuous. This is to enable writing to the middle
+    // of an entry and still keep track of data off the aligned edge.
+ if (data_size != child_offset)
+ child->child_first_pos_ = child_offset;
+
+ // Adjust the offset in the IO buffer.
+ io_buf->DidConsume(ret);
+ }
+
+ UpdateRank(true);
+
+ return io_buf->BytesConsumed();
+}
+
+int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
+ DCHECK(type() == kParentEntry);
+ DCHECK(start);
+
+ if (!InitSparseInfo())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || len < 0 || !start)
+ return net::ERR_INVALID_ARGUMENT;
+
+ MemEntryImpl* current_child = NULL;
+
+ // Find the first child and record the number of empty bytes.
+ int empty = FindNextChild(offset, len, &current_child);
+ if (current_child) {
+ *start = offset + empty;
+ len -= empty;
+
+ // Counts the number of continuous bytes.
+ int continuous = 0;
+
+    // This loop scans for continuous bytes.
+ while (len && current_child) {
+ // Number of bytes available in this child.
+ int data_size = current_child->GetDataSize(kSparseData) -
+ ToChildOffset(*start + continuous);
+ if (data_size > len)
+ data_size = len;
+
+ // We have found more continuous bytes so increment the count. Also
+ // decrement the length we should scan.
+ continuous += data_size;
+ len -= data_size;
+
+ // If the next child is discontinuous, break the loop.
+ if (FindNextChild(*start + continuous, len, &current_child))
+ break;
+ }
+ return continuous;
+ }
+ *start = offset;
+ return 0;
+}
+
+int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+ CompletionCallback* callback) {
+ return GetAvailableRange(offset, len, start);
+}
+
+int MemEntryImpl::ReadyForSparseIO(
+ net::CompletionCallback* completion_callback) {
+ return net::OK;
+}
+
+// ------------------------------------------------------------------------
+
+bool MemEntryImpl::CreateEntry(const std::string& key) {
+ key_ = key;
+ last_modified_ = Time::Now();
+ last_used_ = Time::Now();
+ Open();
+ backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+ return true;
+}
+
+void MemEntryImpl::InternalDoom() {
+ doomed_ = true;
+ if (!ref_count_) {
+ if (type() == kParentEntry) {
+ // If this is a parent entry, we need to doom all the child entries.
+ if (children_.get()) {
+ EntryMap children;
+ children.swap(*children_);
+ for (EntryMap::iterator i = children.begin();
+ i != children.end(); ++i) {
+ // Since a pointer to this object is also saved in the map, avoid
+ // dooming it.
+ if (i->second != this)
+ i->second->Doom();
+ }
+ DCHECK(children_->size() == 0);
+ }
+ } else {
+ // If this is a child entry, detach it from the parent.
+ parent_->DetachChild(child_id_);
+ }
+ delete this;
+ }
+}
+
+void MemEntryImpl::Open() {
+ // Only a parent entry can be opened.
+ // TODO(hclam): make sure it's correct to not apply the concept of ref
+ // counting to child entry.
+ DCHECK(type() == kParentEntry);
+ ref_count_++;
+ DCHECK(ref_count_ >= 0);
+ DCHECK(!doomed_);
+}
+
+bool MemEntryImpl::InUse() {
+ if (type() == kParentEntry) {
+ return ref_count_ > 0;
+ } else {
+    // A child entry is never considered in use. The consequence is that a
+    // child entry can always be evicted while the associated parent entry is
+    // currently in use (i.e. opened).
+ return false;
+ }
+}
+
+// ------------------------------------------------------------------------
+
+void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
+ int entry_size = GetDataSize(index);
+
+ if (entry_size >= offset + buf_len)
+ return; // Not growing the stored data.
+
+ if (static_cast<int>(data_[index].size()) < offset + buf_len)
+ data_[index].resize(offset + buf_len);
+
+ if (offset <= entry_size)
+ return; // There is no "hole" on the stored data.
+
+ // Cleanup the hole not written by the user. The point is to avoid returning
+ // random stuff later on.
+ memset(&(data_[index])[entry_size], 0, offset - entry_size);
+}
+
+void MemEntryImpl::UpdateRank(bool modified) {
+ Time current = Time::Now();
+ last_used_ = current;
+
+ if (modified)
+ last_modified_ = current;
+
+ if (!doomed_)
+ backend_->UpdateRank(this);
+}
+
+bool MemEntryImpl::InitSparseInfo() {
+ DCHECK(type() == kParentEntry);
+
+ if (!children_.get()) {
+    // If there is already some data in the sparse stream but we are being
+    // initialized as a sparse entry, we should fail.
+ if (GetDataSize(kSparseData))
+ return false;
+ children_.reset(new EntryMap());
+
+ // The parent entry stores data for the first block, so save this object to
+ // index 0.
+ (*children_)[0] = this;
+ }
+ return true;
+}
+
+bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id) {
+ DCHECK(!parent_);
+ DCHECK(!child_id_);
+ parent_ = parent;
+ child_id_ = child_id;
+ last_modified_ = Time::Now();
+ last_used_ = Time::Now();
+ // Insert this to the backend's ranking list.
+ backend_->InsertIntoRankingList(this);
+ return true;
+}
+
+MemEntryImpl* MemEntryImpl::OpenChild(int64 offset, bool create) {
+ DCHECK(type() == kParentEntry);
+ int index = ToChildIndex(offset);
+ EntryMap::iterator i = children_->find(index);
+ if (i != children_->end()) {
+ return i->second;
+ } else if (create) {
+ MemEntryImpl* child = new MemEntryImpl(backend_);
+ child->InitChildEntry(this, index);
+ (*children_)[index] = child;
+ return child;
+ }
+ return NULL;
+}
+
+int MemEntryImpl::FindNextChild(int64 offset, int len, MemEntryImpl** child) {
+ DCHECK(child);
+ *child = NULL;
+ int scanned_len = 0;
+
+ // This loop tries to find the first existing child.
+ while (scanned_len < len) {
+ // This points to the current offset in the child.
+ int current_child_offset = ToChildOffset(offset + scanned_len);
+ MemEntryImpl* current_child = OpenChild(offset + scanned_len, false);
+ if (current_child) {
+ int child_first_pos = current_child->child_first_pos_;
+
+ // This points to the first byte that we should be reading from, we need
+ // to take care of the filled region and the current offset in the child.
+ int first_pos = std::max(current_child_offset, child_first_pos);
+
+ // If the first byte position we should read from doesn't exceed the
+ // filled region, we have found the first child.
+ if (first_pos < current_child->GetDataSize(kSparseData)) {
+ *child = current_child;
+
+ // We need to advance the scanned length.
+ scanned_len += first_pos - current_child_offset;
+ break;
+ }
+ }
+ scanned_len += kMaxSparseEntrySize - current_child_offset;
+ }
+ return scanned_len;
+}
+
+void MemEntryImpl::DetachChild(int child_id) {
+ children_->erase(child_id);
+}
+
+} // namespace disk_cache
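To make the child-addressing arithmetic in the helpers at the top of mem_entry_impl.cc concrete, a worked example; the offset is arbitrary, and since the helpers live in an anonymous namespace this is illustrative only.

  // With kMaxSparseEntryBits == 12, each child entry covers 4096 bytes.
  int64 offset = 10000;
  int child_index = ToChildIndex(offset);    // 10000 >> 12        == 2
  int child_offset = ToChildOffset(offset);  // 10000 & (4096 - 1) == 1808
  // So sparse byte 10000 is stored in child entry 2 at local offset 1808.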
diff --git a/net/disk_cache/mem_entry_impl.h b/net/disk_cache/mem_entry_impl.h
new file mode 100644
index 00000000..4ff7a0fb
--- /dev/null
+++ b/net/disk_cache/mem_entry_impl.h
@@ -0,0 +1,167 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_MEM_ENTRY_IMPL_H_
+#define NET_DISK_CACHE_MEM_ENTRY_IMPL_H_
+
+#include "base/hash_tables.h"
+#include "base/scoped_ptr.h"
+#include "net/disk_cache/disk_cache.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
+
+namespace disk_cache {
+
+class MemBackendImpl;
+
+// This class implements the Entry interface for the memory-only cache. An
+// object of this class represents a single entry on the cache. We use two
+// types of entries, parent and child to support sparse caching.
+//
+// A parent entry is non-sparse until a sparse method is invoked (i.e.
+// ReadSparseData, WriteSparseData, GetAvailableRange) when sparse information
+// is initialized. It then manages a list of child entries and delegates the
+// sparse API calls to the child entries. It creates and deletes child entries
+// and updates the list when needed.
+//
+// A child entry is used to carry partial cache content; non-sparse methods
+// like ReadData and WriteData cannot be applied to it. The lifetime of a
+// child entry is managed by the parent entry that created it, except that the
+// entry can be evicted independently. A child entry does not have a key and
+// is not registered in the backend's entry map. It is registered in the
+// backend's ranking list so that partial content can be evicted.
+//
+// A sparse entry has a fixed maximum size and can be partially filled. There
+// can only be one continuous filled region in a sparse entry, as illustrated
+// by the following example:
+// | xxx ooooo |
+// x = unfilled region
+// o = filled region
+// It is guaranteed that there is at most one unfilled region and one filled
+// region, and the unfilled region (if there is one) is always before the
+// filled region. The bookkeeping for the filled region in a sparse entry is
+// done using the variable |child_first_pos_| (inclusive).
+
+class MemEntryImpl : public Entry {
+ public:
+ enum EntryType {
+ kParentEntry,
+ kChildEntry,
+ };
+
+ explicit MemEntryImpl(MemBackendImpl* backend);
+
+ // Entry interface.
+ virtual void Doom();
+ virtual void Close();
+ virtual std::string GetKey() const;
+ virtual base::Time GetLastUsed() const;
+ virtual base::Time GetLastModified() const;
+ virtual int32 GetDataSize(int index) const;
+ virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback);
+ virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback,
+ bool truncate);
+ virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback);
+ virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback);
+ virtual int GetAvailableRange(int64 offset, int len, int64* start);
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ CompletionCallback* callback);
+ virtual void CancelSparseIO() {}
+ virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
+
+  // Performs the initialization of a MemEntryImpl that will be added to the
+  // cache.
+ bool CreateEntry(const std::string& key);
+
+ // Permanently destroys this entry.
+ void InternalDoom();
+
+ void Open();
+ bool InUse();
+
+ MemEntryImpl* next() const {
+ return next_;
+ }
+
+ MemEntryImpl* prev() const {
+ return prev_;
+ }
+
+ void set_next(MemEntryImpl* next) {
+ next_ = next;
+ }
+
+ void set_prev(MemEntryImpl* prev) {
+ prev_ = prev;
+ }
+
+ EntryType type() const {
+ return parent_ ? kChildEntry : kParentEntry;
+ }
+
+ private:
+ typedef base::hash_map<int, MemEntryImpl*> EntryMap;
+
+ enum {
+ NUM_STREAMS = 3
+ };
+
+ ~MemEntryImpl();
+
+ // Grows and cleans up the data buffer.
+ void PrepareTarget(int index, int offset, int buf_len);
+
+ // Updates ranking information.
+ void UpdateRank(bool modified);
+
+ // Initializes the children map and sparse info. This method is only called
+ // on a parent entry.
+ bool InitSparseInfo();
+
+ // Performs the initialization of a MemEntryImpl as a child entry.
+ // |parent| is the pointer to the parent entry. |child_id| is the ID of
+ // the new child.
+ bool InitChildEntry(MemEntryImpl* parent, int child_id);
+
+ // Returns an entry responsible for |offset|. The returned entry can be a
+ // child entry or this entry itself if |offset| points to the first range.
+ // If such entry does not exist and |create| is true, a new child entry is
+ // created.
+ MemEntryImpl* OpenChild(int64 offset, bool create);
+
+ // Finds the first child located within the range [|offset|, |offset + len|).
+ // Returns the number of bytes ahead of |offset| to reach the first available
+ // bytes in the entry. The first child found is output to |child|.
+ int FindNextChild(int64 offset, int len, MemEntryImpl** child);
+
+ // Removes child indexed by |child_id| from the children map.
+ void DetachChild(int child_id);
+
+ std::string key_;
+ std::vector<char> data_[NUM_STREAMS]; // User data.
+ int32 data_size_[NUM_STREAMS];
+ int ref_count_;
+
+ int child_id_; // The ID of a child entry.
+ int child_first_pos_; // The position of the first byte in a child
+ // entry.
+ MemEntryImpl* next_; // Pointers for the LRU list.
+ MemEntryImpl* prev_;
+ MemEntryImpl* parent_; // Pointer to the parent entry.
+ scoped_ptr<EntryMap> children_;
+
+ base::Time last_modified_; // LRU information.
+ base::Time last_used_;
+ MemBackendImpl* backend_; // Back pointer to the cache.
+ bool doomed_; // True if this entry was removed from the cache.
+
+ DISALLOW_EVIL_CONSTRUCTORS(MemEntryImpl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEM_ENTRY_IMPL_H_
diff --git a/net/disk_cache/mem_rankings.cc b/net/disk_cache/mem_rankings.cc
new file mode 100644
index 00000000..d5f4a653
--- /dev/null
+++ b/net/disk_cache/mem_rankings.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/mem_rankings.h"
+
+#include "base/logging.h"
+#include "net/disk_cache/mem_entry_impl.h"
+
+namespace disk_cache {
+
+MemRankings::~MemRankings() {
+ DCHECK(!head_ && !tail_);
+}
+
+void MemRankings::Insert(MemEntryImpl* node) {
+ if (head_)
+ head_->set_prev(node);
+
+ if (!tail_)
+ tail_ = node;
+
+ node->set_prev(NULL);
+ node->set_next(head_);
+ head_ = node;
+}
+
+void MemRankings::Remove(MemEntryImpl* node) {
+ MemEntryImpl* prev = node->prev();
+ MemEntryImpl* next = node->next();
+
+ if (head_ == node)
+ head_ = next;
+
+ if (tail_ == node)
+ tail_ = prev;
+
+ if (prev)
+ prev->set_next(next);
+
+ if (next)
+ next->set_prev(prev);
+
+ node->set_next(NULL);
+ node->set_prev(NULL);
+}
+
+void MemRankings::UpdateRank(MemEntryImpl* node) {
+ Remove(node);
+ Insert(node);
+}
+
+MemEntryImpl* MemRankings::GetNext(MemEntryImpl* node) {
+ if (!node)
+ return head_;
+
+ return node->next();
+}
+
+MemEntryImpl* MemRankings::GetPrev(MemEntryImpl* node) {
+ if (!node)
+ return tail_;
+
+ return node->prev();
+}
+
+} // namespace disk_cache
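A brief sketch of how this list is typically walked for eviction (mirroring MemBackendImpl::TrimCache earlier in this change): GetPrev(NULL) yields the least recently used entry at the tail, and repeated calls move toward the head. The |rankings| parameter is a hypothetical MemRankings pointer.

  // Illustrative only: visit entries from least to most recently used.
  void SketchWalkLru(disk_cache::MemRankings* rankings) {
    for (disk_cache::MemEntryImpl* node = rankings->GetPrev(NULL); node;
         node = rankings->GetPrev(node)) {
      // Eviction code checks node->InUse() and dooms stale entries here,
      // fetching the next node before dooming since Doom() unlinks it.
    }
  }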
diff --git a/net/disk_cache/mem_rankings.h b/net/disk_cache/mem_rankings.h
new file mode 100644
index 00000000..680be4c5
--- /dev/null
+++ b/net/disk_cache/mem_rankings.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_MEM_RANKINGS_H__
+#define NET_DISK_CACHE_MEM_RANKINGS_H__
+
+#include "base/basictypes.h"
+
+namespace disk_cache {
+
+class MemEntryImpl;
+
+// This class handles the ranking information for the memory-only cache.
+class MemRankings {
+ public:
+ MemRankings() : head_(NULL), tail_(NULL) {}
+ ~MemRankings();
+
+ // Inserts a given entry at the head of the queue.
+ void Insert(MemEntryImpl* node);
+
+ // Removes a given entry from the LRU list.
+ void Remove(MemEntryImpl* node);
+
+ // Moves a given entry to the head.
+ void UpdateRank(MemEntryImpl* node);
+
+ // Iterates through the list.
+ MemEntryImpl* GetNext(MemEntryImpl* node);
+ MemEntryImpl* GetPrev(MemEntryImpl* node);
+
+ private:
+ MemEntryImpl* head_;
+ MemEntryImpl* tail_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(MemRankings);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_MEM_RANKINGS_H__
diff --git a/net/disk_cache/rankings.cc b/net/disk_cache/rankings.cc
new file mode 100644
index 00000000..d2250df8
--- /dev/null
+++ b/net/disk_cache/rankings.cc
@@ -0,0 +1,804 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/rankings.h"
+
+#include "base/histogram.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/errors.h"
+#include "net/disk_cache/histogram_macros.h"
+
+using base::Time;
+
+// This is used by crash_cache.exe to generate unit test files.
+disk_cache::RankCrashes g_rankings_crash = disk_cache::NO_CRASH;
+
+namespace {
+
+enum Operation {
+ INSERT = 1,
+ REMOVE
+};
+
+// This class provides a simple lock for the LRU list of rankings. Whenever an
+// entry is to be inserted into or removed from the list, a transaction object
+// should be created to keep track of the operation. If the process crashes
+// before finishing the operation, the transaction record (stored as part of
+// the user data on the file header) can be used to finish the operation.
+class Transaction {
+ public:
+  // addr is the cache address of the node being inserted or removed. We want
+  // to avoid having the compiler optimize when to read or write from
+  // user_data because it is the basis of the crash detection. Maybe volatile
+  // is not enough for that, but it should be a good hint.
+ Transaction(volatile disk_cache::LruData* data, disk_cache::Addr addr,
+ Operation op, int list);
+ ~Transaction();
+ private:
+ volatile disk_cache::LruData* data_;
+ DISALLOW_COPY_AND_ASSIGN(Transaction);
+};
+
+Transaction::Transaction(volatile disk_cache::LruData* data,
+ disk_cache::Addr addr, Operation op, int list)
+ : data_(data) {
+ DCHECK(!data_->transaction);
+ DCHECK(addr.is_initialized());
+ data_->operation = op;
+ data_->operation_list = list;
+ data_->transaction = addr.value();
+}
+
+Transaction::~Transaction() {
+ DCHECK(data_->transaction);
+ data_->transaction = 0;
+ data_->operation = 0;
+ data_->operation_list = 0;
+}
+
+// Code locations that can generate crashes.
+enum CrashLocation {
+ ON_INSERT_1, ON_INSERT_2, ON_INSERT_3, ON_INSERT_4, ON_REMOVE_1, ON_REMOVE_2,
+ ON_REMOVE_3, ON_REMOVE_4, ON_REMOVE_5, ON_REMOVE_6, ON_REMOVE_7, ON_REMOVE_8
+};
+
+void TerminateSelf() {
+#if defined(OS_WIN)
+ // Windows does more work on _exit() than we would like, so we force exit.
+ TerminateProcess(GetCurrentProcess(), 0);
+#elif defined(OS_POSIX)
+ // On POSIX, _exit() will terminate the process with minimal cleanup,
+ // and it is cleaner than killing.
+ _exit(0);
+#endif
+}
+
+// Generates a crash on debug builds, according to the value of
+// g_rankings_crash. This is used by crash_cache.exe to generate unit-test
+// files.
+void GenerateCrash(CrashLocation location) {
+#ifndef NDEBUG
+ if (disk_cache::NO_CRASH == g_rankings_crash)
+ return;
+ switch (location) {
+ case ON_INSERT_1:
+ switch (g_rankings_crash) {
+ case disk_cache::INSERT_ONE_1:
+ case disk_cache::INSERT_LOAD_1:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_INSERT_2:
+ if (disk_cache::INSERT_EMPTY_1 == g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_INSERT_3:
+ switch (g_rankings_crash) {
+ case disk_cache::INSERT_EMPTY_2:
+ case disk_cache::INSERT_ONE_2:
+ case disk_cache::INSERT_LOAD_2:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_INSERT_4:
+ switch (g_rankings_crash) {
+ case disk_cache::INSERT_EMPTY_3:
+ case disk_cache::INSERT_ONE_3:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_REMOVE_1:
+ switch (g_rankings_crash) {
+ case disk_cache::REMOVE_ONE_1:
+ case disk_cache::REMOVE_HEAD_1:
+ case disk_cache::REMOVE_TAIL_1:
+ case disk_cache::REMOVE_LOAD_1:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_REMOVE_2:
+ if (disk_cache::REMOVE_ONE_2 == g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_3:
+ if (disk_cache::REMOVE_ONE_3 == g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_4:
+ if (disk_cache::REMOVE_HEAD_2 == g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_5:
+ if (disk_cache::REMOVE_TAIL_2 == g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_6:
+ if (disk_cache::REMOVE_TAIL_3 == g_rankings_crash)
+ TerminateSelf();
+ break;
+ case ON_REMOVE_7:
+ switch (g_rankings_crash) {
+ case disk_cache::REMOVE_ONE_4:
+ case disk_cache::REMOVE_LOAD_2:
+ case disk_cache::REMOVE_HEAD_3:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ case ON_REMOVE_8:
+ switch (g_rankings_crash) {
+ case disk_cache::REMOVE_HEAD_4:
+ case disk_cache::REMOVE_LOAD_3:
+ TerminateSelf();
+ default:
+ break;
+ }
+ break;
+ default:
+ NOTREACHED();
+ return;
+ }
+#endif // NDEBUG
+}
+
+} // namespace
+
+namespace disk_cache {
+
+bool Rankings::Init(BackendImpl* backend, bool count_lists) {
+ DCHECK(!init_);
+ if (init_)
+ return false;
+
+ backend_ = backend;
+ control_data_ = backend_->GetLruData();
+ count_lists_ = count_lists;
+
+ ReadHeads();
+ ReadTails();
+
+ if (control_data_->transaction)
+ CompleteTransaction();
+
+ init_ = true;
+ return true;
+}
+
+void Rankings::Reset() {
+ init_ = false;
+ for (int i = 0; i < LAST_ELEMENT; i++) {
+ heads_[i].set_value(0);
+ tails_[i].set_value(0);
+ }
+ control_data_ = NULL;
+}
+
+bool Rankings::GetRanking(CacheRankingsBlock* rankings) {
+ if (!rankings->address().is_initialized())
+ return false;
+
+ Time start = Time::Now();
+ if (!rankings->Load())
+ return false;
+
+ if (!SanityCheck(rankings, true)) {
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+ }
+
+ backend_->OnEvent(Stats::OPEN_RANKINGS);
+
+ // "dummy" is the old "pointer" value, so it has to be 0.
+ if (!rankings->Data()->dirty && !rankings->Data()->dummy)
+ return true;
+
+ EntryImpl* entry = backend_->GetOpenEntry(rankings);
+ if (backend_->GetCurrentEntryId() != rankings->Data()->dirty || !entry) {
+ // We cannot trust this entry, but we cannot initiate a cleanup from this
+ // point (we may be in the middle of a cleanup already). Just get rid of
+ // the invalid pointer and continue; the entry will be deleted when detected
+ // from a regular open/create path.
+ rankings->Data()->dummy = 0;
+ rankings->Data()->dirty = backend_->GetCurrentEntryId() - 1;
+ if (!rankings->Data()->dirty)
+ rankings->Data()->dirty--;
+ return true;
+ }
+
+ // Note that we should not leave this module without deleting rankings first.
+ rankings->SetData(entry->rankings()->Data());
+
+ CACHE_UMA(AGE_MS, "GetRankings", 0, start);
+ return true;
+}
+
+void Rankings::ConvertToLongLived(CacheRankingsBlock* rankings) {
+ if (rankings->own_data())
+ return;
+
+ // We cannot return a shared node because we are not keeping a reference
+ // to the entry that owns the buffer. Make this node a copy of the one that
+ // we have, and let the iterator logic update it when the entry changes.
+ CacheRankingsBlock temp(NULL, Addr(0));
+ *temp.Data() = *rankings->Data();
+ rankings->StopSharingData();
+ *rankings->Data() = *temp.Data();
+}
+
+void Rankings::Insert(CacheRankingsBlock* node, bool modified, List list) {
+ Trace("Insert 0x%x", node->address().value());
+ DCHECK(node->HasData());
+ Addr& my_head = heads_[list];
+ Addr& my_tail = tails_[list];
+ Transaction lock(control_data_, node->address(), INSERT, list);
+ CacheRankingsBlock head(backend_->File(my_head), my_head);
+ if (my_head.is_initialized()) {
+ if (!GetRanking(&head))
+ return;
+
+ if (head.Data()->prev != my_head.value() && // Normal path.
+ head.Data()->prev != node->address().value()) { // FinishInsert().
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return;
+ }
+
+ head.Data()->prev = node->address().value();
+ head.Store();
+ GenerateCrash(ON_INSERT_1);
+ UpdateIterators(&head);
+ }
+
+ node->Data()->next = my_head.value();
+ node->Data()->prev = node->address().value();
+ my_head.set_value(node->address().value());
+
+ if (!my_tail.is_initialized() || my_tail.value() == node->address().value()) {
+ my_tail.set_value(node->address().value());
+ node->Data()->next = my_tail.value();
+ WriteTail(list);
+ GenerateCrash(ON_INSERT_2);
+ }
+
+ Time now = Time::Now();
+ node->Data()->last_used = now.ToInternalValue();
+ if (modified)
+ node->Data()->last_modified = now.ToInternalValue();
+ node->Store();
+ GenerateCrash(ON_INSERT_3);
+
+ // The last thing to do is move our head to point to a node already stored.
+ WriteHead(list);
+ IncrementCounter(list);
+ GenerateCrash(ON_INSERT_4);
+}
+
+// If a, b and r are elements on the list, and we want to remove r, the possible
+// states for the objects if a crash happens are (where y(x, z) means for object
+// y, prev is x and next is z):
+// A. One element:
+// 1. r(r, r), head(r), tail(r) initial state
+// 2. r(r, r), head(0), tail(r) WriteHead()
+// 3. r(r, r), head(0), tail(0) WriteTail()
+// 4. r(0, 0), head(0), tail(0) next.Store()
+//
+// B. Remove a random element:
+// 1. a(x, r), r(a, b), b(r, y), head(x), tail(y) initial state
+// 2. a(x, r), r(a, b), b(a, y), head(x), tail(y) next.Store()
+// 3. a(x, b), r(a, b), b(a, y), head(x), tail(y) prev.Store()
+// 4. a(x, b), r(0, 0), b(a, y), head(x), tail(y) node.Store()
+//
+// C. Remove head:
+// 1. r(r, b), b(r, y), head(r), tail(y) initial state
+// 2. r(r, b), b(r, y), head(b), tail(y) WriteHead()
+// 3. r(r, b), b(b, y), head(b), tail(y) next.Store()
+// 4. r(0, 0), b(b, y), head(b), tail(y) prev.Store()
+//
+// D. Remove tail:
+// 1. a(x, r), r(a, r), head(x), tail(r) initial state
+// 2. a(x, r), r(a, r), head(x), tail(a) WriteTail()
+// 3. a(x, a), r(a, r), head(x), tail(a) prev.Store()
+// 4. a(x, a), r(0, 0), head(x), tail(a) next.Store()
+void Rankings::Remove(CacheRankingsBlock* node, List list) {
+ Trace("Remove 0x%x (0x%x 0x%x)", node->address().value(), node->Data()->next,
+ node->Data()->prev);
+ DCHECK(node->HasData());
+ InvalidateIterators(node);
+ Addr next_addr(node->Data()->next);
+ Addr prev_addr(node->Data()->prev);
+ if (!next_addr.is_initialized() || next_addr.is_separate_file() ||
+ !prev_addr.is_initialized() || prev_addr.is_separate_file()) {
+ LOG(WARNING) << "Invalid rankings info.";
+ return;
+ }
+
+ CacheRankingsBlock next(backend_->File(next_addr), next_addr);
+ CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
+ if (!GetRanking(&next) || !GetRanking(&prev))
+ return;
+
+ if (!CheckLinks(node, &prev, &next, list))
+ return;
+
+ Transaction lock(control_data_, node->address(), REMOVE, list);
+ prev.Data()->next = next.address().value();
+ next.Data()->prev = prev.address().value();
+ GenerateCrash(ON_REMOVE_1);
+
+ CacheAddr node_value = node->address().value();
+ Addr& my_head = heads_[list];
+ Addr& my_tail = tails_[list];
+ if (node_value == my_head.value() || node_value == my_tail.value()) {
+ if (my_head.value() == my_tail.value()) {
+ my_head.set_value(0);
+ my_tail.set_value(0);
+
+ WriteHead(list);
+ GenerateCrash(ON_REMOVE_2);
+ WriteTail(list);
+ GenerateCrash(ON_REMOVE_3);
+ } else if (node_value == my_head.value()) {
+ my_head.set_value(next.address().value());
+ next.Data()->prev = next.address().value();
+
+ WriteHead(list);
+ GenerateCrash(ON_REMOVE_4);
+ } else if (node_value == my_tail.value()) {
+ my_tail.set_value(prev.address().value());
+ prev.Data()->next = prev.address().value();
+
+ WriteTail(list);
+ GenerateCrash(ON_REMOVE_5);
+
+ // Store the new tail to make sure we can undo the operation if we crash.
+ prev.Store();
+ GenerateCrash(ON_REMOVE_6);
+ }
+ }
+
+ // Nodes out of the list can be identified by invalid pointers.
+ node->Data()->next = 0;
+ node->Data()->prev = 0;
+
+ // The last thing to get to disk is the node itself, so before that there is
+ // enough info to recover.
+ next.Store();
+ GenerateCrash(ON_REMOVE_7);
+ prev.Store();
+ GenerateCrash(ON_REMOVE_8);
+ node->Store();
+ DecrementCounter(list);
+ UpdateIterators(&next);
+ UpdateIterators(&prev);
+}
+
+// A crash in between Remove and Insert will lead to a dirty entry not on the
+// list. We want to avoid that case as much as we can (for instance, while
+// waiting for IO), but the net effect is just an assert on debug builds when
+// attempting to remove the entry. The alternative would be reentrant
+// transactions, which would be overkill.
+void Rankings::UpdateRank(CacheRankingsBlock* node, bool modified, List list) {
+ Time start = Time::Now();
+ Remove(node, list);
+ Insert(node, modified, list);
+ CACHE_UMA(AGE_MS, "UpdateRank", 0, start);
+}
+
+void Rankings::CompleteTransaction() {
+ Addr node_addr(static_cast<CacheAddr>(control_data_->transaction));
+ if (!node_addr.is_initialized() || node_addr.is_separate_file()) {
+ NOTREACHED();
+ LOG(ERROR) << "Invalid rankings info.";
+ return;
+ }
+
+ Trace("CompleteTransaction 0x%x", node_addr.value());
+
+ CacheRankingsBlock node(backend_->File(node_addr), node_addr);
+ if (!node.Load())
+ return;
+
+ node.Data()->dummy = 0;
+ node.Store();
+
+ Addr& my_head = heads_[control_data_->operation_list];
+ Addr& my_tail = tails_[control_data_->operation_list];
+
+  // We want to leave the node inside the list. The entry must be marked as
+  // dirty, and will be removed later. Otherwise, we'll get assertions when
+  // attempting to remove the dirty entry.
+ if (INSERT == control_data_->operation) {
+ Trace("FinishInsert h:0x%x t:0x%x", my_head.value(), my_tail.value());
+ FinishInsert(&node);
+ } else if (REMOVE == control_data_->operation) {
+ Trace("RevertRemove h:0x%x t:0x%x", my_head.value(), my_tail.value());
+ RevertRemove(&node);
+ } else {
+ NOTREACHED();
+ LOG(ERROR) << "Invalid operation to recover.";
+ }
+}
+
+void Rankings::FinishInsert(CacheRankingsBlock* node) {
+ control_data_->transaction = 0;
+ control_data_->operation = 0;
+ Addr& my_head = heads_[control_data_->operation_list];
+ Addr& my_tail = tails_[control_data_->operation_list];
+ if (my_head.value() != node->address().value()) {
+ if (my_tail.value() == node->address().value()) {
+ // This part will be skipped by the logic of Insert.
+ node->Data()->next = my_tail.value();
+ }
+
+ Insert(node, true, static_cast<List>(control_data_->operation_list));
+ }
+
+ // Tell the backend about this entry.
+ backend_->RecoveredEntry(node);
+}
+
+void Rankings::RevertRemove(CacheRankingsBlock* node) {
+ Addr next_addr(node->Data()->next);
+ Addr prev_addr(node->Data()->prev);
+ if (!next_addr.is_initialized() || !prev_addr.is_initialized()) {
+ // The operation actually finished. Nothing to do.
+ control_data_->transaction = 0;
+ return;
+ }
+ if (next_addr.is_separate_file() || prev_addr.is_separate_file()) {
+ NOTREACHED();
+ LOG(WARNING) << "Invalid rankings info.";
+ control_data_->transaction = 0;
+ return;
+ }
+
+ CacheRankingsBlock next(backend_->File(next_addr), next_addr);
+ CacheRankingsBlock prev(backend_->File(prev_addr), prev_addr);
+ if (!next.Load() || !prev.Load())
+ return;
+
+ CacheAddr node_value = node->address().value();
+ DCHECK(prev.Data()->next == node_value ||
+ prev.Data()->next == prev_addr.value() ||
+ prev.Data()->next == next.address().value());
+ DCHECK(next.Data()->prev == node_value ||
+ next.Data()->prev == next_addr.value() ||
+ next.Data()->prev == prev.address().value());
+
+ if (node_value != prev_addr.value())
+ prev.Data()->next = node_value;
+ if (node_value != next_addr.value())
+ next.Data()->prev = node_value;
+
+ List my_list = static_cast<List>(control_data_->operation_list);
+ Addr& my_head = heads_[my_list];
+ Addr& my_tail = tails_[my_list];
+ if (!my_head.is_initialized() || !my_tail.is_initialized()) {
+ my_head.set_value(node_value);
+ my_tail.set_value(node_value);
+ WriteHead(my_list);
+ WriteTail(my_list);
+ } else if (my_head.value() == next.address().value()) {
+ my_head.set_value(node_value);
+ prev.Data()->next = next.address().value();
+ WriteHead(my_list);
+ } else if (my_tail.value() == prev.address().value()) {
+ my_tail.set_value(node_value);
+ next.Data()->prev = prev.address().value();
+ WriteTail(my_list);
+ }
+
+ next.Store();
+ prev.Store();
+ control_data_->transaction = 0;
+ control_data_->operation = 0;
+}
+
+CacheRankingsBlock* Rankings::GetNext(CacheRankingsBlock* node, List list) {
+ ScopedRankingsBlock next(this);
+ if (!node) {
+ Addr& my_head = heads_[list];
+ if (!my_head.is_initialized())
+ return NULL;
+ next.reset(new CacheRankingsBlock(backend_->File(my_head), my_head));
+ } else {
+ if (!node->HasData())
+ node->Load();
+ Addr& my_tail = tails_[list];
+ if (!my_tail.is_initialized())
+ return NULL;
+ if (my_tail.value() == node->address().value())
+ return NULL;
+ Addr address(node->Data()->next);
+ if (address.value() == node->address().value())
+ return NULL; // Another tail? fail it.
+ next.reset(new CacheRankingsBlock(backend_->File(address), address));
+ }
+
+ TrackRankingsBlock(next.get(), true);
+
+ if (!GetRanking(next.get()))
+ return NULL;
+
+ ConvertToLongLived(next.get());
+ if (node && !CheckSingleLink(node, next.get()))
+ return NULL;
+
+ return next.release();
+}
+
+CacheRankingsBlock* Rankings::GetPrev(CacheRankingsBlock* node, List list) {
+ ScopedRankingsBlock prev(this);
+ if (!node) {
+ Addr& my_tail = tails_[list];
+ if (!my_tail.is_initialized())
+ return NULL;
+ prev.reset(new CacheRankingsBlock(backend_->File(my_tail), my_tail));
+ } else {
+ if (!node->HasData())
+ node->Load();
+ Addr& my_head = heads_[list];
+ if (!my_head.is_initialized())
+ return NULL;
+ if (my_head.value() == node->address().value())
+ return NULL;
+ Addr address(node->Data()->prev);
+ if (address.value() == node->address().value())
+ return NULL; // Another head? fail it.
+ prev.reset(new CacheRankingsBlock(backend_->File(address), address));
+ }
+
+ TrackRankingsBlock(prev.get(), true);
+
+ if (!GetRanking(prev.get()))
+ return NULL;
+
+ ConvertToLongLived(prev.get());
+ if (node && !CheckSingleLink(prev.get(), node))
+ return NULL;
+
+ return prev.release();
+}
+
+void Rankings::FreeRankingsBlock(CacheRankingsBlock* node) {
+ TrackRankingsBlock(node, false);
+}
+
+void Rankings::TrackRankingsBlock(CacheRankingsBlock* node,
+ bool start_tracking) {
+ if (!node)
+ return;
+
+ IteratorPair current(node->address().value(), node);
+
+ if (start_tracking)
+ iterators_.push_back(current);
+ else
+ iterators_.remove(current);
+}
+
+int Rankings::SelfCheck() {
+ int total = 0;
+ for (int i = 0; i < LAST_ELEMENT; i++) {
+ int partial = CheckList(static_cast<List>(i));
+ if (partial < 0)
+ return partial;
+ total += partial;
+ }
+ return total;
+}
+
+bool Rankings::SanityCheck(CacheRankingsBlock* node, bool from_list) {
+ const RankingsNode* data = node->Data();
+ if (!data->contents)
+ return false;
+
+ // It may have never been inserted.
+ if (from_list && (!data->last_used || !data->last_modified))
+ return false;
+
+ if ((!data->next && data->prev) || (data->next && !data->prev))
+ return false;
+
+  // Both pointers set to zero means the node is out of the list.
+ if (!data->next && !data->prev && from_list)
+ return false;
+
+ if ((node->address().value() == data->prev) && !IsHead(data->prev))
+ return false;
+
+ if ((node->address().value() == data->next) && !IsTail(data->next))
+ return false;
+
+ return true;
+}
+
+void Rankings::ReadHeads() {
+ for (int i = 0; i < LAST_ELEMENT; i++)
+ heads_[i] = Addr(control_data_->heads[i]);
+}
+
+void Rankings::ReadTails() {
+ for (int i = 0; i < LAST_ELEMENT; i++)
+ tails_[i] = Addr(control_data_->tails[i]);
+}
+
+void Rankings::WriteHead(List list) {
+ control_data_->heads[list] = heads_[list].value();
+}
+
+void Rankings::WriteTail(List list) {
+ control_data_->tails[list] = tails_[list].value();
+}
+
+bool Rankings::CheckEntry(CacheRankingsBlock* rankings) {
+ if (!rankings->Data()->dummy)
+ return true;
+
+ // If this entry is not dirty, it is a serious problem.
+ return backend_->GetCurrentEntryId() != rankings->Data()->dirty;
+}
+
+bool Rankings::CheckLinks(CacheRankingsBlock* node, CacheRankingsBlock* prev,
+ CacheRankingsBlock* next, List list) {
+ if ((prev->Data()->next != node->address().value() &&
+ heads_[list].value() != node->address().value()) ||
+ (next->Data()->prev != node->address().value() &&
+ tails_[list].value() != node->address().value())) {
+ LOG(ERROR) << "Inconsistent LRU.";
+
+ if (prev->Data()->next == next->address().value() &&
+ next->Data()->prev == prev->address().value()) {
+      // The list is actually ok; the node is wrong.
+ node->Data()->next = 0;
+ node->Data()->prev = 0;
+ node->Store();
+ return false;
+ }
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+ }
+
+ return true;
+}
+
+bool Rankings::CheckSingleLink(CacheRankingsBlock* prev,
+ CacheRankingsBlock* next) {
+ if (prev->Data()->next != next->address().value() ||
+ next->Data()->prev != prev->address().value()) {
+ LOG(ERROR) << "Inconsistent LRU.";
+
+ backend_->CriticalError(ERR_INVALID_LINKS);
+ return false;
+ }
+
+ return true;
+}
+
+int Rankings::CheckList(List list) {
+ Addr& my_head = heads_[list];
+ Addr& my_tail = tails_[list];
+ if (!my_head.is_initialized()) {
+ if (!my_tail.is_initialized())
+ return 0;
+ // If there is no head, having a tail is an error.
+ return ERR_INVALID_TAIL;
+ }
+ // If there is no tail, having a head is an error.
+ if (!my_tail.is_initialized())
+ return ERR_INVALID_HEAD;
+
+ if (my_tail.is_separate_file())
+ return ERR_INVALID_TAIL;
+
+ if (my_head.is_separate_file())
+ return ERR_INVALID_HEAD;
+
+ int num_items = 0;
+ Addr address(my_head.value());
+ Addr prev(my_head.value());
+ scoped_ptr<CacheRankingsBlock> node;
+ do {
+ node.reset(new CacheRankingsBlock(backend_->File(address), address));
+ node->Load();
+ if (node->Data()->prev != prev.value())
+ return ERR_INVALID_PREV;
+ if (!CheckEntry(node.get()))
+ return ERR_INVALID_ENTRY;
+
+ prev.set_value(address.value());
+ address.set_value(node->Data()->next);
+ if (!address.is_initialized() || address.is_separate_file())
+ return ERR_INVALID_NEXT;
+
+ num_items++;
+ } while (node->address().value() != address.value());
+ return num_items;
+}
+
+bool Rankings::IsHead(CacheAddr addr) {
+ for (int i = 0; i < LAST_ELEMENT; i++)
+ if (addr == heads_[i].value())
+ return true;
+ return false;
+}
+
+bool Rankings::IsTail(CacheAddr addr) {
+ for (int i = 0; i < LAST_ELEMENT; i++)
+ if (addr == tails_[i].value())
+ return true;
+ return false;
+}
+
+// We expect to have just a few iterators at any given time, maybe two or
+// three, but we could have more than one pointing at the same node. We walk
+// the list of cache iterators and update all of them that point to the given
+// node.
+void Rankings::UpdateIterators(CacheRankingsBlock* node) {
+ CacheAddr address = node->address().value();
+ for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
+ ++it) {
+ if (it->first == address && it->second->HasData()) {
+ CacheRankingsBlock* other = it->second;
+ *other->Data() = *node->Data();
+ }
+ }
+}
+
+void Rankings::InvalidateIterators(CacheRankingsBlock* node) {
+ CacheAddr address = node->address().value();
+ for (IteratorList::iterator it = iterators_.begin(); it != iterators_.end();
+ ++it) {
+ if (it->first == address) {
+ LOG(WARNING) << "Invalidating iterator at 0x" << std::hex << address;
+ it->second->Discard();
+ }
+ }
+}
+
+void Rankings::IncrementCounter(List list) {
+ if (!count_lists_)
+ return;
+
+ DCHECK(control_data_->sizes[list] < kint32max);
+ if (control_data_->sizes[list] < kint32max)
+ control_data_->sizes[list]++;
+}
+
+void Rankings::DecrementCounter(List list) {
+ if (!count_lists_)
+ return;
+
+ DCHECK(control_data_->sizes[list] > 0);
+ if (control_data_->sizes[list] > 0)
+ control_data_->sizes[list]--;
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/rankings.h b/net/disk_cache/rankings.h
new file mode 100644
index 00000000..c9fc8d29
--- /dev/null
+++ b/net/disk_cache/rankings.h
@@ -0,0 +1,205 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_RANKINGS_H_
+#define NET_DISK_CACHE_RANKINGS_H_
+
+#include <list>
+
+#include "base/scoped_ptr.h"
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/mapped_file.h"
+#include "net/disk_cache/storage_block.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+
+// Type of crashes generated for the unit tests.
+enum RankCrashes {
+ NO_CRASH = 0,
+ INSERT_EMPTY_1,
+ INSERT_EMPTY_2,
+ INSERT_EMPTY_3,
+ INSERT_ONE_1,
+ INSERT_ONE_2,
+ INSERT_ONE_3,
+ INSERT_LOAD_1,
+ INSERT_LOAD_2,
+ REMOVE_ONE_1,
+ REMOVE_ONE_2,
+ REMOVE_ONE_3,
+ REMOVE_ONE_4,
+ REMOVE_HEAD_1,
+ REMOVE_HEAD_2,
+ REMOVE_HEAD_3,
+ REMOVE_HEAD_4,
+ REMOVE_TAIL_1,
+ REMOVE_TAIL_2,
+ REMOVE_TAIL_3,
+ REMOVE_LOAD_1,
+ REMOVE_LOAD_2,
+ REMOVE_LOAD_3,
+ MAX_CRASH
+};
+
+// This class handles the ranking information for the cache.
+class Rankings {
+ public:
+ // Possible lists of entries.
+ enum List {
+ NO_USE = 0, // List of entries that have not been reused.
+ LOW_USE, // List of entries with low reuse.
+ HIGH_USE, // List of entries with high reuse.
+ RESERVED, // Reserved for future use.
+ DELETED, // List of recently deleted or doomed entries.
+ LAST_ELEMENT
+ };
+
+  // This class provides a specialized version of scoped_ptr that calls
+  // Rankings whenever a CacheRankingsBlock is deleted, so that Rankings can
+  // keep track of cache iterators that may go stale.
+ class ScopedRankingsBlock : public scoped_ptr<CacheRankingsBlock> {
+ public:
+ ScopedRankingsBlock() : rankings_(NULL) {}
+ explicit ScopedRankingsBlock(Rankings* rankings) : rankings_(rankings) {}
+ ScopedRankingsBlock(Rankings* rankings, CacheRankingsBlock* node)
+ : scoped_ptr<CacheRankingsBlock>(node), rankings_(rankings) {}
+
+ ~ScopedRankingsBlock() {
+ rankings_->FreeRankingsBlock(get());
+ }
+
+ void set_rankings(Rankings* rankings) {
+ rankings_ = rankings;
+ }
+
+ // scoped_ptr::reset will delete the object.
+ void reset(CacheRankingsBlock* p = NULL) {
+ if (p != get())
+ rankings_->FreeRankingsBlock(get());
+ scoped_ptr<CacheRankingsBlock>::reset(p);
+ }
+
+ private:
+ Rankings* rankings_;
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedRankingsBlock);
+ };
+
+  // If we have multiple lists, we have to iterate through all of them at the
+  // same time. This structure keeps track of where we are in the iteration.
+ struct Iterator {
+ List list; // Which entry was returned to the user.
+ CacheRankingsBlock* nodes[3]; // Nodes on the first three lists.
+ Rankings* my_rankings;
+ explicit Iterator(Rankings* rankings) {
+ memset(this, 0, sizeof(Iterator));
+ my_rankings = rankings;
+ }
+ ~Iterator() {
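+      // Each temporary ScopedRankingsBlock takes ownership of nodes[i]; its
+      // destructor notifies Rankings (FreeRankingsBlock) and deletes the node.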
+ for (int i = 0; i < 3; i++)
+ ScopedRankingsBlock(my_rankings, nodes[i]);
+ }
+ };
+
+ Rankings() : init_(false) {}
+ ~Rankings() {}
+
+ bool Init(BackendImpl* backend, bool count_lists);
+
+ // Restores original state, leaving the object ready for initialization.
+ void Reset();
+
+ // Inserts a given entry at the head of the queue.
+ void Insert(CacheRankingsBlock* node, bool modified, List list);
+
+ // Removes a given entry from the LRU list.
+ void Remove(CacheRankingsBlock* node, List list);
+
+ // Moves a given entry to the head.
+ void UpdateRank(CacheRankingsBlock* node, bool modified, List list);
+
+ // Iterates through the list.
+ CacheRankingsBlock* GetNext(CacheRankingsBlock* node, List list);
+ CacheRankingsBlock* GetPrev(CacheRankingsBlock* node, List list);
+ void FreeRankingsBlock(CacheRankingsBlock* node);
+
+ // Controls tracking of nodes used for enumerations.
+ void TrackRankingsBlock(CacheRankingsBlock* node, bool start_tracking);
+
+  // Performs a simple self-check of the lists, and returns the number of items
+ // or an error code (negative value).
+ int SelfCheck();
+
+ // Returns false if the entry is clearly invalid. from_list is true if the
+ // node comes from the LRU list.
+ bool SanityCheck(CacheRankingsBlock* node, bool from_list);
+
+ private:
+ typedef std::pair<CacheAddr, CacheRankingsBlock*> IteratorPair;
+ typedef std::list<IteratorPair> IteratorList;
+
+ void ReadHeads();
+ void ReadTails();
+ void WriteHead(List list);
+ void WriteTail(List list);
+
+ // Gets the rankings information for a given rankings node. We may end up
+ // sharing the actual memory with a loaded entry, but we are not taking a
+ // reference to that entry, so |rankings| must be short lived.
+ bool GetRanking(CacheRankingsBlock* rankings);
+
+ // Makes |rankings| suitable to live a long life.
+ void ConvertToLongLived(CacheRankingsBlock* rankings);
+
+ // Finishes a list modification after a crash.
+ void CompleteTransaction();
+ void FinishInsert(CacheRankingsBlock* rankings);
+ void RevertRemove(CacheRankingsBlock* rankings);
+
+ // Returns false if this entry will not be recognized as dirty (called during
+ // selfcheck).
+ bool CheckEntry(CacheRankingsBlock* rankings);
+
+ // Returns false if node is not properly linked.
+ bool CheckLinks(CacheRankingsBlock* node, CacheRankingsBlock* prev,
+ CacheRankingsBlock* next, List list);
+
+ // Checks the links between two consecutive nodes.
+ bool CheckSingleLink(CacheRankingsBlock* prev, CacheRankingsBlock* next);
+
+  // Performs a simple check of the list, and returns the number of items or an
+ // error code (negative value).
+ int CheckList(List list);
+
+ // Returns true if addr is the head or tail of any list.
+ bool IsHead(CacheAddr addr);
+ bool IsTail(CacheAddr addr);
+
+ // Updates the iterators whenever node is being changed.
+ void UpdateIterators(CacheRankingsBlock* node);
+
+ // Invalidates the iterators pointing to this node.
+ void InvalidateIterators(CacheRankingsBlock* node);
+
+ // Keeps track of the number of entries on a list.
+ void IncrementCounter(List list);
+ void DecrementCounter(List list);
+
+ bool init_;
+ bool count_lists_;
+ Addr heads_[LAST_ELEMENT];
+ Addr tails_[LAST_ELEMENT];
+ BackendImpl* backend_;
+ LruData* control_data_; // Data related to the LRU lists.
+ IteratorList iterators_;
+
+ DISALLOW_COPY_AND_ASSIGN(Rankings);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_RANKINGS_H_
diff --git a/net/disk_cache/sparse_control.cc b/net/disk_cache/sparse_control.cc
new file mode 100644
index 00000000..9f0f537f
--- /dev/null
+++ b/net/disk_cache/sparse_control.cc
@@ -0,0 +1,744 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/sparse_control.h"
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "base/time.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/entry_impl.h"
+#include "net/disk_cache/file.h"
+
+using base::Time;
+
+namespace {
+
+// Stream of the sparse data index.
+const int kSparseIndex = 2;
+
+// Stream of the sparse data.
+const int kSparseData = 1;
+
+// We can have up to 64k children.
+const int kMaxMapSize = 8 * 1024;
+
+// The maximum number of bytes that a child can store.
+const int kMaxEntrySize = 0x100000;
+
+// The size of each data block (tracked by the child allocation bitmap).
+const int kBlockSize = 1024;
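+
+// Putting these limits together: each child holds at most kMaxEntrySize (1 MB)
+// of data, so a sparse offset maps to child number (offset >> 20) and to byte
+// (offset & (kMaxEntrySize - 1)) within that child; with up to 64K children
+// (an 8 KB bitmap, kMaxMapSize) this covers the 64 GB limit checked in
+// StartIO().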
+
+// Returns the name of a child entry given the base_name and signature of the
+// parent, and the child_id.
+// If the entry is called entry_name, child entries will be named something
+// like Range_entry_name:XXX:YYY where XXX is the entry signature and YYY is the
+// number of the particular child.
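+// For example, GenerateChildName("foo", 0x1234, 3) returns "Range_foo:1234:3"
+// (both numbers are formatted as lowercase hex).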
+std::string GenerateChildName(const std::string& base_name, int64 signature,
+ int64 child_id) {
+ return StringPrintf("Range_%s:%" PRIx64 ":%" PRIx64, base_name.c_str(),
+ signature, child_id);
+}
+
+// This class deletes the children of a sparse entry.
+class ChildrenDeleter
+ : public base::RefCounted<ChildrenDeleter>,
+ public disk_cache::FileIOCallback {
+ public:
+ ChildrenDeleter(disk_cache::BackendImpl* backend, const std::string& name)
+ : backend_(backend), name_(name) {}
+
+ virtual void OnFileIOComplete(int bytes_copied);
+
+  // Two ways of deleting the children: if we have the children map, use
+  // Start() directly; otherwise, pass the data address to ReadData().
+ void Start(char* buffer, int len);
+ void ReadData(disk_cache::Addr address, int len);
+
+ private:
+ friend class base::RefCounted<ChildrenDeleter>;
+ ~ChildrenDeleter() {}
+
+ void DeleteChildren();
+
+ disk_cache::BackendImpl* backend_;
+ std::string name_;
+ disk_cache::Bitmap children_map_;
+ int64 signature_;
+ scoped_array<char> buffer_;
+ DISALLOW_EVIL_CONSTRUCTORS(ChildrenDeleter);
+};
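+
+// The creator of a ChildrenDeleter calls AddRef() once; the single matching
+// Release() happens either on an early error exit or when DeleteChildren()
+// runs out of set bits, so the object deletes itself once the work is done.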
+
+// This is the callback of the file operation.
+void ChildrenDeleter::OnFileIOComplete(int bytes_copied) {
+ char* buffer = buffer_.release();
+ Start(buffer, bytes_copied);
+}
+
+void ChildrenDeleter::Start(char* buffer, int len) {
+ buffer_.reset(buffer);
+ if (len < static_cast<int>(sizeof(disk_cache::SparseData)))
+ return Release();
+
+ // Just copy the information from |buffer|, delete |buffer| and start deleting
+ // the child entries.
+ disk_cache::SparseData* data =
+ reinterpret_cast<disk_cache::SparseData*>(buffer);
+ signature_ = data->header.signature;
+
+ int num_bits = (len - sizeof(disk_cache::SparseHeader)) * 8;
+ children_map_.Resize(num_bits, false);
+ children_map_.SetMap(data->bitmap, num_bits / 32);
+ buffer_.reset();
+
+ DeleteChildren();
+}
+
+void ChildrenDeleter::ReadData(disk_cache::Addr address, int len) {
+ DCHECK(address.is_block_file());
+ disk_cache::File* file(backend_->File(address));
+ if (!file)
+ return Release();
+
+ size_t file_offset = address.start_block() * address.BlockSize() +
+ disk_cache::kBlockHeaderSize;
+
+ buffer_.reset(new char[len]);
+ bool completed;
+ if (!file->Read(buffer_.get(), len, file_offset, this, &completed))
+ return Release();
+
+ if (completed)
+ OnFileIOComplete(len);
+
+ // And wait until OnFileIOComplete gets called.
+}
+
+void ChildrenDeleter::DeleteChildren() {
+ int child_id = 0;
+ if (!children_map_.FindNextSetBit(&child_id)) {
+ // We are done. Just delete this object.
+ return Release();
+ }
+ std::string child_name = GenerateChildName(name_, signature_, child_id);
+ backend_->DoomEntry(child_name);
+ children_map_.Set(child_id, false);
+
+ // Post a task to delete the next child.
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &ChildrenDeleter::DeleteChildren));
+}
+
+} // namespace.
+
+namespace disk_cache {
+
+SparseControl::~SparseControl() {
+ if (child_)
+ CloseChild();
+ if (init_)
+ WriteSparseData();
+}
+
+int SparseControl::Init() {
+ DCHECK(!init_);
+
+ // We should not have sparse data for the exposed entry.
+ if (entry_->GetDataSize(kSparseData))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ // Now see if there is something where we store our data.
+ int rv = net::OK;
+ int data_len = entry_->GetDataSize(kSparseIndex);
+ if (!data_len) {
+ rv = CreateSparseEntry();
+ } else {
+ rv = OpenSparseEntry(data_len);
+ }
+
+ if (rv == net::OK)
+ init_ = true;
+ return rv;
+}
+
+int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
+ int buf_len, net::CompletionCallback* callback) {
+ DCHECK(init_);
+ // We don't support simultaneous IO for sparse data.
+ if (operation_ != kNoOperation)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (offset < 0 || buf_len < 0)
+ return net::ERR_INVALID_ARGUMENT;
+
+ // We only support up to 64 GB.
+ if (offset + buf_len >= 0x1000000000LL || offset + buf_len < 0)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ DCHECK(!user_buf_);
+ DCHECK(!user_callback_);
+
+ if (!buf && (op == kReadOperation || op == kWriteOperation))
+ return 0;
+
+ // Copy the operation parameters.
+ operation_ = op;
+ offset_ = offset;
+ user_buf_ = buf ? new net::DrainableIOBuffer(buf, buf_len) : NULL;
+ buf_len_ = buf_len;
+ user_callback_ = callback;
+
+ result_ = 0;
+ pending_ = false;
+ finished_ = false;
+ abort_ = false;
+
+ DoChildrenIO();
+
+ if (!pending_) {
+ // Everything was done synchronously.
+ operation_ = kNoOperation;
+ user_buf_ = NULL;
+ user_callback_ = NULL;
+ return result_;
+ }
+
+ return net::ERR_IO_PENDING;
+}
+
+int SparseControl::GetAvailableRange(int64 offset, int len, int64* start) {
+ DCHECK(init_);
+ // We don't support simultaneous IO for sparse data.
+ if (operation_ != kNoOperation)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ DCHECK(start);
+
+ range_found_ = false;
+ int result = StartIO(kGetRangeOperation, offset, NULL, len, NULL);
+ if (range_found_) {
+ *start = offset_;
+ return result;
+ }
+
+ // This is a failure. We want to return a valid start value in any case.
+ *start = offset;
+ return result < 0 ? result : 0; // Don't mask error codes to the caller.
+}
+
+void SparseControl::CancelIO() {
+ if (operation_ == kNoOperation)
+ return;
+ abort_ = true;
+}
+
+int SparseControl::ReadyToUse(net::CompletionCallback* completion_callback) {
+ if (!abort_)
+ return net::OK;
+
+ // We'll grab another reference to keep this object alive because we just have
+ // one extra reference due to the pending IO operation itself, but we'll
+ // release that one before invoking user_callback_.
+ entry_->AddRef(); // Balanced in DoAbortCallbacks.
+ abort_callbacks_.push_back(completion_callback);
+ return net::ERR_IO_PENDING;
+}
+
+// Static
+void SparseControl::DeleteChildren(EntryImpl* entry) {
+ DCHECK(entry->GetEntryFlags() & PARENT_ENTRY);
+ int data_len = entry->GetDataSize(kSparseIndex);
+ if (data_len < static_cast<int>(sizeof(SparseData)) ||
+ entry->GetDataSize(kSparseData))
+ return;
+
+ int map_len = data_len - sizeof(SparseHeader);
+ if (map_len > kMaxMapSize || map_len % 4)
+ return;
+
+ char* buffer;
+ Addr address;
+ entry->GetData(kSparseIndex, &buffer, &address);
+ if (!buffer && !address.is_initialized())
+ return;
+
+ ChildrenDeleter* deleter = new ChildrenDeleter(entry->backend_,
+ entry->GetKey());
+  // The object will self-destruct when finished.
+ deleter->AddRef();
+
+ if (buffer) {
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ deleter, &ChildrenDeleter::Start, buffer, data_len));
+ } else {
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ deleter, &ChildrenDeleter::ReadData, address, data_len));
+ }
+}
+
+// We are going to start using this entry to store sparse data, so we have to
+// initialize our control info.
+int SparseControl::CreateSparseEntry() {
+ if (CHILD_ENTRY & entry_->GetEntryFlags())
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ memset(&sparse_header_, 0, sizeof(sparse_header_));
+ sparse_header_.signature = Time::Now().ToInternalValue();
+ sparse_header_.magic = kIndexMagic;
+ sparse_header_.parent_key_len = entry_->GetKey().size();
+ children_map_.Resize(kNumSparseBits, true);
+
+ // Save the header. The bitmap is saved in the destructor.
+ scoped_refptr<net::IOBuffer> buf =
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_));
+
+ int rv = entry_->WriteData(kSparseIndex, 0, buf, sizeof(sparse_header_), NULL,
+ false);
+ if (rv != sizeof(sparse_header_)) {
+ DLOG(ERROR) << "Unable to save sparse_header_";
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+ }
+
+ entry_->SetEntryFlags(PARENT_ENTRY);
+ return net::OK;
+}
+
+// We are opening an entry from disk. Make sure that our control data is there.
+int SparseControl::OpenSparseEntry(int data_len) {
+ if (data_len < static_cast<int>(sizeof(SparseData)))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (entry_->GetDataSize(kSparseData))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ if (!(PARENT_ENTRY & entry_->GetEntryFlags()))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+  // Don't go overboard with the bitmap. 8 KB gives us offsets up to 64 GB.
+ int map_len = data_len - sizeof(sparse_header_);
+ if (map_len > kMaxMapSize || map_len % 4)
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ scoped_refptr<net::IOBuffer> buf =
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_));
+
+ // Read header.
+ int rv = entry_->ReadData(kSparseIndex, 0, buf, sizeof(sparse_header_), NULL);
+ if (rv != static_cast<int>(sizeof(sparse_header_)))
+ return net::ERR_CACHE_READ_FAILURE;
+
+ // The real validation should be performed by the caller. This is just to
+ // double check.
+ if (sparse_header_.magic != kIndexMagic ||
+ sparse_header_.parent_key_len !=
+ static_cast<int>(entry_->GetKey().size()))
+ return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
+
+ // Read the actual bitmap.
+ buf = new net::IOBuffer(map_len);
+ rv = entry_->ReadData(kSparseIndex, sizeof(sparse_header_), buf, map_len,
+ NULL);
+ if (rv != map_len)
+ return net::ERR_CACHE_READ_FAILURE;
+
+ // Grow the bitmap to the current size and copy the bits.
+ children_map_.Resize(map_len * 8, false);
+ children_map_.SetMap(reinterpret_cast<uint32*>(buf->data()), map_len);
+ return net::OK;
+}
+
+bool SparseControl::OpenChild() {
+ DCHECK_GE(result_, 0);
+
+ std::string key = GenerateChildKey();
+ if (child_) {
+    // Keep using the same child or open another one?
+ if (key == child_->GetKey())
+ return true;
+ CloseChild();
+ }
+
+  // See if we are tracking this child.
+ bool child_present = ChildPresent();
+ if (!child_present || !entry_->backend_->OpenEntry(key, &child_))
+ return ContinueWithoutChild(key);
+
+ EntryImpl* child = static_cast<EntryImpl*>(child_);
+ if (!(CHILD_ENTRY & child->GetEntryFlags()) ||
+ child->GetDataSize(kSparseIndex) <
+ static_cast<int>(sizeof(child_data_)))
+ return KillChildAndContinue(key, false);
+
+ scoped_refptr<net::WrappedIOBuffer> buf =
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_));
+
+ // Read signature.
+ int rv = child_->ReadData(kSparseIndex, 0, buf, sizeof(child_data_), NULL);
+ if (rv != sizeof(child_data_))
+ return KillChildAndContinue(key, true); // This is a fatal failure.
+
+ if (child_data_.header.signature != sparse_header_.signature ||
+ child_data_.header.magic != kIndexMagic)
+ return KillChildAndContinue(key, false);
+
+ if (child_data_.header.last_block_len < 0 ||
+ child_data_.header.last_block_len > kBlockSize) {
+    // Make sure these values are always within range.
+ child_data_.header.last_block_len = 0;
+ child_data_.header.last_block = -1;
+ }
+
+ return true;
+}
+
+void SparseControl::CloseChild() {
+ scoped_refptr<net::WrappedIOBuffer> buf =
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_));
+
+ // Save the allocation bitmap before closing the child entry.
+ int rv = child_->WriteData(kSparseIndex, 0, buf, sizeof(child_data_),
+ NULL, false);
+ if (rv != sizeof(child_data_))
+ DLOG(ERROR) << "Failed to save child data";
+ child_->Close();
+ child_ = NULL;
+}
+
+std::string SparseControl::GenerateChildKey() {
+ return GenerateChildName(entry_->GetKey(), sparse_header_.signature,
+ offset_ >> 20);
+}
+
+// We are deleting the child because something went wrong.
+bool SparseControl::KillChildAndContinue(const std::string& key, bool fatal) {
+ SetChildBit(false);
+ child_->Doom();
+ child_->Close();
+ child_ = NULL;
+ if (fatal) {
+ result_ = net::ERR_CACHE_READ_FAILURE;
+ return false;
+ }
+ return ContinueWithoutChild(key);
+}
+
+// We were not able to open this child; see what we can do.
+bool SparseControl::ContinueWithoutChild(const std::string& key) {
+ if (kReadOperation == operation_)
+ return false;
+ if (kGetRangeOperation == operation_)
+ return true;
+
+ if (!entry_->backend_->CreateEntry(key, &child_)) {
+ child_ = NULL;
+ result_ = net::ERR_CACHE_READ_FAILURE;
+ return false;
+ }
+ // Write signature.
+ InitChildData();
+ return true;
+}
+
+bool SparseControl::ChildPresent() {
+ int child_bit = static_cast<int>(offset_ >> 20);
+ if (children_map_.Size() <= child_bit)
+ return false;
+
+ return children_map_.Get(child_bit);
+}
+
+void SparseControl::SetChildBit(bool value) {
+ int child_bit = static_cast<int>(offset_ >> 20);
+
+ // We may have to increase the bitmap of child entries.
+ if (children_map_.Size() <= child_bit)
+ children_map_.Resize(Bitmap::RequiredArraySize(child_bit + 1) * 32, true);
+
+ children_map_.Set(child_bit, value);
+}
+
+void SparseControl::WriteSparseData() {
+ scoped_refptr<net::IOBuffer> buf = new net::WrappedIOBuffer(
+ reinterpret_cast<const char*>(children_map_.GetMap()));
+
+ int len = children_map_.ArraySize() * 4;
+ int rv = entry_->WriteData(kSparseIndex, sizeof(sparse_header_), buf, len,
+ NULL, false);
+ if (rv != len) {
+ DLOG(ERROR) << "Unable to save sparse map";
+ }
+}
+
+bool SparseControl::VerifyRange() {
+ DCHECK_GE(result_, 0);
+
+ child_offset_ = static_cast<int>(offset_) & (kMaxEntrySize - 1);
+ child_len_ = std::min(buf_len_, kMaxEntrySize - child_offset_);
+
+ // We can write to (or get info from) anywhere in this child.
+ if (operation_ != kReadOperation)
+ return true;
+
+ // Check that there are no holes in this range.
+ int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
+ int start = child_offset_ >> 10;
+ if (child_map_.FindNextBit(&start, last_bit, false)) {
+ // Something is not here.
+ DCHECK_GE(child_data_.header.last_block_len, 0);
+ DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+ int partial_block_len = PartialBlockLength(start);
+ if (start == child_offset_ >> 10) {
+ // It looks like we don't have anything.
+ if (partial_block_len <= (child_offset_ & (kBlockSize - 1)))
+ return false;
+ }
+
+ // We have the first part.
+ child_len_ = (start << 10) - child_offset_;
+ if (partial_block_len) {
+ // We may have a few extra bytes.
+ child_len_ = std::min(child_len_ + partial_block_len, buf_len_);
+ }
+ // There is no need to read more after this one.
+ buf_len_ = child_len_;
+ }
+ return true;
+}
+
+void SparseControl::UpdateRange(int result) {
+ if (result <= 0 || operation_ != kWriteOperation)
+ return;
+
+ DCHECK_GE(child_data_.header.last_block_len, 0);
+ DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
+
+ // Write the bitmap.
+ int first_bit = child_offset_ >> 10;
+ int block_offset = child_offset_ & (kBlockSize - 1);
+ if (block_offset && (child_data_.header.last_block != first_bit ||
+ child_data_.header.last_block_len < block_offset)) {
+ // The first block is not completely filled; ignore it.
+ first_bit++;
+ }
+
+ int last_bit = (child_offset_ + result) >> 10;
+ block_offset = (child_offset_ + result) & (kBlockSize - 1);
+
+ // This condition will hit with the following criteria:
+ // 1. The first byte doesn't follow the last write.
+ // 2. The first byte is in the middle of a block.
+ // 3. The first byte and the last byte are in the same block.
+ if (first_bit > last_bit)
+ return;
+
+ if (block_offset && !child_map_.Get(last_bit)) {
+ // The last block is not completely filled; save it for later.
+ child_data_.header.last_block = last_bit;
+ child_data_.header.last_block_len = block_offset;
+ } else {
+ child_data_.header.last_block = -1;
+ }
+
+ child_map_.SetRange(first_bit, last_bit, true);
+}
+
+int SparseControl::PartialBlockLength(int block_index) const {
+ if (block_index == child_data_.header.last_block)
+ return child_data_.header.last_block_len;
+
+ // This may be the last stored index.
+ int entry_len = child_->GetDataSize(kSparseData);
+ if (block_index == entry_len >> 10)
+ return entry_len & (kBlockSize - 1);
+
+ // This is really empty.
+ return 0;
+}
+
+void SparseControl::InitChildData() {
+ // We know the real type of child_.
+ EntryImpl* child = static_cast<EntryImpl*>(child_);
+ child->SetEntryFlags(CHILD_ENTRY);
+
+ memset(&child_data_, 0, sizeof(child_data_));
+ child_data_.header = sparse_header_;
+
+ scoped_refptr<net::WrappedIOBuffer> buf =
+ new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_));
+
+ int rv = child_->WriteData(kSparseIndex, 0, buf, sizeof(child_data_),
+ NULL, false);
+ if (rv != sizeof(child_data_))
+ DLOG(ERROR) << "Failed to save child data";
+ SetChildBit(true);
+}
+
+void SparseControl::DoChildrenIO() {
+ while (DoChildIO()) continue;
+
+ if (pending_ && finished_)
+ DoUserCallback();
+}
+
+bool SparseControl::DoChildIO() {
+ finished_ = true;
+ if (!buf_len_ || result_ < 0)
+ return false;
+
+ if (!OpenChild())
+ return false;
+
+ if (!VerifyRange())
+ return false;
+
+ // We have more work to do. Let's not trigger a callback to the caller.
+ finished_ = false;
+ net::CompletionCallback* callback = user_callback_ ? &child_callback_ : NULL;
+
+ int rv = 0;
+ switch (operation_) {
+ case kReadOperation:
+ rv = child_->ReadData(kSparseData, child_offset_, user_buf_, child_len_,
+ callback);
+ break;
+ case kWriteOperation:
+ rv = child_->WriteData(kSparseData, child_offset_, user_buf_, child_len_,
+ callback, false);
+ break;
+ case kGetRangeOperation:
+ rv = DoGetAvailableRange();
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ if (rv == net::ERR_IO_PENDING) {
+ if (!pending_) {
+ pending_ = true;
+      // The child will protect itself against closing the entry while IO is in
+ // progress. However, this entry can still be closed, and that would not
+ // be a good thing for us, so we increase the refcount until we're
+ // finished doing sparse stuff.
+ entry_->AddRef(); // Balanced in DoUserCallback.
+ }
+ return false;
+ }
+ if (!rv)
+ return false;
+
+ DoChildIOCompleted(rv);
+ return true;
+}
+
+int SparseControl::DoGetAvailableRange() {
+ if (!child_)
+ return child_len_; // Move on to the next child.
+
+ // Check that there are no holes in this range.
+ int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
+ int start = child_offset_ >> 10;
+ int partial_start_bytes = PartialBlockLength(start);
+ int found = start;
+ int bits_found = child_map_.FindBits(&found, last_bit, true);
+
+ // We don't care if there is a partial block in the middle of the range.
+ int block_offset = child_offset_ & (kBlockSize - 1);
+ if (!bits_found && partial_start_bytes <= block_offset)
+ return child_len_;
+
+ // We are done. Just break the loop and reset result_ to our real result.
+ range_found_ = true;
+
+  // |found| now points to the first 1. Let's see if we have zeros before it.
+ int empty_start = std::max((found << 10) - child_offset_, 0);
+
+ int bytes_found = bits_found << 10;
+ bytes_found += PartialBlockLength(found + bits_found);
+
+ if (start == found)
+ bytes_found -= block_offset;
+
+ // If the user is searching past the end of this child, bits_found is the
+ // right result; otherwise, we have some empty space at the start of this
+ // query that we have to subtract from the range that we searched.
+ result_ = std::min(bytes_found, child_len_ - empty_start);
+
+ if (!bits_found) {
+ result_ = std::min(partial_start_bytes - block_offset, child_len_);
+ empty_start = 0;
+ }
+
+ // Only update offset_ when this query found zeros at the start.
+ if (empty_start)
+ offset_ += empty_start;
+
+ // This will actually break the loop.
+ buf_len_ = 0;
+ return 0;
+}
+
+void SparseControl::DoChildIOCompleted(int result) {
+ if (result < 0) {
+ // We fail the whole operation if we encounter an error.
+ result_ = result;
+ return;
+ }
+
+ UpdateRange(result);
+
+ result_ += result;
+ offset_ += result;
+ buf_len_ -= result;
+
+ // We'll be reusing the user provided buffer for the next chunk.
+ if (buf_len_ && user_buf_)
+ user_buf_->DidConsume(result);
+}
+
+void SparseControl::OnChildIOCompleted(int result) {
+ DCHECK_NE(net::ERR_IO_PENDING, result);
+ DoChildIOCompleted(result);
+
+ if (abort_) {
+    // We'll return the current result of the operation, which may be less than
+    // the bytes to read or write, because the user cancelled the operation.
+ abort_ = false;
+ DoUserCallback();
+ return DoAbortCallbacks();
+ }
+
+ // We are running a callback from the message loop. It's time to restart what
+ // we were doing before.
+ DoChildrenIO();
+}
+
+void SparseControl::DoUserCallback() {
+ DCHECK(user_callback_);
+ net::CompletionCallback* c = user_callback_;
+ user_callback_ = NULL;
+ user_buf_ = NULL;
+ pending_ = false;
+ operation_ = kNoOperation;
+ entry_->Release(); // Don't touch object after this line.
+ c->Run(result_);
+}
+
+void SparseControl::DoAbortCallbacks() {
+ for (size_t i = 0; i < abort_callbacks_.size(); i++) {
+ // Releasing all references to entry_ may result in the destruction of this
+ // object so we should not be touching it after the last Release().
+ net::CompletionCallback* c = abort_callbacks_[i];
+ if (i == abort_callbacks_.size() - 1)
+ abort_callbacks_.clear();
+
+ entry_->Release(); // Don't touch object after this line.
+ c->Run(net::OK);
+ }
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/sparse_control.h b/net/disk_cache/sparse_control.h
new file mode 100644
index 00000000..0005f580
--- /dev/null
+++ b/net/disk_cache/sparse_control.h
@@ -0,0 +1,177 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_SPARSE_CONTROL_H_
+#define NET_DISK_CACHE_SPARSE_CONTROL_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "net/base/completion_callback.h"
+#include "net/disk_cache/bitmap.h"
+#include "net/disk_cache/disk_format.h"
+
+namespace net {
+class IOBuffer;
+class DrainableIOBuffer;
+}
+
+namespace disk_cache {
+
+class Entry;
+class EntryImpl;
+
+// This class provides support for the sparse capabilities of the disk cache.
+// Basically, sparse IO is directed from EntryImpl to this class, and we split
+// the operation into multiple small pieces, sending each one to the
+// appropriate entry. An instance of this class is associated with each entry
+// used directly for sparse operations (the entry passed in to the constructor).
+class SparseControl {
+ public:
+ // The operation to perform.
+ enum SparseOperation {
+ kNoOperation,
+ kReadOperation,
+ kWriteOperation,
+ kGetRangeOperation
+ };
+
+ explicit SparseControl(EntryImpl* entry)
+ : entry_(entry), child_(NULL), operation_(kNoOperation), init_(false),
+ child_map_(child_data_.bitmap, kNumSparseBits, kNumSparseBits / 32),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ child_callback_(this, &SparseControl::OnChildIOCompleted)),
+ user_callback_(NULL) {}
+ ~SparseControl();
+
+ // Initializes the object for the current entry. If this entry already stores
+ // sparse data, or can be used to do it, it updates the relevant information
+ // on disk and returns net::OK. Otherwise it returns a net error code.
+ int Init();
+
+ // Performs an actual sparse read or write operation for this entry. |op| is
+ // the operation to perform, |offset| is the desired sparse offset, |buf| and
+ // |buf_len| specify the actual data to use and |callback| is the callback
+ // to use for asynchronous operations. See the description of the Read /
+ // WriteSparseData for details about the arguments. The return value is the
+ // number of bytes read or written, or a net error code.
+ int StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
+ int buf_len, net::CompletionCallback* callback);
+
+ // Implements Entry::GetAvailableRange().
+ int GetAvailableRange(int64 offset, int len, int64* start);
+
+ // Cancels the current sparse operation (if any).
+ void CancelIO();
+
+ // Returns OK if the entry can be used for new IO or ERR_IO_PENDING if we are
+ // busy. If the entry is busy, we'll invoke the callback when we are ready
+ // again. See disk_cache::Entry::ReadyToUse() for more info.
+ int ReadyToUse(net::CompletionCallback* completion_callback);
+
+ // Deletes the children entries of |entry|.
+ static void DeleteChildren(EntryImpl* entry);
+
+ private:
+  // Creates a new sparse entry or opens an already created entry from disk.
+ // These methods just read / write the required info from disk for the current
+ // entry, and verify that everything is correct. The return value is a net
+ // error code.
+ int CreateSparseEntry();
+ int OpenSparseEntry(int data_len);
+
+ // Opens and closes a child entry. A child entry is a regular EntryImpl object
+ // with a key derived from the key of the resource to store and the range
+ // stored by that child.
+ bool OpenChild();
+ void CloseChild();
+ std::string GenerateChildKey();
+
+ // Deletes the current child and continues the current operation (open).
+ bool KillChildAndContinue(const std::string& key, bool fatal);
+
+ // Continues the current operation (open) without a current child.
+ bool ContinueWithoutChild(const std::string& key);
+
+ // Returns true if the required child is tracked by the parent entry, i.e. it
+ // was already created.
+ bool ChildPresent();
+
+ // Sets the bit for the current child to the provided |value|. In other words,
+ // starts or stops tracking this child.
+ void SetChildBit(bool value);
+
+ // Writes to disk the tracking information for this entry.
+ void WriteSparseData();
+
+ // Verify that the range to be accessed for the current child is appropriate.
+ // Returns false if an error is detected or there is no need to perform the
+ // current IO operation (for instance if the required range is not stored by
+ // the child).
+ bool VerifyRange();
+
+ // Updates the contents bitmap for the current range, based on the result of
+ // the current operation.
+ void UpdateRange(int result);
+
+ // Returns the number of bytes stored at |block_index|, if its allocation-bit
+ // is off (because it is not completely filled).
+ int PartialBlockLength(int block_index) const;
+
+ // Initializes the sparse info for the current child.
+ void InitChildData();
+
+ // Iterates through all the children needed to complete the current operation.
+ void DoChildrenIO();
+
+ // Performs a single operation with the current child. Returns true when we
+ // should move on to the next child and false when we should interrupt our
+ // work.
+ bool DoChildIO();
+
+ // Performs the required work for GetAvailableRange for one child.
+ int DoGetAvailableRange();
+
+ // Performs the required work after a single IO operations finishes.
+ void DoChildIOCompleted(int result);
+
+ // Invoked by the callback of asynchronous operations.
+ void OnChildIOCompleted(int result);
+
+ // Reports to the user that we are done.
+ void DoUserCallback();
+ void DoAbortCallbacks();
+
+ EntryImpl* entry_; // The sparse entry.
+ Entry* child_; // The current child entry.
+ SparseOperation operation_;
+ bool pending_; // True if any child IO operation returned pending.
+ bool finished_;
+ bool init_;
+ bool range_found_; // True if GetAvailableRange found something.
+ bool abort_; // True if we should abort the current operation ASAP.
+
+ SparseHeader sparse_header_; // Data about the children of entry_.
+ Bitmap children_map_; // The actual bitmap of children.
+ SparseData child_data_; // Parent and allocation map of child_.
+ Bitmap child_map_; // The allocation map as a bitmap.
+
+ net::CompletionCallbackImpl<SparseControl> child_callback_;
+ net::CompletionCallback* user_callback_;
+ std::vector<net::CompletionCallback*> abort_callbacks_;
+ int64 offset_; // Current sparse offset.
+ scoped_refptr<net::DrainableIOBuffer> user_buf_;
+ int buf_len_; // Bytes to read or write.
+ int child_offset_; // Offset to use for the current child.
+ int child_len_; // Bytes to read or write for this child.
+ int result_;
+
+ DISALLOW_COPY_AND_ASSIGN(SparseControl);
+};
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_SPARSE_CONTROL_H_
diff --git a/net/disk_cache/stats.cc b/net/disk_cache/stats.cc
new file mode 100644
index 00000000..e69ea00c
--- /dev/null
+++ b/net/disk_cache/stats.cc
@@ -0,0 +1,321 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/stats.h"
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/string_util.h"
+#include "net/disk_cache/backend_impl.h"
+
+namespace {
+
+const int32 kDiskSignature = 0xF01427E0;
+
+struct OnDiskStats {
+ int32 signature;
+ int size;
+ int data_sizes[disk_cache::Stats::kDataSizesLength];
+ int64 counters[disk_cache::Stats::MAX_COUNTER];
+};
+
+// Returns the "floor" (as opposed to "ceiling") of log base 2 of number.
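+// For example, LogBase2(1) == 0, LogBase2(1024) == 10 and
+// LogBase2(100000) == 16 (since 2^16 <= 100000 < 2^17).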
+int LogBase2(int32 number) {
+ unsigned int value = static_cast<unsigned int>(number);
+ const unsigned int mask[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
+ const unsigned int s[] = {1, 2, 4, 8, 16};
+
+ unsigned int result = 0;
+ for (int i = 4; i >= 0; i--) {
+ if (value & mask[i]) {
+ value >>= s[i];
+ result |= s[i];
+ }
+ }
+ return static_cast<int>(result);
+}
+
+static const char* kCounterNames[] = {
+ "Open miss",
+ "Open hit",
+ "Create miss",
+ "Create hit",
+ "Resurrect hit",
+ "Create error",
+ "Trim entry",
+ "Doom entry",
+ "Doom cache",
+ "Invalid entry",
+ "Open entries",
+ "Max entries",
+ "Timer",
+ "Read data",
+ "Write data",
+ "Open rankings",
+ "Get rankings",
+ "Fatal error",
+ "Last report",
+ "Last report timer"
+};
+COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
+ update_the_names);
+
+} // namespace
+
+namespace disk_cache {
+
+bool LoadStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
+ MappedFile* file = backend->File(address);
+ if (!file)
+ return false;
+
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ if (!file->Read(stats, sizeof(*stats), offset))
+ return false;
+
+ if (stats->signature != kDiskSignature)
+ return false;
+
+ // We don't want to discard the whole cache every time we have one extra
+ // counter; just reset them to zero.
+ if (stats->size != sizeof(*stats))
+ memset(stats, 0, sizeof(*stats));
+
+ return true;
+}
+
+bool StoreStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
+ MappedFile* file = backend->File(address);
+ if (!file)
+ return false;
+
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ return file->Write(stats, sizeof(*stats), offset);
+}
+
+bool CreateStats(BackendImpl* backend, Addr* address, OnDiskStats* stats) {
+ if (!backend->CreateBlock(BLOCK_256, 2, address))
+ return false;
+
+ // If we have more than 512 bytes of counters, change kDiskSignature so we
+ // don't overwrite something else (LoadStats must fail).
+ COMPILE_ASSERT(sizeof(*stats) <= 256 * 2, use_more_blocks);
+ memset(stats, 0, sizeof(*stats));
+ stats->signature = kDiskSignature;
+ stats->size = sizeof(*stats);
+
+ return StoreStats(backend, *address, stats);
+}
+
+bool Stats::Init(BackendImpl* backend, uint32* storage_addr) {
+ OnDiskStats stats;
+ Addr address(*storage_addr);
+ if (address.is_initialized()) {
+ if (!LoadStats(backend, address, &stats))
+ return false;
+ } else {
+ if (!CreateStats(backend, &address, &stats))
+ return false;
+ *storage_addr = address.value();
+ }
+
+ storage_addr_ = address.value();
+ backend_ = backend;
+
+ memcpy(data_sizes_, stats.data_sizes, sizeof(data_sizes_));
+ memcpy(counters_, stats.counters, sizeof(counters_));
+
+  // It seems impossible to support this histogram for more than one
+  // simultaneous object with the current infrastructure.
+ static bool first_time = true;
+ if (first_time) {
+ first_time = false;
+ // ShouldReportAgain() will re-enter this object.
+ if (!size_histogram_.get() && backend->cache_type() == net::DISK_CACHE &&
+ backend->ShouldReportAgain()) {
+ // Stats may be reused when the cache is re-created, but we want only one
+ // histogram at any given time.
+ size_histogram_ =
+ StatsHistogram::StatsHistogramFactoryGet("DiskCache.SizeStats");
+ size_histogram_->Init(this);
+ }
+ }
+
+ return true;
+}
+
+Stats::~Stats() {
+ Store();
+}
+
+// The array will be filled this way:
+// index size
+// 0 [0, 1024)
+// 1 [1024, 2048)
+// 2 [2048, 4096)
+// 3 [4K, 6K)
+// ...
+// 10 [18K, 20K)
+// 11 [20K, 24K)
+// 12 [24k, 28K)
+// ...
+// 15 [36k, 40K)
+// 16 [40k, 64K)
+// 17 [64K, 128K)
+// 18 [128K, 256K)
+// ...
+// 23 [4M, 8M)
+// 24 [8M, 16M)
+// 25 [16M, 32M)
+// 26 [32M, 64M)
+// 27 [64M, ...)
+int Stats::GetStatsBucket(int32 size) {
+ if (size < 1024)
+ return 0;
+
+ // 10 slots more, until 20K.
+ if (size < 20 * 1024)
+ return size / 2048 + 1;
+
+ // 5 slots more, from 20K to 40K.
+ if (size < 40 * 1024)
+ return (size - 20 * 1024) / 4096 + 11;
+
+ // From this point on, use a logarithmic scale.
+ int result = LogBase2(size) + 1;
+
+ COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
+ if (result >= kDataSizesLength)
+ result = kDataSizesLength - 1;
+
+ return result;
+}
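+// A few illustrative mappings (reference only, consistent with the table
+// above): GetStatsBucket(500) == 0, GetStatsBucket(3000) == 2,
+// GetStatsBucket(22 * 1024) == 11 and GetStatsBucket(70 * 1024) == 17.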
+
+int Stats::GetBucketRange(size_t i) const {
+ if (i < 2)
+ return static_cast<int>(1024 * i);
+
+ if (i < 12)
+ return static_cast<int>(2048 * (i - 1));
+
+ if (i < 17)
+ return static_cast<int>(4096 * (i - 11)) + 20 * 1024;
+
+ int n = 64 * 1024;
+ if (i > static_cast<size_t>(kDataSizesLength)) {
+ NOTREACHED();
+ i = kDataSizesLength;
+ }
+
+ i -= 17;
+ n <<= i;
+ return n;
+}
+
+void Stats::Snapshot(StatsHistogram::StatsSamples* samples) const {
+ samples->GetCounts()->resize(kDataSizesLength);
+ for (int i = 0; i < kDataSizesLength; i++) {
+ int count = data_sizes_[i];
+ if (count < 0)
+ count = 0;
+ samples->GetCounts()->at(i) = count;
+ }
+}
+
+void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
+  // We keep a histogram of data sizes in an array where each entry is an
+  // adjusted log base 2 of the size (see GetStatsBucket). The first bucket
+  // counts entries under 1 KB and, with 28 buckets, the last one stores
+  // entries of 64 MB or more.
+ int new_index = GetStatsBucket(new_size);
+ int old_index = GetStatsBucket(old_size);
+
+ if (new_size)
+ data_sizes_[new_index]++;
+
+ if (old_size)
+ data_sizes_[old_index]--;
+}
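+// For example (illustrative): growing an entry's data from 3000 bytes to
+// 30000 bytes decrements data_sizes_[2] and increments data_sizes_[13].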
+
+void Stats::OnEvent(Counters an_event) {
+  DCHECK(an_event >= MIN_COUNTER && an_event < MAX_COUNTER);
+ counters_[an_event]++;
+}
+
+void Stats::SetCounter(Counters counter, int64 value) {
+  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
+ counters_[counter] = value;
+}
+
+int64 Stats::GetCounter(Counters counter) const {
+  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
+ return counters_[counter];
+}
+
+void Stats::GetItems(StatsItems* items) {
+ std::pair<std::string, std::string> item;
+ for (int i = 0; i < kDataSizesLength; i++) {
+ item.first = StringPrintf("Size%02d", i);
+ item.second = StringPrintf("0x%08x", data_sizes_[i]);
+ items->push_back(item);
+ }
+
+ for (int i = MIN_COUNTER + 1; i < MAX_COUNTER; i++) {
+ item.first = kCounterNames[i];
+ item.second = StringPrintf("0x%" PRIx64, counters_[i]);
+ items->push_back(item);
+ }
+}
+
+int Stats::GetHitRatio() const {
+ return GetRatio(OPEN_HIT, OPEN_MISS);
+}
+
+int Stats::GetResurrectRatio() const {
+ return GetRatio(RESURRECT_HIT, CREATE_HIT);
+}
+
+int Stats::GetRatio(Counters hit, Counters miss) const {
+ int64 ratio = GetCounter(hit) * 100;
+ if (!ratio)
+ return 0;
+
+ ratio /= (GetCounter(hit) + GetCounter(miss));
+ return static_cast<int>(ratio);
+}
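+// Example (illustrative): with 80 open hits and 20 open misses, GetHitRatio()
+// returns 80, i.e. an 80% hit rate.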
+
+void Stats::ResetRatios() {
+ SetCounter(OPEN_HIT, 0);
+ SetCounter(OPEN_MISS, 0);
+ SetCounter(RESURRECT_HIT, 0);
+ SetCounter(CREATE_HIT, 0);
+}
+
+int Stats::GetLargeEntriesSize() {
+ int total = 0;
+ // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
+ // GetStatsBucket()).
+ for (int bucket = 20; bucket < kDataSizesLength; bucket++)
+ total += data_sizes_[bucket] * GetBucketRange(bucket);
+
+ return total;
+}
+
+void Stats::Store() {
+ if (!backend_)
+ return;
+
+ OnDiskStats stats;
+ stats.signature = kDiskSignature;
+ stats.size = sizeof(stats);
+ memcpy(stats.data_sizes, data_sizes_, sizeof(data_sizes_));
+ memcpy(stats.counters, counters_, sizeof(counters_));
+
+ Addr address(storage_addr_);
+ StoreStats(backend_, address, &stats);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/stats.h b/net/disk_cache/stats.h
new file mode 100644
index 00000000..13536b13
--- /dev/null
+++ b/net/disk_cache/stats.h
@@ -0,0 +1,94 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_STATS_H_
+#define NET_DISK_CACHE_STATS_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/scoped_ptr.h"
+#include "net/disk_cache/stats_histogram.h"
+
+namespace disk_cache {
+
+class BackendImpl;
+
+typedef std::vector<std::pair<std::string, std::string> > StatsItems;
+
+// This class stores cache-specific usage information, for tuning purposes.
+class Stats {
+ public:
+ static const int kDataSizesLength = 28;
+ enum Counters {
+ MIN_COUNTER = 0,
+ OPEN_MISS = MIN_COUNTER,
+ OPEN_HIT,
+ CREATE_MISS,
+ CREATE_HIT,
+ RESURRECT_HIT,
+ CREATE_ERROR,
+ TRIM_ENTRY,
+ DOOM_ENTRY,
+ DOOM_CACHE,
+ INVALID_ENTRY,
+ OPEN_ENTRIES, // Average number of open entries.
+ MAX_ENTRIES, // Maximum number of open entries.
+ TIMER,
+ READ_DATA,
+ WRITE_DATA,
+ OPEN_RANKINGS, // An entry has to be read just to modify rankings.
+ GET_RANKINGS, // We got the ranking info without reading the whole entry.
+ FATAL_ERROR,
+    LAST_REPORT,  // Time of the last report we sent.
+    LAST_REPORT_TIMER,  // Timer count at the time of the last report.
+ MAX_COUNTER
+ };
+
+ Stats() : backend_(NULL) {}
+ ~Stats();
+
+ bool Init(BackendImpl* backend, uint32* storage_addr);
+
+  // Tracks changes to the storage space used by an entry.
+ void ModifyStorageStats(int32 old_size, int32 new_size);
+
+ // Tracks general events.
+ void OnEvent(Counters an_event);
+ void SetCounter(Counters counter, int64 value);
+ int64 GetCounter(Counters counter) const;
+
+ void GetItems(StatsItems* items);
+ int GetHitRatio() const;
+ int GetResurrectRatio() const;
+ void ResetRatios();
+
+ // Returns the lower bound of the space used by entries bigger than 512 KB.
+ int GetLargeEntriesSize();
+
+ // Saves the stats to disk.
+ void Store();
+
+ // Support for StatsHistograms. Together, these methods allow StatsHistograms
+ // to take a snapshot of the data_sizes_ as the histogram data.
+ int GetBucketRange(size_t i) const;
+ void Snapshot(StatsHistogram::StatsSamples* samples) const;
+
+ private:
+ int GetStatsBucket(int32 size);
+ int GetRatio(Counters hit, Counters miss) const;
+
+ BackendImpl* backend_;
+ uint32 storage_addr_;
+ int data_sizes_[kDataSizesLength];
+ int64 counters_[MAX_COUNTER];
+ scoped_refptr<StatsHistogram> size_histogram_;
+
+ DISALLOW_COPY_AND_ASSIGN(Stats);
+};
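+// A minimal usage sketch (illustrative; the real call sites live in the
+// backend implementation, which owns the Stats instance):
+//   Stats stats;
+//   uint32 addr = 0;  // 0 means "not allocated yet"; Init() fills it in.
+//   if (stats.Init(backend, &addr)) {
+//     stats.OnEvent(Stats::OPEN_HIT);
+//     stats.ModifyStorageStats(0, 4096);
+//     int hit_ratio = stats.GetHitRatio();  // A percentage, 0 to 100.
+//   }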
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STATS_H_
diff --git a/net/disk_cache/stats_histogram.cc b/net/disk_cache/stats_histogram.cc
new file mode 100644
index 00000000..e6eaf902
--- /dev/null
+++ b/net/disk_cache/stats_histogram.cc
@@ -0,0 +1,86 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/stats_histogram.h"
+
+#include "base/logging.h"
+#include "net/disk_cache/stats.h"
+
+namespace disk_cache {
+
+// Static.
+const Stats* StatsHistogram::stats_ = NULL;
+
+scoped_refptr<StatsHistogram> StatsHistogram::StatsHistogramFactoryGet(
+ const std::string& name) {
+ scoped_refptr<Histogram> histogram(NULL);
+
+ Sample minimum = 1;
+ Sample maximum = disk_cache::Stats::kDataSizesLength - 1;
+ size_t bucket_count = disk_cache::Stats::kDataSizesLength;
+
+ if (StatisticsRecorder::FindHistogram(name, &histogram)) {
+ DCHECK(histogram.get() != NULL);
+ } else {
+ histogram = new StatsHistogram(name, minimum, maximum, bucket_count);
+ scoped_refptr<Histogram> registered_histogram(NULL);
+ StatisticsRecorder::FindHistogram(name, &registered_histogram);
+ if (registered_histogram.get() != NULL &&
+ registered_histogram.get() != histogram.get())
+ histogram = registered_histogram;
+ }
+
+ DCHECK(HISTOGRAM == histogram->histogram_type());
+ DCHECK(histogram->HasConstructorArguments(minimum, maximum, bucket_count));
+
+  // We're preparing for an otherwise unsafe downcast by ensuring we have the
+  // proper class type.
+ Histogram* temp_histogram = histogram.get();
+ StatsHistogram* temp_stats_histogram =
+ static_cast<StatsHistogram*>(temp_histogram);
+ scoped_refptr<StatsHistogram> return_histogram = temp_stats_histogram;
+ return return_histogram;
+}
+
+bool StatsHistogram::Init(const Stats* stats) {
+ DCHECK(stats);
+ if (stats_)
+ return false;
+
+ SetFlags(kUmaTargetedHistogramFlag);
+
+  // We support statistics reporting for only one cache.
+ init_ = true;
+ stats_ = stats;
+ return true;
+}
+
+StatsHistogram::~StatsHistogram() {
+ // Only cleanup what we set.
+ if (init_)
+ stats_ = NULL;
+}
+
+Histogram::Sample StatsHistogram::ranges(size_t i) const {
+ DCHECK(stats_);
+ return stats_->GetBucketRange(i);
+}
+
+size_t StatsHistogram::bucket_count() const {
+ return disk_cache::Stats::kDataSizesLength;
+}
+
+void StatsHistogram::SnapshotSample(SampleSet* sample) const {
+ DCHECK(stats_);
+ StatsSamples my_sample;
+ stats_->Snapshot(&my_sample);
+
+ *sample = my_sample;
+
+ // Only report UMA data once.
+ StatsHistogram* mutable_me = const_cast<StatsHistogram*>(this);
+ mutable_me->ClearFlags(kUmaTargetedHistogramFlag);
+}
+
+} // namespace disk_cache
diff --git a/net/disk_cache/stats_histogram.h b/net/disk_cache/stats_histogram.h
new file mode 100644
index 00000000..995d4869
--- /dev/null
+++ b/net/disk_cache/stats_histogram.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_STATS_HISTOGRAM_H_
+#define NET_DISK_CACHE_STATS_HISTOGRAM_H_
+
+#include <string>
+
+#include "base/histogram.h"
+
+namespace disk_cache {
+
+class Stats;
+
+// This class provides support for sending the disk cache size stats as a UMA
+// histogram. We'll provide our own storage and management for the data, and a
+// SampleSet with a copy of our data.
+//
+// Deriving from Histogram is "deprecated": this pattern should not be copied
+// and may eventually go away.
+//
+class StatsHistogram : public Histogram {
+ public:
+ class StatsSamples : public SampleSet {
+ public:
+ Counts* GetCounts() {
+ return &counts_;
+ }
+ };
+
+ explicit StatsHistogram(const std::string& name, Sample minimum,
+ Sample maximum, size_t bucket_count)
+ : Histogram(name, minimum, maximum, bucket_count), init_(false) {}
+ ~StatsHistogram();
+
+ static scoped_refptr<StatsHistogram>
+ StatsHistogramFactoryGet(const std::string& name);
+
+ // We'll be reporting data from the given set of cache stats.
+ bool Init(const Stats* stats);
+
+ virtual Sample ranges(size_t i) const;
+ virtual size_t bucket_count() const;
+ virtual void SnapshotSample(SampleSet* sample) const;
+
+ private:
+ friend class Histogram;
+
+ bool init_;
+ static const Stats* stats_;
+ DISALLOW_COPY_AND_ASSIGN(StatsHistogram);
+};
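+// Typical wiring (illustrative, mirroring Stats::Init in stats.cc):
+//   scoped_refptr<StatsHistogram> histogram =
+//       StatsHistogram::StatsHistogramFactoryGet("DiskCache.SizeStats");
+//   histogram->Init(stats);  // |stats| supplies the data for every snapshot.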
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STATS_HISTOGRAM_H_
diff --git a/net/disk_cache/storage_block-inl.h b/net/disk_cache/storage_block-inl.h
new file mode 100644
index 00000000..5e026a7e
--- /dev/null
+++ b/net/disk_cache/storage_block-inl.h
@@ -0,0 +1,160 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_DISK_CACHE_STORAGE_BLOCK_INL_H_
+#define NET_DISK_CACHE_STORAGE_BLOCK_INL_H_
+
+#include "net/disk_cache/storage_block.h"
+
+#include "base/logging.h"
+#include "net/disk_cache/trace.h"
+
+namespace disk_cache {
+
+template<typename T> StorageBlock<T>::StorageBlock(MappedFile* file,
+ Addr address)
+ : data_(NULL), file_(file), address_(address), modified_(false),
+ own_data_(false), extended_(false) {
+ if (address.num_blocks() > 1)
+ extended_ = true;
+ DCHECK(!address.is_initialized() || sizeof(*data_) == address.BlockSize());
+}
+
+template<typename T> StorageBlock<T>::~StorageBlock() {
+ if (modified_)
+ Store();
+ DeleteData();
+}
+
+template<typename T> void* StorageBlock<T>::buffer() const {
+ return data_;
+}
+
+template<typename T> size_t StorageBlock<T>::size() const {
+ if (!extended_)
+ return sizeof(*data_);
+ return address_.num_blocks() * sizeof(*data_);
+}
+
+template<typename T> int StorageBlock<T>::offset() const {
+ return address_.start_block() * address_.BlockSize();
+}
+
+template<typename T> bool StorageBlock<T>::LazyInit(MappedFile* file,
+ Addr address) {
+ if (file_ || address_.is_initialized()) {
+ NOTREACHED();
+ return false;
+ }
+ file_ = file;
+ address_.set_value(address.value());
+ if (address.num_blocks() > 1)
+ extended_ = true;
+
+ DCHECK(sizeof(*data_) == address.BlockSize());
+ return true;
+}
+
+template<typename T> void StorageBlock<T>::SetData(T* other) {
+ DCHECK(!modified_);
+ DeleteData();
+ data_ = other;
+}
+
+template<typename T> void StorageBlock<T>::Discard() {
+ if (!data_)
+ return;
+ if (!own_data_) {
+ NOTREACHED();
+ return;
+ }
+ DeleteData();
+ data_ = NULL;
+ modified_ = false;
+ extended_ = false;
+}
+
+template<typename T> void StorageBlock<T>::StopSharingData() {
+ if (!data_ || own_data_)
+ return;
+ DCHECK(!modified_);
+ data_ = NULL;
+}
+
+template<typename T> void StorageBlock<T>::set_modified() {
+ DCHECK(data_);
+ modified_ = true;
+}
+
+template<typename T> T* StorageBlock<T>::Data() {
+ if (!data_)
+ AllocateData();
+ return data_;
+}
+
+template<typename T> bool StorageBlock<T>::HasData() const {
+ return (NULL != data_);
+}
+
+template<typename T> bool StorageBlock<T>::own_data() const {
+ return own_data_;
+}
+
+template<typename T> const Addr StorageBlock<T>::address() const {
+ return address_;
+}
+
+template<typename T> bool StorageBlock<T>::Load() {
+ if (file_) {
+ if (!data_)
+ AllocateData();
+
+ if (file_->Load(this)) {
+ modified_ = false;
+ return true;
+ }
+ }
+ LOG(WARNING) << "Failed data load.";
+ Trace("Failed data load.");
+ return false;
+}
+
+template<typename T> bool StorageBlock<T>::Store() {
+ if (file_ && data_) {
+ if (file_->Store(this)) {
+ modified_ = false;
+ return true;
+ }
+ }
+ LOG(ERROR) << "Failed data store.";
+ Trace("Failed data store.");
+ return false;
+}
+
+template<typename T> void StorageBlock<T>::AllocateData() {
+ DCHECK(!data_);
+ if (!extended_) {
+ data_ = new T;
+ } else {
+ void* buffer = new char[address_.num_blocks() * sizeof(*data_)];
+ data_ = new(buffer) T;
+ }
+ own_data_ = true;
+}
+
+template<typename T> void StorageBlock<T>::DeleteData() {
+ if (own_data_) {
+ if (!extended_) {
+ delete data_;
+ } else {
+ data_->~T();
+ delete[] reinterpret_cast<char*>(data_);
+ }
+ own_data_ = false;
+ }
+}
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STORAGE_BLOCK_INL_H_
diff --git a/net/disk_cache/storage_block.h b/net/disk_cache/storage_block.h
new file mode 100644
index 00000000..0d94b824
--- /dev/null
+++ b/net/disk_cache/storage_block.h
@@ -0,0 +1,93 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See net/disk_cache/disk_cache.h for the public interface.
+
+#ifndef NET_DISK_CACHE_STORAGE_BLOCK_H__
+#define NET_DISK_CACHE_STORAGE_BLOCK_H__
+
+#include "net/disk_cache/addr.h"
+#include "net/disk_cache/mapped_file.h"
+
+namespace disk_cache {
+
+class EntryImpl;
+
+// This class encapsulates common behavior of a single "block" of data that is
+// stored on a block-file. It implements the FileBlock interface, so it can be
+// serialized directly to the backing file.
+// This object provides a memory buffer for the related data, and it can be used
+// to actually share that memory with another instance of the class.
+//
+// The following example shows how to share storage with another object:
+// StorageBlock<TypeA> a(file, address);
+// StorageBlock<TypeB> b(file, address);
+// a.Load();
+// DoSomething(a.Data());
+// b.SetData(a.Data());
+// ModifySomething(b.Data());
+// // Data modified on the previous call will be saved by b's destructor.
+// b.set_modified();
+template<typename T>
+class StorageBlock : public FileBlock {
+ public:
+ StorageBlock(MappedFile* file, Addr address);
+ virtual ~StorageBlock();
+
+ // FileBlock interface.
+ virtual void* buffer() const;
+ virtual size_t size() const;
+ virtual int offset() const;
+
+  // Allows overriding the dummy values passed to the constructor.
+ bool LazyInit(MappedFile* file, Addr address);
+
+ // Sets the internal storage to share the memory provided by other instance.
+ void SetData(T* other);
+
+ // Deletes the data, even if it was modified and not saved. This object must
+ // own the memory buffer (it cannot be shared).
+ void Discard();
+
+ // Stops sharing the data with another object.
+ void StopSharingData();
+
+ // Sets the object to lazily save the in-memory data on destruction.
+ void set_modified();
+
+ // Gets a pointer to the internal storage (allocates storage if needed).
+ T* Data();
+
+ // Returns true if there is data associated with this object.
+ bool HasData() const;
+
+ // Returns true if this object owns the data buffer, false if it is shared.
+ bool own_data() const;
+
+ const Addr address() const;
+
+  // Loads and stores the data.
+ bool Load();
+ bool Store();
+
+ private:
+ void AllocateData();
+ void DeleteData();
+
+ T* data_;
+ MappedFile* file_;
+ Addr address_;
+ bool modified_;
+ bool own_data_; // Is data_ owned by this object or shared with someone else.
+ bool extended_; // Used to store an entry of more than one block.
+
+ DISALLOW_EVIL_CONSTRUCTORS(StorageBlock);
+};
+
+typedef StorageBlock<EntryStore> CacheEntryBlock;
+typedef StorageBlock<RankingsNode> CacheRankingsBlock;
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_STORAGE_BLOCK_H__
diff --git a/net/disk_cache/storage_block_unittest.cc b/net/disk_cache/storage_block_unittest.cc
new file mode 100644
index 00000000..c9406b69
--- /dev/null
+++ b/net/disk_cache/storage_block_unittest.cc
@@ -0,0 +1,69 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+#include "net/disk_cache/storage_block.h"
+#include "net/disk_cache/storage_block-inl.h"
+#include "net/disk_cache/disk_cache_test_base.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST_F(DiskCacheTest, StorageBlock_LoadStore) {
+ FilePath filename = GetCacheFilePath().AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ disk_cache::CacheEntryBlock entry1(file, disk_cache::Addr(0xa0010001));
+ memset(entry1.Data(), 0, sizeof(disk_cache::EntryStore));
+ entry1.Data()->hash = 0xaa5555aa;
+ entry1.Data()->rankings_node = 0xa0010002;
+
+ EXPECT_TRUE(entry1.Store());
+ entry1.Data()->hash = 0x88118811;
+ entry1.Data()->rankings_node = 0xa0040009;
+
+ EXPECT_TRUE(entry1.Load());
+ EXPECT_EQ(0xaa5555aa, entry1.Data()->hash);
+ EXPECT_EQ(0xa0010002, entry1.Data()->rankings_node);
+}
+
+TEST_F(DiskCacheTest, StorageBlock_SetData) {
+ FilePath filename = GetCacheFilePath().AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ disk_cache::CacheEntryBlock entry1(file, disk_cache::Addr(0xa0010001));
+ entry1.Data()->hash = 0xaa5555aa;
+
+ disk_cache::CacheEntryBlock entry2(file, disk_cache::Addr(0xa0010002));
+ EXPECT_TRUE(entry2.Load());
+ EXPECT_TRUE(entry2.Data() != NULL);
+ EXPECT_TRUE(0 == entry2.Data()->hash);
+
+ EXPECT_TRUE(entry2.Data() != entry1.Data());
+ entry2.SetData(entry1.Data());
+ EXPECT_EQ(0xaa5555aa, entry2.Data()->hash);
+ EXPECT_TRUE(entry2.Data() == entry1.Data());
+}
+
+TEST_F(DiskCacheTest, StorageBlock_SetModified) {
+ FilePath filename = GetCacheFilePath().AppendASCII("a_test");
+ scoped_refptr<disk_cache::MappedFile> file(new disk_cache::MappedFile);
+ ASSERT_TRUE(CreateCacheTestFile(filename));
+ ASSERT_TRUE(file->Init(filename, 8192));
+
+ disk_cache::CacheEntryBlock* entry1 =
+ new disk_cache::CacheEntryBlock(file, disk_cache::Addr(0xa0010003));
+ EXPECT_TRUE(entry1->Load());
+ EXPECT_TRUE(0 == entry1->Data()->hash);
+ entry1->Data()->hash = 0x45687912;
+ entry1->set_modified();
+ delete entry1;
+
+ disk_cache::CacheEntryBlock entry2(file, disk_cache::Addr(0xa0010003));
+ EXPECT_TRUE(entry2.Load());
+ EXPECT_TRUE(0x45687912 == entry2.Data()->hash);
+}
diff --git a/net/disk_cache/stress_cache.cc b/net/disk_cache/stress_cache.cc
new file mode 100644
index 00000000..67c396c2
--- /dev/null
+++ b/net/disk_cache/stress_cache.cc
@@ -0,0 +1,207 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a simple application that stress-tests the crash recovery of the disk
+// cache. The main application starts a copy of itself on a loop, checking the
+// exit code of the child process. When the child dies in an unexpected way,
+// the main application quits.
+
+// The child application has two threads: one to exercise the cache in an
+// infinite loop, and another one to asynchronously kill the process.
+
+#include <string>
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/command_line.h"
+#include "base/debug_util.h"
+#include "base/file_path.h"
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/path_service.h"
+#include "base/platform_thread.h"
+#include "base/process_util.h"
+#include "base/string_util.h"
+#include "base/thread.h"
+#include "net/base/io_buffer.h"
+#include "net/disk_cache/backend_impl.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/disk_cache/disk_cache_test_util.h"
+
+using base::Time;
+
+const int kError = -1;
+const int kExpectedCrash = 100;
+
+// Starts a new process.
+int RunSlave(int iteration) {
+ FilePath exe;
+ PathService::Get(base::FILE_EXE, &exe);
+
+ CommandLine cmdline(exe);
+ cmdline.AppendLooseValue(ASCIIToWide(IntToString(iteration)));
+
+ base::ProcessHandle handle;
+ if (!base::LaunchApp(cmdline, false, false, &handle)) {
+ printf("Unable to run test\n");
+ return kError;
+ }
+
+ int exit_code;
+ if (!base::WaitForExitCode(handle, &exit_code)) {
+ printf("Unable to get return code\n");
+ return kError;
+ }
+ return exit_code;
+}
+
+// Main loop for the master process.
+int MasterCode() {
+ for (int i = 0; i < 100000; i++) {
+ int ret = RunSlave(i);
+ if (kExpectedCrash != ret)
+ return ret;
+ }
+
+ printf("More than enough...\n");
+
+ return 0;
+}
+
+// -----------------------------------------------------------------------
+
+// This thread will loop forever, adding and removing entries from the cache.
+// |iteration| is the current crash cycle, so the entries in the cache are
+// marked to show which instance of the application wrote them.
+void StressTheCache(int iteration) {
+ int cache_size = 0x800000; // 8MB
+ FilePath path = GetCacheFilePath().AppendASCII("_stress");
+ disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(path);
+ cache->SetFlags(disk_cache::kNoLoadProtection | disk_cache::kNoRandom);
+ cache->SetMaxSize(cache_size);
+ cache->SetType(net::DISK_CACHE);
+ if (!cache->Init()) {
+ printf("Unable to initialize cache.\n");
+ return;
+ }
+ printf("Iteration %d, initial entries: %d\n", iteration,
+ cache->GetEntryCount());
+
+ int seed = static_cast<int>(Time::Now().ToInternalValue());
+ srand(seed);
+
+ const int kNumKeys = 5000;
+ const int kNumEntries = 30;
+ std::string keys[kNumKeys];
+ disk_cache::Entry* entries[kNumEntries] = {0};
+
+ for (int i = 0; i < kNumKeys; i++) {
+ keys[i] = GenerateKey(true);
+ }
+
+ const int kSize = 4000;
+ scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(kSize);
+ memset(buffer->data(), 'k', kSize);
+
+ for (int i = 0;; i++) {
+ int slot = rand() % kNumEntries;
+ int key = rand() % kNumKeys;
+
+ if (entries[slot])
+ entries[slot]->Close();
+
+ if (!cache->OpenEntry(keys[key], &entries[slot]))
+ CHECK(cache->CreateEntry(keys[key], &entries[slot]));
+
+ base::snprintf(buffer->data(), kSize, "%d %d", iteration, i);
+ CHECK(kSize == entries[slot]->WriteData(0, 0, buffer, kSize, NULL, false));
+
+ if (rand() % 100 > 80) {
+ key = rand() % kNumKeys;
+ cache->DoomEntry(keys[key]);
+ }
+
+ if (!(i % 100))
+ printf("Entries: %d \r", i);
+ MessageLoop::current()->RunAllPending();
+ }
+}
+
+// We want to prevent the timer thread from killing the process while we are
+// waiting for the debugger to attach.
+bool g_crashing = false;
+
+class CrashTask : public Task {
+ public:
+ CrashTask() {}
+ ~CrashTask() {}
+
+ virtual void Run() {
+ // Keep trying to run.
+ RunSoon(MessageLoop::current());
+
+ if (g_crashing)
+ return;
+
+ if (rand() % 100 > 1) {
+ printf("sweet death...\n");
+#if defined(OS_WIN)
+      // Windows does more work on _exit() than we would like, so we use Kill.
+ base::KillProcessById(base::GetCurrentProcId(), kExpectedCrash, false);
+#elif defined(OS_POSIX)
+ // On POSIX, _exit() will terminate the process with minimal cleanup,
+ // and it is cleaner than killing.
+ _exit(kExpectedCrash);
+#endif
+ }
+ }
+
+ static void RunSoon(MessageLoop* target_loop) {
+ int task_delay = 10000; // 10 seconds
+ CrashTask* task = new CrashTask();
+ target_loop->PostDelayedTask(FROM_HERE, task, task_delay);
+ }
+};
+
+// We leak everything here :)
+bool StartCrashThread() {
+ base::Thread* thread = new base::Thread("party_crasher");
+ if (!thread->Start())
+ return false;
+
+ CrashTask::RunSoon(thread->message_loop());
+ return true;
+}
+
+void CrashHandler(const std::string& str) {
+ g_crashing = true;
+ DebugUtil::BreakDebugger();
+}
+
+// -----------------------------------------------------------------------
+
+int main(int argc, const char* argv[]) {
+  // Set up an AtExitManager so Singleton objects will be destructed.
+ base::AtExitManager at_exit_manager;
+
+ if (argc < 2)
+ return MasterCode();
+
+ logging::SetLogAssertHandler(CrashHandler);
+
+ // Some time for the memory manager to flush stuff.
+ PlatformThread::Sleep(3000);
+ MessageLoop message_loop(MessageLoop::TYPE_IO);
+
+ char* end;
+ long int iteration = strtol(argv[1], &end, 0);
+
+ if (!StartCrashThread()) {
+ printf("failed to start thread\n");
+ return kError;
+ }
+
+ StressTheCache(iteration);
+ return 0;
+}
diff --git a/net/disk_cache/trace.cc b/net/disk_cache/trace.cc
new file mode 100644
index 00000000..94d9fad9
--- /dev/null
+++ b/net/disk_cache/trace.cc
@@ -0,0 +1,148 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/disk_cache/trace.h"
+
+#include <stdio.h>
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+#include "base/logging.h"
+
+// Change this value to 1 to enable tracing on a release build. By default,
+// tracing is enabled only on debug builds.
+#define ENABLE_TRACING 0
+
+#ifndef NDEBUG
+#undef ENABLE_TRACING
+#define ENABLE_TRACING 1
+#endif
+
+namespace {
+
+const int kEntrySize = 48;
+const int kNumberOfEntries = 5000; // 240 KB.
+
+struct TraceBuffer {
+ int num_traces;
+ int current;
+ char buffer[kNumberOfEntries][kEntrySize];
+};
+
+void DebugOutput(const char* msg) {
+#if defined(OS_WIN)
+ OutputDebugStringA(msg);
+#else
+ NOTIMPLEMENTED();
+#endif
+}
+
+} // namespace
+
+namespace disk_cache {
+
+// s_trace_buffer and s_trace_object are not singletons because I want the
+// buffer to be destroyed and re-created when the last user goes away, and it
+// must be straightforward to access the buffer from the debugger.
+static TraceObject* s_trace_object = NULL;
+
+// Static.
+TraceObject* TraceObject::GetTraceObject() {
+ if (s_trace_object)
+ return s_trace_object;
+
+ s_trace_object = new TraceObject();
+ return s_trace_object;
+}
+
+#if ENABLE_TRACING
+
+static TraceBuffer* s_trace_buffer = NULL;
+
+void InitTrace(void) {
+ if (s_trace_buffer)
+ return;
+
+ s_trace_buffer = new TraceBuffer;
+ memset(s_trace_buffer, 0, sizeof(*s_trace_buffer));
+}
+
+void DestroyTrace(void) {
+ DCHECK(s_trace_buffer);
+ delete s_trace_buffer;
+ s_trace_buffer = NULL;
+ s_trace_object = NULL;
+}
+
+void Trace(const char* format, ...) {
+ DCHECK(s_trace_buffer);
+ va_list ap;
+ va_start(ap, format);
+
+#if defined(OS_WIN)
+ vsprintf_s(s_trace_buffer->buffer[s_trace_buffer->current], format, ap);
+#else
+ vsnprintf(s_trace_buffer->buffer[s_trace_buffer->current],
+ sizeof(s_trace_buffer->buffer[s_trace_buffer->current]), format,
+ ap);
+#endif
+ s_trace_buffer->num_traces++;
+ s_trace_buffer->current++;
+ if (s_trace_buffer->current == kNumberOfEntries)
+ s_trace_buffer->current = 0;
+
+ va_end(ap);
+}
+
+// Writes the last |num_traces| entries to the debugger output.
+void DumpTrace(int num_traces) {
+ DCHECK(s_trace_buffer);
+ DebugOutput("Last traces:\n");
+
+ if (num_traces > kNumberOfEntries || num_traces < 0)
+ num_traces = kNumberOfEntries;
+
+ if (s_trace_buffer->num_traces) {
+ char line[kEntrySize + 2];
+
+ int current = s_trace_buffer->current - num_traces;
+ if (current < 0)
+ current += kNumberOfEntries;
+
+ for (int i = 0; i < num_traces; i++) {
+ memcpy(line, s_trace_buffer->buffer[current], kEntrySize);
+ line[kEntrySize] = '\0';
+ size_t length = strlen(line);
+ if (length) {
+ line[length] = '\n';
+ line[length + 1] = '\0';
+ DebugOutput(line);
+ }
+
+ current++;
+ if (current == kNumberOfEntries)
+ current = 0;
+ }
+ }
+
+ DebugOutput("End of Traces\n");
+}
+
+#else // ENABLE_TRACING
+
+void InitTrace(void) {
+ return;
+}
+
+void DestroyTrace(void) {
+ s_trace_object = NULL;
+}
+
+void Trace(const char* format, ...) {
+}
+
+#endif // ENABLE_TRACING
+
+} // namespace disk_cache
diff --git a/net/disk_cache/trace.h b/net/disk_cache/trace.h
new file mode 100644
index 00000000..be50417b
--- /dev/null
+++ b/net/disk_cache/trace.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file provides support for basic in-memory tracing of short events. We
+// keep a static circular buffer where we store the last traced events, so we
+// can review the cache's recent behavior should we need it.
+
+#ifndef NET_DISK_CACHE_TRACE_H__
+#define NET_DISK_CACHE_TRACE_H__
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/ref_counted.h"
+
+namespace disk_cache {
+
+// Create and destroy the tracing buffer.
+void InitTrace(void);
+void DestroyTrace(void);
+
+// Simple class to handle the trace buffer lifetime. Any object interested in
+// tracing should keep a reference to the object returned by GetTraceObject().
+class TraceObject : public base::RefCounted<TraceObject> {
+ friend class base::RefCounted<TraceObject>;
+ public:
+ static TraceObject* GetTraceObject();
+
+ private:
+ TraceObject() {
+ InitTrace();
+ }
+
+ ~TraceObject() {
+ DestroyTrace();
+ }
+ DISALLOW_EVIL_CONSTRUCTORS(TraceObject);
+};
+
+// Traces to the internal buffer.
+void Trace(const char* format, ...);
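+// Example (illustrative; |addr| is any value worth recording):
+//   scoped_refptr<TraceObject> trace(TraceObject::GetTraceObject());
+//   Trace("Create entry 0x%x", addr);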
+
+} // namespace disk_cache
+
+#endif // NET_DISK_CACHE_TRACE_H__