aboutsummaryrefslogtreecommitdiff
path: root/core/src/main/java/com/android/volley/toolbox/DiskBasedCache.java
diff options
context:
space:
mode:
Diffstat (limited to 'core/src/main/java/com/android/volley/toolbox/DiskBasedCache.java')
-rw-r--r--core/src/main/java/com/android/volley/toolbox/DiskBasedCache.java677
1 files changed, 677 insertions, 0 deletions
diff --git a/core/src/main/java/com/android/volley/toolbox/DiskBasedCache.java b/core/src/main/java/com/android/volley/toolbox/DiskBasedCache.java
new file mode 100644
index 0000000..d4310e0
--- /dev/null
+++ b/core/src/main/java/com/android/volley/toolbox/DiskBasedCache.java
@@ -0,0 +1,677 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.volley.toolbox;
+
import android.os.SystemClock;
import android.text.TextUtils;
import androidx.annotation.VisibleForTesting;
import com.android.volley.Cache;
import com.android.volley.Header;
import com.android.volley.VolleyLog;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+
+/**
+ * Cache implementation that caches files directly onto the hard disk in the specified directory.
+ * The default disk usage size is 5MB, but is configurable.
+ *
+ * <p>This cache supports the {@link Entry#allResponseHeaders} headers field.
+ */
public class DiskBasedCache implements Cache {

    /**
     * Map of the Key, CacheHeader pairs.
     *
     * <p>Constructed with {@code accessOrder=true}, so iteration order is least-recently-used
     * first; pruning relies on this to evict LRU entries.
     */
    private final Map<String, CacheHeader> mEntries = new LinkedHashMap<>(16, .75f, true);

    /** Total amount of space currently used by the cache in bytes. */
    private long mTotalSize = 0;

    /** The supplier for the root directory to use for the cache. */
    private final FileSupplier mRootDirectorySupplier;

    /** The maximum size of the cache in bytes. */
    private final int mMaxCacheSizeInBytes;

    /** Default maximum disk usage in bytes (5 MB). */
    private static final int DEFAULT_DISK_USAGE_BYTES = 5 * 1024 * 1024;

    /** High water mark percentage for the cache: pruning stops below maxSize * this factor. */
    @VisibleForTesting static final float HYSTERESIS_FACTOR = 0.9f;

    /** Magic number for current version of cache file format. */
    private static final int CACHE_MAGIC = 0x20150306;
+
+ /**
+ * Constructs an instance of the DiskBasedCache at the specified directory.
+ *
+ * @param rootDirectory The root directory of the cache.
+ * @param maxCacheSizeInBytes The maximum size of the cache in bytes. Note that the cache may
+ * briefly exceed this size on disk when writing a new entry that pushes it over the limit
+ * until the ensuing pruning completes.
+ */
+ public DiskBasedCache(final File rootDirectory, int maxCacheSizeInBytes) {
+ mRootDirectorySupplier =
+ new FileSupplier() {
+ @Override
+ public File get() {
+ return rootDirectory;
+ }
+ };
+ mMaxCacheSizeInBytes = maxCacheSizeInBytes;
+ }
+
+ /**
+ * Constructs an instance of the DiskBasedCache at the specified directory.
+ *
+ * @param rootDirectorySupplier The supplier for the root directory of the cache.
+ * @param maxCacheSizeInBytes The maximum size of the cache in bytes. Note that the cache may
+ * briefly exceed this size on disk when writing a new entry that pushes it over the limit
+ * until the ensuing pruning completes.
+ */
+ public DiskBasedCache(FileSupplier rootDirectorySupplier, int maxCacheSizeInBytes) {
+ mRootDirectorySupplier = rootDirectorySupplier;
+ mMaxCacheSizeInBytes = maxCacheSizeInBytes;
+ }
+
+ /**
+ * Constructs an instance of the DiskBasedCache at the specified directory using the default
+ * maximum cache size of 5MB.
+ *
+ * @param rootDirectory The root directory of the cache.
+ */
+ public DiskBasedCache(File rootDirectory) {
+ this(rootDirectory, DEFAULT_DISK_USAGE_BYTES);
+ }
+
+ /**
+ * Constructs an instance of the DiskBasedCache at the specified directory using the default
+ * maximum cache size of 5MB.
+ *
+ * @param rootDirectorySupplier The supplier for the root directory of the cache.
+ */
+ public DiskBasedCache(FileSupplier rootDirectorySupplier) {
+ this(rootDirectorySupplier, DEFAULT_DISK_USAGE_BYTES);
+ }
+
+ /** Clears the cache. Deletes all cached files from disk. */
+ @Override
+ public synchronized void clear() {
+ File[] files = mRootDirectorySupplier.get().listFiles();
+ if (files != null) {
+ for (File file : files) {
+ file.delete();
+ }
+ }
+ mEntries.clear();
+ mTotalSize = 0;
+ VolleyLog.d("Cache cleared.");
+ }
+
+ /** Returns the cache entry with the specified key if it exists, null otherwise. */
+ @Override
+ public synchronized Entry get(String key) {
+ CacheHeader entry = mEntries.get(key);
+ // if the entry does not exist, return.
+ if (entry == null) {
+ return null;
+ }
+ File file = getFileForKey(key);
+ try {
+ CountingInputStream cis =
+ new CountingInputStream(
+ new BufferedInputStream(createInputStream(file)), file.length());
+ try {
+ CacheHeader entryOnDisk = CacheHeader.readHeader(cis);
+ if (!TextUtils.equals(key, entryOnDisk.key)) {
+ // File was shared by two keys and now holds data for a different entry!
+ VolleyLog.d(
+ "%s: key=%s, found=%s", file.getAbsolutePath(), key, entryOnDisk.key);
+ // Remove key whose contents on disk have been replaced.
+ removeEntry(key);
+ return null;
+ }
+ byte[] data = streamToBytes(cis, cis.bytesRemaining());
+ return entry.toCacheEntry(data);
+ } finally {
+ // Any IOException thrown here is handled by the below catch block by design.
+ //noinspection ThrowFromFinallyBlock
+ cis.close();
+ }
+ } catch (IOException e) {
+ VolleyLog.d("%s: %s", file.getAbsolutePath(), e.toString());
+ remove(key);
+ return null;
+ }
+ }
+
+ /**
+ * Initializes the DiskBasedCache by scanning for all files currently in the specified root
+ * directory. Creates the root directory if necessary.
+ */
+ @Override
+ public synchronized void initialize() {
+ File rootDirectory = mRootDirectorySupplier.get();
+ if (!rootDirectory.exists()) {
+ if (!rootDirectory.mkdirs()) {
+ VolleyLog.e("Unable to create cache dir %s", rootDirectory.getAbsolutePath());
+ }
+ return;
+ }
+ File[] files = rootDirectory.listFiles();
+ if (files == null) {
+ return;
+ }
+ for (File file : files) {
+ try {
+ long entrySize = file.length();
+ CountingInputStream cis =
+ new CountingInputStream(
+ new BufferedInputStream(createInputStream(file)), entrySize);
+ try {
+ CacheHeader entry = CacheHeader.readHeader(cis);
+ entry.size = entrySize;
+ putEntry(entry.key, entry);
+ } finally {
+ // Any IOException thrown here is handled by the below catch block by design.
+ //noinspection ThrowFromFinallyBlock
+ cis.close();
+ }
+ } catch (IOException e) {
+ //noinspection ResultOfMethodCallIgnored
+ file.delete();
+ }
+ }
+ }
+
+ /**
+ * Invalidates an entry in the cache.
+ *
+ * @param key Cache key
+ * @param fullExpire True to fully expire the entry, false to soft expire
+ */
+ @Override
+ public synchronized void invalidate(String key, boolean fullExpire) {
+ Entry entry = get(key);
+ if (entry != null) {
+ entry.softTtl = 0;
+ if (fullExpire) {
+ entry.ttl = 0;
+ }
+ put(key, entry);
+ }
+ }
+
+ /** Puts the entry with the specified key into the cache. */
+ @Override
+ public synchronized void put(String key, Entry entry) {
+ // If adding this entry would trigger a prune, but pruning would cause the new entry to be
+ // deleted, then skip writing the entry in the first place, as this is just churn.
+ // Note that we don't include the cache header overhead in this calculation for simplicity,
+ // so putting entries which are just below the threshold may still cause this churn.
+ if (mTotalSize + entry.data.length > mMaxCacheSizeInBytes
+ && entry.data.length > mMaxCacheSizeInBytes * HYSTERESIS_FACTOR) {
+ return;
+ }
+ File file = getFileForKey(key);
+ try {
+ BufferedOutputStream fos = new BufferedOutputStream(createOutputStream(file));
+ CacheHeader e = new CacheHeader(key, entry);
+ boolean success = e.writeHeader(fos);
+ if (!success) {
+ fos.close();
+ VolleyLog.d("Failed to write header for %s", file.getAbsolutePath());
+ throw new IOException();
+ }
+ fos.write(entry.data);
+ fos.close();
+ e.size = file.length();
+ putEntry(key, e);
+ pruneIfNeeded();
+ } catch (IOException e) {
+ boolean deleted = file.delete();
+ if (!deleted) {
+ VolleyLog.d("Could not clean up file %s", file.getAbsolutePath());
+ }
+ initializeIfRootDirectoryDeleted();
+ }
+ }
+
+ /** Removes the specified key from the cache if it exists. */
+ @Override
+ public synchronized void remove(String key) {
+ boolean deleted = getFileForKey(key).delete();
+ removeEntry(key);
+ if (!deleted) {
+ VolleyLog.d(
+ "Could not delete cache entry for key=%s, filename=%s",
+ key, getFilenameForKey(key));
+ }
+ }
+
+ /**
+ * Creates a pseudo-unique filename for the specified cache key.
+ *
+ * @param key The key to generate a file name for.
+ * @return A pseudo-unique filename.
+ */
+ private String getFilenameForKey(String key) {
+ int firstHalfLength = key.length() / 2;
+ String localFilename = String.valueOf(key.substring(0, firstHalfLength).hashCode());
+ localFilename += String.valueOf(key.substring(firstHalfLength).hashCode());
+ return localFilename;
+ }
+
+ /** Returns a file object for the given cache key. */
+ public File getFileForKey(String key) {
+ return new File(mRootDirectorySupplier.get(), getFilenameForKey(key));
+ }
+
+ /** Re-initialize the cache if the directory was deleted. */
+ private void initializeIfRootDirectoryDeleted() {
+ if (!mRootDirectorySupplier.get().exists()) {
+ VolleyLog.d("Re-initializing cache after external clearing.");
+ mEntries.clear();
+ mTotalSize = 0;
+ initialize();
+ }
+ }
+
    /** Represents a supplier for {@link File}s. */
    public interface FileSupplier {
        /** Returns the root directory; re-queried each time the cache touches the directory. */
        File get();
    }
+
+ /** Prunes the cache to fit the maximum size. */
+ private void pruneIfNeeded() {
+ if (mTotalSize < mMaxCacheSizeInBytes) {
+ return;
+ }
+ if (VolleyLog.DEBUG) {
+ VolleyLog.v("Pruning old cache entries.");
+ }
+
+ long before = mTotalSize;
+ int prunedFiles = 0;
+ long startTime = SystemClock.elapsedRealtime();
+
+ Iterator<Map.Entry<String, CacheHeader>> iterator = mEntries.entrySet().iterator();
+ while (iterator.hasNext()) {
+ Map.Entry<String, CacheHeader> entry = iterator.next();
+ CacheHeader e = entry.getValue();
+ boolean deleted = getFileForKey(e.key).delete();
+ if (deleted) {
+ mTotalSize -= e.size;
+ } else {
+ VolleyLog.d(
+ "Could not delete cache entry for key=%s, filename=%s",
+ e.key, getFilenameForKey(e.key));
+ }
+ iterator.remove();
+ prunedFiles++;
+
+ if (mTotalSize < mMaxCacheSizeInBytes * HYSTERESIS_FACTOR) {
+ break;
+ }
+ }
+
+ if (VolleyLog.DEBUG) {
+ VolleyLog.v(
+ "pruned %d files, %d bytes, %d ms",
+ prunedFiles, (mTotalSize - before), SystemClock.elapsedRealtime() - startTime);
+ }
+ }
+
+ /**
+ * Puts the entry with the specified key into the cache.
+ *
+ * @param key The key to identify the entry by.
+ * @param entry The entry to cache.
+ */
+ private void putEntry(String key, CacheHeader entry) {
+ if (!mEntries.containsKey(key)) {
+ mTotalSize += entry.size;
+ } else {
+ CacheHeader oldEntry = mEntries.get(key);
+ mTotalSize += (entry.size - oldEntry.size);
+ }
+ mEntries.put(key, entry);
+ }
+
+ /** Removes the entry identified by 'key' from the cache. */
+ private void removeEntry(String key) {
+ CacheHeader removed = mEntries.remove(key);
+ if (removed != null) {
+ mTotalSize -= removed.size;
+ }
+ }
+
+ /**
+ * Reads length bytes from CountingInputStream into byte array.
+ *
+ * @param cis input stream
+ * @param length number of bytes to read
+ * @throws IOException if fails to read all bytes
+ */
+ @VisibleForTesting
+ static byte[] streamToBytes(CountingInputStream cis, long length) throws IOException {
+ long maxLength = cis.bytesRemaining();
+ // Length cannot be negative or greater than bytes remaining, and must not overflow int.
+ if (length < 0 || length > maxLength || (int) length != length) {
+ throw new IOException("streamToBytes length=" + length + ", maxLength=" + maxLength);
+ }
+ byte[] bytes = new byte[(int) length];
+ new DataInputStream(cis).readFully(bytes);
+ return bytes;
+ }
+
    /** Opens the raw input stream for {@code file}; a seam overridden in tests. */
    @VisibleForTesting
    InputStream createInputStream(File file) throws FileNotFoundException {
        return new FileInputStream(file);
    }

    /** Opens the raw output stream for {@code file}; a seam overridden in tests. */
    @VisibleForTesting
    OutputStream createOutputStream(File file) throws FileNotFoundException {
        return new FileOutputStream(file);
    }
+
    /** Handles holding onto the cache headers for an entry. */
    @VisibleForTesting
    static class CacheHeader {
        /**
         * The size of the data identified by this CacheHeader on disk (both header and data).
         *
         * <p>Must be set by the caller after it has been calculated.
         *
         * <p>This is not serialized to disk.
         */
        long size;

        /** The key that identifies the cache entry. */
        final String key;

        /** ETag for cache coherence. Null when absent (serialized as the empty string). */
        final String etag;

        /** Date of this response as reported by the server. */
        final long serverDate;

        /** The last modified date for the requested object. */
        final long lastModified;

        /** TTL for this record. */
        final long ttl;

        /** Soft TTL for this record. */
        final long softTtl;

        /** Headers from the response resulting in this cache entry. */
        final List<Header> allResponseHeaders;

        private CacheHeader(
                String key,
                String etag,
                long serverDate,
                long lastModified,
                long ttl,
                long softTtl,
                List<Header> allResponseHeaders) {
            this.key = key;
            // Empty string is the on-disk encoding of "no etag"; map it back to null.
            this.etag = "".equals(etag) ? null : etag;
            this.serverDate = serverDate;
            this.lastModified = lastModified;
            this.ttl = ttl;
            this.softTtl = softTtl;
            this.allResponseHeaders = allResponseHeaders;
        }

        /**
         * Instantiates a new CacheHeader object.
         *
         * @param key The key that identifies the cache entry
         * @param entry The cache entry.
         */
        CacheHeader(String key, Entry entry) {
            this(
                    key,
                    entry.etag,
                    entry.serverDate,
                    entry.lastModified,
                    entry.ttl,
                    entry.softTtl,
                    getAllResponseHeaders(entry));
        }

        private static List<Header> getAllResponseHeaders(Entry entry) {
            // If the entry contains all the response headers, use that field directly.
            if (entry.allResponseHeaders != null) {
                return entry.allResponseHeaders;
            }

            // Legacy fallback - copy headers from the map.
            return HttpHeaderParser.toAllHeaderList(entry.responseHeaders);
        }

        /**
         * Reads the header from a CountingInputStream and returns a CacheHeader object.
         *
         * <p>Field order here must stay in lockstep with {@link #writeHeader(OutputStream)}.
         *
         * @param is The InputStream to read from.
         * @throws IOException if fails to read header
         */
        static CacheHeader readHeader(CountingInputStream is) throws IOException {
            int magic = readInt(is);
            if (magic != CACHE_MAGIC) {
                // don't bother deleting, it'll get pruned eventually
                throw new IOException();
            }
            String key = readString(is);
            String etag = readString(is);
            long serverDate = readLong(is);
            long lastModified = readLong(is);
            long ttl = readLong(is);
            long softTtl = readLong(is);
            List<Header> allResponseHeaders = readHeaderList(is);
            return new CacheHeader(
                    key, etag, serverDate, lastModified, ttl, softTtl, allResponseHeaders);
        }

        /** Creates a cache entry for the specified data. */
        Entry toCacheEntry(byte[] data) {
            Entry e = new Entry();
            e.data = data;
            e.etag = etag;
            e.serverDate = serverDate;
            e.lastModified = lastModified;
            e.ttl = ttl;
            e.softTtl = softTtl;
            // Populate both the legacy map and the full list; the list view is read-only.
            e.responseHeaders = HttpHeaderParser.toHeaderMap(allResponseHeaders);
            e.allResponseHeaders = Collections.unmodifiableList(allResponseHeaders);
            return e;
        }

        /**
         * Writes the contents of this CacheHeader to the specified OutputStream.
         *
         * <p>Returns false (after logging) instead of throwing if serialization fails; field
         * order must stay in lockstep with {@link #readHeader(CountingInputStream)}.
         */
        boolean writeHeader(OutputStream os) {
            try {
                writeInt(os, CACHE_MAGIC);
                writeString(os, key);
                // A null etag is encoded as the empty string (decoded back in the constructor).
                writeString(os, etag == null ? "" : etag);
                writeLong(os, serverDate);
                writeLong(os, lastModified);
                writeLong(os, ttl);
                writeLong(os, softTtl);
                writeHeaderList(allResponseHeaders, os);
                os.flush();
                return true;
            } catch (IOException e) {
                VolleyLog.d("%s", e.toString());
                return false;
            }
        }
    }
+
+ @VisibleForTesting
+ static class CountingInputStream extends FilterInputStream {
+ private final long length;
+ private long bytesRead;
+
+ CountingInputStream(InputStream in, long length) {
+ super(in);
+ this.length = length;
+ }
+
+ @Override
+ public int read() throws IOException {
+ int result = super.read();
+ if (result != -1) {
+ bytesRead++;
+ }
+ return result;
+ }
+
+ @Override
+ public int read(byte[] buffer, int offset, int count) throws IOException {
+ int result = super.read(buffer, offset, count);
+ if (result != -1) {
+ bytesRead += result;
+ }
+ return result;
+ }
+
+ @VisibleForTesting
+ long bytesRead() {
+ return bytesRead;
+ }
+
+ long bytesRemaining() {
+ return length - bytesRead;
+ }
+ }
+
+ /*
+ * Homebrewed simple serialization system used for reading and writing cache
+ * headers on disk. Once upon a time, this used the standard Java
+ * Object{Input,Output}Stream, but the default implementation relies heavily
+ * on reflection (even for standard types) and generates a ton of garbage.
+ *
+ * TODO: Replace by standard DataInput and DataOutput in next cache version.
+ */
+
+ /**
+ * Simple wrapper around {@link InputStream#read()} that throws EOFException instead of
+ * returning -1.
+ */
+ private static int read(InputStream is) throws IOException {
+ int b = is.read();
+ if (b == -1) {
+ throw new EOFException();
+ }
+ return b;
+ }
+
+ static void writeInt(OutputStream os, int n) throws IOException {
+ os.write((n >> 0) & 0xff);
+ os.write((n >> 8) & 0xff);
+ os.write((n >> 16) & 0xff);
+ os.write((n >> 24) & 0xff);
+ }
+
+ static int readInt(InputStream is) throws IOException {
+ int n = 0;
+ n |= (read(is) << 0);
+ n |= (read(is) << 8);
+ n |= (read(is) << 16);
+ n |= (read(is) << 24);
+ return n;
+ }
+
+ static void writeLong(OutputStream os, long n) throws IOException {
+ os.write((byte) (n >>> 0));
+ os.write((byte) (n >>> 8));
+ os.write((byte) (n >>> 16));
+ os.write((byte) (n >>> 24));
+ os.write((byte) (n >>> 32));
+ os.write((byte) (n >>> 40));
+ os.write((byte) (n >>> 48));
+ os.write((byte) (n >>> 56));
+ }
+
+ static long readLong(InputStream is) throws IOException {
+ long n = 0;
+ n |= ((read(is) & 0xFFL) << 0);
+ n |= ((read(is) & 0xFFL) << 8);
+ n |= ((read(is) & 0xFFL) << 16);
+ n |= ((read(is) & 0xFFL) << 24);
+ n |= ((read(is) & 0xFFL) << 32);
+ n |= ((read(is) & 0xFFL) << 40);
+ n |= ((read(is) & 0xFFL) << 48);
+ n |= ((read(is) & 0xFFL) << 56);
+ return n;
+ }
+
+ static void writeString(OutputStream os, String s) throws IOException {
+ byte[] b = s.getBytes("UTF-8");
+ writeLong(os, b.length);
+ os.write(b, 0, b.length);
+ }
+
+ static String readString(CountingInputStream cis) throws IOException {
+ long n = readLong(cis);
+ byte[] b = streamToBytes(cis, n);
+ return new String(b, "UTF-8");
+ }
+
+ static void writeHeaderList(List<Header> headers, OutputStream os) throws IOException {
+ if (headers != null) {
+ writeInt(os, headers.size());
+ for (Header header : headers) {
+ writeString(os, header.getName());
+ writeString(os, header.getValue());
+ }
+ } else {
+ writeInt(os, 0);
+ }
+ }
+
+ static List<Header> readHeaderList(CountingInputStream cis) throws IOException {
+ int size = readInt(cis);
+ if (size < 0) {
+ throw new IOException("readHeaderList size=" + size);
+ }
+ List<Header> result =
+ (size == 0) ? Collections.<Header>emptyList() : new ArrayList<Header>();
+ for (int i = 0; i < size; i++) {
+ String name = readString(cis).intern();
+ String value = readString(cis).intern();
+ result.add(new Header(name, value));
+ }
+ return result;
+ }
+}