Skip to content

Commit

Permalink
Try to decode source data directly if we fail to write it to disk.
Browse files Browse the repository at this point in the history
Currently if you disable the disk cache using DiskCacheAdapter or some similarly non-functional implementation, any attempt to load remote data with DiskCacheStrategy.AUTOMATIC or any data at all with DiskCacheStrategy.DATA will fail. In both of these scenarios Glide expects the data to be written to the disk cache first, then decoded from the disk cache. So if the disk cache can't write the data, the load fails.

Here we're changing Glide so that even with these strategies we attempt to fall back to decoding the original data, if we can rewind it. I can imagine this behavior being confusing in some cases where someone misconfigures the disk cache. Arguably this change should be limited to DiskCacheStrategy.AUTOMATIC.

I've decided to keep this behavior enabled for all strategies because it's probably better to succeed at loading when we can, even if we fail to cache the image and doing so introduces some confusion. For users who want to disable the disk cache entirely, it would also be surprising for Glide's default behavior to be to fail 100% of remote image loads.

PiperOrigin-RevId: 368307717
  • Loading branch information
sjudd authored and glide-copybara-robot committed Apr 13, 2021
1 parent 0ac564f commit 755c39f
Show file tree
Hide file tree
Showing 2 changed files with 83 additions and 16 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import com.bumptech.glide.load.Options;
import com.bumptech.glide.load.ResourceEncoder;
import com.bumptech.glide.load.Transformation;
import com.bumptech.glide.load.data.DataRewinder;
import com.bumptech.glide.load.engine.DecodeJob.DiskCacheProvider;
import com.bumptech.glide.load.engine.bitmap_recycle.ArrayPool;
import com.bumptech.glide.load.engine.cache.DiskCache;
Expand Down Expand Up @@ -99,6 +100,10 @@ DiskCacheStrategy getDiskCacheStrategy() {
return diskCacheStrategy;
}

/**
 * Returns a {@link DataRewinder} capable of rewinding the given source data so it can be read
 * more than once (e.g. decoded directly after a failed disk cache write).
 *
 * <p>Delegates to the {@code Registry} obtained from the Glide context; the registry selects the
 * rewinder based on the runtime type of {@code data}.
 *
 * @param data the source data to be rewound; its type determines which rewinder is returned
 */
<T> DataRewinder<T> getRewinder(T data) {
return glideContext.getRegistry().getRewinder(data);
}

/** Returns the {@link Priority} assigned to this load request. */
Priority getPriority() {
return priority;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,13 @@
import com.bumptech.glide.load.Key;
import com.bumptech.glide.load.data.DataFetcher;
import com.bumptech.glide.load.data.DataFetcher.DataCallback;
import com.bumptech.glide.load.data.DataRewinder;
import com.bumptech.glide.load.engine.cache.DiskCache;
import com.bumptech.glide.load.model.ModelLoader;
import com.bumptech.glide.load.model.ModelLoader.LoadData;
import com.bumptech.glide.util.LogTime;
import com.bumptech.glide.util.Synthetic;
import java.io.IOException;
import java.util.Collections;

/**
Expand All @@ -21,30 +24,52 @@
*
* <p>Depending on the disk cache strategy, source data may first be written to disk and then loaded
* from the cache file rather than returned directly.
*
* <p>This object may be used by multiple threads, but only one at a time. It is not safe to access
* this object on multiple threads concurrently.
*/
class SourceGenerator implements DataFetcherGenerator, DataFetcherGenerator.FetcherReadyCallback {
private static final String TAG = "SourceGenerator";

private final DecodeHelper<?> helper;
private final FetcherReadyCallback cb;

private int loadDataListIndex;
private DataCacheGenerator sourceCacheGenerator;
private Object dataToCache;
private volatile int loadDataListIndex;
private volatile DataCacheGenerator sourceCacheGenerator;
private volatile Object dataToCache;
private volatile ModelLoader.LoadData<?> loadData;
private DataCacheKey originalKey;
private volatile DataCacheKey originalKey;

/**
 * @param helper provides the load's configuration (disk cache, rewinders, model loaders, etc.)
 * @param cb notified when data is fetched successfully or the fetch fails
 */
SourceGenerator(DecodeHelper<?> helper, FetcherReadyCallback cb) {
this.helper = helper;
this.cb = cb;
}

// Concurrent access isn't supported.
@SuppressWarnings({"NonAtomicOperationOnVolatileField", "NonAtomicVolatileUpdate"})
@Override
public boolean startNext() {
if (dataToCache != null) {
Object data = dataToCache;
dataToCache = null;
cacheData(data);
try {
boolean isDataInCache = cacheData(data);
// If we failed to write the data to cache, the cacheData method will try to decode the
// original data directly instead of going through the disk cache. Since cacheData has
// already called our callback at this point, there's nothing more to do but return.
if (!isDataInCache) {
return true;
}
// If we were able to write the data to cache successfully, we now need to proceed to call
// the sourceCacheGenerator below to load the data from cache.
} catch (IOException e) {
// An IOException means we weren't able to write data to cache or we weren't able to rewind
// it after a disk cache write failed. In either case we can just move on and try the next
// fetch below.
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "Failed to properly rewind or write data to cache", e);
}
}
}

if (sourceCacheGenerator != null && sourceCacheGenerator.startNext()) {
Expand Down Expand Up @@ -98,33 +123,70 @@ private boolean hasNextModelLoader() {
return loadDataListIndex < helper.getLoadData().size();
}

private void cacheData(Object dataToCache) {
/**
* Returns {@code true} if we were able to cache the data and should try to decode the data
* directly from cache and {@code false} if we were unable to cache the data and should make an
* attempt to decode from source.
*/
private boolean cacheData(Object dataToCache) throws IOException {
long startTime = LogTime.getLogTime();
boolean isLoadingFromSourceData = false;
try {
Encoder<Object> encoder = helper.getSourceEncoder(dataToCache);
DataCacheWriter<Object> writer =
new DataCacheWriter<>(encoder, dataToCache, helper.getOptions());
originalKey = new DataCacheKey(loadData.sourceKey, helper.getSignature());
helper.getDiskCache().put(originalKey, writer);
DataRewinder<Object> rewinder = helper.getRewinder(dataToCache);
Object data = rewinder.rewindAndGet();
Encoder<Object> encoder = helper.getSourceEncoder(data);
DataCacheWriter<Object> writer = new DataCacheWriter<>(encoder, data, helper.getOptions());
DataCacheKey newOriginalKey = new DataCacheKey(loadData.sourceKey, helper.getSignature());
DiskCache diskCache = helper.getDiskCache();
diskCache.put(newOriginalKey, writer);
if (Log.isLoggable(TAG, Log.VERBOSE)) {
Log.v(
TAG,
"Finished encoding source to cache"
+ ", key: "
+ originalKey
+ newOriginalKey
+ ", data: "
+ dataToCache
+ ", encoder: "
+ encoder
+ ", duration: "
+ LogTime.getElapsedMillis(startTime));
}

if (diskCache.get(newOriginalKey) != null) {
originalKey = newOriginalKey;
sourceCacheGenerator =
new DataCacheGenerator(Collections.singletonList(loadData.sourceKey), helper, this);
// We were able to write the data to cache.
return true;
} else {
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(
TAG,
"Attempt to write: "
+ originalKey
+ ", data: "
+ dataToCache
+ " to the disk"
+ " cache failed, maybe the disk cache is disabled?"
+ " Trying to decode the data directly...");
}

isLoadingFromSourceData = true;
cb.onDataFetcherReady(
loadData.sourceKey,
rewinder.rewindAndGet(),
loadData.fetcher,
loadData.fetcher.getDataSource(),
loadData.sourceKey);
}
// We failed to write the data to cache.
return false;
} finally {
loadData.fetcher.cleanup();
if (!isLoadingFromSourceData) {
loadData.fetcher.cleanup();
}
}

sourceCacheGenerator =
new DataCacheGenerator(Collections.singletonList(loadData.sourceKey), helper, this);
}

@Override
Expand Down

0 comments on commit 755c39f

Please sign in to comment.