author    Chris Dalton <csmartdalton@google.com>  2018-10-23 18:26:20 -0600
committer Skia Commit-Bot <skia-commit-bot@chromium.org>  2018-10-24 01:03:41 +0000
commit    3b57279fd65a91684d5281620f01a25e9639c947 (patch)
tree      25d87776ee870adc842a8caddfcc51f3cd1f8ed3 /src
parent    55e326d24316ad40613043e22c5cd304a83be586 (diff)
ccpr: Harden the path cache
- Fixes a potential threading issue from freeing the (non-thread-safe)
  CachedAtlasInfo in the GrCCPathCacheEntry destructor.
- Adds logic to HashNode to better guarantee it will remain coherent with
  the LRU list.
- Adds various asserts, including ones to ensure non-thread-safe operations
  only happen on the graphics thread.

TBR=brianosman@google.com

Bug: chromium:897510
Bug: chromium:897413
Bug: chromium:897245
Bug: chromium:897507
Change-Id: I5aac8ef712743f62b8c5d0b798804b425084ec02
Reviewed-on: https://skia-review.googlesource.com/c/164707
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Auto-Submit: Chris Dalton <csmartdalton@google.com>
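The recurring assertion the patch introduces records the graphics thread's ID
at construction and checks it at every non-thread-safe entry point. A minimal
standalone sketch of that pattern, using std::this_thread::get_id() in place
of Skia's SkGetThreadID() and plain assert() in place of SkASSERT (all names
here are illustrative, not Skia's):

    #include <cassert>
    #include <thread>

    // Hypothetical sketch (not Skia code): record the owning thread at
    // construction, then assert on it in every non-thread-safe method.
    class ThreadAffineCache {
    public:
        ThreadAffineCache() : fGraphicsThreadID(std::this_thread::get_id()) {}

        void mutate() {
            // Mirrors SkASSERT(SkGetThreadID() == fGraphicsThreadID) below.
            assert(std::this_thread::get_id() == fGraphicsThreadID);
            // ... touch non-thread-safe state ...
        }

    private:
        const std::thread::id fGraphicsThreadID;
    };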
Diffstat (limited to 'src')
-rw-r--r--  src/gpu/ccpr/GrCCPathCache.cpp | 100
-rw-r--r--  src/gpu/ccpr/GrCCPathCache.h   |  36
2 files changed, 93 insertions(+), 43 deletions(-)
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index 6d37b4fca5..e61503abe7 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -112,52 +112,77 @@ private:
}
-inline GrCCPathCache::HashNode::HashNode(uint32_t pathCacheUniqueID, const MaskTransform& m,
- const GrShape& shape) {
+inline bool operator==(const GrCCPathCache::HashKey& key1, const GrCCPathCache::HashKey& key2) {
+ return key1.fData[0] == key2.fData[0] && !memcmp(&key1.fData[1], &key2.fData[1], key1.fData[0]);
+}
+
+inline GrCCPathCache::HashKey GrCCPathCache::HashNode::GetKey(const GrCCPathCacheEntry* entry) {
+ // The shape key is a variable-length footer to the entry allocation.
+ return HashKey{(const uint32_t*)((const char*)entry + sizeof(GrCCPathCacheEntry))};
+}
+
+inline uint32_t GrCCPathCache::HashNode::Hash(HashKey key) {
+ return GrResourceKeyHash(&key.fData[1], key.fData[0]);
+}
+
+inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, const MaskTransform& m,
+ const GrShape& shape)
+ : fPathCache(pathCache) {
+ SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
SkASSERT(shape.hasUnstyledKey());
WriteStyledKey writeKey(shape);
- void* memory = ::operator new (sizeof(GrCCPathCacheEntry) +
- writeKey.allocCountU32() * sizeof(uint32_t));
- fEntry.reset(new (memory) GrCCPathCacheEntry(pathCacheUniqueID, m));
+ void* mem = ::operator new (sizeof(GrCCPathCacheEntry) +
+ writeKey.allocCountU32() * sizeof(uint32_t));
+ fEntry.reset(new (mem) GrCCPathCacheEntry(fPathCache->fInvalidatedEntriesInbox.uniqueID(), m));
// The shape key is a variable-length footer to the entry allocation.
- uint32_t* keyData = (uint32_t*)((char*)memory + sizeof(GrCCPathCacheEntry));
+ uint32_t* keyData = (uint32_t*)((char*)mem + sizeof(GrCCPathCacheEntry));
writeKey.write(shape, keyData);
}
-inline bool operator==(const GrCCPathCache::HashKey& key1, const GrCCPathCache::HashKey& key2) {
- return key1.fData[0] == key2.fData[0] && !memcmp(&key1.fData[1], &key2.fData[1], key1.fData[0]);
+inline GrCCPathCache::HashNode::~HashNode() {
+ this->willExitHashTable();
}
-inline GrCCPathCache::HashKey GrCCPathCache::HashNode::GetKey(const GrCCPathCacheEntry* entry) {
- // The shape key is a variable-length footer to the entry allocation.
- return HashKey{(const uint32_t*)((const char*)entry + sizeof(GrCCPathCacheEntry))};
+inline GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
+ this->willExitHashTable();
+ fPathCache = node.fPathCache;
+ fEntry = std::move(node.fEntry);
+ SkASSERT(!node.fEntry);
+ return *this;
}
-inline uint32_t GrCCPathCache::HashNode::Hash(HashKey key) {
- return GrResourceKeyHash(&key.fData[1], key.fData[0]);
+inline void GrCCPathCache::HashNode::willExitHashTable() {
+ if (!fEntry) {
+ return; // We were moved.
+ }
+
+ SkASSERT(fPathCache);
+ SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
+ SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));
+
+ fEntry->invalidateAtlas(); // Must happen on graphics thread.
+ fPathCache->fLRU.remove(fEntry.get());
}
GrCCPathCache::GrCCPathCache()
- : fInvalidatedEntriesInbox(next_path_cache_id()) {
+ : fInvalidatedEntriesInbox(next_path_cache_id()) {
+ SkDEBUGCODE(fGraphicsThreadID = SkGetThreadID());
}
-#ifdef SK_DEBUG
GrCCPathCache::~GrCCPathCache() {
- // Ensure the hash table and LRU list are still coherent.
- int lruCount = 0;
- for (const GrCCPathCacheEntry* entry : fLRU) {
- SkASSERT(fHashTable.find(HashNode::GetKey(entry))->entry() == entry);
- ++lruCount;
- }
- SkASSERT(fHashTable.count() == lruCount);
+ SkASSERT(SkGetThreadID() == fGraphicsThreadID);
+
+ fHashTable.reset(); // Must be cleared first; ~HashNode calls fLRU.remove() on us.
+ SkASSERT(fLRU.isEmpty()); // Ensure the hash table and LRU list were coherent.
}
-#endif
sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
CreateIfAbsent createIfAbsent) {
+ SkASSERT(SkGetThreadID() == fGraphicsThreadID);
+
if (!shape.hasUnstyledKey()) {
return nullptr;
}
@@ -191,27 +216,36 @@ sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTr
if (fHashTable.count() >= kMaxCacheCount) {
this->evict(fLRU.tail()); // We've exceeded our limit.
}
- entry = fHashTable.set(HashNode(fInvalidatedEntriesInbox.uniqueID(), m, shape))->entry();
+ SkASSERT(!fHashTable.find({keyData.get()}));
+ entry = fHashTable.set(HashNode(this, m, shape))->entry();
shape.addGenIDChangeListener(sk_ref_sp(entry));
SkASSERT(fHashTable.count() <= kMaxCacheCount);
} else {
fLRU.remove(entry); // Will be re-added at head.
}
+ SkDEBUGCODE(HashNode* node = fHashTable.find(HashNode::GetKey(entry)));
+ SkASSERT(node && node->entry() == entry);
fLRU.addToHead(entry);
return sk_ref_sp(entry);
}
void GrCCPathCache::evict(GrCCPathCacheEntry* entry) {
+ SkASSERT(SkGetThreadID() == fGraphicsThreadID);
+
bool isInCache = entry->fNext || (fLRU.tail() == entry);
SkASSERT(isInCache == fLRU.isInList(entry));
if (isInCache) {
- fLRU.remove(entry);
- fHashTable.remove(HashNode::GetKey(entry)); // Do this last, as it might delete the entry.
+ SkDEBUGCODE(HashNode* node = fHashTable.find(HashNode::GetKey(entry)));
+ SkASSERT(node && node->entry() == entry);
+ fHashTable.remove(HashNode::GetKey(entry));
+ // HashNode::willExitHashTable() takes care of the rest.
}
}
void GrCCPathCache::purgeAsNeeded() {
+ SkASSERT(SkGetThreadID() == fGraphicsThreadID);
+
SkTArray<sk_sp<GrCCPathCacheEntry>> invalidatedEntries;
fInvalidatedEntriesInbox.poll(&invalidatedEntries);
for (const sk_sp<GrCCPathCacheEntry>& entry : invalidatedEntries) {
@@ -220,15 +254,24 @@ void GrCCPathCache::purgeAsNeeded() {
}
+GrCCPathCacheEntry::GrCCPathCacheEntry(uint32_t pathCacheUniqueID,
+ const MaskTransform& maskTransform)
+ : fPathCacheUniqueID(pathCacheUniqueID), fMaskTransform(maskTransform) {
+ SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
+ SkDEBUGCODE(fGraphicsThreadID = SkGetThreadID());
+}
+
GrCCPathCacheEntry::~GrCCPathCacheEntry() {
+ // This might get called from a different thread.
+ SkASSERT(!fCachedAtlasInfo); // Should have been cleared when the entry was evicted.
SkASSERT(!fCurrFlushAtlas); // Client is required to reset fCurrFlushAtlas back to null.
- this->invalidateAtlas();
}
void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
const SkIVector& atlasOffset, const SkRect& devBounds,
const SkRect& devBounds45, const SkIRect& devIBounds,
const SkIVector& maskShift) {
+ SkASSERT(SkGetThreadID() == fGraphicsThreadID);
SkASSERT(atlasKey.isValid());
SkASSERT(!fCurrFlushAtlas); // Otherwise we should reuse the atlas from last time.
@@ -245,6 +288,7 @@ void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
const SkIVector& newAtlasOffset,
sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
+ SkASSERT(SkGetThreadID() == fGraphicsThreadID);
SkASSERT(atlasKey.isValid());
SkASSERT(!fCurrFlushAtlas); // Otherwise we should reuse the atlas from last time.
@@ -257,6 +301,8 @@ void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
}
void GrCCPathCacheEntry::invalidateAtlas() {
+ SkASSERT(SkGetThreadID() == fGraphicsThreadID);
+
if (fCachedAtlasInfo) {
// Mark our own pixels invalid in the cached atlas texture.
fCachedAtlasInfo->fNumInvalidatedPathPixels += this->height() * this->width();
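The core of the hardening in GrCCPathCache.cpp above is that LRU-list
membership is now tied to hash-table membership through the HashNode
lifetime: destroying a node, or overwriting it by move-assignment, means it
is leaving the table, so willExitHashTable() removes the entry from the LRU
list (and invalidates its atlas) at exactly that point. A self-contained
sketch of the coupling, with hypothetical std::list-based stand-ins for the
Skia types (the real cache adds entries to the LRU in find(); this sketch
registers in the constructor for brevity):

    #include <list>

    struct Entry { int fId; };

    // Hypothetical sketch: a move-only hash-table node whose destructor and
    // move-assignment keep an external LRU list coherent with the table.
    class HashNodeSketch {
    public:
        HashNodeSketch() = default;
        HashNodeSketch(std::list<Entry*>* lru, Entry* entry)
                : fLRU(lru), fEntry(entry) {
            fLRU->push_front(fEntry);
        }
        HashNodeSketch(HashNodeSketch&& n) : fLRU(n.fLRU), fEntry(n.fEntry) {
            n.fEntry = nullptr;  // Moved-from node no longer owns anything.
        }
        HashNodeSketch& operator=(HashNodeSketch&& n) {
            this->willExitTable();  // Our old payload is leaving the table.
            fLRU = n.fLRU;
            fEntry = n.fEntry;
            n.fEntry = nullptr;
            return *this;
        }
        ~HashNodeSketch() { this->willExitTable(); }

    private:
        void willExitTable() {
            if (!fEntry) {
                return;  // We were moved from; nothing to clean up.
            }
            fLRU->remove(fEntry);  // The LRU list stays coherent automatically.
            fEntry = nullptr;
        }

        std::list<Entry*>* fLRU = nullptr;
        Entry* fEntry = nullptr;
    };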
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index 54a835a128..8265ea5fa9 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -25,7 +25,7 @@ class GrShape;
class GrCCPathCache {
public:
GrCCPathCache();
- SkDEBUGCODE(~GrCCPathCache();)
+ ~GrCCPathCache();
// Stores the components of a transformation that affect a path mask (i.e. everything but
// integer translation). During construction, any integer portions of the matrix's translate are
@@ -63,7 +63,8 @@ private:
// This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
// methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one
- // reference for each entry.
+ // reference for each entry. Also, when a HashNode goes out of scope, that means it is exiting
+ // the hash table. We take that opportunity to remove it from the LRU list and do some cleanup.
class HashNode : SkNoncopyable {
public:
static HashKey GetKey(const HashNode& node) { return GetKey(node.entry()); }
@@ -71,20 +72,22 @@ private:
static uint32_t Hash(HashKey);
HashNode() = default;
- HashNode(uint32_t pathCacheUniqueID, const MaskTransform&, const GrShape&);
- HashNode(HashNode&& node) : fEntry(std::move(node.fEntry)) {
+ HashNode(GrCCPathCache*, const MaskTransform&, const GrShape&);
+ HashNode(HashNode&& node)
+ : fPathCache(node.fPathCache), fEntry(std::move(node.fEntry)) {
SkASSERT(!node.fEntry);
}
- HashNode& operator=(HashNode&& node) {
- fEntry = std::move(node.fEntry);
- SkASSERT(!node.fEntry);
- return *this;
- }
+ ~HashNode();
+
+ HashNode& operator=(HashNode&& node);
GrCCPathCacheEntry* entry() const { return fEntry.get(); }
private:
+ void willExitHashTable();
+
+ GrCCPathCache* fPathCache = nullptr;
sk_sp<GrCCPathCacheEntry> fEntry;
// The GrShape's unstyled key is stored as a variable-length footer to the 'fEntry'
// allocation. GetKey provides access to it.
@@ -93,6 +96,8 @@ private:
SkTHashTable<HashNode, HashKey> fHashTable;
SkTInternalLList<GrCCPathCacheEntry> fLRU;
SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Inbox fInvalidatedEntriesInbox;
+
+ SkDEBUGCODE(SkThreadID fGraphicsThreadID);
};
/**
@@ -157,10 +162,7 @@ public:
private:
using MaskTransform = GrCCPathCache::MaskTransform;
- GrCCPathCacheEntry(uint32_t pathCacheUniqueID, const MaskTransform& maskTransform)
- : fPathCacheUniqueID(pathCacheUniqueID), fMaskTransform(maskTransform) {
- SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
- }
+ GrCCPathCacheEntry(uint32_t pathCacheUniqueID, const MaskTransform&);
// Resets this entry back to not having an atlas, and purges its previous atlas texture from the
// resource cache if needed.
@@ -176,16 +178,18 @@ private:
GrUniqueKey fAtlasKey;
SkIVector fAtlasOffset;
- // If null, then we are referencing a "stashed" atlas (see initAsStashedAtlas()).
- sk_sp<GrCCAtlas::CachedAtlasInfo> fCachedAtlasInfo;
-
SkRect fDevBounds;
SkRect fDevBounds45;
SkIRect fDevIBounds;
+ // If null, then we are referencing a "stashed" atlas (see initAsStashedAtlas()).
+ sk_sp<GrCCAtlas::CachedAtlasInfo> fCachedAtlasInfo;
+
// This field is for when a path gets drawn more than once during the same flush.
const GrCCAtlas* fCurrFlushAtlas = nullptr;
+ SkDEBUGCODE(SkThreadID fGraphicsThreadID);
+
friend class GrCCPathCache;
friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
uint32_t, DoEvenOddFill); // To access data.
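Finally, the reason ~GrCCPathCacheEntry above no longer calls
invalidateAtlas() directly is that the last reference can drop on any thread,
while atlas invalidation is graphics-thread-only; evicted entries are instead
posted to fInvalidatedEntriesInbox and drained in purgeAsNeeded(). A rough
sketch of that hand-off, assuming a simple mutex-guarded mailbox in place of
SkMessageBus (all names hypothetical):

    #include <iterator>
    #include <memory>
    #include <mutex>
    #include <utility>
    #include <vector>

    struct Entry {
        void invalidateAtlas() { /* non-thread-safe cleanup */ }
    };

    // Hypothetical stand-in for SkMessageBus<...>::Inbox: post() may run on
    // any thread; poll() is only called from the graphics thread.
    class Inbox {
    public:
        void post(std::shared_ptr<Entry> e) {
            std::lock_guard<std::mutex> lock(fMutex);
            fPending.push_back(std::move(e));
        }
        void poll(std::vector<std::shared_ptr<Entry>>* out) {
            std::lock_guard<std::mutex> lock(fMutex);
            out->insert(out->end(),
                        std::make_move_iterator(fPending.begin()),
                        std::make_move_iterator(fPending.end()));
            fPending.clear();
        }

    private:
        std::mutex fMutex;
        std::vector<std::shared_ptr<Entry>> fPending;
    };

    // Graphics thread: drain the inbox and run the deferred invalidations.
    void purgeAsNeeded(Inbox* inbox) {
        std::vector<std::shared_ptr<Entry>> invalidated;
        inbox->poll(&invalidated);
        for (const auto& entry : invalidated) {
            entry->invalidateAtlas();  // Safe: we are on the graphics thread.
        }
    }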