diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index 839cba4cfec4d..4816858986a93 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -10,7 +10,9 @@
 #include "GrShape.h"
 #include "SkNx.h"
 
-DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCacheEntry>);
+static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.
+
+DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);
 
 static inline uint32_t next_path_cache_id() {
     static std::atomic<uint32_t> gNextID(1);
@@ -23,8 +25,8 @@ static inline uint32_t next_path_cache_id() {
 }
 
 static inline bool SkShouldPostMessageToBus(
-        const sk_sp<GrCCPathCacheEntry>& entry, uint32_t msgBusUniqueID) {
-    return entry->pathCacheUniqueID() == msgBusUniqueID;
+        const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
+    return key->pathCacheUniqueID() == msgBusUniqueID;
 }
 
 // The maximum number of cache entries we allow in our own cache.
@@ -62,83 +64,51 @@ inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
     return true;
 }
 
-namespace {
-
-// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
-class WriteStyledKey {
-public:
-    static constexpr int kStyledKeySizeInBytesIdx = 0;
-    static constexpr int kStrokeWidthIdx = 1;
-    static constexpr int kStrokeMiterIdx = 2;
-    static constexpr int kStrokeCapJoinIdx = 3;
-    static constexpr int kShapeUnstyledKeyIdx = 4;
-
-    static constexpr int kStrokeKeyCount = 3;  // [width, miterLimit, cap|join].
-
-    WriteStyledKey(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}
-
-    // Returns the total number of uint32_t's to allocate for the key.
-    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }
-
-    // Writes the key to out[].
-    void write(const GrShape& shape, uint32_t* out) {
-        out[kStyledKeySizeInBytesIdx] =
-                (kStrokeKeyCount + fShapeUnstyledKeyCount) * sizeof(uint32_t);
-
-        // Stroke key.
-        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
-        // http://skbug.com/8273
-        SkASSERT(!shape.style().hasPathEffect());
-        const SkStrokeRec& stroke = shape.style().strokeRec();
-        if (stroke.isFillStyle()) {
-            // Use a value for width that won't collide with a valid fp32 value >= 0.
-            out[kStrokeWidthIdx] = ~0;
-            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
-        } else {
-            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
-            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
-            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
-            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
-            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
-        }
-
-        // Shape unstyled key.
-        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
+sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
+                                                   int dataCountU32, const void* data) {
+    void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
+    sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
+    if (data) {
+        memcpy(key->data(), data, key->dataSizeInBytes());
     }
-
-private:
-    int fShapeUnstyledKeyCount;
-};
-
+    return key;
 }
 
-inline bool operator==(const GrCCPathCache::HashKey& key1, const GrCCPathCache::HashKey& key2) {
-    return key1.fData[0] == key2.fData[0] && !memcmp(&key1.fData[1], &key2.fData[1], key1.fData[0]);
+const uint32_t* GrCCPathCache::Key::data() const {
+    // The shape key is a variable-length footer to the entry allocation.
+    return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
 }
 
-inline GrCCPathCache::HashKey GrCCPathCache::HashNode::GetKey(const GrCCPathCacheEntry* entry) {
+uint32_t* GrCCPathCache::Key::data() {
     // The shape key is a variable-length footer to the entry allocation.
-    return HashKey{(const uint32_t*)((const char*)entry + sizeof(GrCCPathCacheEntry))};
+    return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
 }
 
-inline uint32_t GrCCPathCache::HashNode::Hash(HashKey key) {
-    return GrResourceKeyHash(&key.fData[1], key.fData[0]);
+inline bool GrCCPathCache::Key::operator==(const GrCCPathCache::Key& that) const {
+    return fDataSizeInBytes == that.fDataSizeInBytes &&
+           !memcmp(this->data(), that.data(), fDataSizeInBytes);
 }
 
-inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, const MaskTransform& m,
-                                         const GrShape& shape)
-        : fPathCache(pathCache) {
-    SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
-    SkASSERT(shape.hasUnstyledKey());
+void GrCCPathCache::Key::onChange() {
+    // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
+    SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
+}
 
-    WriteStyledKey writeKey(shape);
-    void* mem = ::operator new (sizeof(GrCCPathCacheEntry) +
-                                writeKey.allocCountU32() * sizeof(uint32_t));
-    fEntry.reset(new (mem) GrCCPathCacheEntry(fPathCache->fInvalidatedEntriesInbox.uniqueID(), m));
+inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
+        const GrCCPathCache::HashNode& node) {
+    return *node.entry()->fCacheKey;
+}
 
-    // The shape key is a variable-length footer to the entry allocation.
-    uint32_t* keyData = (uint32_t*)((char*)mem + sizeof(GrCCPathCacheEntry));
-    writeKey.write(shape, keyData);
+inline uint32_t GrCCPathCache::HashNode::Hash(const Key& key) {
+    return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
+}
+
+inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
+                                         const MaskTransform& m, const GrShape& shape)
+        : fPathCache(pathCache)
+        , fEntry(new GrCCPathCacheEntry(key, m)) {
+    SkASSERT(shape.hasUnstyledKey());
+    shape.addGenIDChangeListener(std::move(key));
 }
 
 inline GrCCPathCache::HashNode::~HashNode() {
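Key::Make() above is the classic trailing-array (over-allocation) idiom: one ::operator new call sized for the object plus dataCountU32 words, a placement-new of the header at the front, and data() pointing just past sizeof(Key). Below is a minimal standalone sketch of the same idiom; Blob and every name in it are illustrative, not Skia code (the real Key is ref-counted through sk_sp rather than destroyed manually):

// Minimal sketch of the trailing-array idiom behind Key::Make(). "Blob" is an
// illustrative stand-in, not Skia code.
#include <cstdint>
#include <cstring>
#include <new>

class Blob {
public:
    static Blob* Make(int countU32, const uint32_t* src) {
        // One allocation holds both the header and the variable-length payload.
        void* memory = ::operator new(sizeof(Blob) + countU32 * sizeof(uint32_t));
        Blob* blob = new (memory) Blob(countU32);
        std::memcpy(blob->data(), src, countU32 * sizeof(uint32_t));
        return blob;
    }
    static void Destroy(Blob* blob) {
        blob->~Blob();            // Placement-new'd, so destroy explicitly...
        ::operator delete(blob);  // ...and free with the matching deallocator.
    }
    // The payload lives immediately after the object, as in Key::data().
    // (::operator new returns suitably aligned storage, and sizeof(Blob) is a
    // multiple of alignof(uint32_t) here, so the cast is safe.)
    uint32_t* data() {
        return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Blob));
    }
    int count() const { return fCount; }

private:
    explicit Blob(int count) : fCount(count) {}
    int fCount;
};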
@@ -159,47 +129,84 @@ inline void GrCCPathCache::HashNode::willExitHashTable() {
     }
 
     SkASSERT(fPathCache);
-    SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
     SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));
 
-    fEntry->invalidateAtlas();  // Must happen on graphics thread.
-    fEntry->markShouldUnregisterFromPath();
+    fEntry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
     fPathCache->fLRU.remove(fEntry.get());
 }
 
 GrCCPathCache::GrCCPathCache()
-        : fInvalidatedEntriesInbox(next_path_cache_id()) {
+        : fInvalidatedKeysInbox(next_path_cache_id())
+        , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
 }
 
 GrCCPathCache::~GrCCPathCache() {
-    // DDL threads never use the cache, in which case it doesn't matter what thread we
-    // clean up on.
-    SkASSERT(kIllegalThreadID == fGraphicsThreadID || SkGetThreadID() == fGraphicsThreadID);
-
     fHashTable.reset();  // Must be cleared first; ~HashNode calls fLRU.remove() on us.
     SkASSERT(fLRU.isEmpty());  // Ensure the hash table and LRU list were coherent.
 }
 
+namespace {
+
+// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
+class WriteKeyHelper {
+public:
+    static constexpr int kStrokeWidthIdx = 0;
+    static constexpr int kStrokeMiterIdx = 1;
+    static constexpr int kStrokeCapJoinIdx = 2;
+    static constexpr int kShapeUnstyledKeyIdx = 3;
+
+    WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}
+
+    // Returns the total number of uint32_t's to allocate for the key.
+    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }
+
+    // Writes the key data to out[].
+    void write(const GrShape& shape, uint32_t* out) {
+        // Stroke key.
+        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
+        // http://skbug.com/8273
+        SkASSERT(!shape.style().hasPathEffect());
+        const SkStrokeRec& stroke = shape.style().strokeRec();
+        if (stroke.isFillStyle()) {
+            // Use a value for width that won't collide with a valid fp32 value >= 0.
+            out[kStrokeWidthIdx] = ~0;
+            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
+        } else {
+            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
+            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
+            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
+            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
+            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
+        }
+
+        // Shape unstyled key.
+        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
+    }
+
+private:
+    int fShapeUnstyledKeyCount;
+};
+
+}
+
 sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
                                               CreateIfAbsent createIfAbsent) {
-#ifdef SK_DEBUG
-    if (kIllegalThreadID == fGraphicsThreadID) {
-        fGraphicsThreadID = SkGetThreadID();
+    if (!shape.hasUnstyledKey()) {
+        return nullptr;
     }
-#endif
-    SkASSERT(SkGetThreadID() == fGraphicsThreadID);
 
-    if (!shape.hasUnstyledKey()) {
+    WriteKeyHelper writeKeyHelper(shape);
+    if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
         return nullptr;
     }
 
-    WriteStyledKey writeKey(shape);
-    SkAutoSTMalloc<GrShape::kMaxKeyFromDataVerbCnt * 4, uint32_t> keyData(writeKey.allocCountU32());
-    writeKey.write(shape, keyData.get());
+    SkASSERT(fScratchKey->unique());
+    fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
+    writeKeyHelper.write(shape, fScratchKey->data());
 
     GrCCPathCacheEntry* entry = nullptr;
-    if (HashNode* node = fHashTable.find({keyData.get()})) {
+    if (HashNode* node = fHashTable.find(*fScratchKey)) {
         entry = node->entry();
         SkASSERT(fLRU.isInList(entry));
         if (fuzzy_equals(m, entry->fMaskTransform)) {
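WriteKeyHelper packs the style into three fixed words ahead of the variable-length unstyled geometry key. The fill sentinel works because ~0 (0xFFFFFFFF) is the bit pattern of a NaN, so it can never equal a valid stroke width, which is a finite float >= 0; cap and join are small enums that each fit in 16 bits. Here is a hypothetical decoder for those three words, matching the layout written above (the struct and function names are mine, not the patch's):

// Illustrative decoder for the three stroke words WriteKeyHelper writes.
// The field layout matches the patch; the names here are hypothetical.
#include <cstdint>
#include <cstring>

struct StrokeKeyView {
    bool isFill;       // Encoded as width word == ~0u, an fp32 NaN bit pattern.
    float width;       // Only meaningful when !isFill.
    float miterLimit;  // Only meaningful when !isFill.
    uint32_t cap;      // High 16 bits of the cap|join word.
    uint32_t join;     // Low 16 bits of the cap|join word.
};

static StrokeKeyView decode_stroke_key(const uint32_t key[3]) {
    StrokeKeyView v{};
    v.isFill = (key[0] == ~0u);
    if (!v.isFill) {
        std::memcpy(&v.width, &key[0], sizeof(float));       // Bit-exact round trip.
        std::memcpy(&v.miterLimit, &key[1], sizeof(float));
        v.cap = key[2] >> 16;
        v.join = key[2] & 0xffff;
    }
    return v;
}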
@@ -211,7 +218,7 @@ sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTr
             entry->invalidateAtlas();
             SkASSERT(!entry->fCurrFlushAtlas);  // Should be null because 'entry' is unique.
         } else {
-            this->evict(entry);
+            this->evict(*fScratchKey);
             entry = nullptr;
         }
     }
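Note that the lookup path stays allocation-free: find() writes each candidate key into fScratchKey, which was preallocated at kMaxKeyDataCountU32 words, and resetDataCountU32() only shrinks the logical size (shapes whose keys would exceed the reserve are simply not cached). A sketch of that pattern with standard-library types follows; ScratchKey is an illustrative stand-in, not the patch's Key:

// Sketch of find()'s scratch-key pattern using std types. "ScratchKey" is an
// illustrative stand-in for the Key object reused via fScratchKey.
#include <cassert>
#include <cstdint>
#include <vector>

class ScratchKey {
public:
    explicit ScratchKey(int reserveCountU32) : fStorage(reserveCountU32) {}

    // Mirrors Key::resetDataCountU32(): only the logical length changes; the
    // reserved storage is never reallocated, so cache probes never touch the heap.
    void resetDataCountU32(int countU32) {
        assert(countU32 <= (int)fStorage.size());
        fCount = countU32;
    }

    uint32_t* data() { return fStorage.data(); }
    int dataSizeInBytes() const { return fCount * (int)sizeof(uint32_t); }

private:
    std::vector<uint32_t> fStorage;
    int fCount = 0;
};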
@@ -221,62 +228,45 @@ sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTr
         return nullptr;
     }
     if (fHashTable.count() >= kMaxCacheCount) {
-        this->evict(fLRU.tail());  // We've exceeded our limit.
+        SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
+        SkASSERT(node && node->entry() == fLRU.tail());
+        this->evict(*fLRU.tail()->fCacheKey);  // We've exceeded our limit.
     }
-    SkASSERT(!fHashTable.find({keyData.get()}));
-    entry = fHashTable.set(HashNode(this, m, shape))->entry();
-    shape.addGenIDChangeListener(sk_ref_sp(entry));
+
+    // Create a new entry in the cache.
+    sk_sp<GrCCPathCache::Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
+                                                       writeKeyHelper.allocCountU32(),
+                                                       fScratchKey->data());
+    SkASSERT(*permanentKey == *fScratchKey);
+    SkASSERT(!fHashTable.find(*permanentKey));
+    entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();
+    SkASSERT(fHashTable.count() <= kMaxCacheCount);
     } else {
         fLRU.remove(entry);  // Will be re-added at head.
     }
 
-    SkDEBUGCODE(HashNode* node = fHashTable.find(HashNode::GetKey(entry)));
+    SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
     SkASSERT(node && node->entry() == entry);
     fLRU.addToHead(entry);
     return sk_ref_sp(entry);
 }
 
-void GrCCPathCache::evict(GrCCPathCacheEntry* entry) {
-    // Has the entry already been evicted? (We mark "shouldUnregisterFromPath" upon eviction.)
-    bool isInCache = !entry->shouldUnregisterFromPath();
-    SkDEBUGCODE(HashNode* entryKeyNode = fHashTable.find(HashNode::GetKey(entry)));
-    SkASSERT((entryKeyNode && entryKeyNode->entry() == entry) == isInCache);
-    SkASSERT(fLRU.isInList(entry) == isInCache);
-
-    if (isInCache) {
-        fHashTable.remove(HashNode::GetKey(entry));
-        // HashNode::willExitHashTable() takes care of the rest.
-    }
-}
-
 void GrCCPathCache::purgeAsNeeded() {
-    SkTArray<sk_sp<GrCCPathCacheEntry>> invalidatedEntries;
-    fInvalidatedEntriesInbox.poll(&invalidatedEntries);
-    for (const sk_sp<GrCCPathCacheEntry>& entry : invalidatedEntries) {
-        this->evict(entry.get());
+    SkTArray<sk_sp<Key>> invalidatedKeys;
+    fInvalidatedKeysInbox.poll(&invalidatedKeys);
+    for (const sk_sp<Key>& key : invalidatedKeys) {
+        bool isInCache = !key->shouldUnregisterFromPath();  // Gets set upon exiting the cache.
+        if (isInCache) {
+            this->evict(*key);
+        }
     }
 }
 
-GrCCPathCacheEntry::GrCCPathCacheEntry(uint32_t pathCacheUniqueID,
-                                       const MaskTransform& maskTransform)
-        : fPathCacheUniqueID(pathCacheUniqueID), fMaskTransform(maskTransform) {
-    SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
-    SkDEBUGCODE(fGraphicsThreadID = SkGetThreadID());
-}
-
-GrCCPathCacheEntry::~GrCCPathCacheEntry() {
-    // This might get called from a different thread.
-    SkASSERT(!fCachedAtlasInfo);  // Should have been cleared when the entry was evicted.
-    SkASSERT(!fCurrFlushAtlas);  // Client is required to reset fCurrFlushAtlas back to null.
-}
-
 void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
                                             const SkIVector& atlasOffset, const SkRect& devBounds,
                                             const SkRect& devBounds45, const SkIRect& devIBounds,
                                             const SkIVector& maskShift) {
-    SkASSERT(SkGetThreadID() == fGraphicsThreadID);
     SkASSERT(atlasKey.isValid());
     SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
@@ -293,7 +283,6 @@ void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
 void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
                                              const SkIVector& newAtlasOffset,
                                              sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
-    SkASSERT(SkGetThreadID() == fGraphicsThreadID);
     SkASSERT(atlasKey.isValid());
     SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
@@ -306,8 +295,6 @@ void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
 }
 
 void GrCCPathCacheEntry::invalidateAtlas() {
-    SkASSERT(SkGetThreadID() == fGraphicsThreadID);
-
     if (fCachedAtlasInfo) {
         // Mark our own pixels invalid in the cached atlas texture.
         fCachedAtlasInfo->fNumInvalidatedPathPixels += this->height() * this->width();
@@ -324,8 +311,3 @@ void GrCCPathCacheEntry::invalidateAtlas() {
     fAtlasKey.reset();
     fCachedAtlasInfo = nullptr;
 }
-
-void GrCCPathCacheEntry::onChange() {
-    // Post a thread-safe eviction message.
-    SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Post(sk_ref_sp(this));
-}
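With the listener moved from the entry onto the lightweight Key, a path being modified or destroyed on any thread only posts its key to the cache's inbox; the eviction itself, and any GPU-resource cleanup, happens later on the cache's own thread when purgeAsNeeded() polls. Below is a distilled sketch of that post/poll handoff using standard-library stand-ins (SkMessageBus itself is not reproduced here, and KeyMsg is hypothetical):

// Distilled post/poll handoff, using std types rather than SkMessageBus.
#include <cstdint>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

struct KeyMsg { uint32_t pathCacheUniqueID; };  // Illustrative stand-in for Key.

class Inbox {
public:
    // Thread-safe: callable from whatever thread invalidates the path
    // (the role of Key::onChange() posting to the message bus).
    void post(std::shared_ptr<KeyMsg> key) {
        std::lock_guard<std::mutex> lock(fMutex);
        fMessages.push_back(std::move(key));
    }

    // Drained on the cache's own thread (the role of purgeAsNeeded()).
    void poll(std::vector<std::shared_ptr<KeyMsg>>* out) {
        std::lock_guard<std::mutex> lock(fMutex);
        *out = std::move(fMessages);
        fMessages.clear();
    }

private:
    std::mutex fMutex;
    std::vector<std::shared_ptr<KeyMsg>> fMessages;
};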
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index c0e3cef5afcb0..16f31d0209447 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -27,6 +27,42 @@ class GrCCPathCache {
     GrCCPathCache();
     ~GrCCPathCache();
 
+    class Key : public SkPathRef::GenIDChangeListener {
+    public:
+        static sk_sp<Key> Make(uint32_t pathCacheUniqueID, int dataCountU32,
+                               const void* data = nullptr);
+
+        uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }
+
+        int dataSizeInBytes() const { return fDataSizeInBytes; }
+        const uint32_t* data() const;
+
+        void resetDataCountU32(int dataCountU32) {
+            SkASSERT(dataCountU32 <= fDataReserveCountU32);
+            fDataSizeInBytes = dataCountU32 * sizeof(uint32_t);
+        }
+        uint32_t* data();
+
+        bool operator==(const Key&) const;
+
+        // Called when our corresponding path is modified or deleted. Not threadsafe.
+        void onChange() override;
+
+    private:
+        Key(uint32_t pathCacheUniqueID, int dataCountU32)
+                : fPathCacheUniqueID(pathCacheUniqueID)
+                , fDataSizeInBytes(dataCountU32 * sizeof(uint32_t))
+                SkDEBUGCODE(, fDataReserveCountU32(dataCountU32)) {
+            SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
+        }
+
+        const uint32_t fPathCacheUniqueID;
+        int fDataSizeInBytes;
+        SkDEBUGCODE(const int fDataReserveCountU32);
+
+        // The GrShape's unstyled key is stored as a variable-length footer to this class. GetKey
+        // provides access to it.
+    };
+
     // Stores the components of a transformation that affect a path mask (i.e. everything but
     // integer translation). During construction, any integer portions of the matrix's translate are
     // shaved off and returned to the caller. The caller is responsible for those integer shifts.
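One subtlety in the private constructor above: fDataReserveCountU32 exists only in debug builds, so the comma introducing its initializer has to live inside the SkDEBUGCODE(...) expansion; otherwise the release-build initializer list would end in a stray comma. A self-contained sketch of that idiom, where MY_DEBUGCODE is a hypothetical stand-in keyed on NDEBUG rather than Skia's SK_DEBUG:

// Sketch of the debug-only-member idiom used by Key's constructor above.
#include <cassert>

#ifdef NDEBUG
#   define MY_DEBUGCODE(...)             // Release: expands to nothing.
#else
#   define MY_DEBUGCODE(...) __VA_ARGS__ // Debug: expands to its arguments.
#endif

class Sized {
public:
    explicit Sized(int count)
            : fSizeInBytes(count * 4)
            MY_DEBUGCODE(, fReserveCount(count)) {}  // Comma lives inside the macro.

    void resetCount(int count) {
        // assert() compiles away under NDEBUG, together with fReserveCount itself.
        assert(count <= fReserveCount);
        fSizeInBytes = count * 4;
    }

private:
    int fSizeInBytes;
    MY_DEBUGCODE(int fReserveCount;)  // Member only exists in debug builds.
};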
@@ -50,29 +86,20 @@ class GrCCPathCache {
     sk_sp<GrCCPathCacheEntry> find(const GrShape&, const MaskTransform&,
                                    CreateIfAbsent = CreateIfAbsent::kNo);
 
-    void evict(GrCCPathCacheEntry*);
-
     void purgeAsNeeded();
 
 private:
-    // Wrapper around a raw GrShape key that has a specialized operator==. Used by the hash table.
-    struct HashKey {
-        const uint32_t* fData;
-    };
-    friend bool operator==(const HashKey&, const HashKey&);
-
     // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
     // methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one
     // reference for each entry. Also, when a HashNode goes out of scope, that means it is exiting
     // the hash table. We take that opportunity to remove it from the LRU list and do some cleanup.
     class HashNode : SkNoncopyable {
     public:
-        static HashKey GetKey(const HashNode& node) { return GetKey(node.entry()); }
-        static HashKey GetKey(const GrCCPathCacheEntry*);
-        static uint32_t Hash(HashKey);
+        static const Key& GetKey(const HashNode&);
+        static uint32_t Hash(const Key&);
 
         HashNode() = default;
-        HashNode(GrCCPathCache*, const MaskTransform&, const GrShape&);
+        HashNode(GrCCPathCache*, sk_sp<Key>, const MaskTransform&, const GrShape&);
         HashNode(HashNode&& node) : fPathCache(node.fPathCache), fEntry(std::move(node.fEntry)) {
             SkASSERT(!node.fEntry);
@@ -89,28 +116,30 @@ class GrCCPathCache {
         GrCCPathCache* fPathCache = nullptr;
         sk_sp<GrCCPathCacheEntry> fEntry;
-
-        // The GrShape's unstyled key is stored as a variable-length footer to the 'fEntry'
-        // allocation. GetKey provides access to it.
     };
 
-    SkTHashTable<HashNode, HashKey> fHashTable;
-    SkTInternalLList<GrCCPathCacheEntry> fLRU;
-    SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Inbox fInvalidatedEntriesInbox;
+    void evict(const GrCCPathCache::Key& key) {
+        fHashTable.remove(key);  // HashNode::willExitHashTable() takes care of the rest.
+    }
 
-    SkDEBUGCODE(SkThreadID fGraphicsThreadID = kIllegalThreadID);
+    SkTHashTable<HashNode, const Key&> fHashTable;
+    SkTInternalLList<GrCCPathCacheEntry> fLRU;
+    SkMessageBus<sk_sp<Key>>::Inbox fInvalidatedKeysInbox;
+    sk_sp<Key> fScratchKey;  // Reused for creating a temporary key in the find() method.
 };
 
 /**
  * This class stores all the data necessary to draw a specific path + matrix combination from their
  * corresponding cached atlas.
  */
-class GrCCPathCacheEntry : public SkPathRef::GenIDChangeListener {
+class GrCCPathCacheEntry : public GrNonAtomicRef<GrCCPathCacheEntry> {
 public:
     SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry);
 
-    ~GrCCPathCacheEntry() override;
-
-    uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }
+    ~GrCCPathCacheEntry() {
+        SkASSERT(!fCurrFlushAtlas);  // Client is required to reset fCurrFlushAtlas back to null.
+        this->invalidateAtlas();
+    }
 
     // The number of times this specific entry (path + matrix combination) has been pulled from
     // the path cache. As long as the caller does exactly one lookup per draw, this translates to
@@ -162,16 +191,16 @@ class GrCCPathCacheEntry : public SkPathRef::GenIDChangeListener {
 private:
     using MaskTransform = GrCCPathCache::MaskTransform;
 
-    GrCCPathCacheEntry(uint32_t pathCacheUniqueID, const MaskTransform&);
+    GrCCPathCacheEntry(sk_sp<GrCCPathCache::Key> cacheKey, const MaskTransform& maskTransform)
+            : fCacheKey(std::move(cacheKey)), fMaskTransform(maskTransform) {
+    }
 
     // Resets this entry back to not having an atlas, and purges its previous atlas texture from the
     // resource cache if needed.
    void invalidateAtlas();
 
-    // Called when our corresponding path is modified or deleted. Not threadsafe.
-    void onChange() override;
+    sk_sp<GrCCPathCache::Key> fCacheKey;
 
-    const uint32_t fPathCacheUniqueID;
     MaskTransform fMaskTransform;
     int fHitCount = 1;
@@ -188,8 +217,6 @@ class GrCCPathCacheEntry : public SkPathRef::GenIDChangeListener {
     // This field is for when a path gets drawn more than once during the same flush.
     const GrCCAtlas* fCurrFlushAtlas = nullptr;
 
-    SkDEBUGCODE(SkThreadID fGraphicsThreadID);
-
     friend class GrCCPathCache;
     friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
                                                  GrColor, DoEvenOddFill);  // To access data.
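The resulting ownership graph: the SkPath keeps only the small Key alive (as its gen-ID change listener), each entry shares a ref on its Key via fCacheKey, the hash table holds the single HashNode reference to each entry, and clients receive plain non-atomic refs. Because the path no longer pins the entry itself, ~GrCCPathCacheEntry can now call invalidateAtlas() directly. Below is a generic sketch of the single-threaded intrusive ref-count pattern that GrNonAtomicRef<GrCCPathCacheEntry> supplies; it is illustrative only, not Skia's implementation, and assumes the convention that objects are born with one ref:

// Generic single-threaded intrusive ref counting, illustrating the role
// GrNonAtomicRef plays for GrCCPathCacheEntry. Not Skia code.
#include <cassert>

template <typename Derived>
class NonAtomicRef {
public:
    void ref() const { ++fRefCnt; }
    void unref() const {
        assert(fRefCnt > 0);
        if (0 == --fRefCnt) {
            // CRTP: the most-derived type was allocated, so delete as Derived.
            delete static_cast<const Derived*>(this);
        }
    }

protected:
    NonAtomicRef() = default;
    ~NonAtomicRef() = default;  // Deletion always goes through unref().

private:
    mutable int fRefCnt = 1;  // Plain int, not std::atomic: cheap, one thread only.
};

class Entry : public NonAtomicRef<Entry> {};  // Plays GrCCPathCacheEntry's role.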