Fix bug in GrResourceAllocator's intermediate flushing (take 2)
fCurOpListIndex was not being incremented in the intermediate-flush case, so the drawing manager could end up re-executing an entire opList.

TBR=bsalomon@google.com
Bug: 942538
Change-Id: I8298c02071e87a343ec03d809959d06049071d93
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/203726
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
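
For context, a toy model of the failure mode (hypothetical names and simplified control flow — the real loop lives in GrDrawingManager::flush, which executes the opLists in each [startIndex, stopIndex) range that GrResourceAllocator::assign returns). If an intermediate flush returns without advancing fCurOpListIndex past the boundary, every subsequent assign() call reports a range that begins at the opList that was already executed:

    // Toy model, not Skia's actual code: assign() hands back half-open
    // [startIndex, stopIndex) ranges of opLists that are ready to execute.
    // We pretend every call goes over budget and flushes after one opList.
    #include <cstdio>

    struct ToyAllocator {
        int fCurOpListIndex = 0;
        int fNumOpLists;
        bool fFixed;  // true: advance past the flushed opList (this commit's fix)

        ToyAllocator(int numOpLists, bool fixed) : fNumOpLists(numOpLists), fFixed(fixed) {}

        bool assign(int* startIndex, int* stopIndex) {
            if (fCurOpListIndex >= fNumOpLists) {
                return false;  // everything has been flushed
            }
            *startIndex = fCurOpListIndex;
            *stopIndex = fCurOpListIndex + 1;  // stop early: intermediate flush
            if (fFixed) {
                fCurOpListIndex++;  // without this, the caller re-executes *startIndex
            }
            return true;
        }
    };

    int main() {
        ToyAllocator alloc(/*numOpLists=*/2, /*fixed=*/false);
        int start, stop;
        // fixed == false prints "[0, 1)" forever (capped at 4 iterations here);
        // fixed == true prints "[0, 1)" and then "[1, 2)".
        for (int i = 0; i < 4 && alloc.assign(&start, &stop); ++i) {
            std::printf("executing opLists [%d, %d)\n", start, stop);
        }
        return 0;
    }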
rphilli authored and Skia Commit-Bot committed Mar 27, 2019
1 parent 003e9f1 commit c476e5d
Showing 4 changed files with 152 additions and 54 deletions.
3 changes: 2 additions & 1 deletion src/gpu/GrDrawingManager.cpp
@@ -285,7 +285,8 @@ GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxy,
     bool flushed = false;
 
     {
-        GrResourceAllocator alloc(resourceProvider, flushState.deinstantiateProxyTracker());
+        GrResourceAllocator alloc(resourceProvider, flushState.deinstantiateProxyTracker()
+                                  SkDEBUGCODE(, fDAG.numOpLists()));
         for (int i = 0; i < fDAG.numOpLists(); ++i) {
             if (fDAG.opList(i)) {
                 fDAG.opList(i)->gatherProxyIntervals(&alloc);
65 changes: 38 additions & 27 deletions src/gpu/GrResourceAllocator.cpp
@@ -47,6 +47,7 @@ void GrResourceAllocator::markEndOfOpList(int opListIndex) {
     }
 
     fEndOfOpListOpIndices.push_back(this->curOp()); // This is the first op index of the next opList
+    SkASSERT(fEndOfOpListOpIndices.count() <= fNumOpLists);
 }
 
 GrResourceAllocator::~GrResourceAllocator() {
@@ -309,6 +310,34 @@ void GrResourceAllocator::expire(unsigned int curIndex) {
     }
 }
 
+bool GrResourceAllocator::onOpListBoundary() const {
+    if (fIntvlList.empty()) {
+        SkASSERT(fCurOpListIndex+1 <= fNumOpLists);
+        // Although technically on an opList boundary there is no need to force an
+        // intermediate flush here
+        return false;
+    }
+
+    const Interval* tmp = fIntvlList.peekHead();
+    return fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start();
+}
+
+void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
+    *stopIndex = fCurOpListIndex+1;
+
+    // This is interrupting the allocation of resources for this flush. We need to
+    // proactively clear the active interval list of any intervals that aren't
+    // guaranteed to survive the partial flush lest they become zombies (i.e.,
+    // holding a deleted surface proxy).
+    const Interval* tmp = fIntvlList.peekHead();
+    SkASSERT(fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start());
+
+    fCurOpListIndex++;
+    SkASSERT(fCurOpListIndex < fNumOpLists);
+
+    this->expire(tmp->start());
+}
+
 bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
     SkASSERT(outError);
     *outError = AssignError::kNoError;
@@ -318,6 +347,9 @@ bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
         return false; // nothing to render
     }
 
+    SkASSERT(fCurOpListIndex < fNumOpLists);
+    SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());
+
     *startIndex = fCurOpListIndex;
     *stopIndex = fEndOfOpListOpIndices.count();

@@ -332,8 +364,9 @@ bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
     this->dumpIntervals();
 #endif
     while (Interval* cur = fIntvlList.popHead()) {
-        if (fEndOfOpListOpIndices[fCurOpListIndex] < cur->start()) {
+        if (fEndOfOpListOpIndices[fCurOpListIndex] <= cur->start()) {
             fCurOpListIndex++;
+            SkASSERT(fCurOpListIndex < fNumOpLists);
         }
 
         this->expire(cur->start());
@@ -352,19 +385,8 @@ bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
 
         if (fResourceProvider->overBudget()) {
             // Only force intermediate draws on opList boundaries
-            if (!fIntvlList.empty() &&
-                fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
-                *stopIndex = fCurOpListIndex+1;
-
-                // This is interrupting the allocation of resources for this flush. We need to
-                // proactively clear the active interval list of any intervals that aren't
-                // guaranteed to survive the partial flush lest they become zombies (i.e.,
-                // holding a deleted surface proxy).
-                if (const Interval* tmp = fIntvlList.peekHead()) {
-                    this->expire(tmp->start());
-                } else {
-                    this->expire(std::numeric_limits<unsigned int>::max());
-                }
+            if (this->onOpListBoundary()) {
+                this->forceIntermediateFlush(stopIndex);
                 return true;
             }
         }
@@ -409,19 +431,8 @@ bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
 
         if (fResourceProvider->overBudget()) {
             // Only force intermediate draws on opList boundaries
-            if (!fIntvlList.empty() &&
-                fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
-                *stopIndex = fCurOpListIndex+1;
-
-                // This is interrupting the allocation of resources for this flush. We need to
-                // proactively clear the active interval list of any intervals that aren't
-                // guaranteed to survive the partial flush lest they become zombies (i.e.,
-                // holding a deleted surface proxy).
-                if (const Interval* tmp = fIntvlList.peekHead()) {
-                    this->expire(tmp->start());
-                } else {
-                    this->expire(std::numeric_limits<unsigned int>::max());
-                }
+            if (this->onOpListBoundary()) {
+                this->forceIntermediateFlush(stopIndex);
                 return true;
             }
         }
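
Two details of the new helpers are easy to miss. First, markEndOfOpList records this->curOp() at each boundary, so fEndOfOpListOpIndices[i] is the first op index belonging to opList i+1; an interval that starts exactly at that index already belongs to the next opList, which is why the comparisons changed from < to <=. Second, forceIntermediateFlush now advances fCurOpListIndex itself and expires intervals up to the boundary, so the interrupted flush cannot leave intervals holding deleted proxies. A standalone sketch of the boundary convention (simplified types and hypothetical names, not the real Interval machinery):

    #include <cassert>
    #include <vector>

    int main() {
        // Convention: endOfOpListOpIndices[i] holds the first op index of
        // opList i+1. Here opList 0 owns ops [0, 4) and opList 1 owns [4, 8).
        std::vector<unsigned> endOfOpListOpIndices = {4, 8};
        int curOpListIndex = 0;

        unsigned intervalStart = 4;  // an interval starting exactly on the boundary

        // '<' would leave curOpListIndex at 0 and misattribute op 4 to opList 0;
        // '<=' correctly steps to opList 1 -- the off-by-one this commit fixes.
        if (endOfOpListOpIndices[curOpListIndex] <= intervalStart) {
            curOpListIndex++;
        }
        assert(curOpListIndex == 1);
        return 0;
    }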
18 changes: 13 additions & 5 deletions src/gpu/GrResourceAllocator.h
@@ -42,8 +42,13 @@ class GrResourceProvider;
  */
 class GrResourceAllocator {
 public:
-    GrResourceAllocator(GrResourceProvider* resourceProvider, GrDeinstantiateProxyTracker* tracker)
-            : fResourceProvider(resourceProvider), fDeinstantiateTracker(tracker) {}
+    GrResourceAllocator(GrResourceProvider* resourceProvider,
+                        GrDeinstantiateProxyTracker* tracker
+                        SkDEBUGCODE(, int numOpLists))
+            : fResourceProvider(resourceProvider)
+            , fDeinstantiateTracker(tracker)
+            SkDEBUGCODE(, fNumOpLists(numOpLists)) {
+    }
 
     ~GrResourceAllocator();

@@ -88,6 +93,9 @@ class GrResourceAllocator {
     // Remove dead intervals from the active list
     void expire(unsigned int curIndex);
 
+    bool onOpListBoundary() const;
+    void forceIntermediateFlush(int* stopIndex);
+
     // These two methods wrap the interactions with the free pool
     void recycleSurface(sk_sp<GrSurface> surface);
     sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy, bool needsStencil);
@@ -218,11 +226,11 @@ class GrResourceAllocator {
 
     IntervalList           fIntvlList;       // All the intervals sorted by increasing start
     IntervalList           fActiveIntvls;    // List of live intervals during assignment
-                                             // (sorted by increasing end)
-    unsigned int           fNumOps = 1;      // op # 0 is reserved for uploads at the start
-                                             // of a flush
+                                             // (sorted by increasing end)
+    unsigned int           fNumOps = 0;
     SkTArray<unsigned int> fEndOfOpListOpIndices;
     int                    fCurOpListIndex = 0;
+    SkDEBUGCODE(const int  fNumOpLists = -1;)
 
     SkDEBUGCODE(bool       fAssigned = false;)
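
A note on the constructor signature above: SkDEBUGCODE compiles its arguments only in debug builds, so the extra parameter, the comma in front of it, and the fNumOpLists member all vanish from release builds. Roughly, the idiom works like this sketch (a simplified stand-in macro; see Skia's SkTypes.h for the real definition):

    // Simplified stand-in for SkDEBUGCODE (hypothetical macro name).
    #if defined(SK_DEBUG)
        #define SKDEBUGCODE_SKETCH(...) __VA_ARGS__
    #else
        #define SKDEBUGCODE_SKETCH(...)
    #endif

    class AllocatorSketch {
    public:
        // In debug builds this declares (int resources, int numOpLists);
        // in release builds the second parameter does not exist at all.
        AllocatorSketch(int resources SKDEBUGCODE_SKETCH(, int numOpLists))
                : fResources(resources)
                SKDEBUGCODE_SKETCH(, fNumOpLists(numOpLists)) {}

    private:
        int fResources;
        SKDEBUGCODE_SKETCH(const int fNumOpLists = -1;)
    };

This is why the call sites below pass the debug-only argument with the comma inside the macro, e.g. GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1)).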
120 changes: 99 additions & 21 deletions tests/ResourceAllocatorTest.cpp
@@ -28,6 +28,7 @@ struct ProxyParams {
     SkBackingFit    fFit;
     int             fSampleCnt;
     GrSurfaceOrigin fOrigin;
+    SkBudgeted      fBudgeted;
     // TODO: do we care about mipmapping
 };

@@ -45,7 +46,7 @@ static GrSurfaceProxy* make_deferred(GrProxyProvider* proxyProvider, const GrCaps* caps,
 
     const GrBackendFormat format = caps->getBackendFormatFromColorType(p.fColorType);
 
-    auto tmp = proxyProvider->createProxy(format, desc, p.fOrigin, p.fFit, SkBudgeted::kNo);
+    auto tmp = proxyProvider->createProxy(format, desc, p.fOrigin, p.fFit, p.fBudgeted);
     if (!tmp) {
         return nullptr;
     }
@@ -91,10 +92,12 @@ static void cleanup_backend(GrContext* context, const GrBackendTexture& backendTex) {
 static void overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider,
                          GrSurfaceProxy* p1, GrSurfaceProxy* p2, bool expectedResult) {
     GrDeinstantiateProxyTracker deinstantiateTracker;
-    GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker);
+    GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
 
     alloc.addInterval(p1, 0, 4);
+    alloc.incOps();
     alloc.addInterval(p2, 1, 2);
+    alloc.incOps();
     alloc.markEndOfOpList(0);
 
     int startIndex, stopIndex;
@@ -114,7 +117,14 @@ static void non_overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider,
                              GrSurfaceProxy* p1, GrSurfaceProxy* p2,
                              bool expectedResult) {
     GrDeinstantiateProxyTracker deinstantiateTracker;
-    GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker);
+    GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
+
+    alloc.incOps();
+    alloc.incOps();
+    alloc.incOps();
+    alloc.incOps();
+    alloc.incOps();
+    alloc.incOps();
 
     alloc.addInterval(p1, 0, 2);
     alloc.addInterval(p2, 3, 5);
@@ -167,14 +177,16 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
     const GrSurfaceOrigin kTL = kTopLeft_GrSurfaceOrigin;
     const GrSurfaceOrigin kBL = kBottomLeft_GrSurfaceOrigin;
 
+    const SkBudgeted kNotB = SkBudgeted::kNo;
+
     //--------------------------------------------------------------------------------------------
     TestCase gOverlappingTests[] = {
         //----------------------------------------------------------------------------------------
         // Two proxies with overlapping intervals and compatible descriptors should never share
         // RT version
-        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kDontShare },
+        { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
         // non-RT version
-        { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kDontShare },
+        { { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
     };
 
     for (auto test : gOverlappingTests) {
@@ -195,28 +207,28 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
         //----------------------------------------------------------------------------------------
         // Two non-overlapping intervals w/ compatible proxies should share
         // both same size & approx
-        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kShare },
-        { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kConditionallyShare },
+        { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, kShare },
+        { { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, kConditionallyShare },
         // diffs sizes but still approx
-        { { 64, kRT, kRGBA, kA, 0, kTL }, { 50, kRT, kRGBA, kA, 0, kTL }, kShare },
-        { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 50, kNotRT, kRGBA, kA, 0, kTL }, kConditionallyShare },
+        { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 50, kRT, kRGBA, kA, 0, kTL, kNotB }, kShare },
+        { { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, { 50, kNotRT, kRGBA, kA, 0, kTL, kNotB }, kConditionallyShare },
         // sames sizes but exact
-        { { 64, kRT, kRGBA, kE, 0, kTL }, { 64, kRT, kRGBA, kE, 0, kTL }, kShare },
-        { { 64, kNotRT, kRGBA, kE, 0, kTL }, { 64, kNotRT, kRGBA, kE, 0, kTL }, kConditionallyShare },
+        { { 64, kRT, kRGBA, kE, 0, kTL, kNotB }, { 64, kRT, kRGBA, kE, 0, kTL, kNotB }, kShare },
+        { { 64, kNotRT, kRGBA, kE, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kE, 0, kTL, kNotB }, kConditionallyShare },
         //----------------------------------------------------------------------------------------
         // Two non-overlapping intervals w/ different exact sizes should not share
-        { { 56, kRT, kRGBA, kE, 0, kTL }, { 54, kRT, kRGBA, kE, 0, kTL }, kDontShare },
+        { { 56, kRT, kRGBA, kE, 0, kTL, kNotB }, { 54, kRT, kRGBA, kE, 0, kTL, kNotB }, kDontShare },
         // Two non-overlapping intervals w/ _very different_ approx sizes should not share
-        { { 255, kRT, kRGBA, kA, 0, kTL }, { 127, kRT, kRGBA, kA, 0, kTL }, kDontShare },
+        { { 255, kRT, kRGBA, kA, 0, kTL, kNotB }, { 127, kRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
         // Two non-overlapping intervals w/ different MSAA sample counts should not share
-        { { 64, kRT, kRGBA, kA, k2, kTL }, { 64, kRT, kRGBA, kA, k4, kTL }, k2 == k4 },
+        { { 64, kRT, kRGBA, kA, k2, kTL, kNotB }, { 64, kRT, kRGBA, kA, k4, kTL, kNotB }, k2 == k4 },
         // Two non-overlapping intervals w/ different configs should not share
-        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kBGRA, kA, 0, kTL }, kDontShare },
+        { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kBGRA, kA, 0, kTL, kNotB }, kDontShare },
         // Two non-overlapping intervals w/ different RT classifications should never share
-        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kDontShare },
-        { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kDontShare },
+        { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
+        { { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
         // Two non-overlapping intervals w/ different origins should share
-        { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kBL }, kShare },
+        { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kRGBA, kA, 0, kBL, kNotB }, kShare },
     };
 
     for (auto test : gNonOverlappingTests) {
@@ -236,7 +248,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
     {
         // Wrapped backend textures should never be reused
         TestCase t[1] = {
-            { { 64, kNotRT, kRGBA, kE, 0, kTL }, { 64, kNotRT, kRGBA, kE, 0, kTL }, kDontShare }
+            { { 64, kNotRT, kRGBA, kE, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kE, 0, kTL, kNotB }, kDontShare }
         };
 
         GrBackendTexture backEndTex;
@@ -315,7 +327,7 @@ sk_sp<GrSurfaceProxy> make_lazy(GrProxyProvider* proxyProvider, const GrCaps* caps,
                                   : GrSurfaceProxy::LazyInstantiationType::kSingleUse;
     GrInternalSurfaceFlags flags = GrInternalSurfaceFlags::kNone;
     return proxyProvider->createLazyProxy(callback, format, desc, p.fOrigin, GrMipMapped::kNo,
-                                          flags, p.fFit, SkBudgeted::kNo, lazyType);
+                                          flags, p.fFit, p.fBudgeted, lazyType);
 }
 
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(LazyDeinstantiation, reporter, ctxInfo) {
@@ -330,6 +342,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(LazyDeinstantiation, reporter, ctxInfo) {
     texParams.fIsRT = false;
     texParams.fSampleCnt = 1;
     texParams.fSize = 100;
+    texParams.fBudgeted = SkBudgeted::kNo;
     ProxyParams rtParams = texParams;
     rtParams.fIsRT = true;
     auto proxyProvider = context->priv().proxyProvider();
@@ -342,11 +355,12 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(LazyDeinstantiation, reporter, ctxInfo) {
 
     GrDeinstantiateProxyTracker deinstantiateTracker;
     {
-        GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker);
+        GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
         alloc.addInterval(p0.get(), 0, 1);
         alloc.addInterval(p1.get(), 0, 1);
         alloc.addInterval(p2.get(), 0, 1);
         alloc.addInterval(p3.get(), 0, 1);
+        alloc.incOps();
         alloc.markEndOfOpList(0);
         int startIndex, stopIndex;
         GrResourceAllocator::AssignError error;
@@ -359,3 +373,67 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(LazyDeinstantiation, reporter, ctxInfo) {
         REPORTER_ASSERT(reporter, p3->isInstantiated());
     }
 }
+
+// Set up so there are two opLists that need to be flushed but the resource allocator thinks
+// it is over budget. The two opLists should be flushed separately and the opList indices
+// returned from assign should be correct.
+DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorOverBudgetTest, reporter, ctxInfo) {
+    GrContext* context = ctxInfo.grContext();
+    const GrCaps* caps = context->priv().caps();
+    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+    GrResourceProvider* resourceProvider = context->priv().resourceProvider();
+
+    bool orig = resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(true);
+
+    int origMaxNum;
+    size_t origMaxBytes;
+    context->getResourceCacheLimits(&origMaxNum, &origMaxBytes);
+
+    // Force the resource allocator to always believe it is over budget
+    context->setResourceCacheLimits(0, 0);
+
+    const ProxyParams params = { 64, false, kRGBA_8888_SkColorType,
+                                 SkBackingFit::kExact, 0, kTopLeft_GrSurfaceOrigin,
+                                 SkBudgeted::kYes };
+
+    {
+        GrSurfaceProxy* p1 = make_deferred(proxyProvider, caps, params);
+        GrSurfaceProxy* p2 = make_deferred(proxyProvider, caps, params);
+        GrSurfaceProxy* p3 = make_deferred(proxyProvider, caps, params);
+        GrSurfaceProxy* p4 = make_deferred(proxyProvider, caps, params);
+
+        GrDeinstantiateProxyTracker deinstantiateTracker;
+        GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 2));
+
+        alloc.addInterval(p1, 0, 0);
+        alloc.incOps();
+        alloc.addInterval(p2, 1, 1);
+        alloc.incOps();
+        alloc.markEndOfOpList(0);
+
+        alloc.addInterval(p3, 2, 2);
+        alloc.incOps();
+        alloc.addInterval(p4, 3, 3);
+        alloc.incOps();
+        alloc.markEndOfOpList(1);
+
+        int startIndex, stopIndex;
+        GrResourceAllocator::AssignError error;
+
+        alloc.assign(&startIndex, &stopIndex, &error);
+        REPORTER_ASSERT(reporter, GrResourceAllocator::AssignError::kNoError == error);
+        REPORTER_ASSERT(reporter, 0 == startIndex && 1 == stopIndex);
+
+        alloc.assign(&startIndex, &stopIndex, &error);
+        REPORTER_ASSERT(reporter, GrResourceAllocator::AssignError::kNoError == error);
+        REPORTER_ASSERT(reporter, 1 == startIndex && 2 == stopIndex);
+
+        p1->completedRead();
+        p2->completedRead();
+        p3->completedRead();
+        p4->completedRead();
+    }
+
+    context->setResourceCacheLimits(origMaxNum, origMaxBytes);
+    resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(orig);
+}
