diff --git a/packages/react-server/src/ReactFizzServer.js b/packages/react-server/src/ReactFizzServer.js
index 7a7eacc2dd277..ef6ac80dcd6da 100644
--- a/packages/react-server/src/ReactFizzServer.js
+++ b/packages/react-server/src/ReactFizzServer.js
@@ -296,10 +296,14 @@ const OPEN = 0;
 const CLOSING = 1;
 const CLOSED = 2;
 
+type ScheduleState = 10 | 11 | 12;
+const IDLE = 10;
+const WORK = 11;
+const FLUSH = 12;
+
 export opaque type Request = {
   destination: null | Destination,
-  epoch: number,
-  flushScheduled: boolean,
+  schedule: ScheduleState,
   +resumableState: ResumableState,
   +renderState: RenderState,
   +rootFormatContext: FormatContext,
@@ -381,8 +385,7 @@ export function createRequest(
   const abortSet: Set<Task> = new Set();
   const request: Request = {
     destination: null,
-    epoch: 0,
-    flushScheduled: false,
+    schedule: IDLE,
     resumableState,
     renderState,
     rootFormatContext,
@@ -494,8 +497,7 @@ export function resumeRequest(
   const abortSet: Set<Task> = new Set();
   const request: Request = {
     destination: null,
-    epoch: 0,
-    flushScheduled: false,
+    schedule: IDLE,
     resumableState: postponedState.resumableState,
     renderState,
     rootFormatContext: postponedState.rootFormatContext,
@@ -4304,8 +4306,8 @@ function flushCompletedQueues(
   }
 }
 
-function completeWorkEpoch(request: Request) {
-  request.epoch++;
+function flushWork(request: Request) {
+  request.schedule = IDLE;
   const destination = request.destination;
   if (destination) {
     flushCompletedQueues(request, destination);
@@ -4313,14 +4315,14 @@ function completeWorkEpoch(request: Request) {
 }
 
 function startPerformingWork(request: Request): void {
-  request.epoch++;
+  request.schedule = WORK;
   if (supportsRequestStorage) {
     scheduleWork(() => requestStorage.run(request, performWork, request));
   } else {
     scheduleWork(() => performWork(request));
   }
   scheduleWork(() => {
-    completeWorkEpoch(request);
+    flushWork(request);
   });
 }
 
@@ -4357,31 +4359,23 @@ function enqueueEarlyPreloadsAfterInitialWork(request: Request) {
 }
 
 function enqueueFlush(request: Request): void {
   if (
-    request.flushScheduled === false &&
+    request.schedule === IDLE &&
     // If there are pinged tasks we are going to flush anyway after work completes
     request.pingedTasks.length === 0 &&
     // If there is no destination there is nothing we can flush to. A flush will
     // happen when we start flowing again
     request.destination !== null
   ) {
-    request.flushScheduled = true;
-    const currentEpoch = request.epoch;
+    request.schedule = FLUSH;
     scheduleWork(() => {
-      // In builds where scheduleWork is synchronous this will always initiate a
-      // flush immediately. That's not ideal but it's not what we're optimizing for
-      // and we ought to consider not using the sync form except for legacy. Regardless
-      // the logic is still sound because the epoch and destination could not have
-      // changed so while we're doing unecessary checks here it still preserves the same
-      // semantics as the async case.
-
-      request.flushScheduled = false;
-      if (currentEpoch !== request.epoch) {
-        // We scheduled this flush when no work was being performed but since
-        // then we've started a new epoch (we're either rendering or we've already flushed)
-        // so we don't need to flush here anymore.
+      if (request.schedule !== FLUSH) {
+        // We already flushed or we started a new render and will let that finish first
+        // which will end up flushing so we have nothing to do here.
         return;
       }
+      request.schedule = IDLE;
+
       // We need to existence check destination again here because it might go away
       // in between the enqueueFlush call and the work execution
       const destination = request.destination;
diff --git a/packages/react-server/src/ReactFlightServer.js b/packages/react-server/src/ReactFlightServer.js
index ef40f3dca16fb..035b9b67c2c09 100644
--- a/packages/react-server/src/ReactFlightServer.js
+++ b/packages/react-server/src/ReactFlightServer.js
@@ -279,10 +279,14 @@ type Task = {
 
 interface Reference {}
 
+type ScheduleState = 10 | 11 | 12;
+const IDLE = 10;
+const WORK = 11;
+const FLUSH = 12;
+
 export type Request = {
   status: 0 | 1 | 2,
-  epoch: number,
-  flushScheduled: boolean,
+  schedule: ScheduleState,
   fatalError: mixed,
   destination: null | Destination,
   bundlerConfig: ClientManifest,
@@ -379,8 +383,7 @@ export function createRequest(
   const hints = createHints();
   const request: Request = ({
     status: OPEN,
-    epoch: 0,
-    flushScheduled: false,
+    schedule: IDLE,
     fatalError: null,
     destination: null,
     bundlerConfig,
@@ -3104,8 +3107,8 @@ function flushCompletedChunks(
   }
 }
 
-function completeWorkEpoch(request: Request) {
-  request.epoch++;
+function flushWork(request: Request) {
+  request.schedule = IDLE;
   const destination = request.destination;
   if (destination) {
     flushCompletedChunks(request, destination);
@@ -3113,33 +3116,31 @@ function completeWorkEpoch(request: Request) {
 }
 
 function startPerformingWork(request: Request): void {
-  request.epoch++;
+  request.schedule = WORK;
   if (supportsRequestStorage) {
     scheduleWork(() => requestStorage.run(request, performWork, request));
   } else {
     scheduleWork(() => performWork(request));
   }
   scheduleWork(() => {
-    completeWorkEpoch(request);
+    flushWork(request);
   });
 }
 
 export function startWork(request: Request): void {
-  request.flushScheduled = request.destination !== null;
   startPerformingWork(request);
 }
 
 function enqueueFlush(request: Request): void {
   if (
-    request.flushScheduled === false &&
+    request.schedule === IDLE &&
     // If there are pinged tasks we are going to flush anyway after work completes
     request.pingedTasks.length === 0 &&
     // If there is no destination there is nothing we can flush to. A flush will
     // happen when we start flowing again
     request.destination !== null
   ) {
-    request.flushScheduled = true;
-    const currentEpoch = request.epoch;
+    request.schedule = FLUSH;
    scheduleWork(() => {
       // In builds where scheduleWork is synchronous this will always initiate a
       // flush immediately. That's not ideal but it's not what we're optimizing for
@@ -3148,14 +3149,14 @@ function enqueueFlush(request: Request): void {
       // changed so while we're doing unecessary checks here it still preserves the same
       // semantics as the async case.
 
-      request.flushScheduled = false;
-      if (currentEpoch !== request.epoch) {
-        // We scheduled this flush when no work was being performed but since
-        // then we've started a new epoch (we're either rendering or we've already flushed)
-        // so we don't need to flush here anymore.
+      if (request.schedule !== FLUSH) {
+        // We already flushed or we started a new render and will let that finish first
+        // which will end up flushing so we have nothing to do here.
         return;
       }
+      request.schedule = IDLE;
+
       // We need to existence check destination again here because it might go away
       // in between the enqueueFlush call and the work execution
       const destination = request.destination;
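
For readers skimming the two files above, here is a minimal standalone sketch of the IDLE/WORK/FLUSH state machine the patch introduces, written in TypeScript for brevity. The `SketchRequest` shape, `Destination`, `scheduleWork`, `performWork`, and `flushCompletedQueues` below are simplified stand-ins invented for this illustration, not the real Fizz/Flight internals; only the state transitions mirror the diff.

```typescript
// Simplified model of the scheduling in the patch; not the real React internals.
type ScheduleState = 'IDLE' | 'WORK' | 'FLUSH';

interface Destination {
  write(chunk: string): void;
}

interface SketchRequest {
  schedule: ScheduleState;
  destination: Destination | null;
  pingedTasks: Array<unknown>;
  completedChunks: Array<string>;
}

// Stand-in for React's scheduleWork: defer the callback to a microtask.
function scheduleWork(callback: () => void): void {
  queueMicrotask(callback);
}

// Stand-in for the real render loop; it just produces one completed chunk.
function performWork(request: SketchRequest): void {
  request.completedChunks.push('<chunk/>');
}

// Stand-in for flushCompletedQueues / flushCompletedChunks.
function flushCompletedQueues(request: SketchRequest, destination: Destination): void {
  for (const chunk of request.completedChunks) {
    destination.write(chunk);
  }
  request.completedChunks.length = 0;
}

function flushWork(request: SketchRequest): void {
  // The work pass is over: return to IDLE first so a later enqueueFlush can
  // schedule again, then flush whatever completed.
  request.schedule = 'IDLE';
  const destination = request.destination;
  if (destination) {
    flushCompletedQueues(request, destination);
  }
}

function startPerformingWork(request: SketchRequest): void {
  // Entering WORK implicitly cancels any pending FLUSH callback: that callback
  // will observe a non-FLUSH state and bail out, because this work pass ends
  // with its own flush anyway.
  request.schedule = 'WORK';
  scheduleWork(() => performWork(request));
  scheduleWork(() => flushWork(request));
}

function enqueueFlush(request: SketchRequest): void {
  if (
    request.schedule === 'IDLE' &&
    // Pinged tasks mean a work pass will run (and flush) anyway.
    request.pingedTasks.length === 0 &&
    // Without a destination there is nothing to flush to.
    request.destination !== null
  ) {
    request.schedule = 'FLUSH';
    scheduleWork(() => {
      if (request.schedule !== 'FLUSH') {
        // Either a flush already happened (IDLE) or a render started (WORK)
        // since this was scheduled; that path flushes, so do nothing here.
        return;
      }
      request.schedule = 'IDLE';
      const destination = request.destination;
      if (destination) {
        flushCompletedQueues(request, destination);
      }
    });
  }
}
```

Compared to the previous `flushScheduled` boolean plus a monotonically increasing `epoch`, the single tri-state makes cancellation explicit: a stale flush callback is detected by checking the current state rather than by comparing an epoch snapshot against the live counter.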