From d71af9480d1e8f65621ce99a6e18cfa814b0866f Mon Sep 17 00:00:00 2001
From: Josh Story
Date: Tue, 23 Apr 2024 13:20:00 -0700
Subject: [PATCH] [Flight][Fizz] schedule flushing independently from performing work

Today, work that pings in a microtask in Flight and Fizz ends up being
rendered in a new macrotask. This is potentially sub-optimal for
cache-locality reasons, but it also has an impact on flushing because
flushes always happen synchronously after rendering. It would be ideal
if tasks that can ping in a microtask got a chance to render before we
flush, because we can end up with a more compact wire format for Flight
and we can potentially avoid showing fallbacks for Fizz.

This commit doesn't actually implement rendering in microtasks; that
will come in a follow-up change. What this change does do is refactor
the flushing controls so that flushing can be scheduled in a macrotask
separately from the render phase.

The approach uses a notion of epochs to allow scheduled work to infer
whether it is still valid. For instance, if Float causes a flush to be
enqueued but the next task is a ping leading to a render, we don't
necessarily want to flush before additional renders are allowed to
complete. Additionally, if there is already a flush scheduled, we don't
want an enqueued flush from Float to lead to an additional flush.

The request's flushScheduled property is now more narrowly tailored to
represent out-of-band flushes enqueued through Float and not the
general-purpose flushing that is done after every work loop.

The request now has an epoch property which can be used to determine
whether a new work loop or flush has started since a task was
previously scheduled.

In some environments scheduleWork is synchronous. All the logic still
makes sense for synchronous work, even if it adds unnecessary overhead
(such as checking whether we're in the same epoch, which is necessarily
true in sync mode). However, sync mode is mostly useful for legacy
renderers like renderToString, and we should probably move away from it
for the browser build anyway, so I don't think we ought to concern
ourselves with further optimization of the sync case.
---
 .../src/__tests__/ReactDOMFizzServer-test.js | 10 +++-
 packages/react-server/src/ReactFizzServer.js | 48 ++++++++++++++-----
 2 files changed, 46 insertions(+), 12 deletions(-)

diff --git a/packages/react-dom/src/__tests__/ReactDOMFizzServer-test.js b/packages/react-dom/src/__tests__/ReactDOMFizzServer-test.js
index dc514f2939ed6..0d2b3b739c5f1 100644
--- a/packages/react-dom/src/__tests__/ReactDOMFizzServer-test.js
+++ b/packages/react-dom/src/__tests__/ReactDOMFizzServer-test.js
@@ -3708,8 +3708,12 @@ describe('ReactDOMFizzServer', () => {
   // method has an opportunity to schedule a write. We should fix this probably and once we do this test will start
   // to fail if the underyling issue of writing after stream completion isn't fixed
   it('does not try to write to the stream after it has been closed', async () => {
+    let resolve;
+    const promise = new Promise(res => {
+      resolve = res;
+    });
     async function preloadLate() {
-      await 1;
+      await promise;
       ReactDOM.preconnect('foo');
     }
 
@@ -3732,6 +3736,10 @@
       renderToPipeableStream().pipe(writable);
     });
 
+    await act(() => {
+      resolve();
+    });
+
     expect(getVisibleChildren(document)).toEqual(
diff --git a/packages/react-server/src/ReactFizzServer.js b/packages/react-server/src/ReactFizzServer.js
index b6925b1c7b58b..5308263497d19 100644
--- a/packages/react-server/src/ReactFizzServer.js
+++ b/packages/react-server/src/ReactFizzServer.js
@@ -298,6 +298,7 @@ const CLOSED = 2;
 
 export opaque type Request = {
   destination: null | Destination,
+  epoch: number,
   flushScheduled: boolean,
   +resumableState: ResumableState,
   +renderState: RenderState,
@@ -380,6 +381,7 @@
   const abortSet: Set<Task> = new Set();
   const request: Request = {
     destination: null,
+    epoch: 0,
     flushScheduled: false,
     resumableState,
     renderState,
@@ -492,6 +494,7 @@
   const abortSet: Set<Task> = new Set();
   const request: Request = {
     destination: null,
+    epoch: 0,
     flushScheduled: false,
     resumableState: postponedState.resumableState,
     renderState,
@@ -594,8 +597,7 @@ function pingTask(request: Request, task: Task): void {
   const pingedTasks = request.pingedTasks;
   pingedTasks.push(task);
   if (request.pingedTasks.length === 1) {
-    request.flushScheduled = request.destination !== null;
-    scheduleWork(() => performWork(request));
+    startPerformingWork(request);
   }
 }
 
@@ -3815,9 +3817,6 @@ export function performWork(request: Request): void {
       retryTask(request, task);
     }
     pingedTasks.splice(0, i);
-    if (request.destination !== null) {
-      flushCompletedQueues(request, request.destination);
-    }
   } catch (error) {
     const errorInfo: ThrownInfo = {};
     logRecoverableError(request, error, errorInfo);
@@ -4279,7 +4278,6 @@ function flushCompletedQueues(
     // We don't need to check any partially completed segments because
     // either they have pending task or they're complete.
   ) {
-    request.flushScheduled = false;
     // We write the trailing tags but only if don't have any data to resume.
     // If we need to resume we'll write the postamble in the resume instead.
     if (!enablePostpone || request.trackedPostpones === null) {
@@ -4306,12 +4304,27 @@
   }
 }
 
+function completeWorkEpoch(request: Request) {
+  request.epoch++;
+  const destination = request.destination;
+  if (destination) {
+    flushCompletedQueues(request, destination);
+  }
+}
+
+function startPerformingWork(request: Request): void {
+  request.epoch++;
+  scheduleWork(() => performWork(request));
+  scheduleWork(() => {
+    completeWorkEpoch(request);
+  });
+}
+
 export function startWork(request: Request): void {
-  request.flushScheduled = request.destination !== null;
   if (supportsRequestStorage) {
-    scheduleWork(() => requestStorage.run(request, performWork, request));
+    requestStorage.run(request, startPerformingWork, request);
   } else {
-    scheduleWork(() => performWork(request));
+    startPerformingWork(request);
   }
   if (request.trackedPostpones === null) {
     // this is either a regular render or a resume. For regular render we want
@@ -4352,14 +4365,27 @@ function enqueueFlush(request: Request): void {
     request.destination !== null
   ) {
     request.flushScheduled = true;
+    const currentEpoch = request.epoch;
     scheduleWork(() => {
+      // In builds where scheduleWork is synchronous this will always initiate a
+      // flush immediately. That's not ideal but it's not what we're optimizing for
+      // and we ought to consider not using the sync form except for legacy. Regardless,
+      // the logic is still sound because the epoch and destination could not have
+      // changed, so while we're doing unnecessary checks here it still preserves the
+      // same semantics as the async case.
+
+      request.flushScheduled = false;
+      if (currentEpoch !== request.epoch) {
+        // We scheduled this flush when no work was being performed but since
+        // then we've started a new epoch (we're either rendering or we've already flushed)
+        // so we don't need to flush here anymore.
+      }
+
       // We need to existence check destination again here because it might go away
       // in between the enqueueFlush call and the work execution
       const destination = request.destination;
       if (destination) {
         flushCompletedQueues(request, destination);
-      } else {
-        request.flushScheduled = false;
       }
     });
   }
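
For illustration, the epoch bookkeeping this patch introduces can be reduced
to the minimal standalone sketch below. This is a simplified model, not the
real Fizz module: createRequest, performWork, and flushCompletedQueues are
stand-ins for the actual internals, and scheduleWork is modeled here as a
macrotask via setTimeout. The early return when the captured epoch is stale
reflects the behavior the added comments in enqueueFlush describe.

    // Stand-in request; the real Request carries much more state.
    function createRequest(destination) {
      return {destination, epoch: 0, flushScheduled: false, pingedTasks: []};
    }

    // Macrotask scheduler stand-in; some builds use a synchronous scheduleWork.
    const scheduleWork = callback => setTimeout(callback, 0);

    function performWork(request) {
      // ...placeholder for the real work loop over pinged tasks...
    }

    function flushCompletedQueues(request, destination) {
      // ...placeholder for writing completed rows/boundaries to the destination...
    }

    // Entering a work loop starts a new epoch, and a general-purpose flush is
    // scheduled right behind the render task so it runs once work completes.
    function startPerformingWork(request) {
      request.epoch++;
      scheduleWork(() => performWork(request));
      scheduleWork(() => completeWorkEpoch(request));
    }

    // Completing the work loop also starts a new epoch, invalidating any
    // out-of-band flush that was scheduled before rendering began.
    function completeWorkEpoch(request) {
      request.epoch++;
      const destination = request.destination;
      if (destination) {
        flushCompletedQueues(request, destination);
      }
    }

    // Out-of-band flushes (e.g. from Float) capture the current epoch. If the
    // epoch has moved on by the time the scheduled task runs, a render or flush
    // already started and this flush is no longer needed.
    function enqueueFlush(request) {
      if (
        request.flushScheduled === false &&
        request.pingedTasks.length === 0 &&
        request.destination !== null
      ) {
        request.flushScheduled = true;
        const currentEpoch = request.epoch;
        scheduleWork(() => {
          request.flushScheduled = false;
          if (currentEpoch !== request.epoch) {
            return;
          }
          const destination = request.destination;
          if (destination) {
            flushCompletedQueues(request, destination);
          }
        });
      }
    }

With this shape, a Float-triggered enqueueFlush that lands just before a
ping-driven render is superseded by the flush scheduled behind the work loop
rather than producing an extra flush of its own.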