From 585b6005a7aea8c839a259dcb5ce784951c53c8b Mon Sep 17 00:00:00 2001
From: Alfonso Noriega <alfonso.noriega@shopify.com>
Date: Fri, 13 Mar 2026 15:39:57 +0100
Subject: [PATCH 1/3] fix(cli-kit): batch large output chunks to prevent event
 loop blocking

When a POS UI extension throws a large stack trace (3MB+), all lines
arrive as a single write to ConcurrentOutput's Writable stream. The
synchronous stripAnsi + split + React state update causes a long render
cycle that blocks the Node.js event loop, making keyboard shortcuts
(q, p) unresponsive.

Fix: split chunks exceeding 100 lines into batches and schedule each via
setImmediate, yielding to the event loop between renders so Ink's
useInput hook can process keypresses between batches.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
---
 .../ui/components/ConcurrentOutput.test.tsx   | 35 +++++++++++++++
 .../node/ui/components/ConcurrentOutput.tsx   | 44 ++++++++++++++-----
 2 files changed, 69 insertions(+), 10 deletions(-)

diff --git a/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.test.tsx b/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.test.tsx
index a0b2d7757ae..e68b2522ee7 100644
--- a/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.test.tsx
+++ b/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.test.tsx
@@ -262,6 +262,41 @@ describe('ConcurrentOutput', () => {
     expect(logColumns[1]?.length).toBe(25 + 2)
   })
 
+  test('renders large chunks split into batches without dropping lines', async () => {
+    // Given - simulate a large stack trace (>100 lines) arriving as a single write
+    const processSync = new Synchronizer()
+    const lineCount = 250
+    const largeOutput = Array.from({length: lineCount}, (_, i) => `line ${i + 1}`).join('\n')
+
+    const processes = [
+      {
+        prefix: 'pos-ext',
+        action: async (stdout: Writable, _stderr: Writable, _signal: AbortSignal) => {
+          stdout.write(largeOutput)
+          processSync.resolve()
+        },
+      },
+    ]
+
+    // When - keepRunningAfterProcessesResolve prevents the component from unmounting
+    // before all setImmediate-batched state updates have been applied
+    const renderInstance = render(
+      <ConcurrentOutput
+        processes={processes}
+        abortSignal={new AbortController().signal}
+        keepRunningAfterProcessesResolve
+      />,
+    )
+    await processSync.promise
+    await waitForContent(renderInstance, `line ${lineCount}`)
+
+    // Then - all lines should be rendered
+    const frame = unstyled(renderInstance.lastFrame()!)
+    for (let i = 1; i <= lineCount; i++) {
+      expect(frame).toContain(`line ${i}`)
+    }
+  })
+
   test('rejects with the error thrown inside one of the processes', async () => {
     // Given
     const backendProcess = {
diff --git a/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx b/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx
index c868899c4ea..1abb6536765 100644
--- a/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx
+++ b/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx
@@ -8,6 +8,8 @@ import stripAnsi from 'strip-ansi'
 import {Writable} from 'stream'
 import {AsyncLocalStorage} from 'node:async_hooks'
 
+const MAX_LINES_PER_BATCH = 100
+
 export interface ConcurrentOutputProps {
   processes: OutputProcess[]
   prefixColumnSize?: number
@@ -141,16 +143,38 @@ const ConcurrentOutput: FunctionComponent<ConcurrentOutputProps> = ({
 
           const index = addPrefix(prefix, prefixes)
 
-          const lines = shouldStripAnsi ? stripAnsi(log).split(/\n/) : log.split(/\n/)
-          setProcessOutput((previousProcessOutput) => [
-            ...previousProcessOutput,
-            {
-              color: lineColor(index),
-              prefix,
-              lines,
-            },
-          ])
-          next()
+          const allLines = shouldStripAnsi ? stripAnsi(log).split(/\n/) : log.split(/\n/)
+
+          if (allLines.length <= MAX_LINES_PER_BATCH) {
+            setProcessOutput((previousProcessOutput) => [
+              ...previousProcessOutput,
+              {color: lineColor(index), prefix, lines: allLines},
+            ])
+            next()
+            return
+          }
+
+          // For large chunks (e.g. big stack traces), split into batches and yield
+          // between each via setImmediate so the event loop can process keyboard
+          // events (q, p, etc.) between renders instead of blocking.
+          const batches: string[][] = []
+          for (let i = 0; i < allLines.length; i += MAX_LINES_PER_BATCH) {
+            batches.push(allLines.slice(i, i + MAX_LINES_PER_BATCH))
+          }
+
+          const scheduleBatch = (batchIndex: number) => {
+            if (batchIndex >= batches.length) {
+              next()
+              return
+            }
+            setProcessOutput((previousProcessOutput) => [
+              ...previousProcessOutput,
+              {color: lineColor(index), prefix, lines: batches[batchIndex]!},
+            ])
+            setImmediate(() => scheduleBatch(batchIndex + 1))
+          }
+
+          scheduleBatch(0)
         },
       })
     },

From af49f9bf0d1721cba22b3e967a897fcfdb52a1a8 Mon Sep 17 00:00:00 2001
From: Alfonso Noriega <alfonso.noriega@shopify.com>
Date: Tue, 17 Mar 2026 16:12:06 +0100
Subject: [PATCH 2/3] perf(cli-kit): reduce MAX_LINES_PER_BATCH from 100 to 20

Benchmarking with a 3.85MB stack trace (30k lines) shows:

- batch=100: 121ms max event loop block
- batch=20: 44ms max event loop block (vs 27,266ms on main)
- batch=10: 35ms max event loop block

Batch=20 hits the sweet spot: 4x lower max block than 100 with
essentially the same total flush time (~380ms vs ~430ms).
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
---
 .../cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx b/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx
index 1abb6536765..02d5596ce92 100644
--- a/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx
+++ b/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx
@@ -8,7 +8,7 @@ import stripAnsi from 'strip-ansi'
 import {Writable} from 'stream'
 import {AsyncLocalStorage} from 'node:async_hooks'
 
-const MAX_LINES_PER_BATCH = 100
+const MAX_LINES_PER_BATCH = 20
 
 export interface ConcurrentOutputProps {
   processes: OutputProcess[]

From 1afb5c2a1d9cbdcf5572fe976960bbbd402feff7 Mon Sep 17 00:00:00 2001
From: Alfonso Noriega <alfonso.noriega@shopify.com>
Date: Tue, 17 Mar 2026 17:35:21 +0100
Subject: [PATCH 3/3] refactor(cli-kit): replace recursive setImmediate
 batching with Transform+pipe

Replace the manual recursive-setImmediate approach with a proper Node.js
stream pipeline:

- Transform (splitter): reads outputContextStore while still in the
  writer's async context, strips ANSI, and splits large chunks into
  MAX_LINES_PER_BATCH (20) line pieces. Single-batch writes pass through
  unchanged.

- Writable (sink): renders each batch into React state. For large-chunk
  batches setImmediate(next) yields the event loop between renders so
  keyboard shortcuts (q, p) can fire. It also creates real Node.js
  backpressure: when next() is pending the pipe pauses the splitter,
  capping memory use from fast producers without manual bookkeeping.
  Single-batch writes call next() synchronously to preserve existing
  rendering behaviour.
Benchmark (3.85 MB / 30k-line stack trace):

  main:          27,266ms max event loop block
  recursive fix:     44ms max event loop block
  stream fix:        32ms max event loop block

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
---
 .../node/ui/components/ConcurrentOutput.tsx   | 74 ++++++++++---------
 1 file changed, 41 insertions(+), 33 deletions(-)

diff --git a/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx b/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx
index 02d5596ce92..53f0ec40d2e 100644
--- a/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx
+++ b/packages/cli-kit/src/private/node/ui/components/ConcurrentOutput.tsx
@@ -5,7 +5,7 @@ import {Box, Static, Text, TextProps, useApp} from 'ink'
 import figures from 'figures'
 import stripAnsi from 'strip-ansi'
 
-import {Writable} from 'stream'
+import {Transform, Writable} from 'stream'
 import {AsyncLocalStorage} from 'node:async_hooks'
 
 const MAX_LINES_PER_BATCH = 20
@@ -134,49 +134,57 @@ const ConcurrentOutput: FunctionComponent<ConcurrentOutputProps> = ({
 
   const writableStream = useCallback(
     (process: OutputProcess, prefixes: string[]) => {
-      return new Writable({
-        write(chunk, _encoding, next) {
+      // Transform: splits incoming chunks into MAX_LINES_PER_BATCH-line pieces.
+      // Runs synchronously inside the writer's async context, so outputContextStore
+      // (prefix, stripAnsi overrides set by useConcurrentOutputContext) is available here.
+      const splitter = new Transform({
+        readableObjectMode: true,
+        transform(chunk, _encoding, callback) {
           const context = outputContextStore.getStore()
           const prefix = context?.outputPrefix ?? process.prefix
           const shouldStripAnsi = context?.stripAnsi ?? true
           const log = chunk.toString('utf8').replace(/(\n)$/, '')
-
-          const index = addPrefix(prefix, prefixes)
-
           const allLines = shouldStripAnsi ? stripAnsi(log).split(/\n/) : log.split(/\n/)
-
-          if (allLines.length <= MAX_LINES_PER_BATCH) {
-            setProcessOutput((previousProcessOutput) => [
-              ...previousProcessOutput,
-              {color: lineColor(index), prefix, lines: allLines},
-            ])
-            next()
-            return
-          }
-
-          // For large chunks (e.g. big stack traces), split into batches and yield
-          // between each via setImmediate so the event loop can process keyboard
-          // events (q, p, etc.) between renders instead of blocking.
-          const batches: string[][] = []
+          // Flag batches that came from a large chunk so the sink knows to yield
+          // between them. Single-batch writes keep synchronous next() to preserve
+          // existing behaviour for normal (small) output.
+          const isLargeChunk = allLines.length > MAX_LINES_PER_BATCH
           for (let i = 0; i < allLines.length; i += MAX_LINES_PER_BATCH) {
-            batches.push(allLines.slice(i, i + MAX_LINES_PER_BATCH))
+            this.push({prefix, lines: allLines.slice(i, i + MAX_LINES_PER_BATCH), isLargeChunk})
           }
+          callback()
+        },
+      })
 
-          const scheduleBatch = (batchIndex: number) => {
-            if (batchIndex >= batches.length) {
-              next()
-              return
-            }
-            setProcessOutput((previousProcessOutput) => [
-              ...previousProcessOutput,
-              {color: lineColor(index), prefix, lines: batches[batchIndex]!},
-            ])
-            setImmediate(() => scheduleBatch(batchIndex + 1))
-          }
-
-          scheduleBatch(0)
+      // Writable: renders each batch into React state.
+      // For large-chunk batches, setImmediate(next) yields the event loop so keyboard
+      // shortcuts (q, p) can fire between renders, and creates real Node.js backpressure:
+      // when next() is pending the pipe pauses the splitter, preventing unbounded
+      // memory growth from fast producers.
+      // For normal output (single-batch writes) next() is called synchronously to
+      // preserve the existing rendering behaviour.
+ const sink = new Writable({ + objectMode: true, + write( + {prefix, lines, isLargeChunk}: {prefix: string; lines: string[]; isLargeChunk: boolean}, + _encoding, + next, + ) { + const index = addPrefix(prefix, prefixes) + setProcessOutput((previousProcessOutput) => [ + ...previousProcessOutput, + {color: lineColor(index), prefix, lines}, + ]) + if (isLargeChunk) { + setImmediate(next) + } else { + next() } - - scheduleBatch(0) }, }) + + splitter.pipe(sink) + return splitter }, [lineColor], )