perf improvements + /login fix

This commit is contained in:
x1xhlol
2026-04-01 17:12:45 +02:00
Unverified
parent 7282a40549
commit c5912b625e
21 changed files with 942 additions and 198 deletions

61
PERFORMANCE.md Normal file
View File

@@ -0,0 +1,61 @@
# Performance Workflow
This repository includes a lightweight performance workflow built around the
existing startup and query profilers.
## Build Once
```bash
bun run build
```
## Baseline Scenarios
1. Cold start and command latency
```bash
bun run perf:startup -- --help
```
2. Interactive startup in a real session
```bash
bun run perf:startup --
```
Use your OS task manager while the app is idle to inspect CPU, RSS, and handle
count after first render.
3. Headless query / TTFT
```bash
bun run perf:query -- --print "Summarize the current directory."
```
This requires whatever auth/config is normally needed for the chosen provider.
## Artifacts
Each run writes an isolated artifact bundle under `.perf-artifacts/`:
- `summary.json`: wall-clock timing and parsed profiler highlights
- `config/startup-perf/*`: startup profiler output
- `config/query-perf/*`: query profiler output
- `debug/*`: debug logs for runs that need them
## Regression Checks
Run the focused regression checks for the new performance helpers with:
```bash
bun run perf:regression
```
## Suggested Before/After Loop
1. Run the startup baseline.
2. Run the headless query baseline.
3. If you are changing long-running behavior, also launch an interactive session and
watch idle CPU and memory for a few minutes.
4. After each optimization pass, compare the new `.perf-artifacts` summary
   against the summary from the previous (pre-change) run.

View File

@@ -4,7 +4,7 @@
Better-Clawd exists because the original had a genuinely great core idea and too many self-inflicted problems around it. This fork keeps what worked, fixes what did not, and gives people real choice over how they log in, which provider they use, and how much of the product they actually want phoning home.
No telemetry. No vendor lock-in. Better long-session performance. Less corporate baggage.
No telemetry. No vendor lock-in. Faster startup, lower idle overhead, and better long-session performance than the original Claude Code. Less corporate baggage.
[NPM package](https://www.npmjs.com/package/better-clawd)
@@ -29,9 +29,18 @@ Better-Clawd is the response to that.
- It keeps the good parts of the original UX
- It removes telemetry and unnecessary phone-home behavior
- It supports multiple providers without turning setup into a science project
- It improves performance over the original Claude Code, especially in longer sessions
- It improves performance over the original Claude Code, with better startup behavior, less idle background churn, and smoother long sessions
- It is easier to inspect, modify, and run on your own terms
## Performance
Better-Clawd is intentionally tuned to be leaner than upstream Claude Code:
- Lower startup and initialization cost
- Less background polling and idle CPU churn
- Better memory and render behavior during long transcript-heavy sessions
- Focused performance workflow and regression checks in `PERFORMANCE.md`
## What Better-Clawd Changes
- Full Better-Clawd rebrand across the CLI, UI, config paths, installers, and app identity
@@ -88,7 +97,7 @@ OPENROUTER_BASE_URL=https://openrouter.ai/api/v1
## What You Get
- Better provider freedom
- Better performance than the original Claude Code
- Better performance than the original Claude Code across startup, idle usage, and long sessions
- OpenAI and OpenRouter support without weird bolt-on hacks
- Less phone-home behavior
- A CLI that feels more practical, more open, and more yours

View File

@@ -1,6 +1,6 @@
{
"name": "better-clawd",
"version": "0.1.0",
"version": "0.1.1",
"description": "Claude Code, but better.",
"type": "module",
"bin": {
@@ -16,6 +16,9 @@
"build": "bun run scripts/build.ts",
"start": "node dist/cli.mjs",
"typecheck": "tsc --noEmit",
"perf:startup": "bun run build && bun run scripts/perf.ts startup --",
"perf:query": "bun run build && bun run scripts/perf.ts query --",
"perf:regression": "bun test src/utils/task/TaskOutput.test.ts src/utils/cronScheduler.test.ts src/services/tools/toolOrchestration.test.ts",
"smoke": "bun run build && node dist/cli.mjs --version",
"prepack": "npm run build"
},

163
scripts/perf.ts Normal file
View File

@@ -0,0 +1,163 @@
import { spawn } from 'child_process'
import {
existsSync,
mkdirSync,
readdirSync,
readFileSync,
statSync,
writeFileSync,
} from 'fs'
import { join, resolve } from 'path'
type Mode = 'startup' | 'query'
type RunSummary = {
mode: Mode
command: string[]
artifactDir: string
startupReportPath: string | null
queryReportPath: string | null
wallTimeMs: number
exitCode: number
startupTotalMs: number | null
queryTtftMs: number | null
queryTotalTimeMs: number | null
queryPreApiOverheadMs: number | null
}
/** Print an error to stderr and terminate the process with a non-zero code. */
function fail(message: string): never {
  process.stderr.write([message, ''].join('\n'))
  process.exit(1)
}
/**
 * Parse the perf-script CLI arguments.
 *
 * The first positional argument selects the mode ('startup' or 'query');
 * everything after an optional `--` separator is forwarded verbatim to the
 * CLI under test. When no separator is present, all remaining arguments are
 * forwarded as-is.
 */
function parseArgs(argv: string[]): { mode: Mode; cliArgs: string[] } {
  const mode = argv[0]
  const rest = argv.slice(1)
  if (mode !== 'startup' && mode !== 'query') {
    fail(
      'Usage: bun run scripts/perf.ts <startup|query> -- <better-clawd args...>',
    )
  }
  const separator = rest.indexOf('--')
  return {
    mode,
    cliArgs: separator === -1 ? rest : rest.slice(separator + 1),
  }
}
/**
 * Abort unless a built CLI bundle exists at dist/cli.mjs.
 * Perf runs always exercise the built artifact, never the source tree.
 */
function ensureBuilt(): void {
  const bundlePath = resolve('dist/cli.mjs')
  if (existsSync(bundlePath)) {
    return
  }
  fail('Missing dist/cli.mjs. Run `bun run build` before running perf scripts.')
}
/**
 * Build a filesystem-safe run identifier from the current UTC timestamp:
 * ISO-8601 with ':' and '.' swapped for '-', since both are awkward in
 * file and directory names.
 */
function getRunId(): string {
  const timestamp = new Date().toISOString()
  return timestamp.split(/[:.]/).join('-')
}
/**
 * Return the most recently modified regular file in `dir`, or null when the
 * directory does not exist or contains no files.
 *
 * Each entry is stat'ed exactly once. The original re-ran statSync inside
 * the sort comparator, costing O(n log n) syscalls and racing against
 * concurrent writes between comparisons; a single linear max-scan over the
 * cached stats is both cheaper and consistent within one pass.
 */
function getLatestFile(dir: string): string | null {
  if (!existsSync(dir)) return null
  const files = readdirSync(dir)
    .map(name => {
      const path = join(dir, name)
      return { path, stats: statSync(path) }
    })
    .filter(entry => entry.stats.isFile())
  let latest: { path: string; mtimeMs: number } | null = null
  for (const { path, stats } of files) {
    // Strict '>' keeps the earliest directory entry on mtime ties, matching
    // the original stable descending sort.
    if (!latest || stats.mtimeMs > latest.mtimeMs) {
      latest = { path, mtimeMs: stats.mtimeMs }
    }
  }
  return latest?.path ?? null
}
/**
 * Pull a millisecond value out of a profiler report line. `regex` must carry
 * the numeric portion in its first capture group. Returns null when the
 * pattern is absent or the captured text is not a finite number.
 */
function extractMs(report: string, regex: RegExp): number | null {
  const captured = report.match(regex)?.[1]
  if (!captured) return null
  const ms = Number.parseFloat(captured)
  return Number.isFinite(ms) ? ms : null
}
/**
 * Read a UTF-8 text file, or return null when no path was discovered
 * (e.g. a profiler that did not run left no report behind).
 */
function readMaybe(path: string | null): string | null {
  return path ? readFileSync(path, 'utf8') : null
}
/**
 * Launch the built CLI once under profiling env vars and collect a summary.
 *
 * Side effects: creates `config/` and `debug/` under `artifactDir`, spawns
 * `node dist/cli.mjs` with inherited stdio, and reads whatever profiler
 * reports the run left behind in the isolated config directory.
 *
 * @param mode        'startup' or 'query' — selects which profiler flag is set.
 * @param cliArgs     Arguments forwarded verbatim to the CLI under test.
 * @param artifactDir Pre-created per-run directory for all artifacts.
 * @returns Timing summary combining wall-clock time with parsed report values.
 */
async function runCommand(
  mode: Mode,
  cliArgs: string[],
  artifactDir: string,
): Promise<RunSummary> {
  const configDir = join(artifactDir, 'config')
  const debugDir = join(artifactDir, 'debug')
  mkdirSync(configDir, { recursive: true })
  mkdirSync(debugDir, { recursive: true })
  const childArgs = ['dist/cli.mjs', ...cliArgs]
  // Query runs get a debug log by default; inject --debug-file unless the
  // caller already supplied one in either form (flag + value, or flag=value).
  if (
    mode === 'query' &&
    !cliArgs.some(
      arg => arg === '--debug-file' || arg.startsWith('--debug-file='),
    )
  ) {
    childArgs.splice(1, 0, '--debug-file', join(debugDir, 'query-debug.txt'))
  }
  // Isolate config/debug output per run and enable exactly one profiler.
  const env = {
    ...process.env,
    BETTER_CLAWD_CONFIG_DIR: configDir,
    CLAUDE_CODE_DEBUG_LOGS_DIR: debugDir,
    CLAUDE_CODE_PROFILE_STARTUP: mode === 'startup' ? '1' : '0',
    CLAUDE_CODE_PROFILE_QUERY: mode === 'query' ? '1' : '0',
  }
  const startedAt = Date.now()
  // stdio is inherited so interactive sessions behave normally. A null exit
  // code (child killed by a signal) is reported as failure (1).
  const exitCode = await new Promise<number>((resolveExit, reject) => {
    const child = spawn('node', childArgs, {
      cwd: resolve('.'),
      env,
      stdio: 'inherit',
    })
    child.on('error', reject)
    child.on('exit', code => resolveExit(code ?? 1))
  })
  const wallTimeMs = Date.now() - startedAt
  // Profilers write timestamped reports into the isolated config dir; the
  // newest file in each subdirectory is taken as this run's report, if any.
  const startupReportPath = getLatestFile(join(configDir, 'startup-perf'))
  const queryReportPath = getLatestFile(join(configDir, 'query-perf'))
  const startupReport = readMaybe(startupReportPath)
  const queryReport = readMaybe(queryReportPath)
  return {
    mode,
    command: ['node', ...childArgs],
    artifactDir,
    startupReportPath,
    queryReportPath,
    wallTimeMs,
    exitCode,
    // Each metric is null when its report is missing or the line didn't parse.
    startupTotalMs: startupReport
      ? extractMs(startupReport, /Total startup time:\s+([0-9.]+)ms/)
      : null,
    queryTtftMs: queryReport
      ? extractMs(queryReport, /Total TTFT:\s+([0-9.]+)ms/)
      : null,
    queryTotalTimeMs: queryReport
      ? extractMs(queryReport, /Total time:\s+([0-9.]+)ms/)
      : null,
    queryPreApiOverheadMs: queryReport
      ? extractMs(queryReport, /Total pre-API overhead\s+([0-9.]+)ms/)
      : null,
  }
}
// Script entry point: validate arguments, require a fresh build, then run the
// chosen scenario into an isolated .perf-artifacts/<mode>-<timestamp> dir.
const { mode, cliArgs } = parseArgs(process.argv.slice(2))
ensureBuilt()
const artifactDir = resolve('.perf-artifacts', `${mode}-${getRunId()}`)
mkdirSync(artifactDir, { recursive: true })
const summary = await runCommand(mode, cliArgs, artifactDir)
const summaryPath = join(artifactDir, 'summary.json')
// Persist and echo the summary so both humans and tooling can consume it.
writeFileSync(summaryPath, `${JSON.stringify(summary, null, 2)}\n`, 'utf8')
process.stdout.write(`${JSON.stringify(summary, null, 2)}\n`)
process.stdout.write(`Summary written to ${summaryPath}\n`)
// Propagate the child's failure so scripted/CI perf runs fail loudly.
if (summary.exitCode !== 0) {
  process.exit(summary.exitCode)
}

View File

@@ -544,9 +544,21 @@ export async function runHeadless(
proactiveModule.activateProactive('command')
}
// Periodically force a full GC to keep memory usage in check
// Headless sessions can run for a long time, but forcing a full GC every
// second burns CPU even when memory is healthy. Poll less often and only
// collect aggressively once the heap is actually elevated, unless the user
// explicitly opts into the old behavior for profiling/debugging.
if (typeof Bun !== 'undefined') {
const gcTimer = setInterval(Bun.gc, 1000)
const GC_INTERVAL_MS = 15_000
const GC_HEAP_THRESHOLD_BYTES = 768 * 1024 * 1024
const gcTimer = setInterval(() => {
if (
isEnvTruthy(process.env.CLAUDE_CODE_FORCE_PERIODIC_GC) ||
process.memoryUsage().heapUsed >= GC_HEAP_THRESHOLD_BYTES
) {
Bun.gc(true)
}
}, GC_INTERVAL_MS)
gcTimer.unref()
}

File diff suppressed because one or more lines are too long

View File

@@ -377,7 +377,13 @@ const MessagesImpl = ({
columns
} = useTerminalSize();
const toggleShowAllShortcut = useShortcutDisplay('transcript:toggleShowAll', 'Transcript', 'Ctrl+E');
const normalizedMessages = useMemo(() => normalizeMessages(messages).filter(isNotEmptyMessage), [messages]);
// In the main-screen renderer, pre-compact history already lives in native
// terminal scrollback, so normalizing it again just adds O(n) work on every
// render. Fullscreen/transcript modes still need the full in-memory history.
const normalizationSourceMessages = useMemo(() => verbose || isFullscreenEnvEnabled() ? messages : getMessagesAfterCompactBoundary(messages, {
includeSnipped: true
}), [messages, verbose]);
const normalizedMessages = useMemo(() => normalizeMessages(normalizationSourceMessages).filter(isNotEmptyMessage), [normalizationSourceMessages]);
// Check if streaming thinking should be visible (streaming or within 30s timeout)
const isStreamingThinkingVisible = useMemo(() => {
@@ -485,18 +491,7 @@ const MessagesImpl = ({
hasTruncatedMessages: hasTruncatedMessages_0,
hiddenMessageCount: hiddenMessageCount_0
} = useMemo(() => {
// In fullscreen mode the alt buffer has no native scrollback, so the
// compact-boundary filter just hides history the ScrollBox could
// otherwise scroll to. Main-screen mode keeps the filter — pre-compact
// rows live above the viewport in native scrollback there, and
// re-rendering them triggers full resets.
// includeSnipped: UI rendering keeps snipped messages for scrollback
// (this PR's core goal — full history in UI, filter only for the model).
// Also avoids a UUID mismatch: normalizeMessages derives new UUIDs, so
// projectSnippedView's check against original removedUuids would fail.
const compactAwareMessages = verbose || isFullscreenEnvEnabled() ? normalizedMessages : getMessagesAfterCompactBoundary(normalizedMessages, {
includeSnipped: true
});
const compactAwareMessages = normalizedMessages;
const messagesToShowNotTruncated = reorderMessagesInUI(compactAwareMessages.filter((msg_2): msg_2 is Exclude<NormalizedMessage, ProgressMessageType> => msg_2.type !== 'progress')
// CC-724: drop attachment messages that AttachmentMessage renders as
// null (hook_success, hook_additional_context, hook_cancelled, etc.)

View File

@@ -0,0 +1,88 @@
import * as React from 'react'
import { useState } from 'react'
import { Box, Text } from '../ink.js'
import { saveOpenRouterApiKey } from '../utils/auth.js'
import { Spinner } from './Spinner.js'
import TextInput from './TextInput.js'
type OpenRouterLoginFlowProps = {
  /** Invoked after the key is saved; the parent is expected to unmount us. */
  onDone: () => void
  /** Optional replacement for the default intro line. */
  startingMessage?: string
}

/**
 * Interactive login flow that captures an OpenRouter API key and persists it.
 *
 * While the key is being saved, a spinner is shown. On failure the error is
 * surfaced inline and the input becomes editable again. On success `onDone`
 * fires, which is expected to unmount this component — so no state is
 * touched after a successful save. (The original reset `isBusy` in a
 * `finally`, triggering a React state update on an unmounted component.)
 */
export function OpenRouterLoginFlow({
  onDone,
  startingMessage,
}: OpenRouterLoginFlowProps): React.ReactNode {
  const [isBusy, setIsBusy] = useState(false)
  const [status, setStatus] = useState<string | null>(null)
  const [inputValue, setInputValue] = useState('')
  const [cursorOffset, setCursorOffset] = useState(0)
  async function handleSubmit(value: string): Promise<void> {
    const trimmed = value.trim()
    if (!trimmed) {
      return
    }
    setIsBusy(true)
    setStatus(null)
    try {
      await saveOpenRouterApiKey(trimmed)
      // Success: onDone unmounts this component, so don't set state after it.
      onDone()
    } catch (error) {
      setStatus(error instanceof Error ? error.message : String(error))
      setIsBusy(false)
    }
  }
  if (isBusy) {
    return (
      <Box flexDirection="column" gap={1}>
        <Box>
          <Spinner />
          <Text>Configuring OpenRouter login for Better-Clawd...</Text>
        </Box>
        <Text dimColor={true}>
          OpenRouter support uses your OpenRouter API key with the Responses API
          endpoint.
        </Text>
      </Box>
    )
  }
  return (
    <Box flexDirection="column" gap={1}>
      <Text>
        {startingMessage ??
          'Better-Clawd can use OpenRouter with your OpenRouter API key.'}
      </Text>
      <Text dimColor={true}>
        Paste your OpenRouter key to use `https://openrouter.ai/api/v1` and the
        Responses API compatibility layer.
      </Text>
      <Box>
        <Text>Paste your OpenRouter API key:</Text>
        <TextInput
          value={inputValue}
          onChange={setInputValue}
          onSubmit={handleSubmit}
          onExit={() => {
            setInputValue('')
            setCursorOffset(0)
          }}
          cursorOffset={cursorOffset}
          onChangeCursorOffset={setCursorOffset}
          columns={72}
          mask="*"
        />
      </Box>
      {status ? <Text color="error">{status}</Text> : null}
      <Text dimColor={true}>
        Press <Text bold={true}>Enter</Text> to save, or <Text bold={true}>Esc</Text>{' '}
        to cancel.
      </Text>
    </Box>
  )
}

View File

@@ -37,7 +37,6 @@ import { hasGrowthBookEnvOverride, initializeGrowthBook, refreshGrowthBookAfterA
import { fetchBootstrapData } from './services/api/bootstrap.js';
import { type DownloadResult, downloadSessionFiles, type FilesApiConfig, parseFileSpecs } from './services/api/filesApi.js';
import { prefetchPassesEligibility } from './services/api/referral.js';
import { prefetchOfficialMcpUrls } from './services/mcp/officialRegistry.js';
import type { McpSdkServerConfig, McpServerConfig, ScopedMcpServerConfig } from './services/mcp/types.js';
import { isPolicyAllowed, loadPolicyLimits, refreshPolicyLimits, waitForPolicyLimitsToLoad } from './services/policyLimits/index.js';
import { loadRemoteManagedSettings, refreshRemoteManagedSettings } from './services/remoteManagedSettings/index.js';
@@ -58,8 +57,6 @@ import { createSystemMessage, createUserMessage } from './utils/messages.js';
import { getPlatform } from './utils/platform.js';
import { getBaseRenderOptions } from './utils/renderOptions.js';
import { getSessionIngressAuthToken } from './utils/sessionIngressAuth.js';
import { settingsChangeDetector } from './utils/settings/changeDetector.js';
import { skillChangeDetector } from './utils/skills/skillChangeDetector.js';
import { jsonParse, writeFileSync_DEPRECATED } from './utils/slowOperations.js';
import { computeInitialTeamContext } from './utils/swarm/reconnection.js';
import { initializeWarningHandler } from './utils/warningHandler.js';
@@ -90,12 +87,10 @@ import type { StatsStore } from './context/stats.js';
import { launchAssistantInstallWizard, launchAssistantSessionChooser, launchInvalidSettingsDialog, launchResumeChooser, launchSnapshotUpdateDialog, launchTeleportRepoMismatchDialog, launchTeleportResumeWrapper } from './dialogLaunchers.js';
import { SHOW_CURSOR } from './ink/termio/dec.js';
import { exitWithError, exitWithMessage, getRenderContext, renderAndRun, showSetupScreens } from './interactiveHelpers.js';
import { initBuiltinPlugins } from './plugins/bundled/index.js';
/* eslint-enable @typescript-eslint/no-require-imports */
import { checkQuotaStatus } from './services/claudeAiLimits.js';
import { getMcpToolsCommandsAndResources, prefetchAllMcpResources } from './services/mcp/client.js';
import { VALID_INSTALLABLE_SCOPES, VALID_UPDATE_SCOPES } from './services/plugins/pluginCliCommands.js';
import { initBundledSkills } from './skills/bundled/index.js';
import type { AgentColorName } from './tools/AgentTool/agentColorManager.js';
import { getActiveAgentsFromList, getAgentDefinitionsWithOverrides, isBuiltInAgent, isCustomAgent, parseAgentsFromJson } from './tools/AgentTool/loadAgentsDir.js';
import type { LogOption } from './types/logs.js';
@@ -119,8 +114,6 @@ import { getDefaultMainLoopModel, getUserSpecifiedModelSetting, normalizeModelSt
import { ensureModelStringsInitialized } from './utils/model/modelStrings.js';
import { PERMISSION_MODES } from './utils/permissions/PermissionMode.js';
import { checkAndDisableBypassPermissions, getAutoModeEnabledStateIfCached, initializeToolPermissionContext, initialPermissionModeFromCLI, isDefaultPermissionModeAuto, parseToolListFromCLI, removeDangerousPermissions, stripDangerousPermissionsForAutoMode, verifyAutoModeGateAccess } from './utils/permissions/permissionSetup.js';
import { cleanupOrphanedPluginVersionsInBackground } from './utils/plugins/cacheUtils.js';
import { initializeVersionedPlugins } from './utils/plugins/installedPluginsManager.js';
import { getManagedPluginNames } from './utils/plugins/managedPlugins.js';
import { getGlobExclusionsForPluginCache } from './utils/plugins/orphanedPluginFilter.js';
import { getPluginSeedDirs } from './utils/plugins/pluginDirectories.js';
@@ -146,7 +139,6 @@ import { clearServerCache } from 'src/services/mcp/client.js';
import { areMcpConfigsAllowedWithEnterpriseMcpConfig, dedupClaudeAiMcpServers, doesEnterpriseMcpConfigExist, filterMcpServersByPolicy, getClaudeCodeMcpConfigs, getMcpServerSignature, parseMcpConfig, parseMcpConfigFromFilePath } from 'src/services/mcp/config.js';
import { excludeCommandsByServer, excludeResourcesByServer } from 'src/services/mcp/utils.js';
import { isXaaEnabled } from 'src/services/mcp/xaaIdpLogin.js';
import { getRelevantTips } from 'src/services/tips/tipRegistry.js';
import { logContextMetrics } from 'src/utils/api.js';
import { CLAUDE_IN_CHROME_MCP_SERVER_NAME, isClaudeInChromeMCPServer } from 'src/utils/claudeInChrome/common.js';
import { registerCleanup } from 'src/utils/cleanupRegistry.js';
@@ -159,7 +151,6 @@ import { errorMessage, getErrnoCode, isENOENT, TeleportOperationError, toError }
import { getFsImplementation, safeResolvePath } from 'src/utils/fsOperations.js';
import { gracefulShutdown, gracefulShutdownSync } from 'src/utils/gracefulShutdown.js';
import { setAllHookEventsEnabled } from 'src/utils/hooks/hookEvents.js';
import { refreshModelCapabilities } from 'src/utils/model/modelCapabilities.js';
import { peekForStdinData, writeToStderr } from 'src/utils/process.js';
import { setCwd } from 'src/utils/Shell.js';
import { type ProcessedResume, processResumedConversation } from 'src/utils/sessionRestore.js';
@@ -379,6 +370,32 @@ function prefetchSystemContextIfSafe(): void {
// Otherwise, don't prefetch - wait for trust to be established first
}
function runDeferredStartupTask(
label: string,
task: () => Promise<void>,
): void {
void task().catch(error => {
logForDebugging(
`[STARTUP] deferred ${label} failed: ${errorMessage(error)}`,
{ level: 'warn' },
);
});
}
async function runPluginRuntimeBookkeeping(): Promise<void> {
const [
{ initializeVersionedPlugins },
{ cleanupOrphanedPluginVersionsInBackground },
] = await Promise.all([
import('./utils/plugins/installedPluginsManager.js'),
import('./utils/plugins/cacheUtils.js'),
]);
await initializeVersionedPlugins();
profileCheckpoint('action_after_plugins_init');
await cleanupOrphanedPluginVersionsInBackground();
void getGlobExclusionsForPluginCache();
}
/**
* Start background prefetches and housekeeping that are NOT needed before first render.
* These are deferred from setup() to reduce event loop contention and child process
@@ -404,7 +421,10 @@ export function startDeferredPrefetches(): void {
void initUser();
void getUserContext();
prefetchSystemContextIfSafe();
void getRelevantTips();
runDeferredStartupTask('tips prefetch', async () => {
const { getRelevantTips } = await import('./services/tips/tipRegistry.js');
await getRelevantTips();
});
if (isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) && !isEnvTruthy(process.env.CLAUDE_CODE_SKIP_BEDROCK_AUTH)) {
void prefetchAwsCredentialsAndBedRockInfoIfSafe();
}
@@ -415,13 +435,25 @@ export function startDeferredPrefetches(): void {
// Analytics and feature flag initialization
void initializeAnalyticsGates();
void prefetchOfficialMcpUrls();
void refreshModelCapabilities();
runDeferredStartupTask('official MCP registry prefetch', async () => {
const { prefetchOfficialMcpUrls } = await import('./services/mcp/officialRegistry.js');
await prefetchOfficialMcpUrls();
});
runDeferredStartupTask('model capabilities refresh', async () => {
const { refreshModelCapabilities } = await import('./utils/model/modelCapabilities.js');
await refreshModelCapabilities();
});
// File change detectors deferred from init() to unblock first render
void settingsChangeDetector.initialize();
runDeferredStartupTask('settings change detector', async () => {
const { settingsChangeDetector } = await import('./utils/settings/changeDetector.js');
await settingsChangeDetector.initialize();
});
if (!isBareMode()) {
void skillChangeDetector.initialize();
runDeferredStartupTask('skill change detector', async () => {
const { skillChangeDetector } = await import('./utils/skills/skillChangeDetector.js');
await skillChangeDetector.initialize();
});
}
// Event loop stall detector — logs when the main thread is blocked >500ms
@@ -1921,6 +1953,10 @@ async function run(): Promise<CommanderCommand> {
// reads synchronously. Previously ran inside setup() after ~20ms of
// await points, so the parallel getCommands() memoized an empty list.
if (process.env.CLAUDE_CODE_ENTRYPOINT !== 'local-agent') {
const [{ initBuiltinPlugins }, { initBundledSkills }] = await Promise.all([
import('./plugins/bundled/index.js'),
import('./skills/bundled/index.js'),
]);
initBuiltinPlugins();
initBundledSkills();
}
@@ -2556,17 +2592,11 @@ async function run(): Promise<CommanderCommand> {
// skip — no-op
} else if (isNonInteractiveSession) {
// In headless mode, await to ensure plugin sync completes before CLI exits
await initializeVersionedPlugins();
profileCheckpoint('action_after_plugins_init');
void cleanupOrphanedPluginVersionsInBackground().then(() => getGlobExclusionsForPluginCache());
await runPluginRuntimeBookkeeping();
} else {
// In interactive mode, fire-and-forget — this is purely bookkeeping
// that doesn't affect runtime behavior of the current session
void initializeVersionedPlugins().then(async () => {
profileCheckpoint('action_after_plugins_init');
await cleanupOrphanedPluginVersionsInBackground();
void getGlobExclusionsForPluginCache();
});
void runPluginRuntimeBookkeeping().catch(error => logError(toError(error)));
}
const setupTrigger = initOnly || init ? 'init' : maintenance ? 'maintenance' : null;
if (initOnly) {

View File

@@ -150,6 +150,7 @@ import { useMergedTools } from '../hooks/useMergedTools.js';
import { mergeAndFilterTools } from '../utils/toolPool.js';
import { useMergedCommands } from '../hooks/useMergedCommands.js';
import { useSkillsChange } from '../hooks/useSkillsChange.js';
import { useSettingsChange } from '../hooks/useSettingsChange.js';
import { useManagePlugins } from '../hooks/useManagePlugins.js';
import { Messages } from '../components/Messages.js';
import { TaskListV2 } from '../components/TaskListV2.js';
@@ -295,6 +296,32 @@ const EMPTY_MCP_CLIENTS: MCPServerConnection[] = [];
const EMPTY_TOOL_USE_CONFIRM_QUEUE: ToolUseConfirm[] = [];
const EMPTY_IN_PROGRESS_TOOL_USE_IDS = new Set<string>();
type TurnContextSnapshot = {
key: string;
defaultSystemPrompt: string[];
userContext: {
[k: string]: string;
};
systemContext: {
[k: string]: string;
};
};
function getMcpContextSignature(mcpClients: MCPServerConnection[]): string {
return mcpClients.map(client => {
if (client.type === 'connected') {
return [client.name, client.config.type ?? 'stdio', client.instructions ?? ''].join('::');
}
return [client.name, client.type].join('::');
}).join('||');
}
function buildTurnContextCacheKey(model: string, tools: ReadonlyArray<{
name: string;
}>, additionalWorkingDirectories: readonly string[], mcpClients: MCPServerConnection[]): string {
return [model, tools.map(tool => tool.name).sort().join('|'), additionalWorkingDirectories.join('|'), getMcpContextSignature(mcpClients)].join('|||');
}
// Stable stub for useAssistantHistory's non-KAIROS branch — avoids a new
// function identity each render, which would break composedOnScroll's memo.
const HISTORY_STUB = {
@@ -681,9 +708,16 @@ export function REPL({
// Local state for commands (hot-reloadable when skill files change)
const [localCommands, setLocalCommands] = useState(initialCommands);
const turnContextCacheRef = useRef<TurnContextSnapshot | null>(null);
// Watch for skill file changes and reload all commands
useSkillsChange(isRemoteSession ? undefined : getProjectRoot(), setLocalCommands);
useEffect(() => {
turnContextCacheRef.current = null;
}, [localCommands]);
useSettingsChange(() => {
turnContextCacheRef.current = null;
});
// Track proactive mode for tools dependency - SleepTool filters by proactive state
const proactiveActive = React.useSyncExternalStore(proactiveModule?.subscribeToProactiveChanges ?? PROACTIVE_NO_OP_SUBSCRIBE, proactiveModule?.isProactiveActive ?? PROACTIVE_FALSE);
@@ -2531,6 +2565,23 @@ export function REPL({
contentReplacementState: contentReplacementStateRef.current
};
}, [commands, combinedInitialTools, mainThreadAgentDefinition, debug, initialMcpClients, ideInstallationStatus, dynamicMcpConfig, theme, allowedAgentTypes, store, setAppState, reverify, addNotification, setMessages, onChangeDynamicMcpConfig, resume, requestPrompt, disabled, customSystemPrompt, appendSystemPrompt, setConversationId]);
const loadTurnContext = useCallback(async (toolsForPrompt: typeof combinedInitialTools, mcpClientsForPrompt: MCPServerConnection[], model: string): Promise<TurnContextSnapshot> => {
const additionalWorkingDirectories = Array.from(toolPermissionContext.additionalWorkingDirectories.keys());
const cacheKey = buildTurnContextCacheKey(model, toolsForPrompt, additionalWorkingDirectories, mcpClientsForPrompt);
const cached = turnContextCacheRef.current;
if (cached?.key === cacheKey) {
return cached;
}
const [defaultSystemPrompt, userContext, systemContext] = await Promise.all([getSystemPrompt(toolsForPrompt, model, additionalWorkingDirectories, mcpClientsForPrompt), getUserContext(), getSystemContext()]);
const next = {
key: cacheKey,
defaultSystemPrompt,
userContext,
systemContext
};
turnContextCacheRef.current = next;
return next;
}, [toolPermissionContext]);
// Session backgrounding (Ctrl+B to background/foreground)
const handleBackgroundQuery = useCallback(() => {
@@ -2542,7 +2593,11 @@ export function REPL({
const removedNotifications = removeByFilter(cmd => cmd.mode === 'task-notification');
void (async () => {
const toolUseContext = getToolUseContext(messagesRef.current, [], new AbortController(), mainLoopModel);
const [defaultSystemPrompt, userContext, systemContext] = await Promise.all([getSystemPrompt(toolUseContext.options.tools, mainLoopModel, Array.from(toolPermissionContext.additionalWorkingDirectories.keys()), toolUseContext.options.mcpClients), getUserContext(), getSystemContext()]);
const {
defaultSystemPrompt,
userContext,
systemContext
} = await loadTurnContext(toolUseContext.options.tools, toolUseContext.options.mcpClients, mainLoopModel);
const systemPrompt = buildEffectiveSystemPrompt({
mainThreadAgentDefinition,
toolUseContext,
@@ -2581,7 +2636,7 @@ export function REPL({
agentDefinition: mainThreadAgentDefinition
});
})();
}, [abortController, mainLoopModel, toolPermissionContext, mainThreadAgentDefinition, getToolUseContext, customSystemPrompt, appendSystemPrompt, canUseTool, setAppState]);
}, [abortController, mainLoopModel, mainThreadAgentDefinition, getToolUseContext, customSystemPrompt, appendSystemPrompt, canUseTool, setAppState, loadTurnContext]);
const {
handleBackgroundSession
} = useSessionBackgrounding({
@@ -2775,11 +2830,16 @@ export function REPL({
});
}
queryCheckpoint('query_context_loading_start');
const [,, defaultSystemPrompt, baseUserContext, systemContext] = await Promise.all([
const [,, turnContext] = await Promise.all([
// IMPORTANT: do this after setMessages() above, to avoid UI jank
checkAndDisableBypassPermissionsIfNeeded(toolPermissionContext, setAppState),
// Gated on TRANSCRIPT_CLASSIFIER so GrowthBook kill switch runs wherever auto mode is built in
feature('TRANSCRIPT_CLASSIFIER') ? checkAndDisableAutoModeIfNeeded(toolPermissionContext, setAppState, store.getState().fastMode) : undefined, getSystemPrompt(freshTools, mainLoopModelParam, Array.from(toolPermissionContext.additionalWorkingDirectories.keys()), freshMcpClients), getUserContext(), getSystemContext()]);
feature('TRANSCRIPT_CLASSIFIER') ? checkAndDisableAutoModeIfNeeded(toolPermissionContext, setAppState, store.getState().fastMode) : undefined, loadTurnContext(freshTools, freshMcpClients, mainLoopModelParam)]);
const {
defaultSystemPrompt,
userContext: baseUserContext,
systemContext
} = turnContext;
const userContext = {
...baseUserContext,
...getCoordinatorUserContext(freshMcpClients, isScratchpadEnabled() ? getScratchpadDir() : undefined),
@@ -2861,7 +2921,7 @@ export function REPL({
// Signal that a query turn has completed successfully
await onTurnComplete?.(messagesRef.current);
}, [initialMcpClients, resetLoadingState, getToolUseContext, toolPermissionContext, setAppState, customSystemPrompt, onTurnComplete, appendSystemPrompt, canUseTool, mainThreadAgentDefinition, onQueryEvent, sessionTitle, titleDisabled]);
}, [initialMcpClients, resetLoadingState, getToolUseContext, toolPermissionContext, setAppState, customSystemPrompt, onTurnComplete, appendSystemPrompt, canUseTool, mainThreadAgentDefinition, onQueryEvent, sessionTitle, titleDisabled, loadTurnContext]);
const onQuery = useCallback(async (newMessages: MessageType[], abortController: AbortController, shouldQuery: boolean, additionalAllowedTools: string[], mainLoopModelParam: string, onBeforeQueryCallback?: (input: string, newMessages: MessageType[]) => Promise<boolean>, input?: string, effort?: EffortValue): Promise<void> => {
// If this is a teammate, mark them as active when starting a turn
if (isAgentSwarmsEnabled()) {
@@ -4941,8 +5001,11 @@ export function REPL({
}
const newAbortController = createAbortController();
const context = getToolUseContext(compactMessages, [], newAbortController, mainLoopModel);
const appState = context.getAppState();
const defaultSysPrompt = await getSystemPrompt(context.options.tools, context.options.mainLoopModel, Array.from(appState.toolPermissionContext.additionalWorkingDirectories.keys()), context.options.mcpClients);
const {
defaultSystemPrompt: defaultSysPrompt,
userContext,
systemContext
} = await loadTurnContext(context.options.tools, context.options.mcpClients, context.options.mainLoopModel);
const systemPrompt = buildEffectiveSystemPrompt({
mainThreadAgentDefinition: undefined,
toolUseContext: context,
@@ -4950,7 +5013,6 @@ export function REPL({
defaultSystemPrompt: defaultSysPrompt,
appendSystemPrompt: context.options.appendSystemPrompt
});
const [userContext, systemContext] = await Promise.all([getUserContext(), getSystemContext()]);
const result = await partialCompactConversation(compactMessages, messageIndex, context, {
systemPrompt,
userContext,

View File

@@ -9,6 +9,7 @@ import { findToolByName, type Tools, type ToolUseContext } from '../../Tool.js'
import { BASH_TOOL_NAME } from '../../tools/BashTool/toolName.js'
import type { AssistantMessage, Message } from '../../types/message.js'
import { createChildAbortController } from '../../utils/abortController.js'
import { getMaxToolUseConcurrency } from './toolConcurrency.js'
import { runToolUse } from './toolExecution.js'
type MessageUpdate = {
@@ -31,6 +32,20 @@ type TrackedTool = {
contextModifiers?: Array<(context: ToolUseContext) => ToolUseContext>
}
// Progress subtypes that only report transient status. A newer update of the
// same subtype supersedes the previous one, so queued entries may be replaced
// rather than accumulated.
const EPHEMERAL_PROGRESS_TYPES = new Set([
  'bash_progress',
  'powershell_progress',
  'mcp_progress',
  'sleep_progress',
])

/** True when `message` is a progress update whose subtype is ephemeral. */
function isEphemeralProgressMessage(message: Message): boolean {
  if (message.type !== 'progress') {
    return false
  }
  return EPHEMERAL_PROGRESS_TYPES.has(message.data.type)
}
/**
* Executes tools as they stream in with concurrency control.
* - Concurrent-safe tools can execute in parallel with other concurrent-safe tools
@@ -128,10 +143,13 @@ export class StreamingToolExecutor {
*/
private canExecuteTool(isConcurrencySafe: boolean): boolean {
const executingTools = this.tools.filter(t => t.status === 'executing')
return (
executingTools.length === 0 ||
(isConcurrencySafe && executingTools.every(t => t.isConcurrencySafe))
)
if (executingTools.length === 0) {
return true
}
if (!isConcurrencySafe || !executingTools.every(t => t.isConcurrencySafe)) {
return false
}
return executingTools.length < getMaxToolUseConcurrency()
}
/**
@@ -366,7 +384,17 @@ export class StreamingToolExecutor {
if (update.message) {
// Progress messages go to pendingProgress for immediate yielding
if (update.message.type === 'progress') {
tool.pendingProgress.push(update.message)
const lastPending = tool.pendingProgress.at(-1)
if (
isEphemeralProgressMessage(update.message) &&
lastPending?.type === 'progress' &&
lastPending.data.type === update.message.data.type
) {
tool.pendingProgress[tool.pendingProgress.length - 1] =
update.message
} else {
tool.pendingProgress.push(update.message)
}
// Signal that progress is available
if (this.progressAvailableResolve) {
this.progressAvailableResolve()

View File

@@ -0,0 +1,8 @@
/** Fallback concurrency budget when no valid override is configured. */
export const DEFAULT_MAX_TOOL_USE_CONCURRENCY = 4

/**
 * Maximum number of tool uses allowed to execute at once.
 *
 * Reads CLAUDE_CODE_MAX_TOOL_USE_CONCURRENCY on every call; only a parseable
 * positive integer override is honored, anything else (unset, zero, negative,
 * non-numeric) falls back to the default.
 */
export function getMaxToolUseConcurrency(): number {
  const raw = process.env.CLAUDE_CODE_MAX_TOOL_USE_CONCURRENCY ?? ''
  const override = parseInt(raw, 10)
  if (Number.isFinite(override) && override > 0) {
    return override
  }
  return DEFAULT_MAX_TOOL_USE_CONCURRENCY
}

View File

@@ -0,0 +1,33 @@
import { afterEach, expect, test } from 'bun:test'
import {
DEFAULT_MAX_TOOL_USE_CONCURRENCY,
getMaxToolUseConcurrency,
} from './toolConcurrency.js'
// Snapshot the override before any test mutates it, so afterEach can restore
// the exact pre-suite state — including "unset".
const ORIGINAL_CONCURRENCY = process.env.CLAUDE_CODE_MAX_TOOL_USE_CONCURRENCY

afterEach(() => {
  if (ORIGINAL_CONCURRENCY === undefined) {
    delete process.env.CLAUDE_CODE_MAX_TOOL_USE_CONCURRENCY
    return
  }
  process.env.CLAUDE_CODE_MAX_TOOL_USE_CONCURRENCY = ORIGINAL_CONCURRENCY
})

test('defaults tool concurrency to a bounded budget', () => {
  delete process.env.CLAUDE_CODE_MAX_TOOL_USE_CONCURRENCY
  expect(getMaxToolUseConcurrency()).toBe(DEFAULT_MAX_TOOL_USE_CONCURRENCY)
})

test('allows an explicit positive concurrency override', () => {
  process.env.CLAUDE_CODE_MAX_TOOL_USE_CONCURRENCY = '7'
  expect(getMaxToolUseConcurrency()).toBe(7)
})

test('ignores invalid concurrency overrides', () => {
  // Zero and garbage must both fall back to the default budget.
  for (const override of ['0', 'not-a-number']) {
    process.env.CLAUDE_CODE_MAX_TOOL_USE_CONCURRENCY = override
    expect(getMaxToolUseConcurrency()).toBe(DEFAULT_MAX_TOOL_USE_CONCURRENCY)
  }
})

View File

@@ -4,12 +4,10 @@ import { findToolByName, type ToolUseContext } from '../../Tool.js'
import type { AssistantMessage, Message } from '../../types/message.js'
import { all } from '../../utils/generators.js'
import { type MessageUpdateLazy, runToolUse } from './toolExecution.js'
function getMaxToolUseConcurrency(): number {
return (
parseInt(process.env.CLAUDE_CODE_MAX_TOOL_USE_CONCURRENCY || '', 10) || 10
)
}
export {
DEFAULT_MAX_TOOL_USE_CONCURRENCY,
getMaxToolUseConcurrency,
} from './toolConcurrency.js'
export type MessageUpdate = {
message?: Message

View File

@@ -10,12 +10,16 @@ import { requireComputerUseSwift } from './swiftLoader.js'
* promises hang. Electron drains it via CFRunLoop so Cowork doesn't need this.
*
* One refcounted setInterval calls `_drainMainRunLoop` (RunLoop.main.run)
* every 1ms while any main-queue-dependent call is pending. Multiple
* every few milliseconds while any main-queue-dependent call is pending. Multiple
* concurrent drainRunLoop() calls share the single pump via retain/release.
*/
let pump: ReturnType<typeof setInterval> | undefined
let pending = 0
// Pump cadence in milliseconds. Tunable via CLAUDE_CODE_COMPUTER_USE_PUMP_MS,
// defaulting to 4ms when unset/unparseable, and clamped to at least 1ms.
const configuredPumpMs = Number.parseInt(
  process.env.CLAUDE_CODE_COMPUTER_USE_PUMP_MS || '',
  10,
)
const DRAIN_INTERVAL_MS = Math.max(1, configuredPumpMs || 4)
function drainTick(cu: ReturnType<typeof requireComputerUseSwift>): void {
cu._drainMainRunLoop()
@@ -24,7 +28,8 @@ function drainTick(cu: ReturnType<typeof requireComputerUseSwift>): void {
function retain(): void {
pending++
if (pump === undefined) {
pump = setInterval(drainTick, 1, requireComputerUseSwift())
pump = setInterval(drainTick, DRAIN_INTERVAL_MS, requireComputerUseSwift())
pump.unref?.()
logForDebugging('[drainRunLoop] pump started', { level: 'verbose' })
}
}

View File

@@ -0,0 +1,15 @@
import { expect, test } from 'bun:test'
import { getSchedulerCheckDelayMs } from './cronScheduler.js'
test('uses a slower idle cadence when no cron tasks are pending', () => {
  expect(getSchedulerCheckDelayMs(null, 10_000)).toBe(2000)
})

test('keeps the minimum cadence for tasks that are due soon', () => {
  expect(getSchedulerCheckDelayMs(10_500, 10_000)).toBe(1000)
})

test('backs off proportionally for tasks scheduled farther out', () => {
  // Half the remaining time, capped at the scheduler's max delay.
  for (const [fireAtMs, expectedMs] of [
    [16_000, 3000],
    [22_000, 5000],
  ] as const) {
    expect(getSchedulerCheckDelayMs(fireAtMs, 10_000)).toBe(expectedMs)
  }
})

View File

@@ -38,6 +38,8 @@ import {
import { logForDebugging } from './debug.js'
const CHECK_INTERVAL_MS = 1000
const NO_TASKS_CHECK_DELAY_MS = 2000
const MAX_CHECK_DELAY_MS = 5000
const FILE_STABILITY_MS = 300
// How often a non-owning session re-probes the scheduler lock. Coarse
// because takeover only matters when the owning session has crashed.
@@ -59,6 +61,31 @@ export function isRecurringTaskAged(
return Boolean(t.recurring && !t.permanent && nowMs - t.createdAt >= maxAgeMs)
}
/**
 * Compute how long the scheduler should wait before its next check.
 *
 * With nothing pending (`nextFireAtMs === null`) it uses the slow idle
 * cadence. When a task is due within one minimum interval it checks at the
 * minimum cadence. Otherwise it backs off to half the remaining time,
 * clamped between the minimum and maximum delays.
 *
 * @param nextFireAtMs earliest pending fire time, or null when none
 * @param nowMs current wall-clock time in ms
 * @param options optional overrides for the min/max/idle delays
 */
export function getSchedulerCheckDelayMs(
  nextFireAtMs: number | null,
  nowMs: number,
  options?: {
    minMs?: number
    maxMs?: number
    noTasksDelayMs?: number
  },
): number {
  const minMs = options?.minMs ?? CHECK_INTERVAL_MS
  const maxMs = options?.maxMs ?? MAX_CHECK_DELAY_MS
  if (nextFireAtMs === null) {
    return options?.noTasksDelayMs ?? NO_TASKS_CHECK_DELAY_MS
  }
  const remainingMs = nextFireAtMs - nowMs
  if (remainingMs <= minMs) {
    return minMs
  }
  const halfRemaining = Math.floor(remainingMs / 2)
  return Math.min(maxMs, Math.max(minMs, halfRemaining))
}
type CronSchedulerOptions = {
/** Called when a task fires (regular or missed-on-startup). */
onFire: (prompt: string) => void
@@ -170,7 +197,7 @@ export function createCronScheduler(
const inFlight = new Set<string>()
let enablePoll: ReturnType<typeof setInterval> | null = null
let checkTimer: ReturnType<typeof setInterval> | null = null
let checkTimer: ReturnType<typeof setTimeout> | null = null
let lockProbeTimer: ReturnType<typeof setInterval> | null = null
let watcher: FSWatcher | null = null
let stopped = false
@@ -189,7 +216,10 @@ export function createCronScheduler(
// Recurring tasks are NOT surfaced or deleted — check() handles them
// correctly (fires on first tick, reschedules forward). Only one-shot
// missed tasks need user input (run once now, or discard forever).
if (!initial) return
if (!initial) {
scheduleCheck(CHECK_INTERVAL_MS)
return
}
const now = Date.now()
const missed = findMissedTasks(next, now).filter(
@@ -225,6 +255,7 @@ export function createCronScheduler(
`[ScheduledTasks] surfaced ${missed.length} missed one-shot task(s)`,
)
}
scheduleCheck(CHECK_INTERVAL_MS)
}
function check() {
@@ -393,6 +424,29 @@ export function createCronScheduler(
}
}
function getNextFireTimeValue(): number | null {
let min = Infinity
for (const t of nextFireAt.values()) {
if (t < min) min = t
}
return min === Infinity ? null : min
}
/**
 * (Re)arm the one-shot check timer after delayMs. Any pending timer is
 * cancelled first so at most one check is ever scheduled. After each check
 * the follow-up delay adapts to how far out the next task fires. The timer
 * is unref'd so the scheduler alone never keeps the process alive.
 */
function scheduleCheck(delayMs = CHECK_INTERVAL_MS): void {
  if (stopped) return
  if (checkTimer) {
    clearTimeout(checkTimer)
    checkTimer = null
  }
  const timer = setTimeout(() => {
    checkTimer = null
    if (stopped) return
    check()
    scheduleCheck(getSchedulerCheckDelayMs(getNextFireTimeValue(), Date.now()))
  }, delayMs)
  timer.unref?.()
  checkTimer = timer
}
async function enable() {
if (stopped) return
if (enablePoll) {
@@ -450,13 +504,11 @@ export function createCronScheduler(
if (!stopped) {
tasks = []
nextFireAt.clear()
scheduleCheck(NO_TASKS_CHECK_DELAY_MS)
}
})
checkTimer = setInterval(check, CHECK_INTERVAL_MS)
// Don't keep the process alive for the scheduler alone — in -p text mode
// the process should exit after the single turn even if a cron was created.
checkTimer.unref?.()
scheduleCheck(CHECK_INTERVAL_MS)
}
return {
@@ -503,7 +555,7 @@ export function createCronScheduler(
enablePoll = null
}
if (checkTimer) {
clearInterval(checkTimer)
clearTimeout(checkTimer)
checkTimer = null
}
if (lockProbeTimer) {
@@ -521,11 +573,7 @@ export function createCronScheduler(
// nextFireAt uses Infinity for "never" (in-flight one-shots, bad cron
// strings). Filter those out so callers can distinguish "soon" from
// "nothing pending".
let min = Infinity
for (const t of nextFireAt.values()) {
if (t < min) min = t
}
return min === Infinity ? null : min
return getNextFireTimeValue()
},
}
}

View File

@@ -27,9 +27,13 @@
* - query_end: End of query
*/
import { join } from 'path'
import { getSessionId } from 'src/bootstrap/state.js'
import { logForDebugging } from './debug.js'
import { isEnvTruthy } from './envUtils.js'
import { getClaudeConfigHomeDir, isEnvTruthy } from './envUtils.js'
import { getFsImplementation } from './fsOperations.js'
import { formatMs, formatTimelineLine, getPerformance } from './profilerBase.js'
import { writeFileSync_DEPRECATED } from './slowOperations.js'
// Module-level state - initialized once when the module loads
// eslint-disable-next-line custom-rules/no-process-env-top-level
@@ -210,6 +214,14 @@ function getQueryProfileReport(): string {
return lines.join('\n')
}
/**
 * Absolute path of the query-performance artifact for a given query,
 * namespaced by session id under <config home>/query-perf/.
 * Defaults to the current query ordinal.
 */
export function getQueryPerfLogPath(queryNumber = queryCount): string {
  const fileName = `${getSessionId()}-q${queryNumber}.txt`
  return join(getClaudeConfigHomeDir(), 'query-perf', fileName)
}
/**
* Get phase-based summary showing time spent in each major phase
*/
@@ -297,5 +309,17 @@ function getPhaseSummary(
*/
/**
 * Emit the query profiler report: always to the debug log, and best-effort
 * to a per-session artifact file under <config home>/query-perf/.
 * No-op unless query profiling is enabled.
 */
export function logQueryProfileReport(): void {
  if (!ENABLED) return
  const report = getQueryProfileReport()
  logForDebugging(report)
  try {
    // recursive: true makes the mkdir idempotent — without it the second
    // query in a session throws EEXIST and the artifact write is silently
    // skipped by the catch below.
    // NOTE(review): assumes getFsImplementation().mkdirSync mirrors node's
    // fs.mkdirSync options — confirm the wrapper forwards { recursive }.
    getFsImplementation().mkdirSync(join(getClaudeConfigHomeDir(), 'query-perf'), {
      recursive: true,
    })
    writeFileSync_DEPRECATED(getQueryPerfLogPath(), report, {
      encoding: 'utf8',
      flush: true,
    })
  } catch {
    // Best-effort artifact write. Debug output above still preserves the report.
  }
}

View File

@@ -0,0 +1,19 @@
import { expect, test } from 'bun:test'
import { getNextTaskOutputPollIntervalMs } from './taskOutputPolling.js'
test('resets task output polling to the active interval on new activity', () => {
  expect(getNextTaskOutputPollIntervalMs(4000, true)).toBe(250)
})

test('backs task output polling off when the command is quiet', () => {
  // Starting from the active cadence, each quiet tick doubles the interval
  // until it saturates at the maximum.
  const observed: number[] = []
  let intervalMs = 250
  for (let i = 0; i < 4; i++) {
    intervalMs = getNextTaskOutputPollIntervalMs(intervalMs, false)
    observed.push(intervalMs)
  }
  expect(observed).toEqual([1000, 2000, 4000, 4000])
})

View File

@@ -5,9 +5,12 @@ import { readFileRange, tailFile } from '../fsOperations.js'
import { getMaxOutputLength } from '../shell/outputLimits.js'
import { safeJoinLines } from '../stringUtils.js'
import { DiskTaskOutput, getTaskOutputPath } from './diskOutput.js'
import {
ACTIVE_TASK_OUTPUT_POLL_INTERVAL_MS,
getNextTaskOutputPollIntervalMs,
} from './taskOutputPolling.js'
const DEFAULT_MAX_MEMORY = 8 * 1024 * 1024 // 8MB
const POLL_INTERVAL_MS = 1000
const PROGRESS_TAIL_BYTES = 4096
type ProgressCallback = (
@@ -54,6 +57,8 @@ export class TaskOutput {
/** Subset of #registry currently being polled (visibility-driven by React). */
static #activePolling = new Map<string, TaskOutput>()
static #pollInterval: ReturnType<typeof setInterval> | null = null
static #pollIntervalMs = ACTIVE_TASK_OUTPUT_POLL_INTERVAL_MS
static #tickInFlight = false
constructor(
taskId: string,
@@ -84,10 +89,7 @@ export class TaskOutput {
return
}
TaskOutput.#activePolling.set(taskId, instance)
if (!TaskOutput.#pollInterval) {
TaskOutput.#pollInterval = setInterval(TaskOutput.#tick, POLL_INTERVAL_MS)
TaskOutput.#pollInterval.unref()
}
TaskOutput.#setPollInterval(ACTIVE_TASK_OUTPUT_POLL_INTERVAL_MS)
}
/**
@@ -99,20 +101,52 @@ export class TaskOutput {
if (TaskOutput.#activePolling.size === 0 && TaskOutput.#pollInterval) {
clearInterval(TaskOutput.#pollInterval)
TaskOutput.#pollInterval = null
TaskOutput.#pollIntervalMs = ACTIVE_TASK_OUTPUT_POLL_INTERVAL_MS
}
}
static #setPollInterval(intervalMs: number): void {
TaskOutput.#pollIntervalMs = intervalMs
if (TaskOutput.#pollInterval) {
clearInterval(TaskOutput.#pollInterval)
TaskOutput.#pollInterval = null
}
if (TaskOutput.#activePolling.size === 0) {
return
}
TaskOutput.#pollInterval = setInterval(TaskOutput.#tick, intervalMs)
TaskOutput.#pollInterval.unref()
}
/**
* Shared tick: reads the file tail for every actively-polled task.
* Non-async body (.then) to avoid stacking if I/O is slow.
* Skip overlapping ticks so slow I/O does not stack more work.
*/
static #tick(): void {
for (const [, entry] of TaskOutput.#activePolling) {
if (!entry.#onProgress) {
continue
}
void tailFile(entry.path, PROGRESS_TAIL_BYTES).then(
({ content, bytesRead, bytesTotal }) => {
if (TaskOutput.#tickInFlight) {
return
}
TaskOutput.#tickInFlight = true
void TaskOutput.#runTick().finally(() => {
TaskOutput.#tickInFlight = false
})
}
static async #runTick(): Promise<void> {
let sawActivity = false
await Promise.all(
Array.from(TaskOutput.#activePolling.values(), async entry => {
if (!entry.#onProgress) {
return
}
try {
const { content, bytesRead, bytesTotal } = await tailFile(
entry.path,
PROGRESS_TAIL_BYTES,
)
const bytesChanged = bytesTotal !== entry.#totalBytes
sawActivity = sawActivity || bytesChanged
if (!entry.#onProgress) {
return
}
@@ -120,6 +154,7 @@ export class TaskOutput {
// progress loop wakes up and can check for backgrounding.
// Commands like `git log -S` produce no output for long periods.
if (!content) {
entry.#totalBytes = bytesTotal
entry.#onProgress('', '', entry.#totalLines, bytesTotal, false)
return
}
@@ -155,11 +190,18 @@ export class TaskOutput {
bytesTotal,
bytesRead < bytesTotal,
)
},
() => {
} catch {
// File may not exist yet
},
)
}
}),
)
const nextIntervalMs = getNextTaskOutputPollIntervalMs(
TaskOutput.#pollIntervalMs,
sawActivity,
)
if (nextIntervalMs !== TaskOutput.#pollIntervalMs) {
TaskOutput.#setPollInterval(nextIntervalMs)
}
}

View File

@@ -0,0 +1,16 @@
/** Cadence used while a command is actively producing output. */
export const ACTIVE_TASK_OUTPUT_POLL_INTERVAL_MS = 250
/** First back-off step once output goes quiet. */
const IDLE_TASK_OUTPUT_POLL_INTERVAL_MS = 1000
/** Ceiling for the back-off so quiet commands are still checked regularly. */
const MAX_TASK_OUTPUT_POLL_INTERVAL_MS = 4000

/**
 * Next polling interval for task output.
 *
 * Any activity snaps straight back to the fast active cadence. While quiet,
 * intervals below the idle floor jump to it, and intervals at or above it
 * double per tick up to the maximum.
 */
export function getNextTaskOutputPollIntervalMs(
  currentIntervalMs: number,
  sawActivity: boolean,
): number {
  if (sawActivity) {
    return ACTIVE_TASK_OUTPUT_POLL_INTERVAL_MS
  }
  return currentIntervalMs < IDLE_TASK_OUTPUT_POLL_INTERVAL_MS
    ? IDLE_TASK_OUTPUT_POLL_INTERVAL_MS
    : Math.min(MAX_TASK_OUTPUT_POLL_INTERVAL_MS, currentIntervalMs * 2)
}