rebrand better-clawd and ship initial npm-ready release

This commit is contained in:
x1xhlol
2026-04-01 16:51:18 +02:00
Unverified
parent 420d4155ec
commit 407fa14d6f
109 changed files with 4155 additions and 1690 deletions

View File

@@ -1,237 +1,32 @@
import type { AnyValueMap, Logger, logs } from '@opentelemetry/api-logs'
import { resourceFromAttributes } from '@opentelemetry/resources'
import {
BatchLogRecordProcessor,
LoggerProvider,
} from '@opentelemetry/sdk-logs'
import {
ATTR_SERVICE_NAME,
ATTR_SERVICE_VERSION,
} from '@opentelemetry/semantic-conventions'
import { randomUUID } from 'crypto'
import { isEqual } from 'lodash-es'
import { getOrCreateUserID } from '../../utils/config.js'
import { logForDebugging } from '../../utils/debug.js'
import { logError } from '../../utils/log.js'
import { getPlatform, getWslVersion } from '../../utils/platform.js'
import { jsonStringify } from '../../utils/slowOperations.js'
import { profileCheckpoint } from '../../utils/startupProfiler.js'
import { getCoreUserData } from '../../utils/user.js'
import { isAnalyticsDisabled } from './config.js'
import { FirstPartyEventLoggingExporter } from './firstPartyEventLoggingExporter.js'
import type { GrowthBookUserAttributes } from './growthbook.js'
import { getDynamicConfig_CACHED_MAY_BE_STALE } from './growthbook.js'
import { getEventMetadata } from './metadata.js'
import { isSinkKilled } from './sinkKillswitch.js'
/**
* Configuration for sampling individual event types.
* Each event name maps to an object containing sample_rate (0-1).
* Events not in the config are logged at 100% rate.
*/
export type EventSamplingConfig = {
[eventName: string]: {
sample_rate: number
}
}
const EVENT_SAMPLING_CONFIG_NAME = 'tengu_event_sampling_config'
/**
* Get the event sampling configuration from GrowthBook.
* Uses cached value if available, updates cache in background.
*/
export function getEventSamplingConfig(): EventSamplingConfig {
return getDynamicConfig_CACHED_MAY_BE_STALE<EventSamplingConfig>(
EVENT_SAMPLING_CONFIG_NAME,
{},
)
return {}
}
/**
* Determine if an event should be sampled based on its sample rate.
* Returns the sample rate if sampled, null if not sampled.
*
* @param eventName - Name of the event to check
* @returns The sample_rate if event should be logged, null if it should be dropped
*/
export function shouldSampleEvent(eventName: string): number | null {
const config = getEventSamplingConfig()
const eventConfig = config[eventName]
// If no config for this event, log at 100% rate (no sampling)
if (!eventConfig) {
return null
}
const sampleRate = eventConfig.sample_rate
// Validate sample rate is in valid range
if (typeof sampleRate !== 'number' || sampleRate < 0 || sampleRate > 1) {
return null
}
// Sample rate of 1 means log everything (no need to add metadata)
if (sampleRate >= 1) {
return null
}
// Sample rate of 0 means drop everything
if (sampleRate <= 0) {
return 0
}
// Randomly decide whether to sample this event
return Math.random() < sampleRate ? sampleRate : 0
export function shouldSampleEvent(_eventName: string): number | null {
return null
}
const BATCH_CONFIG_NAME = 'tengu_1p_event_batch_config'
type BatchConfig = {
scheduledDelayMillis?: number
maxExportBatchSize?: number
maxQueueSize?: number
skipAuth?: boolean
maxAttempts?: number
path?: string
baseUrl?: string
}
function getBatchConfig(): BatchConfig {
return getDynamicConfig_CACHED_MAY_BE_STALE<BatchConfig>(
BATCH_CONFIG_NAME,
{},
)
}
// Module-local state for event logging (not exposed globally)
let firstPartyEventLogger: ReturnType<typeof logs.getLogger> | null = null
let firstPartyEventLoggerProvider: LoggerProvider | null = null
// Last batch config used to construct the provider — used by
// reinitialize1PEventLoggingIfConfigChanged to decide whether a rebuild is
// needed when GrowthBook refreshes.
let lastBatchConfig: BatchConfig | null = null
/**
 * Flush and shutdown the 1P event logger.
 * This should be called as the final step before process exit to ensure
 * all events (including late ones from API responses) are exported.
 */
export async function shutdown1PEventLogging(): Promise<void> {
  if (!firstPartyEventLoggerProvider) {
    // Logging was never initialized (or already torn down) — nothing to flush.
    return
  }
  try {
    // Provider shutdown drains the batch processor before tearing it down.
    await firstPartyEventLoggerProvider.shutdown()
    if (process.env.USER_TYPE === 'ant') {
      logForDebugging('1P event logging: final shutdown complete')
    }
  } catch {
    // Ignore shutdown errors — nothing actionable this late in process exit.
  }
  // NOTE(review): the module-level provider/logger references are not nulled
  // here; presumably nothing logs after shutdown — confirm with callers.
  return
}
/**
* Check if 1P event logging is enabled.
* Respects the same opt-outs as other analytics sinks:
* - Test environment
* - Third-party cloud providers (Bedrock/Vertex)
* - Global telemetry opt-outs
* - Non-essential traffic disabled
*
* Note: Unlike BigQuery metrics, event logging does NOT check organization-level
* metrics opt-out via API. It follows the same pattern as Statsig event logging.
*/
export function is1PEventLoggingEnabled(): boolean {
// Respect standard analytics opt-outs
return !isAnalyticsDisabled()
return false
}
/**
* Log a 1st-party event for internal analytics (async version).
* Events are batched and exported to /api/event_logging/batch
*
* This enriches the event with core metadata (model, session, env context, etc.)
* at log time, similar to logEventToStatsig.
*
* @param eventName - Name of the event (e.g., 'tengu_api_query')
* @param metadata - Additional metadata for the event (intentionally no strings, to avoid accidentally logging code/filepaths)
*/
async function logEventTo1PAsync(
firstPartyEventLogger: Logger,
eventName: string,
metadata: Record<string, number | boolean | undefined> = {},
): Promise<void> {
try {
// Enrich with core metadata at log time (similar to Statsig pattern)
const coreMetadata = await getEventMetadata({
model: metadata.model,
betas: metadata.betas,
})
// Build attributes - OTel supports nested objects natively via AnyValueMap
// Cast through unknown since our nested objects are structurally compatible
// with AnyValue but TS doesn't recognize it due to missing index signatures
const attributes = {
event_name: eventName,
event_id: randomUUID(),
// Pass objects directly - no JSON serialization needed
core_metadata: coreMetadata,
user_metadata: getCoreUserData(true),
event_metadata: metadata,
} as unknown as AnyValueMap
// Add user_id if available
const userId = getOrCreateUserID()
if (userId) {
attributes.user_id = userId
}
// Debug logging when debug mode is enabled
if (process.env.USER_TYPE === 'ant') {
logForDebugging(
`[ANT-ONLY] 1P event: ${eventName} ${jsonStringify(metadata, null, 0)}`,
)
}
// Emit log record
firstPartyEventLogger.emit({
body: eventName,
attributes,
})
} catch (e) {
if (process.env.NODE_ENV === 'development') {
throw e
}
if (process.env.USER_TYPE === 'ant') {
logError(e as Error)
}
// swallow
}
}
/**
* Log a 1st-party event for internal analytics.
* Events are batched and exported to /api/event_logging/batch
*
* @param eventName - Name of the event (e.g., 'tengu_api_query')
* @param metadata - Additional metadata for the event (intentionally no strings, to avoid accidentally logging code/filepaths)
*/
export function logEventTo1P(
eventName: string,
metadata: Record<string, number | boolean | undefined> = {},
): void {
if (!is1PEventLoggingEnabled()) {
return
}
_eventName: string,
_metadata: Record<string, number | boolean | undefined> = {},
): void {}
if (!firstPartyEventLogger || isSinkKilled('firstParty')) {
return
}
// Fire and forget - don't block on metadata enrichment
void logEventTo1PAsync(firstPartyEventLogger, eventName, metadata)
}
/**
* GrowthBook experiment event data for logging
*/
export type GrowthBookExperimentData = {
experimentId: string
variationId: number
@@ -239,211 +34,12 @@ export type GrowthBookExperimentData = {
experimentMetadata?: Record<string, unknown>
}
// api.anthropic.com only serves the "production" GrowthBook environment
// (see starling/starling/cli/cli.py DEFAULT_ENVIRONMENTS). Staging and
// development environments are not exported to the prod API.
function getEnvironmentForGrowthBook(): string {
return 'production'
}
/**
* Log a GrowthBook experiment assignment event to 1P.
* Events are batched and exported to /api/event_logging/batch
*
* @param data - GrowthBook experiment assignment data
*/
export function logGrowthBookExperimentTo1P(
data: GrowthBookExperimentData,
): void {
if (!is1PEventLoggingEnabled()) {
return
}
_data: GrowthBookExperimentData,
): void {}
if (!firstPartyEventLogger || isSinkKilled('firstParty')) {
return
}
export function initialize1PEventLogging(): void {}
const userId = getOrCreateUserID()
const { accountUuid, organizationUuid } = getCoreUserData(true)
// Build attributes for GrowthbookExperimentEvent
const attributes = {
event_type: 'GrowthbookExperimentEvent',
event_id: randomUUID(),
experiment_id: data.experimentId,
variation_id: data.variationId,
...(userId && { device_id: userId }),
...(accountUuid && { account_uuid: accountUuid }),
...(organizationUuid && { organization_uuid: organizationUuid }),
...(data.userAttributes && {
session_id: data.userAttributes.sessionId,
user_attributes: jsonStringify(data.userAttributes),
}),
...(data.experimentMetadata && {
experiment_metadata: jsonStringify(data.experimentMetadata),
}),
environment: getEnvironmentForGrowthBook(),
}
if (process.env.USER_TYPE === 'ant') {
logForDebugging(
`[ANT-ONLY] 1P GrowthBook experiment: ${data.experimentId} variation=${data.variationId}`,
)
}
firstPartyEventLogger.emit({
body: 'growthbook_experiment',
attributes,
})
}
const DEFAULT_LOGS_EXPORT_INTERVAL_MS = 10000
const DEFAULT_MAX_EXPORT_BATCH_SIZE = 200
const DEFAULT_MAX_QUEUE_SIZE = 8192
/**
* Initialize 1P event logging infrastructure.
* This creates a separate LoggerProvider for internal event logging,
* independent of customer OTLP telemetry.
*
* This uses its own minimal resource configuration with just the attributes
* we need for internal analytics (service name, version, platform info).
*/
export function initialize1PEventLogging(): void {
profileCheckpoint('1p_event_logging_start')
const enabled = is1PEventLoggingEnabled()
if (!enabled) {
if (process.env.USER_TYPE === 'ant') {
logForDebugging('1P event logging not enabled')
}
return
}
// Fetch batch processor configuration from GrowthBook dynamic config
// Uses cached value if available, refreshes in background
const batchConfig = getBatchConfig()
lastBatchConfig = batchConfig
profileCheckpoint('1p_event_after_growthbook_config')
const scheduledDelayMillis =
batchConfig.scheduledDelayMillis ||
parseInt(
process.env.OTEL_LOGS_EXPORT_INTERVAL ||
DEFAULT_LOGS_EXPORT_INTERVAL_MS.toString(),
)
const maxExportBatchSize =
batchConfig.maxExportBatchSize || DEFAULT_MAX_EXPORT_BATCH_SIZE
const maxQueueSize = batchConfig.maxQueueSize || DEFAULT_MAX_QUEUE_SIZE
// Build our own resource for 1P event logging with minimal attributes
const platform = getPlatform()
const attributes: Record<string, string> = {
[ATTR_SERVICE_NAME]: 'claude-code',
[ATTR_SERVICE_VERSION]: MACRO.VERSION,
}
// Add WSL-specific attributes if running on WSL
if (platform === 'wsl') {
const wslVersion = getWslVersion()
if (wslVersion) {
attributes['wsl.version'] = wslVersion
}
}
const resource = resourceFromAttributes(attributes)
// Create a new LoggerProvider with the EventLoggingExporter
// NOTE: This is kept separate from customer telemetry logs to ensure
// internal events don't leak to customer endpoints and vice versa.
// We don't register this globally - it's only used for internal event logging.
const eventLoggingExporter = new FirstPartyEventLoggingExporter({
maxBatchSize: maxExportBatchSize,
skipAuth: batchConfig.skipAuth,
maxAttempts: batchConfig.maxAttempts,
path: batchConfig.path,
baseUrl: batchConfig.baseUrl,
isKilled: () => isSinkKilled('firstParty'),
})
firstPartyEventLoggerProvider = new LoggerProvider({
resource,
processors: [
new BatchLogRecordProcessor(eventLoggingExporter, {
scheduledDelayMillis,
maxExportBatchSize,
maxQueueSize,
}),
],
})
// Initialize event logger from our internal provider (NOT from global API)
// IMPORTANT: We must get the logger from our local provider, not logs.getLogger()
// because logs.getLogger() returns a logger from the global provider, which is
// separate and used for customer telemetry.
firstPartyEventLogger = firstPartyEventLoggerProvider.getLogger(
'com.anthropic.claude_code.events',
MACRO.VERSION,
)
}
/**
* Rebuild the 1P event logging pipeline if the batch config changed.
* Register this with onGrowthBookRefresh so long-running sessions pick up
* changes to batch size, delay, endpoint, etc.
*
* Event-loss safety:
* 1. Null the logger first — concurrent logEventTo1P() calls hit the
* !firstPartyEventLogger guard and bail during the swap window. This drops
* a handful of events but prevents emitting to a draining provider.
* 2. forceFlush() drains the old BatchLogRecordProcessor buffer to the
* exporter. Export failures go to disk at getCurrentBatchFilePath() which
* is keyed by module-level BATCH_UUID + sessionId — unchanged across
* reinit — so the NEW exporter's disk-backed retry picks them up.
* 3. Swap to new provider/logger; old provider shutdown runs in background
* (buffer already drained, just cleanup).
*/
export async function reinitialize1PEventLoggingIfConfigChanged(): Promise<void> {
if (!is1PEventLoggingEnabled() || !firstPartyEventLoggerProvider) {
return
}
const newConfig = getBatchConfig()
if (isEqual(newConfig, lastBatchConfig)) {
return
}
if (process.env.USER_TYPE === 'ant') {
logForDebugging(
`1P event logging: ${BATCH_CONFIG_NAME} changed, reinitializing`,
)
}
const oldProvider = firstPartyEventLoggerProvider
const oldLogger = firstPartyEventLogger
firstPartyEventLogger = null
try {
await oldProvider.forceFlush()
} catch {
// Export failures are already on disk; new exporter will retry them.
}
firstPartyEventLoggerProvider = null
try {
initialize1PEventLogging()
} catch (e) {
// Restore so the next GrowthBook refresh can retry. oldProvider was
// only forceFlush()'d, not shut down — it's still functional. Without
// this, both stay null and the !firstPartyEventLoggerProvider gate at
// the top makes recovery impossible.
firstPartyEventLoggerProvider = oldProvider
firstPartyEventLogger = oldLogger
logError(e)
return
}
void oldProvider.shutdown().catch(() => {})
return
}

View File

@@ -1,114 +1,14 @@
/**
* Analytics sink implementation
*
* This module contains the actual analytics routing logic and should be
* initialized during app startup. It routes events to Datadog and 1P event
* logging.
*
* Usage: Call initializeAnalyticsSink() during app startup to attach the sink.
*/
import { attachAnalyticsSink } from './index.js'
import { trackDatadogEvent } from './datadog.js'
import { logEventTo1P, shouldSampleEvent } from './firstPartyEventLogger.js'
import { checkStatsigFeatureGate_CACHED_MAY_BE_STALE } from './growthbook.js'
import { attachAnalyticsSink, stripProtoFields } from './index.js'
import { isSinkKilled } from './sinkKillswitch.js'
// Local type matching the logEvent metadata signature
type LogEventMetadata = { [key: string]: boolean | number | undefined }
const DATADOG_GATE_NAME = 'tengu_log_datadog_events'
function dropEvent(_eventName: string, _metadata: LogEventMetadata): void {}
// Module-level gate state - starts undefined, initialized during startup
let isDatadogGateEnabled: boolean | undefined = undefined
export function initializeAnalyticsGates(): void {}
/**
 * Check if Datadog tracking is enabled.
 * Falls back to cached value from previous session if not yet initialized.
 */
function shouldTrackDatadog(): boolean {
  // Remote killswitch takes precedence over any gate value.
  if (isSinkKilled('datadog')) {
    return false
  }
  // Gate state populated during startup; undefined means not yet initialized.
  if (isDatadogGateEnabled !== undefined) {
    return isDatadogGateEnabled
  }
  // Fallback to cached value from previous session
  try {
    return checkStatsigFeatureGate_CACHED_MAY_BE_STALE(DATADOG_GATE_NAME)
  } catch {
    // Cache unavailable — fail closed rather than over-report.
    return false
  }
}
/**
 * Log an event (synchronous implementation)
 *
 * Applies per-event sampling, then fans the event out to the Datadog and
 * 1P sinks. Events dropped by sampling never reach either sink.
 */
function logEventImpl(eventName: string, metadata: LogEventMetadata): void {
  // Check if this event should be sampled
  const sampleResult = shouldSampleEvent(eventName)
  // If sample result is 0, the event was not selected for logging
  if (sampleResult === 0) {
    return
  }
  // If sample result is a positive number, add it to metadata so backends
  // can re-weight counts; null means "unsampled" and metadata passes through.
  const metadataWithSampleRate =
    sampleResult !== null
      ? { ...metadata, sample_rate: sampleResult }
      : metadata
  if (shouldTrackDatadog()) {
    // Datadog is a general-access backend — strip _PROTO_* keys
    // (unredacted PII-tagged values meant only for the 1P privileged column).
    void trackDatadogEvent(eventName, stripProtoFields(metadataWithSampleRate))
  }
  // 1P receives the full payload including _PROTO_* — the exporter
  // destructures and routes those keys to proto fields itself.
  logEventTo1P(eventName, metadataWithSampleRate)
}
/**
* Log an event (asynchronous implementation)
*
* With Segment removed the two remaining sinks are fire-and-forget, so this
* just wraps the sync impl — kept to preserve the sink interface contract.
*/
function logEventAsyncImpl(
eventName: string,
metadata: LogEventMetadata,
): Promise<void> {
logEventImpl(eventName, metadata)
return Promise.resolve()
}
/**
* Initialize analytics gates during startup.
*
* Updates gate values from server. Early events use cached values from previous
* session to avoid data loss during initialization.
*
* Called from main.tsx during setupBackend().
*/
export function initializeAnalyticsGates(): void {
isDatadogGateEnabled =
checkStatsigFeatureGate_CACHED_MAY_BE_STALE(DATADOG_GATE_NAME)
}
/**
* Initialize the analytics sink.
*
* Call this during app startup to attach the analytics backend.
* Any events logged before this is called will be queued and drained.
*
* Idempotent: safe to call multiple times (subsequent calls are no-ops).
*/
export function initializeAnalyticsSink(): void {
attachAnalyticsSink({
logEvent: logEventImpl,
logEventAsync: logEventAsyncImpl,
logEvent: dropEvent,
logEventAsync: async (_eventName, _metadata) => {},
})
}

View File

@@ -6,7 +6,10 @@ import {
getAnthropicApiKey,
getApiKeyFromApiKeyHelper,
getClaudeAIOAuthTokens,
getOpenAIApiKey,
getOpenRouterApiKey,
isClaudeAISubscriber,
refreshOpenAIAuthTokenIfNeeded,
refreshAndGetAwsCredentials,
refreshGcpCredentialsIfNeeded,
} from 'src/utils/auth.js'
@@ -14,9 +17,12 @@ import { getUserAgent } from 'src/utils/http.js'
import { getSmallFastModel } from 'src/utils/model/model.js'
import {
getAPIProvider,
getOpenAIBaseUrl,
getOpenRouterBaseUrl,
isFirstPartyAnthropicBaseUrl,
} from 'src/utils/model/providers.js'
import { getProxyFetchOptions } from 'src/utils/proxy.js'
import { OpenAIResponsesCompatClient } from './openaiCompat.js'
import {
getIsNonInteractiveSession,
getSessionId,
@@ -98,6 +104,7 @@ export async function getAnthropicClient({
fetchOverride?: ClientOptions['fetch']
source?: string
}): Promise<Anthropic> {
const provider = getAPIProvider()
const containerId = process.env.CLAUDE_CODE_CONTAINER_ID
const remoteSessionId = process.env.CLAUDE_CODE_REMOTE_SESSION_ID
const clientApp = process.env.CLAUDE_AGENT_SDK_CLIENT_APP
@@ -150,7 +157,7 @@ export async function getAnthropicClient({
fetch: resolvedFetch,
}),
}
if (isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK)) {
if (provider === 'bedrock') {
const { AnthropicBedrock } = await import('@anthropic-ai/bedrock-sdk')
// Use region override for small fast model if specified
const awsRegion =
@@ -188,7 +195,7 @@ export async function getAnthropicClient({
// we have always been lying about the return type - this doesn't support batching or models
return new AnthropicBedrock(bedrockArgs) as unknown as Anthropic
}
if (isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY)) {
if (provider === 'foundry') {
const { AnthropicFoundry } = await import('@anthropic-ai/foundry-sdk')
// Determine Azure AD token provider based on configuration
// SDK reads ANTHROPIC_FOUNDRY_API_KEY by default
@@ -218,7 +225,7 @@ export async function getAnthropicClient({
// we have always been lying about the return type - this doesn't support batching or models
return new AnthropicFoundry(foundryArgs) as unknown as Anthropic
}
if (isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX)) {
if (provider === 'vertex') {
// Refresh GCP credentials if gcpAuthRefresh is configured and credentials are expired
// This is similar to how we handle AWS credential refresh for Bedrock
if (!isEnvTruthy(process.env.CLAUDE_CODE_SKIP_VERTEX_AUTH)) {
@@ -297,7 +304,37 @@ export async function getAnthropicClient({
return new AnthropicVertex(vertexArgs) as unknown as Anthropic
}
// Determine authentication method based on available tokens
if (provider === 'openrouter') {
const clientConfig: ConstructorParameters<typeof Anthropic>[0] = {
apiKey: null,
authToken: apiKey || getOpenRouterApiKey(),
baseURL: getOpenRouterBaseUrl(),
...ARGS,
...(isDebugToStdErr() && { logger: createStderrLogger() }),
}
return new Anthropic(clientConfig)
}
if (provider === 'openai') {
await refreshOpenAIAuthTokenIfNeeded()
const openAIKey = apiKey || getOpenAIApiKey()
if (!openAIKey) {
throw new Error(
'OpenAI provider selected but no OpenAI API key or access token is configured.',
)
}
return new OpenAIResponsesCompatClient({
apiKey: openAIKey,
baseURL: getOpenAIBaseUrl(),
defaultHeaders,
fetchImpl: resolvedFetch,
timeoutMs: ARGS.timeout,
}) as unknown as Anthropic
}
// Determine authentication method based on available Anthropic tokens
const clientConfig: ConstructorParameters<typeof Anthropic>[0] = {
apiKey: isClaudeAISubscriber() ? null : apiKey || getAnthropicApiKey(),
authToken: isClaudeAISubscriber()

View File

@@ -343,13 +343,13 @@ export async function checkGroveForNonInteractive(): Promise<void> {
if (config === null || config.notice_is_grace_period) {
// Grace period is still active - show informational message and continue
writeToStderr(
'\nAn update to our Consumer Terms and Privacy Policy will take effect on October 8, 2025. Run `claude` to review the updated terms.\n\n',
'\nAn update to our Consumer Terms and Privacy Policy will take effect on October 8, 2025. Run `better-clawd` to review the updated terms.\n\n',
)
await markGroveNoticeViewed()
} else {
// Grace period has ended - show error message and exit
writeToStderr(
'\n[ACTION REQUIRED] An update to our Consumer Terms and Privacy Policy has taken effect on October 8, 2025. You must run `claude` to review the updated terms.\n\n',
'\n[ACTION REQUIRED] An update to our Consumer Terms and Privacy Policy has taken effect on October 8, 2025. You must run `better-clawd` to review the updated terms.\n\n',
)
await gracefulShutdown(1)
}

View File

@@ -0,0 +1,495 @@
/** Anthropic tool definition as accepted by messages.create. */
type AnthropicTool = {
  name: string
  description?: string
  input_schema?: Record<string, unknown>
}
/** Anthropic content block; the final variant keeps unknown types open. */
type AnthropicContentBlock =
  | { type: 'text'; text: string }
  | { type: 'tool_use'; id?: string; name: string; input?: unknown }
  | { type: 'tool_result'; tool_use_id?: string; content?: unknown }
  | { type: string; [key: string]: unknown }
/** A single Anthropic conversation turn. */
type AnthropicMessage = {
  role: 'user' | 'assistant'
  content: string | AnthropicContentBlock[]
}
/** Subset of Anthropic messages.create parameters this adapter translates. */
type AnthropicMessagesCreateParams = {
  model: string
  messages: AnthropicMessage[]
  system?: string | Array<{ type?: string; text?: string }>
  tools?: AnthropicTool[]
  tool_choice?: { type?: string; name?: string } | null
  max_tokens?: number
  temperature?: number
  stream?: boolean
}
/** OpenAI Responses API output item (message, function_call, or other). */
type OpenAIResponseOutputItem =
  | {
      type: 'message'
      role?: 'assistant'
      content?: Array<{ type: string; text?: string }>
    }
  | {
      type: 'function_call'
      id?: string
      call_id?: string
      name: string
      arguments?: string
    }
  | {
      // Catch-all for other item types (e.g. reasoning items with `summary`).
      type: string
      id?: string
      call_id?: string
      name?: string
      arguments?: string
      content?: Array<{ type: string; text?: string }>
      summary?: Array<{ type: string; text?: string }>
    }
/** Minimal shape of a non-streaming OpenAI Responses API response body. */
type OpenAIResponse = {
  id: string
  model: string
  output?: OpenAIResponseOutputItem[]
  usage?: {
    input_tokens?: number
    output_tokens?: number
    total_tokens?: number
  }
}
/** Constructor options for OpenAIResponsesCompatClient. */
type OpenAICompatOptions = {
  apiKey: string
  baseURL: string
  defaultHeaders?: Record<string, string>
  fetchImpl?: typeof fetch
  timeoutMs: number
}
/** Shape returned by messages.create when stream: true (Anthropic SDK-like). */
type StreamWithResponse = {
  withResponse(): Promise<{
    request_id: string
    response: Response
    data: OpenAICompatStream
  }>
}
/**
 * Minimal async-iterable stream that replays a pre-computed list of
 * Anthropic-shaped streaming events. Exposes a `controller.abort()` surface
 * matching the Anthropic SDK stream so callers can cancel iteration.
 */
class OpenAICompatStream implements AsyncIterable<Record<string, unknown>> {
  private readonly events: Record<string, unknown>[]
  private aborted = false
  // Mirrors the SDK stream controller; aborting stops further yields.
  controller = {
    abort: () => {
      this.aborted = true
    },
  }
  constructor(events: Record<string, unknown>[]) {
    this.events = events
  }
  async *[Symbol.asyncIterator](): AsyncIterator<Record<string, unknown>> {
    let index = 0
    // Re-check the abort flag before every yield so a consumer-side abort
    // between events stops the replay immediately.
    while (index < this.events.length && !this.aborted) {
      yield this.events[index]
      index += 1
    }
  }
}
/**
 * Map a requested model id onto an OpenAI model id.
 * Ids that already look like OpenAI models (gpt-*, o*, codex*) pass through
 * untouched; anything else falls back to OPENAI_DEFAULT_MODEL or a
 * hard-coded default.
 */
function normalizeOpenAIModel(model: string): string {
  const openAIPrefixes = ['gpt-', 'o', 'codex']
  const looksLikeOpenAI = openAIPrefixes.some(prefix =>
    model.startsWith(prefix),
  )
  if (looksLikeOpenAI) {
    return model
  }
  return process.env.OPENAI_DEFAULT_MODEL || 'gpt-5.4'
}
/**
 * Flatten an Anthropic `system` field (plain string or list of text blocks)
 * into the single instruction string the OpenAI Responses API expects.
 * Returns undefined when there is no system prompt to send.
 */
function systemToInstructions(
  system?: AnthropicMessagesCreateParams['system'],
): string | undefined {
  if (!system) {
    return undefined
  }
  if (typeof system === 'string') {
    return system
  }
  // Collect non-empty text fields and separate them with blank lines.
  const parts: string[] = []
  for (const block of system) {
    if ('text' in block && typeof block.text === 'string' && block.text) {
      parts.push(block.text)
    }
  }
  return parts.join('\n\n')
}
/**
 * Render a tool_result payload as plain text for the OpenAI
 * function_call_output field. Strings pass through unchanged; arrays are
 * flattened item-by-item (text blocks contribute their text, everything else
 * is JSON-encoded) and joined with newlines; any other value is
 * JSON-encoded, with null/undefined normalized to the empty string first.
 */
function stringifyToolOutput(content: unknown): string {
  if (typeof content === 'string') {
    return content
  }
  if (!Array.isArray(content)) {
    return JSON.stringify(content ?? '')
  }
  const lines = (content as unknown[]).map(item => {
    if (typeof item === 'string') {
      return item
    }
    if (
      item !== null &&
      typeof item === 'object' &&
      'text' in item &&
      typeof (item as { text?: unknown }).text === 'string'
    ) {
      return (item as { text: string }).text
    }
    return JSON.stringify(item)
  })
  return lines.join('\n')
}
/**
 * Translate Anthropic-style messages into the OpenAI Responses `input` list.
 *
 * Consecutive text blocks within one message are buffered and emitted as a
 * single role/content item; tool_use blocks (assistant side) become
 * function_call items and tool_result blocks (user side) become
 * function_call_output items. Any other block type is rendered as tagged
 * plain text so nothing is silently dropped.
 */
function anthropicMessagesToOpenAIInput(
  messages: AnthropicMessage[],
): Array<Record<string, unknown>> {
  const input: Array<Record<string, unknown>> = []
  for (const message of messages) {
    // Plain string content maps 1:1 to a single input item.
    if (typeof message.content === 'string') {
      input.push({ role: message.role, content: message.content })
      continue
    }
    let bufferedText: string[] = []
    // Emit any accumulated text blocks as one item, preserving their order
    // relative to the tool blocks that interrupt them.
    const flushBufferedText = () => {
      if (bufferedText.length === 0) {
        return
      }
      input.push({
        role: message.role,
        content: bufferedText.join('\n'),
      })
      bufferedText = []
    }
    for (const block of message.content) {
      if (block.type === 'text' && typeof block.text === 'string') {
        bufferedText.push(block.text)
        continue
      }
      // Any non-text block ends the current text run.
      flushBufferedText()
      if (block.type === 'tool_use' && message.role === 'assistant') {
        input.push({
          type: 'function_call',
          // Fall back to a name-derived id when the block carries none.
          call_id: block.id ?? `call_${block.name}`,
          name: block.name,
          arguments: JSON.stringify(block.input ?? {}),
        })
        continue
      }
      if (block.type === 'tool_result' && message.role === 'user') {
        input.push({
          type: 'function_call_output',
          call_id: block.tool_use_id ?? 'tool_call',
          output: stringifyToolOutput(block.content),
        })
        continue
      }
      // Unknown block types degrade to tagged text rather than being dropped.
      input.push({
        role: message.role,
        content: `[${block.type}] ${stringifyToolOutput(block)}`,
      })
    }
    flushBufferedText()
  }
  return input
}
/**
 * Convert Anthropic tool definitions into OpenAI Responses function tools.
 * Returns undefined when there is nothing to convert so the request body
 * can omit the field entirely.
 */
function anthropicToolsToOpenAI(
  tools?: AnthropicTool[],
): Array<Record<string, unknown>> | undefined {
  if (!tools?.length) {
    return undefined
  }
  const converted: Array<Record<string, unknown>> = []
  for (const tool of tools) {
    converted.push({
      type: 'function',
      name: tool.name,
      description: tool.description,
      // A tool with no declared schema accepts any object.
      parameters: tool.input_schema ?? {
        type: 'object',
        properties: {},
        additionalProperties: true,
      },
      strict: false,
    })
  }
  return converted
}
/**
 * Map an Anthropic tool_choice onto the OpenAI Responses equivalent.
 * 'auto'/'none' pass through as strings; a named tool becomes a function
 * selector; anything unrecognized yields undefined so the field is omitted.
 */
function anthropicToolChoiceToOpenAI(
  toolChoice: AnthropicMessagesCreateParams['tool_choice'],
): string | Record<string, unknown> | undefined {
  const choiceType = toolChoice?.type
  switch (choiceType) {
    case 'auto':
    case 'none':
      return choiceType
    case 'tool':
      return toolChoice?.name
        ? { type: 'function', name: toolChoice.name }
        : undefined
    default:
      // Includes null/undefined tool_choice and unknown type values.
      return undefined
  }
}
/**
 * Pull the concatenated text out of a Responses output item, preferring
 * `content` parts and falling back to `summary` parts (reasoning items).
 * Returns the empty string when neither is present.
 */
function extractAssistantText(item: OpenAIResponseOutputItem): string {
  const joinText = (parts: Array<{ type: string; text?: string }>): string =>
    parts
      .map(part => (typeof part.text === 'string' ? part.text : ''))
      .join('')
  if ('content' in item && Array.isArray(item.content)) {
    return joinText(item.content)
  }
  if ('summary' in item && Array.isArray(item.summary)) {
    return joinText(item.summary)
  }
  return ''
}
/**
 * Convert OpenAI Responses output items into Anthropic content blocks.
 * message items become text blocks; function_call items become tool_use
 * blocks; any other item type contributes its extractable text, if any.
 */
function openAIOutputToAnthropicBlocks(
  output: OpenAIResponseOutputItem[] = [],
): Array<Record<string, unknown>> {
  const blocks: Array<Record<string, unknown>> = []
  for (const item of output) {
    if (item.type === 'message') {
      const text = extractAssistantText(item)
      // Skip empty messages entirely rather than emitting empty text blocks.
      if (text) {
        blocks.push({ type: 'text', text })
      }
      continue
    }
    if (item.type === 'function_call') {
      let parsedArguments: unknown = {}
      try {
        parsedArguments = item.arguments ? JSON.parse(item.arguments) : {}
      } catch {
        // Malformed JSON from the model: pass the raw string through rather
        // than dropping the call.
        parsedArguments = item.arguments ?? {}
      }
      blocks.push({
        type: 'tool_use',
        // Prefer call_id (Responses correlation id), then item id, then a
        // name-derived fallback.
        id: item.call_id ?? item.id ?? `call_${item.name}`,
        name: item.name,
        input: parsedArguments,
      })
      continue
    }
    // Other item types (e.g. reasoning) surface as text when they carry any.
    const text = extractAssistantText(item)
    if (text) {
      blocks.push({ type: 'text', text })
    }
  }
  return blocks
}
/**
 * Assemble a complete Anthropic-shaped assistant message from an OpenAI
 * Responses payload. stop_reason is 'tool_use' when any tool_use block is
 * present, otherwise 'end_turn'; missing usage counts default to 0.
 */
function openAIResponseToAnthropicMessage(
  response: OpenAIResponse,
  model: string,
): Record<string, unknown> {
  const content = openAIOutputToAnthropicBlocks(response.output)
  const hasToolUse = content.some(block => block.type === 'tool_use')
  const usage = response.usage
  return {
    id: response.id,
    type: 'message',
    role: 'assistant',
    model,
    content,
    stop_reason: hasToolUse ? 'tool_use' : 'end_turn',
    stop_sequence: null,
    usage: {
      input_tokens: usage?.input_tokens ?? 0,
      output_tokens: usage?.output_tokens ?? 0,
    },
  }
}
/**
 * Replay a complete (non-streamed) response as the Anthropic streaming event
 * sequence: message_start → per-block start/delta/stop → message_delta →
 * message_stop. Since the full response is already in hand, each content
 * block is delivered as a single delta.
 */
function openAIResponseToAnthropicEvents(
  response: OpenAIResponse,
  model: string,
): Record<string, unknown>[] {
  const message = openAIResponseToAnthropicMessage(response, model)
  const blocks = (message.content as Array<Record<string, unknown>>) ?? []
  const events: Record<string, unknown>[] = [
    {
      type: 'message_start',
      message,
    },
  ]
  blocks.forEach((block, index) => {
    if (block.type === 'text') {
      events.push({
        type: 'content_block_start',
        index,
        content_block: { type: 'text', text: '' },
      })
      // The entire text arrives as one delta.
      events.push({
        type: 'content_block_delta',
        index,
        delta: {
          type: 'text_delta',
          text: block.text,
        },
      })
      events.push({ type: 'content_block_stop', index })
      return
    }
    if (block.type === 'tool_use') {
      // tool_use input streams as a JSON string per the input_json_delta
      // contract; stringify unless it already is one.
      const rawInput =
        typeof block.input === 'string'
          ? block.input
          : JSON.stringify(block.input ?? {})
      events.push({
        type: 'content_block_start',
        index,
        content_block: {
          type: 'tool_use',
          id: block.id,
          name: block.name,
          input: '',
        },
      })
      events.push({
        type: 'content_block_delta',
        index,
        delta: {
          type: 'input_json_delta',
          partial_json: rawInput,
        },
      })
      events.push({ type: 'content_block_stop', index })
    }
  })
  events.push({
    type: 'message_delta',
    delta: {
      stop_reason: message.stop_reason,
      stop_sequence: null,
    },
    usage: {
      output_tokens: response.usage?.output_tokens ?? 0,
    },
  })
  events.push({ type: 'message_stop' })
  return events
}
/**
 * Assemble the OpenAI Responses request body from Anthropic create params.
 * Helpers return undefined for absent optional fields so JSON.stringify
 * omits them from the serialized request.
 */
function buildOpenAIRequestBody(
  params: AnthropicMessagesCreateParams,
): Record<string, unknown> {
  return {
    model: normalizeOpenAIModel(params.model),
    input: anthropicMessagesToOpenAIInput(params.messages),
    instructions: systemToInstructions(params.system),
    tools: anthropicToolsToOpenAI(params.tools),
    tool_choice: anthropicToolChoiceToOpenAI(params.tool_choice),
    max_output_tokens: params.max_tokens,
    temperature: params.temperature,
  }
}
/**
 * Adapter exposing the subset of the Anthropic SDK client surface used here
 * (`beta.messages.create`, with and without streaming) on top of the OpenAI
 * Responses API.
 *
 * Streaming is emulated: the full response is fetched, then replayed as a
 * synthetic Anthropic event sequence via OpenAICompatStream.
 */
export class OpenAIResponsesCompatClient {
  private readonly options: OpenAICompatOptions
  beta = {
    messages: {
      /**
       * Anthropic-compatible messages.create. Returns a message promise for
       * non-streaming calls, or an object whose withResponse() yields a
       * replayed event stream plus a synthesized Response.
       */
      create: (
        params: AnthropicMessagesCreateParams,
        requestOptions?: { signal?: AbortSignal },
      ): Promise<Record<string, unknown>> | StreamWithResponse => {
        if (params.stream) {
          return {
            withResponse: async () => {
              const response = await this.createResponse(params, requestOptions)
              const stream = new OpenAICompatStream(
                openAIResponseToAnthropicEvents(
                  response,
                  normalizeOpenAIModel(params.model),
                ),
              )
              return {
                request_id: response.id,
                // Callers expect a raw Response alongside the stream, so
                // synthesize one from the already-parsed body.
                response: new Response(JSON.stringify(response), {
                  status: 200,
                  headers: { 'content-type': 'application/json' },
                }),
                data: stream,
              }
            },
          }
        }
        return this.createResponse(params, requestOptions).then(response =>
          openAIResponseToAnthropicMessage(
            response,
            normalizeOpenAIModel(params.model),
          ),
        )
      },
    },
  }
  constructor(options: OpenAICompatOptions) {
    this.options = options
  }
  /**
   * POST the translated request to `{baseURL}/responses` and parse the JSON
   * body. Applies the configured timeout and propagates caller aborts.
   * @throws Error on a non-2xx status, including the response text.
   */
  private async createResponse(
    params: AnthropicMessagesCreateParams,
    requestOptions?: { signal?: AbortSignal },
  ): Promise<OpenAIResponse> {
    const controller = new AbortController()
    const timeout = setTimeout(() => controller.abort(), this.options.timeoutMs)
    // Propagate caller cancellation. Handle a signal that was aborted before
    // this call (addEventListener alone would never fire), and use
    // { once: true } so repeated calls sharing one signal don't accumulate
    // listeners.
    if (requestOptions?.signal?.aborted) {
      controller.abort()
    }
    requestOptions?.signal?.addEventListener(
      'abort',
      () => controller.abort(),
      { once: true },
    )
    try {
      const response = await (this.options.fetchImpl ?? globalThis.fetch)(
        `${this.options.baseURL.replace(/\/$/, '')}/responses`,
        {
          method: 'POST',
          signal: controller.signal,
          headers: {
            'content-type': 'application/json',
            Authorization: `Bearer ${this.options.apiKey}`,
            ...this.options.defaultHeaders,
          },
          body: JSON.stringify(buildOpenAIRequestBody(params)),
        },
      )
      if (!response.ok) {
        throw new Error(
          `OpenAI Responses API error ${response.status}: ${await response.text()}`,
        )
      }
      return (await response.json()) as OpenAIResponse
    } finally {
      clearTimeout(timeout)
    }
  }
}

View File

@@ -0,0 +1,83 @@
/** Cache-edits content block: asks the API to drop listed cached tool results. */
export type CacheEditsBlock = {
  type: 'cache_edits'
  delete_tool_result_ids: string[]
}
/** A cache-edits block pinned to the index of the user message it follows. */
export type PinnedCacheEdits = {
  userMessageIndex: number
  block: CacheEditsBlock
}
/** Mutable per-session bookkeeping for cached microcompaction. */
export type CachedMCState = {
  // tool_use ids seen so far (dedupe set for toolOrder)
  registeredTools: Set<string>
  // ids already deleted from the cache
  deletedRefs: Set<string>
  // tool_use ids in arrival order
  toolOrder: string[]
  // cache-edits blocks already emitted into the transcript
  pinnedEdits: PinnedCacheEdits[]
}
/** Feature flag stub — cached microcompaction is disabled in this build. */
export function isCachedMicrocompactEnabled(): boolean {
  return false
}
/** Stub: no model supports cache editing while the feature is disabled. */
export function isModelSupportedForCacheEditing(_model: string): boolean {
  return false
}
/**
 * Stub config: zeroed thresholds and an empty supported-model list.
 * Returns a fresh object on every call.
 */
export function getCachedMCConfig() {
  return {
    triggerThreshold: 0,
    keepRecent: 0,
    supportedModels: [] as string[],
  }
}
/** Create an empty per-session cached-microcompaction state. */
export function createCachedMCState(): CachedMCState {
  return {
    registeredTools: new Set(),
    deletedRefs: new Set(),
    toolOrder: [],
    pinnedEdits: [],
  }
}
/**
 * Record a tool_use id the first time it is seen, preserving arrival order
 * in state.toolOrder. Duplicate registrations are ignored.
 */
export function registerToolResult(
  state: CachedMCState,
  toolUseId: string,
): void {
  const alreadySeen = state.registeredTools.has(toolUseId)
  if (!alreadySeen) {
    state.registeredTools.add(toolUseId)
    state.toolOrder.push(toolUseId)
  }
}
/** Stub: message-level registration is a no-op while the feature is disabled. */
export function registerToolMessage(
  _state: CachedMCState,
  _groupIds: string[],
): void {}
/** Stub: never schedules any tool results for deletion. */
export function getToolResultsToDelete(_state: CachedMCState): string[] {
  return []
}
/**
 * Build a cache_edits block deleting the given tool results, or null when
 * there is nothing to delete (so callers can skip emitting a block).
 */
export function createCacheEditsBlock(
  _state: CachedMCState,
  toolIds: string[],
): CacheEditsBlock | null {
  return toolIds.length > 0
    ? {
        type: 'cache_edits',
        delete_tool_result_ids: toolIds,
      }
    : null
}
/** Stub: no bookkeeping needed while the feature is disabled. */
export function markToolsSentToAPI(_state: CachedMCState): void {}
/**
 * Clear all bookkeeping in place, reusing the same state object so existing
 * references stay valid.
 */
export function resetCachedMCState(state: CachedMCState): void {
  state.toolOrder.length = 0
  state.pinnedEdits.length = 0
  state.registeredTools.clear()
  state.deletedRefs.clear()
}

View File

@@ -748,8 +748,9 @@ export async function compactConversation(
}
} catch (error) {
// Only show the error notification for manual /compact.
// Auto-compact failures are retried on the next turn and the
// notification is confusing when compaction eventually succeeds.
// Auto-compact failures are retried silently until the session-level
// circuit breaker trips, and a user-facing notification here would be
// noisy for failures that recover on a later turn.
if (!isAutoCompact) {
addErrorNotificationIfNeeded(error, context)
}

View File

@@ -0,0 +1,25 @@
import type { Message, SystemMessage } from '../../types/message.js'
/** Result of a snip-compaction pass over the transcript. */
export type SnipCompactResult = {
  // possibly-reduced message list (unchanged while the feature is disabled)
  messages: Message[]
  // tokens reclaimed by the pass (0 while the feature is disabled)
  tokensFreed: number
  // marker message inserted at the snip boundary, if one was created
  boundaryMessage?: SystemMessage
}
/** Feature flag stub — snip compaction is disabled in this build. */
export function isSnipRuntimeEnabled(): boolean {
  return false
}
/** Stub: no message is ever a snip marker while the feature is disabled. */
export function isSnipMarkerMessage(_message: Message): boolean {
  return false
}
/**
 * Stub: returns the input messages unchanged with zero tokens freed and no
 * boundary message, regardless of options.
 */
export function snipCompactIfNeeded(
  messages: Message[],
  _options?: { force?: boolean },
): SnipCompactResult {
  return {
    messages,
    tokensFreed: 0,
  }
}

View File

@@ -0,0 +1,27 @@
/** Aggregate statistics for the (currently stubbed) span-collapse feature. */
type CollapseStats = {
  collapsedSpans: number
  stagedSpans: number
  health: {
    totalErrors: number
    totalEmptySpawns: number
    emptySpawnWarningEmitted: boolean
  }
}
// Shared zeroed stats instance. NOTE(review): this single object is handed to
// every getStats() caller — callers must treat it as read-only; confirm none
// mutate it.
const EMPTY_STATS: CollapseStats = {
  collapsedSpans: 0,
  stagedSpans: 0,
  health: {
    totalErrors: 0,
    totalEmptySpawns: 0,
    emptySpawnWarningEmitted: false,
  },
}
/** Stub: always returns the shared zeroed stats object (do not mutate). */
export function getStats(): CollapseStats {
  return EMPTY_STATS
}
/** Stub: the listener is never invoked; returns a no-op unsubscribe. */
export function subscribe(_listener: () => void): () => void {
  return () => {}
}