diff --git a/.claude/settings.local.json b/.claude/settings.local.json
deleted file mode 100644
index 6fccb8f..0000000
--- a/.claude/settings.local.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "permissions": {
- "allow": [
- "Bash(cat:*)",
- "Bash(for f in ~/.local/share/opencode/storage/part/*/*)",
- "Bash(do grep -l \"\"type\"\":\"\"reasoning\"\" $f)",
- "Bash(done)",
- "WebSearch",
- "WebFetch(domain:ai-sdk.dev)",
- "Bash(npm run typecheck:*)"
- ],
- "deny": [],
- "ask": []
- }
-}
diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml
index f9c7804..e665cfc 100644
--- a/.github/workflows/pr-checks.yml
+++ b/.github/workflows/pr-checks.yml
@@ -2,9 +2,7 @@ name: PR Checks
on:
pull_request:
- branches: [main, master]
- push:
- branches: [main, master]
+ branches: [master, dev]
jobs:
validate:
diff --git a/.gitignore b/.gitignore
index 6fcd193..c4c6365 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,7 +27,6 @@ Thumbs.db
# OpenCode
.opencode/
-AGENTS.md
# Tests (local development only)
tests/
diff --git a/README.md b/README.md
index 113c8ff..59b222a 100644
--- a/README.md
+++ b/README.md
@@ -23,19 +23,15 @@ Restart OpenCode. The plugin will automatically start optimizing your sessions.
## How Pruning Works
-DCP uses two complementary techniques:
+DCP uses multiple strategies to reduce context size:
-**Automatic Deduplication** — Silently identifies repeated tool calls (e.g., reading the same file multiple times) and keeps only the most recent output. Runs on every request with zero LLM cost.
+**Deduplication** — Identifies repeated tool calls (e.g., reading the same file multiple times) and keeps only the most recent output. Runs automatically on every request with zero LLM cost.
-**AI Analysis** — Uses a language model to semantically analyze conversation context and identify tool outputs that are no longer relevant to the current task.
+**On Idle Analysis** — Uses a language model to semantically analyze conversation context during idle periods and identify tool outputs that are no longer relevant.
-## Context Pruning Tool
+**Prune Tool** — Exposes a `prune` tool that the AI can call to manually trigger pruning when it determines context cleanup is needed.
-When `strategies.onTool` is enabled, DCP exposes a `prune` tool to Opencode that the AI can call to trigger pruning on demand.
-
-Adjust `nudge_freq` to control how aggressively the AI is prompted to prune — lower values trigger reminders sooner and more often.
-
-## How It Works
+*More strategies coming soon.*
Your session history is never modified. DCP replaces pruned outputs with a placeholder before sending requests to your LLM.
@@ -47,40 +43,74 @@ LLM providers like Anthropic and OpenAI cache prompts based on exact prefix matc
## Configuration
-DCP uses its own config file (`~/.config/opencode/dcp.jsonc` or `.opencode/dcp.jsonc`), created automatically on first run.
+DCP uses its own config file:
-### Options
+- Global: `~/.config/opencode/dcp.jsonc` (or `dcp.json`), created automatically on first run
+- Custom config directory: `$OPENCODE_CONFIG_DIR/dcp.jsonc` (or `dcp.json`), if `OPENCODE_CONFIG_DIR` is set
+- Project: `.opencode/dcp.jsonc` (or `dcp.json`) in your project’s `.opencode` directory
-| Option | Default | Description |
-|--------|---------|-------------|
-| `enabled` | `true` | Enable/disable the plugin |
-| `debug` | `false` | Log to `~/.config/opencode/logs/dcp/` |
-| `model` | (session) | Model for analysis (e.g., `"anthropic/claude-haiku-4-5"`) |
-| `showModelErrorToasts` | `true` | Show notifications on model fallback |
-| `showUpdateToasts` | `true` | Show notifications when a new version is available |
-| `strictModelSelection` | `false` | Only run AI analysis with session or configured model (disables fallback models) |
-| `pruning_summary` | `"detailed"` | `"off"`, `"minimal"`, or `"detailed"` |
-| `nudge_freq` | `10` | How often to remind AI to prune (lower = more frequent) |
-| `protectedTools` | `["task", "todowrite", "todoread", "prune", "batch", "write", "edit"]` | Tools that are never pruned |
-| `strategies.onIdle` | `["ai-analysis"]` | Strategies for automatic pruning |
-| `strategies.onTool` | `["ai-analysis"]` | Strategies when AI calls `prune` |
-
-**Strategies:** `"ai-analysis"` uses LLM to identify prunable outputs. Empty array disables that trigger. Deduplication runs automatically on every request.
+
+Default Configuration (click to expand)
```jsonc
{
+ // Enable or disable the plugin
"enabled": true,
+ // Enable debug logging to ~/.config/opencode/logs/dcp/
+ "debug": false,
+ // Show toast notifications when a new version is available
+ "showUpdateToasts": true,
+ // Summary display: "off", "minimal", or "detailed"
+ "pruningSummary": "detailed",
+ // Strategies for pruning tokens from chat history
"strategies": {
- "onIdle": ["ai-analysis"],
- "onTool": ["ai-analysis"]
- },
- "protectedTools": ["task", "todowrite", "todoread", "prune", "batch", "write", "edit"]
+ // Remove duplicate tool calls (same tool with same arguments)
+ "deduplication": {
+ "enabled": true,
+ // Additional tools to protect from pruning
+ "protectedTools": []
+ },
+ // Exposes a prune tool to your LLM to call when it determines pruning is necessary
+ "pruneTool": {
+ "enabled": true,
+ // Additional tools to protect from pruning
+ "protectedTools": [],
+ // Nudge the LLM to use the prune tool (every N tool results)
+ "nudge": {
+ "enabled": true,
+ "frequency": 10
+ }
+ },
+ // (Legacy) Run an LLM to analyze what tool calls are no longer relevant on idle
+ "onIdle": {
+ "enabled": false,
+ // Override model for analysis (format: "provider/model")
+ // "model": "anthropic/claude-haiku-4-5",
+ // Show toast notifications when model selection fails
+ "showModelErrorToasts": true,
+ // When true, fallback models are not permitted
+ "strictModelSelection": false,
+ // Additional tools to protect from pruning
+ "protectedTools": []
+ }
+ }
}
```
+
+
+### Protected Tools
+
+By default, these tools are always protected from pruning across all strategies:
+`task`, `todowrite`, `todoread`, `prune`, `batch`, `write`, `edit`
+
+The `protectedTools` arrays in each strategy add to this default list.
+
### Config Precedence
-Settings are merged in order: **Defaults** → **Global** (`~/.config/opencode/dcp.jsonc`) → **Project** (`.opencode/dcp.jsonc`). Each level overrides the previous, so project settings take priority over global, which takes priority over defaults.
+Settings are merged in order:
+Defaults → Global (`~/.config/opencode/dcp.jsonc`) → Config Dir (`$OPENCODE_CONFIG_DIR/dcp.jsonc`) → Project (`.opencode/dcp.jsonc`).
+Each level overrides the previous, so project settings take priority over config-dir and global, which take priority over defaults.
Restart OpenCode after making config changes.
diff --git a/index.ts b/index.ts
index 0677d42..6b617c4 100644
--- a/index.ts
+++ b/index.ts
@@ -1,16 +1,13 @@
import type { Plugin } from "@opencode-ai/plugin"
import { getConfig } from "./lib/config"
import { Logger } from "./lib/logger"
-import { createJanitorContext } from "./lib/core/janitor"
-import { checkForUpdates } from "./lib/version-checker"
-import { createPluginState } from "./lib/state"
-import { installFetchWrapper } from "./lib/fetch-wrapper"
-import { createPruningTool } from "./lib/pruning-tool"
-import { createEventHandler, createChatParamsHandler } from "./lib/hooks"
-import { createToolTracker } from "./lib/fetch-wrapper/tool-tracker"
+import { loadPrompt } from "./lib/prompt"
+import { createSessionState } from "./lib/state"
+import { createPruneTool } from "./lib/strategies"
+import { createChatMessageTransformHandler, createEventHandler } from "./lib/hooks"
const plugin: Plugin = (async (ctx) => {
- const { config, migrations } = getConfig(ctx)
+ const config = getConfig(ctx)
if (!config.enabled) {
return {}
@@ -23,82 +20,46 @@ const plugin: Plugin = (async (ctx) => {
// Initialize core components
const logger = new Logger(config.debug)
- const state = createPluginState()
-
- const janitorCtx = createJanitorContext(
- ctx.client,
- state,
- logger,
- {
- protectedTools: config.protectedTools,
- model: config.model,
- showModelErrorToasts: config.showModelErrorToasts ?? true,
- strictModelSelection: config.strictModelSelection ?? false,
- pruningSummary: config.pruning_summary,
- workingDirectory: ctx.directory
- }
- )
-
- // Create tool tracker for nudge injection
- const toolTracker = createToolTracker()
-
- // Install global fetch wrapper for context pruning and system message injection
- installFetchWrapper(state, logger, ctx.client, config, toolTracker)
+ const state = createSessionState()
// Log initialization
- logger.info("plugin", "DCP initialized", {
+ logger.info("DCP initialized", {
strategies: config.strategies,
- model: config.model || "auto"
})
- // Check for updates after a delay
- setTimeout(() => {
- checkForUpdates(ctx.client, logger, config.showUpdateToasts ?? true).catch(() => { })
- }, 5000)
-
- // Show migration toast if there were config migrations
- if (migrations.length > 0) {
- setTimeout(async () => {
- try {
- await ctx.client.tui.showToast({
- body: {
- title: "DCP: Config upgraded",
- message: migrations.join('\n'),
- variant: "info",
- duration: 8000
- }
- })
- } catch {
- // Silently ignore toast errors
- }
- }, 7000)
- }
-
return {
+ "experimental.chat.system.transform": async (_input: unknown, output: { system: string[] }) => {
+ const syntheticPrompt = loadPrompt("synthetic")
+ output.system.push(syntheticPrompt)
+ },
+ "experimental.chat.messages.transform": createChatMessageTransformHandler(
+ ctx.client,
+ state,
+ logger,
+ config
+ ),
+ tool: config.strategies.pruneTool.enabled ? {
+ prune: createPruneTool({
+ client: ctx.client,
+ state,
+ logger,
+ config,
+ workingDirectory: ctx.directory
+ }),
+ } : undefined,
config: async (opencodeConfig) => {
// Add prune to primary_tools by mutating the opencode config
// This works because config is cached and passed by reference
- if (config.strategies.onTool.length > 0) {
+ if (config.strategies.pruneTool.enabled) {
const existingPrimaryTools = opencodeConfig.experimental?.primary_tools ?? []
opencodeConfig.experimental = {
...opencodeConfig.experimental,
primary_tools: [...existingPrimaryTools, "prune"],
}
- logger.info("plugin", "Added 'prune' to experimental.primary_tools via config mutation")
+ logger.info("Added 'prune' to experimental.primary_tools via config mutation")
}
},
- event: createEventHandler(ctx.client, janitorCtx, logger, config, toolTracker),
- "chat.params": createChatParamsHandler(ctx.client, state, logger, toolTracker),
- tool: config.strategies.onTool.length > 0 ? {
- prune: createPruningTool({
- client: ctx.client,
- state,
- logger,
- config,
- notificationCtx: janitorCtx.notificationCtx,
- workingDirectory: ctx.directory
- }, toolTracker),
- } : undefined,
+ event: createEventHandler(ctx.client, config, state, logger, ctx.directory),
}
}) satisfies Plugin
diff --git a/lib/config.ts b/lib/config.ts
index 57a2093..eb90adc 100644
--- a/lib/config.ts
+++ b/lib/config.ts
@@ -1,61 +1,246 @@
-import { readFileSync, writeFileSync, existsSync, mkdirSync, statSync, copyFileSync } from 'fs'
+import { readFileSync, writeFileSync, existsSync, mkdirSync, statSync } from 'fs'
import { join, dirname } from 'path'
import { homedir } from 'os'
import { parse } from 'jsonc-parser'
-import { Logger } from './logger'
import type { PluginInput } from '@opencode-ai/plugin'
-export type PruningStrategy = "deduplication" | "ai-analysis"
-
-export interface PluginConfig {
+export interface Deduplication {
enabled: boolean
- debug: boolean
protectedTools: string[]
+}
+
+export interface OnIdle {
+ enabled: boolean
model?: string
showModelErrorToasts?: boolean
- showUpdateToasts?: boolean
strictModelSelection?: boolean
- pruning_summary: "off" | "minimal" | "detailed"
- nudge_freq: number
+ protectedTools: string[]
+}
+
+export interface PruneToolNudge {
+ enabled: boolean
+ frequency: number
+}
+
+export interface PruneTool {
+ enabled: boolean
+ protectedTools: string[]
+ nudge: PruneToolNudge
+}
+
+export interface PluginConfig {
+ enabled: boolean
+ debug: boolean
+ showUpdateToasts?: boolean
+ pruningSummary: "off" | "minimal" | "detailed"
strategies: {
- onIdle: PruningStrategy[]
- onTool: PruningStrategy[]
+ deduplication: Deduplication
+ onIdle: OnIdle
+ pruneTool: PruneTool
}
}
-export interface ConfigResult {
- config: PluginConfig
- migrations: string[]
+const DEFAULT_PROTECTED_TOOLS = ['task', 'todowrite', 'todoread', 'prune', 'batch', 'write', 'edit']
+
+// Valid config keys for validation against user config
+export const VALID_CONFIG_KEYS = new Set([
+ // Top-level keys
+ 'enabled',
+ 'debug',
+ 'showUpdateToasts',
+ 'pruningSummary',
+ 'strategies',
+ // strategies.deduplication
+ 'strategies.deduplication',
+ 'strategies.deduplication.enabled',
+ 'strategies.deduplication.protectedTools',
+ // strategies.onIdle
+ 'strategies.onIdle',
+ 'strategies.onIdle.enabled',
+ 'strategies.onIdle.model',
+ 'strategies.onIdle.showModelErrorToasts',
+ 'strategies.onIdle.strictModelSelection',
+ 'strategies.onIdle.protectedTools',
+ // strategies.pruneTool
+ 'strategies.pruneTool',
+ 'strategies.pruneTool.enabled',
+ 'strategies.pruneTool.protectedTools',
+ 'strategies.pruneTool.nudge',
+ 'strategies.pruneTool.nudge.enabled',
+ 'strategies.pruneTool.nudge.frequency',
+])
+
+// Extract all key paths from a config object for validation
+function getConfigKeyPaths(obj: Record<string, any>, prefix = ''): string[] {
+ const keys: string[] = []
+ for (const key of Object.keys(obj)) {
+ const fullKey = prefix ? `${prefix}.${key}` : key
+ keys.push(fullKey)
+ if (obj[key] && typeof obj[key] === 'object' && !Array.isArray(obj[key])) {
+ keys.push(...getConfigKeyPaths(obj[key], fullKey))
+ }
+ }
+ return keys
+}
+
+// Returns invalid keys found in user config
+export function getInvalidConfigKeys(userConfig: Record<string, any>): string[] {
+ const userKeys = getConfigKeyPaths(userConfig)
+ return userKeys.filter(key => !VALID_CONFIG_KEYS.has(key))
+}
+
+// Type validators for config values
+interface ValidationError {
+ key: string
+ expected: string
+ actual: string
+}
+
+function validateConfigTypes(config: Record<string, any>): ValidationError[] {
+ const errors: ValidationError[] = []
+
+ // Top-level validators
+ if (config.enabled !== undefined && typeof config.enabled !== 'boolean') {
+ errors.push({ key: 'enabled', expected: 'boolean', actual: typeof config.enabled })
+ }
+ if (config.debug !== undefined && typeof config.debug !== 'boolean') {
+ errors.push({ key: 'debug', expected: 'boolean', actual: typeof config.debug })
+ }
+ if (config.showUpdateToasts !== undefined && typeof config.showUpdateToasts !== 'boolean') {
+ errors.push({ key: 'showUpdateToasts', expected: 'boolean', actual: typeof config.showUpdateToasts })
+ }
+ if (config.pruningSummary !== undefined) {
+ const validValues = ['off', 'minimal', 'detailed']
+ if (!validValues.includes(config.pruningSummary)) {
+ errors.push({ key: 'pruningSummary', expected: '"off" | "minimal" | "detailed"', actual: JSON.stringify(config.pruningSummary) })
+ }
+ }
+
+ // Strategies validators
+ const strategies = config.strategies
+ if (strategies) {
+ // deduplication
+ if (strategies.deduplication?.enabled !== undefined && typeof strategies.deduplication.enabled !== 'boolean') {
+ errors.push({ key: 'strategies.deduplication.enabled', expected: 'boolean', actual: typeof strategies.deduplication.enabled })
+ }
+ if (strategies.deduplication?.protectedTools !== undefined && !Array.isArray(strategies.deduplication.protectedTools)) {
+ errors.push({ key: 'strategies.deduplication.protectedTools', expected: 'string[]', actual: typeof strategies.deduplication.protectedTools })
+ }
+
+ // onIdle
+ if (strategies.onIdle) {
+ if (strategies.onIdle.enabled !== undefined && typeof strategies.onIdle.enabled !== 'boolean') {
+ errors.push({ key: 'strategies.onIdle.enabled', expected: 'boolean', actual: typeof strategies.onIdle.enabled })
+ }
+ if (strategies.onIdle.model !== undefined && typeof strategies.onIdle.model !== 'string') {
+ errors.push({ key: 'strategies.onIdle.model', expected: 'string', actual: typeof strategies.onIdle.model })
+ }
+ if (strategies.onIdle.showModelErrorToasts !== undefined && typeof strategies.onIdle.showModelErrorToasts !== 'boolean') {
+ errors.push({ key: 'strategies.onIdle.showModelErrorToasts', expected: 'boolean', actual: typeof strategies.onIdle.showModelErrorToasts })
+ }
+ if (strategies.onIdle.strictModelSelection !== undefined && typeof strategies.onIdle.strictModelSelection !== 'boolean') {
+ errors.push({ key: 'strategies.onIdle.strictModelSelection', expected: 'boolean', actual: typeof strategies.onIdle.strictModelSelection })
+ }
+ if (strategies.onIdle.protectedTools !== undefined && !Array.isArray(strategies.onIdle.protectedTools)) {
+ errors.push({ key: 'strategies.onIdle.protectedTools', expected: 'string[]', actual: typeof strategies.onIdle.protectedTools })
+ }
+ }
+
+ // pruneTool
+ if (strategies.pruneTool) {
+ if (strategies.pruneTool.enabled !== undefined && typeof strategies.pruneTool.enabled !== 'boolean') {
+ errors.push({ key: 'strategies.pruneTool.enabled', expected: 'boolean', actual: typeof strategies.pruneTool.enabled })
+ }
+ if (strategies.pruneTool.protectedTools !== undefined && !Array.isArray(strategies.pruneTool.protectedTools)) {
+ errors.push({ key: 'strategies.pruneTool.protectedTools', expected: 'string[]', actual: typeof strategies.pruneTool.protectedTools })
+ }
+ if (strategies.pruneTool.nudge) {
+ if (strategies.pruneTool.nudge.enabled !== undefined && typeof strategies.pruneTool.nudge.enabled !== 'boolean') {
+ errors.push({ key: 'strategies.pruneTool.nudge.enabled', expected: 'boolean', actual: typeof strategies.pruneTool.nudge.enabled })
+ }
+ if (strategies.pruneTool.nudge.frequency !== undefined && typeof strategies.pruneTool.nudge.frequency !== 'number') {
+ errors.push({ key: 'strategies.pruneTool.nudge.frequency', expected: 'number', actual: typeof strategies.pruneTool.nudge.frequency })
+ }
+ }
+ }
+ }
+
+ return errors
+}
+
+// Show validation warnings for a config file
+function showConfigValidationWarnings(
+ ctx: PluginInput,
+ configPath: string,
+ configData: Record<string, any>,
+ isProject: boolean
+): void {
+ const invalidKeys = getInvalidConfigKeys(configData)
+ const typeErrors = validateConfigTypes(configData)
+
+ if (invalidKeys.length === 0 && typeErrors.length === 0) {
+ return
+ }
+
+ const configType = isProject ? 'project config' : 'config'
+ const messages: string[] = []
+
+ if (invalidKeys.length > 0) {
+ const keyList = invalidKeys.slice(0, 3).join(', ')
+ const suffix = invalidKeys.length > 3 ? ` (+${invalidKeys.length - 3} more)` : ''
+ messages.push(`Unknown keys: ${keyList}${suffix}`)
+ }
+
+ if (typeErrors.length > 0) {
+ for (const err of typeErrors.slice(0, 2)) {
+ messages.push(`${err.key}: expected ${err.expected}, got ${err.actual}`)
+ }
+ if (typeErrors.length > 2) {
+ messages.push(`(+${typeErrors.length - 2} more type errors)`)
+ }
+ }
+
+ setTimeout(() => {
+ try {
+ ctx.client.tui.showToast({
+ body: {
+ title: `DCP: Invalid ${configType}`,
+ message: `${configPath}\n${messages.join('\n')}`,
+ variant: "warning",
+ duration: 7000
+ }
+ })
+ } catch {}
+ }, 7000)
}
const defaultConfig: PluginConfig = {
enabled: true,
debug: false,
- protectedTools: ['task', 'todowrite', 'todoread', 'prune', 'batch', 'write', 'edit'],
- showModelErrorToasts: true,
showUpdateToasts: true,
- strictModelSelection: false,
- pruning_summary: 'detailed',
- nudge_freq: 10,
+ pruningSummary: 'detailed',
strategies: {
- onIdle: ['ai-analysis'],
- onTool: ['ai-analysis']
+ deduplication: {
+ enabled: true,
+ protectedTools: [...DEFAULT_PROTECTED_TOOLS]
+ },
+ pruneTool: {
+ enabled: true,
+ protectedTools: [...DEFAULT_PROTECTED_TOOLS],
+ nudge: {
+ enabled: true,
+ frequency: 10
+ }
+ },
+ onIdle: {
+ enabled: false,
+ showModelErrorToasts: true,
+ strictModelSelection: false,
+ protectedTools: [...DEFAULT_PROTECTED_TOOLS]
+ }
}
}
-const VALID_CONFIG_KEYS = new Set([
- 'enabled',
- 'debug',
- 'protectedTools',
- 'model',
- 'showModelErrorToasts',
- 'showUpdateToasts',
- 'strictModelSelection',
- 'pruning_summary',
- 'nudge_freq',
- 'strategies'
-])
-
const GLOBAL_CONFIG_DIR = join(homedir(), '.config', 'opencode')
const GLOBAL_CONFIG_PATH_JSONC = join(GLOBAL_CONFIG_DIR, 'dcp.jsonc')
const GLOBAL_CONFIG_PATH_JSON = join(GLOBAL_CONFIG_DIR, 'dcp.json')
@@ -74,14 +259,30 @@ function findOpencodeDir(startDir: string): string | null {
return null
}
-function getConfigPaths(ctx?: PluginInput): { global: string | null, project: string | null } {
+function getConfigPaths(ctx?: PluginInput): { global: string | null, configDir: string | null, project: string | null} {
+
+ // Global: ~/.config/opencode/dcp.jsonc|json
let globalPath: string | null = null
if (existsSync(GLOBAL_CONFIG_PATH_JSONC)) {
globalPath = GLOBAL_CONFIG_PATH_JSONC
} else if (existsSync(GLOBAL_CONFIG_PATH_JSON)) {
globalPath = GLOBAL_CONFIG_PATH_JSON
}
-
+
+ // Custom config directory: $OPENCODE_CONFIG_DIR/dcp.jsonc|json
+ let configDirPath: string | null = null
+ const opencodeConfigDir = process.env.OPENCODE_CONFIG_DIR
+ if (opencodeConfigDir) {
+ const configJsonc = join(opencodeConfigDir, 'dcp.jsonc')
+ const configJson = join(opencodeConfigDir, 'dcp.json')
+ if (existsSync(configJsonc)) {
+ configDirPath = configJsonc
+ } else if (existsSync(configJson)) {
+ configDirPath = configJson
+ }
+ }
+
+ // Project: <project>/.opencode/dcp.jsonc|json
let projectPath: string | null = null
if (ctx?.directory) {
const opencodeDir = findOpencodeDir(ctx.directory)
@@ -96,7 +297,7 @@ function getConfigPaths(ctx?: PluginInput): { global: string | null, project: st
}
}
- return { global: globalPath, project: projectPath }
+ return { global: globalPath, configDir: configDirPath, project: projectPath }
}
function createDefaultConfig(): void {
@@ -109,63 +310,69 @@ function createDefaultConfig(): void {
"enabled": true,
// Enable debug logging to ~/.config/opencode/logs/dcp/
"debug": false,
- // Override model for analysis (format: "provider/model", e.g. "anthropic/claude-haiku-4-5")
- // "model": "anthropic/claude-haiku-4-5",
- // Show toast notifications when model selection fails
- "showModelErrorToasts": true,
// Show toast notifications when a new version is available
"showUpdateToasts": true,
- // Only run AI analysis with session model or configured model (disables fallback models)
- "strictModelSelection": false,
- // AI analysis strategies (deduplication runs automatically on every request)
- "strategies": {
- // Strategies to run when session goes idle
- "onIdle": ["ai-analysis"],
- // Strategies to run when AI calls prune tool
- "onTool": ["ai-analysis"]
- },
// Summary display: "off", "minimal", or "detailed"
- "pruning_summary": "detailed",
- // How often to nudge the AI to prune (every N tool results, 0 = disabled)
- "nudge_freq": 10
- // Additional tools to protect from pruning
- // "protectedTools": ["bash"]
+ "pruningSummary": "detailed",
+ // Strategies for pruning tokens from chat history
+ "strategies": {
+ // Remove duplicate tool calls (same tool with same arguments)
+ "deduplication": {
+ "enabled": true,
+ // Additional tools to protect from pruning
+ "protectedTools": []
+ },
+ // Exposes a prune tool to your LLM to call when it determines pruning is necessary
+ "pruneTool": {
+ "enabled": true,
+ // Additional tools to protect from pruning
+ "protectedTools": [],
+ // Nudge the LLM to use the prune tool (every N tool results)
+ "nudge": {
+ "enabled": true,
+ "frequency": 10
+ }
+ },
+ // (Legacy) Run an LLM to analyze what tool calls are no longer relevant on idle
+ "onIdle": {
+ "enabled": false,
+ // Override model for analysis (format: "provider/model")
+ // "model": "anthropic/claude-haiku-4-5",
+ // Show toast notifications when model selection fails
+ "showModelErrorToasts": true,
+ // When true, fallback models are not permitted
+ "strictModelSelection": false,
+ // Additional tools to protect from pruning
+ "protectedTools": []
+ }
+ }
}
`
-
writeFileSync(GLOBAL_CONFIG_PATH_JSONC, configContent, 'utf-8')
}
-function loadConfigFile(configPath: string): Record<string, any> | null {
- try {
- const fileContent = readFileSync(configPath, 'utf-8')
- return parse(fileContent)
- } catch (error: any) {
- return null
- }
+interface ConfigLoadResult {
+ data: Record<string, any> | null
+ parseError?: string
}
-function getInvalidKeys(config: Record): string[] {
- const invalidKeys: string[] = []
- for (const key of Object.keys(config)) {
- if (!VALID_CONFIG_KEYS.has(key)) {
- invalidKeys.push(key)
- }
+function loadConfigFile(configPath: string): ConfigLoadResult {
+ let fileContent: string
+ try {
+ fileContent = readFileSync(configPath, 'utf-8')
+ } catch {
+ // File doesn't exist or can't be read - not a parse error
+ return { data: null }
}
- return invalidKeys
-}
-function backupAndResetConfig(configPath: string, logger: Logger): string | null {
try {
- const backupPath = configPath + '.bak'
- copyFileSync(configPath, backupPath)
- logger.info('config', 'Created config backup', { backup: backupPath })
- createDefaultConfig()
- logger.info('config', 'Created fresh default config', { path: GLOBAL_CONFIG_PATH_JSONC })
- return backupPath
+ const parsed = parse(fileContent)
+ if (parsed === undefined || parsed === null) {
+ return { data: null, parseError: 'Config file is empty or invalid' }
+ }
+ return { data: parsed }
} catch (error: any) {
- logger.error('config', 'Failed to backup/reset config', { error: error.message })
- return null
+ return { data: null, parseError: error.message || 'Failed to parse config' }
}
}
@@ -174,80 +381,160 @@ function mergeStrategies(
override?: Partial<PluginConfig['strategies']>
): PluginConfig['strategies'] {
if (!override) return base
+
+ return {
+ deduplication: {
+ enabled: override.deduplication?.enabled ?? base.deduplication.enabled,
+ protectedTools: [
+ ...new Set([
+ ...base.deduplication.protectedTools,
+ ...(override.deduplication?.protectedTools ?? [])
+ ])
+ ]
+ },
+ onIdle: {
+ enabled: override.onIdle?.enabled ?? base.onIdle.enabled,
+ model: override.onIdle?.model ?? base.onIdle.model,
+ showModelErrorToasts: override.onIdle?.showModelErrorToasts ?? base.onIdle.showModelErrorToasts,
+ strictModelSelection: override.onIdle?.strictModelSelection ?? base.onIdle.strictModelSelection,
+ protectedTools: [
+ ...new Set([
+ ...base.onIdle.protectedTools,
+ ...(override.onIdle?.protectedTools ?? [])
+ ])
+ ]
+ },
+ pruneTool: {
+ enabled: override.pruneTool?.enabled ?? base.pruneTool.enabled,
+ protectedTools: [
+ ...new Set([
+ ...base.pruneTool.protectedTools,
+ ...(override.pruneTool?.protectedTools ?? [])
+ ])
+ ],
+ nudge: {
+ enabled: override.pruneTool?.nudge?.enabled ?? base.pruneTool.nudge.enabled,
+ frequency: override.pruneTool?.nudge?.frequency ?? base.pruneTool.nudge.frequency
+ }
+ }
+ }
+}
+
+function deepCloneConfig(config: PluginConfig): PluginConfig {
return {
- onIdle: override.onIdle ?? base.onIdle,
- onTool: override.onTool ?? base.onTool
+ ...config,
+ strategies: {
+ deduplication: {
+ ...config.strategies.deduplication,
+ protectedTools: [...config.strategies.deduplication.protectedTools]
+ },
+ onIdle: {
+ ...config.strategies.onIdle,
+ protectedTools: [...config.strategies.onIdle.protectedTools]
+ },
+ pruneTool: {
+ ...config.strategies.pruneTool,
+ protectedTools: [...config.strategies.pruneTool.protectedTools],
+ nudge: { ...config.strategies.pruneTool.nudge }
+ }
+ }
}
}
-export function getConfig(ctx?: PluginInput): ConfigResult {
- let config = { ...defaultConfig, protectedTools: [...defaultConfig.protectedTools] }
+
+export function getConfig(ctx: PluginInput): PluginConfig {
+ let config = deepCloneConfig(defaultConfig)
const configPaths = getConfigPaths(ctx)
- const logger = new Logger(true)
- const migrations: string[] = []
+ // Load and merge global config
if (configPaths.global) {
- const globalConfig = loadConfigFile(configPaths.global)
- if (globalConfig) {
- const invalidKeys = getInvalidKeys(globalConfig)
-
- if (invalidKeys.length > 0) {
- logger.info('config', 'Found invalid config keys', { keys: invalidKeys })
- const backupPath = backupAndResetConfig(configPaths.global, logger)
- if (backupPath) {
- migrations.push(`Old config backed up to ${backupPath}`)
- }
- } else {
- config = {
- enabled: globalConfig.enabled ?? config.enabled,
- debug: globalConfig.debug ?? config.debug,
- protectedTools: [...new Set([...config.protectedTools, ...(globalConfig.protectedTools ?? [])])],
- model: globalConfig.model ?? config.model,
- showModelErrorToasts: globalConfig.showModelErrorToasts ?? config.showModelErrorToasts,
- showUpdateToasts: globalConfig.showUpdateToasts ?? config.showUpdateToasts,
- strictModelSelection: globalConfig.strictModelSelection ?? config.strictModelSelection,
- strategies: mergeStrategies(config.strategies, globalConfig.strategies as any),
- pruning_summary: globalConfig.pruning_summary ?? config.pruning_summary,
- nudge_freq: globalConfig.nudge_freq ?? config.nudge_freq
- }
- logger.info('config', 'Loaded global config', { path: configPaths.global })
+ const result = loadConfigFile(configPaths.global)
+ if (result.parseError) {
+ setTimeout(async () => {
+ try {
+ ctx.client.tui.showToast({
+ body: {
+ title: "DCP: Invalid config",
+ message: `${configPaths.global}\n${result.parseError}\nUsing default values`,
+ variant: "warning",
+ duration: 7000
+ }
+ })
+ } catch {}
+ }, 7000)
+ } else if (result.data) {
+ // Validate config keys and types
+ showConfigValidationWarnings(ctx, configPaths.global, result.data, false)
+ config = {
+ enabled: result.data.enabled ?? config.enabled,
+ debug: result.data.debug ?? config.debug,
+ showUpdateToasts: result.data.showUpdateToasts ?? config.showUpdateToasts,
+ pruningSummary: result.data.pruningSummary ?? config.pruningSummary,
+ strategies: mergeStrategies(config.strategies, result.data.strategies as any)
}
}
} else {
+ // No config exists, create default
createDefaultConfig()
- logger.info('config', 'Created default global config', { path: GLOBAL_CONFIG_PATH_JSONC })
}
+ // Load and merge $OPENCODE_CONFIG_DIR/dcp.jsonc|json (overrides global)
+ if (configPaths.configDir) {
+ const result = loadConfigFile(configPaths.configDir)
+ if (result.parseError) {
+ setTimeout(async () => {
+ try {
+ ctx.client.tui.showToast({
+ body: {
+ title: "DCP: Invalid configDir config",
+ message: `${configPaths.configDir}\n${result.parseError}\nUsing global/default values`,
+ variant: "warning",
+ duration: 7000
+ }
+ })
+ } catch {}
+ }, 7000)
+ } else if (result.data) {
+ // Validate config keys and types
+ showConfigValidationWarnings(ctx, configPaths.configDir, result.data, true)
+ config = {
+ enabled: result.data.enabled ?? config.enabled,
+ debug: result.data.debug ?? config.debug,
+ showUpdateToasts: result.data.showUpdateToasts ?? config.showUpdateToasts,
+ pruningSummary: result.data.pruningSummary ?? config.pruningSummary,
+ strategies: mergeStrategies(config.strategies, result.data.strategies as any)
+ }
+ }
+ }
+
+ // Load and merge project config (overrides global)
if (configPaths.project) {
- const projectConfig = loadConfigFile(configPaths.project)
- if (projectConfig) {
- const invalidKeys = getInvalidKeys(projectConfig)
-
- if (invalidKeys.length > 0) {
- logger.warn('config', 'Project config has invalid keys (ignored)', {
- path: configPaths.project,
- keys: invalidKeys
- })
- migrations.push(`Project config has invalid keys: ${invalidKeys.join(', ')}`)
- } else {
- config = {
- enabled: projectConfig.enabled ?? config.enabled,
- debug: projectConfig.debug ?? config.debug,
- protectedTools: [...new Set([...config.protectedTools, ...(projectConfig.protectedTools ?? [])])],
- model: projectConfig.model ?? config.model,
- showModelErrorToasts: projectConfig.showModelErrorToasts ?? config.showModelErrorToasts,
- showUpdateToasts: projectConfig.showUpdateToasts ?? config.showUpdateToasts,
- strictModelSelection: projectConfig.strictModelSelection ?? config.strictModelSelection,
- strategies: mergeStrategies(config.strategies, projectConfig.strategies as any),
- pruning_summary: projectConfig.pruning_summary ?? config.pruning_summary,
- nudge_freq: projectConfig.nudge_freq ?? config.nudge_freq
- }
- logger.info('config', 'Loaded project config (overrides global)', { path: configPaths.project })
+ const result = loadConfigFile(configPaths.project)
+ if (result.parseError) {
+ setTimeout(async () => {
+ try {
+ ctx.client.tui.showToast({
+ body: {
+ title: "DCP: Invalid project config",
+ message: `${configPaths.project}\n${result.parseError}\nUsing global/default values`,
+ variant: "warning",
+ duration: 7000
+ }
+ })
+ } catch {}
+ }, 7000)
+ } else if (result.data) {
+ // Validate config keys and types
+ showConfigValidationWarnings(ctx, configPaths.project, result.data, true)
+ config = {
+ enabled: result.data.enabled ?? config.enabled,
+ debug: result.data.debug ?? config.debug,
+ showUpdateToasts: result.data.showUpdateToasts ?? config.showUpdateToasts,
+ pruningSummary: result.data.pruningSummary ?? config.pruningSummary,
+ strategies: mergeStrategies(config.strategies, result.data.strategies as any)
}
}
- } else if (ctx?.directory) {
- logger.debug('config', 'No project config found', { searchedFrom: ctx.directory })
}
- return { config, migrations }
+ return config
}
diff --git a/lib/core/janitor.ts b/lib/core/janitor.ts
deleted file mode 100644
index 27bbd85..0000000
--- a/lib/core/janitor.ts
+++ /dev/null
@@ -1,447 +0,0 @@
-import { z } from "zod"
-import type { Logger } from "../logger"
-import type { PruningStrategy } from "../config"
-import type { PluginState } from "../state"
-import type { ToolMetadata, SessionStats, GCStats, PruningResult } from "../fetch-wrapper/types"
-import { findCurrentAgent } from "../hooks"
-import { buildAnalysisPrompt } from "./prompt"
-import { selectModel, extractModelFromSession } from "../model-selector"
-import { estimateTokensBatch, formatTokenCount } from "../tokenizer"
-import { saveSessionState } from "../state/persistence"
-import { ensureSessionRestored } from "../state"
-import {
- sendUnifiedNotification,
- type NotificationContext
-} from "../ui/notification"
-
-export type { SessionStats, GCStats, PruningResult }
-
-export interface PruningOptions {
- reason?: string
- trigger: 'idle' | 'tool'
-}
-
-export interface JanitorConfig {
- protectedTools: string[]
- model?: string
- showModelErrorToasts: boolean
- strictModelSelection: boolean
- pruningSummary: "off" | "minimal" | "detailed"
- workingDirectory?: string
-}
-
-export interface JanitorContext {
- client: any
- state: PluginState
- logger: Logger
- config: JanitorConfig
- notificationCtx: NotificationContext
-}
-
-// ============================================================================
-// Context factory
-// ============================================================================
-
-export function createJanitorContext(
- client: any,
- state: PluginState,
- logger: Logger,
- config: JanitorConfig
-): JanitorContext {
- return {
- client,
- state,
- logger,
- config,
- notificationCtx: {
- client,
- logger,
- config: {
- pruningSummary: config.pruningSummary,
- workingDirectory: config.workingDirectory
- }
- }
- }
-}
-
-// ============================================================================
-// Public API
-// ============================================================================
-
-/**
- * Run pruning on idle trigger.
- * Note: onTool pruning is now handled directly by pruning-tool.ts
- */
-export async function runOnIdle(
- ctx: JanitorContext,
- sessionID: string,
- strategies: PruningStrategy[]
-): Promise {
- return runWithStrategies(ctx, sessionID, strategies, { trigger: 'idle' })
-}
-
-// ============================================================================
-// Core pruning logic (for onIdle only)
-// ============================================================================
-
-async function runWithStrategies(
- ctx: JanitorContext,
- sessionID: string,
- strategies: PruningStrategy[],
- options: PruningOptions
-): Promise {
- const { client, state, logger, config } = ctx
-
- try {
- if (strategies.length === 0) {
- return null
- }
-
- // Ensure persisted state is restored before processing
- await ensureSessionRestored(state, sessionID, logger)
-
- const [sessionInfoResponse, messagesResponse] = await Promise.all([
- client.session.get({ path: { id: sessionID } }),
- client.session.messages({ path: { id: sessionID }, query: { limit: 500 } })
- ])
-
- const sessionInfo = sessionInfoResponse.data
- const messages = messagesResponse.data || messagesResponse
-
- if (!messages || messages.length < 3) {
- return null
- }
-
- const currentAgent = findCurrentAgent(messages)
- const { toolCallIds, toolOutputs, toolMetadata } = parseMessages(messages, state.toolParameters)
-
- const alreadyPrunedIds = state.prunedIds.get(sessionID) ?? []
- // Normalized set for filtering to avoid re-processing already pruned tools with different casing
- const alreadyPrunedLower = new Set(alreadyPrunedIds.map(id => id.toLowerCase()))
- const unprunedToolCallIds = toolCallIds.filter(id => !alreadyPrunedLower.has(id))
-
- const gcPending = state.gcPending.get(sessionID) ?? null
-
- if (unprunedToolCallIds.length === 0 && !gcPending) {
- return null
- }
-
- const candidateCount = unprunedToolCallIds.filter(id => {
- const metadata = toolMetadata.get(id)
- return !metadata || !config.protectedTools.includes(metadata.tool)
- }).length
-
- // PHASE 1: LLM ANALYSIS
- let llmPrunedIds: string[] = []
-
- if (strategies.includes('ai-analysis') && unprunedToolCallIds.length > 0) {
- llmPrunedIds = await runLlmAnalysis(
- ctx,
- sessionID,
- sessionInfo,
- messages,
- unprunedToolCallIds,
- alreadyPrunedIds,
- toolMetadata,
- options
- )
- }
-
- const finalNewlyPrunedIds = llmPrunedIds.filter(id => !alreadyPrunedLower.has(id.toLowerCase()))
-
- if (finalNewlyPrunedIds.length === 0 && !gcPending) {
- return null
- }
-
- // Calculate stats & send notification
- const tokensSaved = await calculateTokensSaved(finalNewlyPrunedIds, toolOutputs)
-
- const currentStats = state.stats.get(sessionID) ?? {
- totalToolsPruned: 0,
- totalTokensSaved: 0,
- totalGCTokens: 0,
- totalGCTools: 0
- }
-
- const sessionStats: SessionStats = {
- totalToolsPruned: currentStats.totalToolsPruned + finalNewlyPrunedIds.length,
- totalTokensSaved: currentStats.totalTokensSaved + tokensSaved,
- totalGCTokens: currentStats.totalGCTokens + (gcPending?.tokensCollected ?? 0),
- totalGCTools: currentStats.totalGCTools + (gcPending?.toolsDeduped ?? 0)
- }
- state.stats.set(sessionID, sessionStats)
-
- const notificationSent = await sendUnifiedNotification(
- ctx.notificationCtx,
- sessionID,
- {
- aiPrunedCount: llmPrunedIds.length,
- aiTokensSaved: tokensSaved,
- aiPrunedIds: llmPrunedIds,
- toolMetadata,
- gcPending,
- sessionStats
- },
- currentAgent
- )
-
- if (gcPending) {
- state.gcPending.delete(sessionID)
- }
-
- if (finalNewlyPrunedIds.length === 0) {
- if (notificationSent) {
- logger.info("janitor", `GC-only notification: ~${formatTokenCount(gcPending?.tokensCollected ?? 0)} tokens from ${gcPending?.toolsDeduped ?? 0} deduped tools`, {
- trigger: options.trigger
- })
- }
- return null
- }
-
- // State update (only if something was pruned)
- const allPrunedIds = [...new Set([...alreadyPrunedIds, ...llmPrunedIds])]
- state.prunedIds.set(sessionID, allPrunedIds)
-
- const sessionName = sessionInfo?.title
- saveSessionState(sessionID, new Set(allPrunedIds), sessionStats, logger, sessionName).catch(err => {
- logger.error("janitor", "Failed to persist state", { error: err.message })
- })
-
- const prunedCount = finalNewlyPrunedIds.length
- const keptCount = candidateCount - prunedCount
-
- const logMeta: Record = { trigger: options.trigger }
- if (options.reason) {
- logMeta.reason = options.reason
- }
- if (gcPending) {
- logMeta.gcTokens = gcPending.tokensCollected
- logMeta.gcTools = gcPending.toolsDeduped
- }
-
- logger.info("janitor", `Pruned ${prunedCount}/${candidateCount} tools, ${keptCount} kept (~${formatTokenCount(tokensSaved)} tokens)`, logMeta)
-
- return {
- prunedCount: finalNewlyPrunedIds.length,
- tokensSaved,
- llmPrunedIds,
- toolMetadata,
- sessionStats
- }
-
- } catch (error: any) {
- ctx.logger.error("janitor", "Analysis failed", {
- error: error.message,
- trigger: options.trigger
- })
- return null
- }
-}
-
-// ============================================================================
-// LLM Analysis
-// ============================================================================
-
-async function runLlmAnalysis(
- ctx: JanitorContext,
- sessionID: string,
- sessionInfo: any,
- messages: any[],
- unprunedToolCallIds: string[],
- alreadyPrunedIds: string[],
- toolMetadata: Map,
- options: PruningOptions
-): Promise {
- const { client, state, logger, config } = ctx
-
- const protectedToolCallIds: string[] = []
- const prunableToolCallIds = unprunedToolCallIds.filter(id => {
- const metadata = toolMetadata.get(id)
- if (metadata && config.protectedTools.includes(metadata.tool)) {
- protectedToolCallIds.push(id)
- return false
- }
- return true
- })
-
- if (prunableToolCallIds.length === 0) {
- return []
- }
-
- const cachedModelInfo = state.model.get(sessionID)
- const sessionModelInfo = extractModelFromSession(sessionInfo, logger)
- const currentModelInfo = cachedModelInfo || sessionModelInfo
-
- const modelSelection = await selectModel(currentModelInfo, logger, config.model, config.workingDirectory)
-
- logger.info("janitor", `Model: ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`, {
- source: modelSelection.source
- })
-
- if (modelSelection.failedModel && config.showModelErrorToasts) {
- const skipAi = modelSelection.source === 'fallback' && config.strictModelSelection
- try {
- await client.tui.showToast({
- body: {
- title: skipAi ? "DCP: AI analysis skipped" : "DCP: Model fallback",
- message: skipAi
- ? `${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nAI analysis skipped (strictModelSelection enabled)`
- : `${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nUsing ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`,
- variant: "info",
- duration: 5000
- }
- })
- } catch (toastError: any) {
- // Ignore toast errors
- }
- }
-
- if (modelSelection.source === 'fallback' && config.strictModelSelection) {
- logger.info("janitor", "Skipping AI analysis (fallback model, strictModelSelection enabled)")
- return []
- }
-
- const { generateObject } = await import('ai')
-
- const sanitizedMessages = replacePrunedToolOutputs(messages, alreadyPrunedIds)
-
- const analysisPrompt = buildAnalysisPrompt(
- prunableToolCallIds,
- sanitizedMessages,
- alreadyPrunedIds,
- protectedToolCallIds,
- options.reason
- )
-
- await logger.saveWrappedContext(
- "janitor-shadow",
- [{ role: "user", content: analysisPrompt }],
- {
- sessionID,
- modelProvider: modelSelection.modelInfo.providerID,
- modelID: modelSelection.modelInfo.modelID,
- candidateToolCount: prunableToolCallIds.length,
- alreadyPrunedCount: alreadyPrunedIds.length,
- protectedToolCount: protectedToolCallIds.length,
- trigger: options.trigger,
- reason: options.reason
- }
- )
-
- const result = await generateObject({
- model: modelSelection.model,
- schema: z.object({
- pruned_tool_call_ids: z.array(z.string()),
- reasoning: z.string(),
- }),
- prompt: analysisPrompt
- })
-
- const rawLlmPrunedIds = result.object.pruned_tool_call_ids
- const llmPrunedIds = rawLlmPrunedIds.filter(id =>
- prunableToolCallIds.includes(id.toLowerCase())
- )
-
- if (llmPrunedIds.length > 0) {
- const reasoning = result.object.reasoning.replace(/\n+/g, ' ').replace(/\s+/g, ' ').trim()
- logger.info("janitor", `LLM reasoning: ${reasoning.substring(0, 200)}${reasoning.length > 200 ? '...' : ''}`)
- }
-
- return llmPrunedIds
-}
-
-function replacePrunedToolOutputs(messages: any[], prunedIds: string[]): any[] {
- if (prunedIds.length === 0) return messages
-
- const prunedIdsSet = new Set(prunedIds.map(id => id.toLowerCase()))
-
- return messages.map(msg => {
- if (!msg.parts) return msg
-
- return {
- ...msg,
- parts: msg.parts.map((part: any) => {
- if (part.type === 'tool' &&
- part.callID &&
- prunedIdsSet.has(part.callID.toLowerCase()) &&
- part.state?.output) {
- return {
- ...part,
- state: {
- ...part.state,
- output: '[Output removed to save context - information superseded or no longer needed]'
- }
- }
- }
- return part
- })
- }
- })
-}
-
-// ============================================================================
-// Message parsing
-// ============================================================================
-
-interface ParsedMessages {
- toolCallIds: string[]
- toolOutputs: Map
- toolMetadata: Map
-}
-
-export function parseMessages(
- messages: any[],
- toolParametersCache: Map
-): ParsedMessages {
- const toolCallIds: string[] = []
- const toolOutputs = new Map()
- const toolMetadata = new Map()
-
- for (const msg of messages) {
- if (msg.parts) {
- for (const part of msg.parts) {
- if (part.type === "tool" && part.callID) {
- const normalizedId = part.callID.toLowerCase()
- toolCallIds.push(normalizedId)
-
- const cachedData = toolParametersCache.get(part.callID) || toolParametersCache.get(normalizedId)
- const parameters = cachedData?.parameters ?? part.state?.input ?? part.parameters
-
- toolMetadata.set(normalizedId, {
- tool: part.tool,
- parameters: parameters
- })
-
- if (part.state?.status === "completed" && part.state.output) {
- toolOutputs.set(normalizedId, part.state.output)
- }
- }
- }
- }
- }
-
- return { toolCallIds, toolOutputs, toolMetadata }
-}
-
-// ============================================================================
-// Helpers
-// ============================================================================
-
-async function calculateTokensSaved(prunedIds: string[], toolOutputs: Map): Promise {
- const outputsToTokenize: string[] = []
-
- for (const prunedId of prunedIds) {
- const normalizedId = prunedId.toLowerCase()
- const output = toolOutputs.get(normalizedId)
- if (output) {
- outputsToTokenize.push(output)
- }
- }
-
- if (outputsToTokenize.length > 0) {
- const tokenCounts = await estimateTokensBatch(outputsToTokenize)
- return tokenCounts.reduce((sum, count) => sum + count, 0)
- }
-
- return 0
-}
diff --git a/lib/core/strategies/deduplication.ts b/lib/core/strategies/deduplication.ts
deleted file mode 100644
index ace7b3f..0000000
--- a/lib/core/strategies/deduplication.ts
+++ /dev/null
@@ -1,89 +0,0 @@
-import { extractParameterKey } from "../../ui/display-utils"
-import type { PruningStrategy, StrategyResult, ToolMetadata } from "./index"
-
-/**
- * Deduplication strategy - prunes older tool calls that have identical
- * tool name and parameters, keeping only the most recent occurrence.
- */
-export const deduplicationStrategy: PruningStrategy = {
- name: "deduplication",
-
- detect(
- toolMetadata: Map,
- unprunedIds: string[],
- protectedTools: string[]
- ): StrategyResult {
- const signatureMap = new Map()
-
- const deduplicatableIds = unprunedIds.filter(id => {
- const metadata = toolMetadata.get(id)
- const protectedToolsLower = protectedTools.map(t => t.toLowerCase())
- return !metadata || !protectedToolsLower.includes(metadata.tool.toLowerCase())
- })
-
- for (const id of deduplicatableIds) {
- const metadata = toolMetadata.get(id)
- if (!metadata) continue
-
- const signature = createToolSignature(metadata.tool, metadata.parameters)
- if (!signatureMap.has(signature)) {
- signatureMap.set(signature, [])
- }
- signatureMap.get(signature)!.push(id)
- }
-
- const prunedIds: string[] = []
- const details = new Map()
-
- for (const [signature, ids] of signatureMap.entries()) {
- if (ids.length > 1) {
- const metadata = toolMetadata.get(ids[0])!
- const idsToRemove = ids.slice(0, -1) // All except last
- prunedIds.push(...idsToRemove)
-
- details.set(signature, {
- toolName: metadata.tool,
- parameterKey: extractParameterKey(metadata),
- reason: `duplicate (${ids.length} occurrences, kept most recent)`,
- duplicateCount: ids.length,
- prunedIds: idsToRemove,
- keptId: ids[ids.length - 1]
- })
- }
- }
-
- return { prunedIds, details }
- }
-}
-
-function createToolSignature(tool: string, parameters?: any): string {
- if (!parameters) return tool
-
- const normalized = normalizeParameters(parameters)
- const sorted = sortObjectKeys(normalized)
- return `${tool}::${JSON.stringify(sorted)}`
-}
-
-function normalizeParameters(params: any): any {
- if (typeof params !== 'object' || params === null) return params
- if (Array.isArray(params)) return params
-
- const normalized: any = {}
- for (const [key, value] of Object.entries(params)) {
- if (value !== undefined && value !== null) {
- normalized[key] = value
- }
- }
- return normalized
-}
-
-function sortObjectKeys(obj: any): any {
- if (typeof obj !== 'object' || obj === null) return obj
- if (Array.isArray(obj)) return obj.map(sortObjectKeys)
-
- const sorted: any = {}
- for (const key of Object.keys(obj).sort()) {
- sorted[key] = sortObjectKeys(obj[key])
- }
- return sorted
-}
diff --git a/lib/core/strategies/index.ts b/lib/core/strategies/index.ts
deleted file mode 100644
index b4eb8af..0000000
--- a/lib/core/strategies/index.ts
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Strategy runner - executes all enabled pruning strategies and collects results.
- */
-
-import { deduplicationStrategy } from "./deduplication"
-import type { ToolMetadata } from "../../fetch-wrapper/types"
-
-export type { ToolMetadata }
-
-/**
- * Common interface for rule-based pruning strategies.
- * Each strategy analyzes tool metadata and returns IDs that should be pruned.
- */
-
-export interface StrategyResult {
- /** Tool call IDs that should be pruned */
- prunedIds: string[]
- /** Optional details about what was pruned and why */
- details?: Map
-}
-
-export interface StrategyDetail {
- toolName: string
- parameterKey: string
- reason: string
- /** Additional info specific to the strategy */
- [key: string]: any
-}
-
-export interface PruningStrategy {
- /** Unique identifier for this strategy */
- name: string
-
- /**
- * Analyze tool metadata and determine which tool calls should be pruned.
- *
- * @param toolMetadata - Map of tool call ID to metadata (tool name + parameters)
- * @param unprunedIds - Tool call IDs that haven't been pruned yet (chronological order)
- * @param protectedTools - Tool names that should never be pruned
- * @returns IDs to prune and optional details
- */
- detect(
- toolMetadata: Map,
- unprunedIds: string[],
- protectedTools: string[]
- ): StrategyResult
-}
-
-/** All available strategies */
-const ALL_STRATEGIES: PruningStrategy[] = [
- deduplicationStrategy,
- // Future strategies will be added here:
- // errorPruningStrategy,
- // writeReadStrategy,
- // partialReadStrategy,
-]
-
-export interface RunStrategiesResult {
- /** All tool IDs that should be pruned (deduplicated) */
- prunedIds: string[]
- /** Results keyed by strategy name */
- byStrategy: Map
-}
-
-/**
- * Run all enabled strategies and collect pruned IDs.
- *
- * @param toolMetadata - Map of tool call ID to metadata
- * @param unprunedIds - Tool call IDs not yet pruned (chronological order)
- * @param protectedTools - Tool names that should never be pruned
- * @param enabledStrategies - Strategy names to run (defaults to all)
- */
-export function runStrategies(
- toolMetadata: Map,
- unprunedIds: string[],
- protectedTools: string[],
- enabledStrategies?: string[]
-): RunStrategiesResult {
- const byStrategy = new Map()
- const allPrunedIds = new Set()
-
- // Filter to enabled strategies (or all if not specified)
- const strategies = enabledStrategies
- ? ALL_STRATEGIES.filter(s => enabledStrategies.includes(s.name))
- : ALL_STRATEGIES
-
- // Track which IDs are still available for each strategy
- let remainingIds = unprunedIds
-
- for (const strategy of strategies) {
- const result = strategy.detect(toolMetadata, remainingIds, protectedTools)
-
- if (result.prunedIds.length > 0) {
- byStrategy.set(strategy.name, result)
-
- // Add to overall pruned set
- for (const id of result.prunedIds) {
- allPrunedIds.add(id)
- }
-
- // Remove pruned IDs from remaining for next strategy
- const prunedSet = new Set(result.prunedIds.map(id => id.toLowerCase()))
- remainingIds = remainingIds.filter(id => !prunedSet.has(id.toLowerCase()))
- }
- }
-
- return {
- prunedIds: Array.from(allPrunedIds),
- byStrategy
- }
-}
diff --git a/lib/fetch-wrapper/formats/anthropic.ts b/lib/fetch-wrapper/formats/anthropic.ts
deleted file mode 100644
index a409e3e..0000000
--- a/lib/fetch-wrapper/formats/anthropic.ts
+++ /dev/null
@@ -1,111 +0,0 @@
-import type { FormatDescriptor, ToolOutput } from "../types"
-import type { PluginState } from "../../state"
-
-/**
- * Anthropic Messages API format with top-level `system` array.
- * Tool calls: `tool_use` blocks in assistant content with `id`
- * Tool results: `tool_result` blocks in user content with `tool_use_id`
- */
-export const anthropicFormat: FormatDescriptor = {
- name: 'anthropic',
-
- detect(body: any): boolean {
- return (
- body.system !== undefined &&
- Array.isArray(body.messages)
- )
- },
-
- getDataArray(body: any): any[] | undefined {
- return body.messages
- },
-
- injectSystemMessage(body: any, injection: string): boolean {
- if (!injection) return false
-
- if (typeof body.system === 'string') {
- body.system = [{ type: 'text', text: body.system }]
- } else if (!Array.isArray(body.system)) {
- body.system = []
- }
-
- body.system.push({ type: 'text', text: injection })
- return true
- },
-
- appendUserMessage(body: any, injection: string): boolean {
- if (!injection || !body.messages) return false
- body.messages.push({ role: 'user', content: [{ type: 'text', text: injection }] })
- return true
- },
-
- extractToolOutputs(data: any[], state: PluginState): ToolOutput[] {
- const outputs: ToolOutput[] = []
-
- for (const m of data) {
- if (m.role === 'user' && Array.isArray(m.content)) {
- for (const block of m.content) {
- if (block.type === 'tool_result' && block.tool_use_id) {
- const toolUseId = block.tool_use_id.toLowerCase()
- const metadata = state.toolParameters.get(toolUseId)
- outputs.push({
- id: toolUseId,
- toolName: metadata?.tool
- })
- }
- }
- }
- }
-
- return outputs
- },
-
- replaceToolOutput(data: any[], toolId: string, prunedMessage: string, _state: PluginState): boolean {
- const toolIdLower = toolId.toLowerCase()
- let replaced = false
-
- for (let i = 0; i < data.length; i++) {
- const m = data[i]
-
- if (m.role === 'user' && Array.isArray(m.content)) {
- let messageModified = false
- const newContent = m.content.map((block: any) => {
- if (block.type === 'tool_result' && block.tool_use_id?.toLowerCase() === toolIdLower) {
- messageModified = true
- return {
- ...block,
- content: prunedMessage
- }
- }
- return block
- })
- if (messageModified) {
- data[i] = { ...m, content: newContent }
- replaced = true
- }
- }
- }
-
- return replaced
- },
-
- hasToolOutputs(data: any[]): boolean {
- for (const m of data) {
- if (m.role === 'user' && Array.isArray(m.content)) {
- for (const block of m.content) {
- if (block.type === 'tool_result') return true
- }
- }
- }
- return false
- },
-
- getLogMetadata(data: any[], replacedCount: number, inputUrl: string): Record {
- return {
- url: inputUrl,
- replacedCount,
- totalMessages: data.length,
- format: 'anthropic'
- }
- }
-}
diff --git a/lib/fetch-wrapper/formats/bedrock.ts b/lib/fetch-wrapper/formats/bedrock.ts
deleted file mode 100644
index 4f4f7ce..0000000
--- a/lib/fetch-wrapper/formats/bedrock.ts
+++ /dev/null
@@ -1,113 +0,0 @@
-import type { FormatDescriptor, ToolOutput } from "../types"
-import type { PluginState } from "../../state"
-
-/**
- * Bedrock uses top-level `system` array + `inferenceConfig` (distinguishes from OpenAI/Anthropic).
- * Tool calls: `toolUse` blocks in assistant content with `toolUseId`
- * Tool results: `toolResult` blocks in user content with `toolUseId`
- */
-export const bedrockFormat: FormatDescriptor = {
- name: 'bedrock',
-
- detect(body: any): boolean {
- return (
- Array.isArray(body.system) &&
- body.inferenceConfig !== undefined &&
- Array.isArray(body.messages)
- )
- },
-
- getDataArray(body: any): any[] | undefined {
- return body.messages
- },
-
- injectSystemMessage(body: any, injection: string): boolean {
- if (!injection) return false
-
- if (!Array.isArray(body.system)) {
- body.system = []
- }
-
- body.system.push({ text: injection })
- return true
- },
-
- appendUserMessage(body: any, injection: string): boolean {
- if (!injection || !body.messages) return false
- body.messages.push({ role: 'user', content: [{ text: injection }] })
- return true
- },
-
- extractToolOutputs(data: any[], state: PluginState): ToolOutput[] {
- const outputs: ToolOutput[] = []
-
- for (const m of data) {
- if (m.role === 'user' && Array.isArray(m.content)) {
- for (const block of m.content) {
- if (block.toolResult && block.toolResult.toolUseId) {
- const toolUseId = block.toolResult.toolUseId.toLowerCase()
- const metadata = state.toolParameters.get(toolUseId)
- outputs.push({
- id: toolUseId,
- toolName: metadata?.tool
- })
- }
- }
- }
- }
-
- return outputs
- },
-
- replaceToolOutput(data: any[], toolId: string, prunedMessage: string, _state: PluginState): boolean {
- const toolIdLower = toolId.toLowerCase()
- let replaced = false
-
- for (let i = 0; i < data.length; i++) {
- const m = data[i]
-
- if (m.role === 'user' && Array.isArray(m.content)) {
- let messageModified = false
- const newContent = m.content.map((block: any) => {
- if (block.toolResult && block.toolResult.toolUseId?.toLowerCase() === toolIdLower) {
- messageModified = true
- return {
- ...block,
- toolResult: {
- ...block.toolResult,
- content: [{ text: prunedMessage }]
- }
- }
- }
- return block
- })
- if (messageModified) {
- data[i] = { ...m, content: newContent }
- replaced = true
- }
- }
- }
-
- return replaced
- },
-
- hasToolOutputs(data: any[]): boolean {
- for (const m of data) {
- if (m.role === 'user' && Array.isArray(m.content)) {
- for (const block of m.content) {
- if (block.toolResult) return true
- }
- }
- }
- return false
- },
-
- getLogMetadata(data: any[], replacedCount: number, inputUrl: string): Record {
- return {
- url: inputUrl,
- replacedCount,
- totalMessages: data.length,
- format: 'bedrock'
- }
- }
-}
diff --git a/lib/fetch-wrapper/formats/gemini.ts b/lib/fetch-wrapper/formats/gemini.ts
deleted file mode 100644
index 46ec2ad..0000000
--- a/lib/fetch-wrapper/formats/gemini.ts
+++ /dev/null
@@ -1,160 +0,0 @@
-import type { FormatDescriptor, ToolOutput } from "../types"
-import type { PluginState } from "../../state"
-
-/**
- * Gemini doesn't include tool call IDs in its native format.
- * We use position-based correlation via state.googleToolCallMapping which maps
- * "toolName:index" -> "toolCallId" (populated by hooks.ts from message events).
- */
-export const geminiFormat: FormatDescriptor = {
- name: 'gemini',
-
- detect(body: any): boolean {
- return body.contents && Array.isArray(body.contents)
- },
-
- getDataArray(body: any): any[] | undefined {
- return body.contents
- },
-
- injectSystemMessage(body: any, injection: string): boolean {
- if (!injection) return false
-
- if (!body.systemInstruction) {
- body.systemInstruction = { parts: [] }
- }
- if (!Array.isArray(body.systemInstruction.parts)) {
- body.systemInstruction.parts = []
- }
-
- body.systemInstruction.parts.push({ text: injection })
- return true
- },
-
- appendUserMessage(body: any, injection: string): boolean {
- if (!injection || !body.contents) return false
- body.contents.push({ role: 'user', parts: [{ text: injection }] })
- return true
- },
-
- extractToolOutputs(data: any[], state: PluginState): ToolOutput[] {
- const outputs: ToolOutput[] = []
-
- let positionMapping: Map | undefined
- for (const [_sessionId, mapping] of state.googleToolCallMapping) {
- if (mapping && mapping.size > 0) {
- positionMapping = mapping
- break
- }
- }
-
- if (!positionMapping) {
- return outputs
- }
-
- const toolPositionCounters = new Map()
-
- for (const content of data) {
- if (!Array.isArray(content.parts)) continue
-
- for (const part of content.parts) {
- if (part.functionResponse) {
- const funcName = part.functionResponse.name?.toLowerCase()
- if (funcName) {
- const currentIndex = toolPositionCounters.get(funcName) || 0
- toolPositionCounters.set(funcName, currentIndex + 1)
-
- const positionKey = `${funcName}:${currentIndex}`
- const toolCallId = positionMapping.get(positionKey)
-
- if (toolCallId) {
- const metadata = state.toolParameters.get(toolCallId.toLowerCase())
- outputs.push({
- id: toolCallId.toLowerCase(),
- toolName: metadata?.tool
- })
- }
- }
- }
- }
- }
-
- return outputs
- },
-
- replaceToolOutput(data: any[], toolId: string, prunedMessage: string, state: PluginState): boolean {
- let positionMapping: Map | undefined
- for (const [_sessionId, mapping] of state.googleToolCallMapping) {
- if (mapping && mapping.size > 0) {
- positionMapping = mapping
- break
- }
- }
-
- if (!positionMapping) {
- return false
- }
-
- const toolIdLower = toolId.toLowerCase()
- const toolPositionCounters = new Map()
- let replaced = false
-
- for (let i = 0; i < data.length; i++) {
- const content = data[i]
- if (!Array.isArray(content.parts)) continue
-
- let contentModified = false
- const newParts = content.parts.map((part: any) => {
- if (part.functionResponse) {
- const funcName = part.functionResponse.name?.toLowerCase()
- if (funcName) {
- const currentIndex = toolPositionCounters.get(funcName) || 0
- toolPositionCounters.set(funcName, currentIndex + 1)
-
- const positionKey = `${funcName}:${currentIndex}`
- const mappedToolId = positionMapping!.get(positionKey)
-
- if (mappedToolId?.toLowerCase() === toolIdLower) {
- contentModified = true
- replaced = true
- // Preserve thoughtSignature if present (required for Gemini 3 Pro)
- return {
- ...part,
- functionResponse: {
- ...part.functionResponse,
- response: {
- name: part.functionResponse.name,
- content: prunedMessage
- }
- }
- }
- }
- }
- }
- return part
- })
-
- if (contentModified) {
- data[i] = { ...content, parts: newParts }
- }
- }
-
- return replaced
- },
-
- hasToolOutputs(data: any[]): boolean {
- return data.some((content: any) =>
- Array.isArray(content.parts) &&
- content.parts.some((part: any) => part.functionResponse)
- )
- },
-
- getLogMetadata(data: any[], replacedCount: number, inputUrl: string): Record {
- return {
- url: inputUrl,
- replacedCount,
- totalContents: data.length,
- format: 'google-gemini'
- }
- }
-}
diff --git a/lib/fetch-wrapper/formats/index.ts b/lib/fetch-wrapper/formats/index.ts
deleted file mode 100644
index 5e13d3f..0000000
--- a/lib/fetch-wrapper/formats/index.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-export { openaiChatFormat } from './openai-chat'
-export { openaiResponsesFormat } from './openai-responses'
-export { geminiFormat } from './gemini'
-export { bedrockFormat } from './bedrock'
-export { anthropicFormat } from './anthropic'
diff --git a/lib/fetch-wrapper/formats/openai-chat.ts b/lib/fetch-wrapper/formats/openai-chat.ts
deleted file mode 100644
index ca41dbf..0000000
--- a/lib/fetch-wrapper/formats/openai-chat.ts
+++ /dev/null
@@ -1,115 +0,0 @@
-import type { FormatDescriptor, ToolOutput } from "../types"
-import type { PluginState } from "../../state"
-
-export const openaiChatFormat: FormatDescriptor = {
- name: 'openai-chat',
-
- detect(body: any): boolean {
- return body.messages && Array.isArray(body.messages)
- },
-
- getDataArray(body: any): any[] | undefined {
- return body.messages
- },
-
- injectSystemMessage(body: any, injection: string): boolean {
- if (!injection || !body.messages) return false
-
- let lastSystemIndex = -1
- for (let i = 0; i < body.messages.length; i++) {
- if (body.messages[i].role === 'system') {
- lastSystemIndex = i
- }
- }
-
- const insertIndex = lastSystemIndex + 1
- body.messages.splice(insertIndex, 0, { role: 'system', content: injection })
- return true
- },
-
- appendUserMessage(body: any, injection: string): boolean {
- if (!injection || !body.messages) return false
- body.messages.push({ role: 'user', content: injection })
- return true
- },
-
- extractToolOutputs(data: any[], state: PluginState): ToolOutput[] {
- const outputs: ToolOutput[] = []
-
- for (const m of data) {
- if (m.role === 'tool' && m.tool_call_id) {
- const metadata = state.toolParameters.get(m.tool_call_id.toLowerCase())
- outputs.push({
- id: m.tool_call_id.toLowerCase(),
- toolName: metadata?.tool
- })
- }
-
- if (m.role === 'user' && Array.isArray(m.content)) {
- for (const part of m.content) {
- if (part.type === 'tool_result' && part.tool_use_id) {
- const metadata = state.toolParameters.get(part.tool_use_id.toLowerCase())
- outputs.push({
- id: part.tool_use_id.toLowerCase(),
- toolName: metadata?.tool
- })
- }
- }
- }
- }
-
- return outputs
- },
-
- replaceToolOutput(data: any[], toolId: string, prunedMessage: string, _state: PluginState): boolean {
- const toolIdLower = toolId.toLowerCase()
- let replaced = false
-
- for (let i = 0; i < data.length; i++) {
- const m = data[i]
-
- if (m.role === 'tool' && m.tool_call_id?.toLowerCase() === toolIdLower) {
- data[i] = { ...m, content: prunedMessage }
- replaced = true
- }
-
- if (m.role === 'user' && Array.isArray(m.content)) {
- let messageModified = false
- const newContent = m.content.map((part: any) => {
- if (part.type === 'tool_result' && part.tool_use_id?.toLowerCase() === toolIdLower) {
- messageModified = true
- return { ...part, content: prunedMessage }
- }
- return part
- })
- if (messageModified) {
- data[i] = { ...m, content: newContent }
- replaced = true
- }
- }
- }
-
- return replaced
- },
-
- hasToolOutputs(data: any[]): boolean {
- for (const m of data) {
- if (m.role === 'tool') return true
- if (m.role === 'user' && Array.isArray(m.content)) {
- for (const part of m.content) {
- if (part.type === 'tool_result') return true
- }
- }
- }
- return false
- },
-
- getLogMetadata(data: any[], replacedCount: number, inputUrl: string): Record {
- return {
- url: inputUrl,
- replacedCount,
- totalMessages: data.length,
- format: 'openai-chat'
- }
- }
-}
diff --git a/lib/fetch-wrapper/formats/openai-responses.ts b/lib/fetch-wrapper/formats/openai-responses.ts
deleted file mode 100644
index 2cabafe..0000000
--- a/lib/fetch-wrapper/formats/openai-responses.ts
+++ /dev/null
@@ -1,75 +0,0 @@
-import type { FormatDescriptor, ToolOutput } from "../types"
-import type { PluginState } from "../../state"
-
-export const openaiResponsesFormat: FormatDescriptor = {
- name: 'openai-responses',
-
- detect(body: any): boolean {
- return body.input && Array.isArray(body.input)
- },
-
- getDataArray(body: any): any[] | undefined {
- return body.input
- },
-
- injectSystemMessage(body: any, injection: string): boolean {
- if (!injection) return false
-
- if (body.instructions && typeof body.instructions === 'string') {
- body.instructions = body.instructions + '\n\n' + injection
- } else {
- body.instructions = injection
- }
- return true
- },
-
- appendUserMessage(body: any, injection: string): boolean {
- if (!injection || !body.input) return false
- body.input.push({ type: 'message', role: 'user', content: injection })
- return true
- },
-
- extractToolOutputs(data: any[], state: PluginState): ToolOutput[] {
- const outputs: ToolOutput[] = []
-
- for (const item of data) {
- if (item.type === 'function_call_output' && item.call_id) {
- const metadata = state.toolParameters.get(item.call_id.toLowerCase())
- outputs.push({
- id: item.call_id.toLowerCase(),
- toolName: metadata?.tool ?? item.name
- })
- }
- }
-
- return outputs
- },
-
- replaceToolOutput(data: any[], toolId: string, prunedMessage: string, _state: PluginState): boolean {
- const toolIdLower = toolId.toLowerCase()
- let replaced = false
-
- for (let i = 0; i < data.length; i++) {
- const item = data[i]
- if (item.type === 'function_call_output' && item.call_id?.toLowerCase() === toolIdLower) {
- data[i] = { ...item, output: prunedMessage }
- replaced = true
- }
- }
-
- return replaced
- },
-
- hasToolOutputs(data: any[]): boolean {
- return data.some((item: any) => item.type === 'function_call_output')
- },
-
- getLogMetadata(data: any[], replacedCount: number, inputUrl: string): Record {
- return {
- url: inputUrl,
- replacedCount,
- totalItems: data.length,
- format: 'openai-responses-api'
- }
- }
-}
diff --git a/lib/fetch-wrapper/gc-tracker.ts b/lib/fetch-wrapper/gc-tracker.ts
deleted file mode 100644
index 950a21a..0000000
--- a/lib/fetch-wrapper/gc-tracker.ts
+++ /dev/null
@@ -1,77 +0,0 @@
-import type { PluginState } from "../state"
-import type { Logger } from "../logger"
-
-export function accumulateGCStats(
- state: PluginState,
- sessionId: string,
- prunedIds: string[],
- body: any,
- logger: Logger
-): void {
- if (prunedIds.length === 0) return
-
- const toolOutputs = extractToolOutputsFromBody(body, prunedIds)
- const tokensCollected = estimateTokensFromOutputs(toolOutputs)
-
- const existing = state.gcPending.get(sessionId) ?? { tokensCollected: 0, toolsDeduped: 0 }
-
- state.gcPending.set(sessionId, {
- tokensCollected: existing.tokensCollected + tokensCollected,
- toolsDeduped: existing.toolsDeduped + prunedIds.length
- })
-
- logger.debug("gc-tracker", "Accumulated GC stats", {
- sessionId: sessionId.substring(0, 8),
- newlyDeduped: prunedIds.length,
- tokensThisCycle: tokensCollected,
- pendingTotal: state.gcPending.get(sessionId)
- })
-}
-
-function extractToolOutputsFromBody(body: any, prunedIds: string[]): string[] {
- const outputs: string[] = []
- const prunedIdSet = new Set(prunedIds.map(id => id.toLowerCase()))
-
- // OpenAI Chat format
- if (body.messages && Array.isArray(body.messages)) {
- for (const m of body.messages) {
- if (m.role === 'tool' && m.tool_call_id && prunedIdSet.has(m.tool_call_id.toLowerCase())) {
- if (typeof m.content === 'string') {
- outputs.push(m.content)
- }
- }
- // Anthropic format
- if (m.role === 'user' && Array.isArray(m.content)) {
- for (const part of m.content) {
- if (part.type === 'tool_result' && part.tool_use_id && prunedIdSet.has(part.tool_use_id.toLowerCase())) {
- if (typeof part.content === 'string') {
- outputs.push(part.content)
- }
- }
- }
- }
- }
- }
-
- // OpenAI Responses format
- if (body.input && Array.isArray(body.input)) {
- for (const item of body.input) {
- if (item.type === 'function_call_output' && item.call_id && prunedIdSet.has(item.call_id.toLowerCase())) {
- if (typeof item.output === 'string') {
- outputs.push(item.output)
- }
- }
- }
- }
-
- return outputs
-}
-
-// Character-based approximation (chars / 4) to avoid async tokenizer in fetch path
-function estimateTokensFromOutputs(outputs: string[]): number {
- let totalChars = 0
- for (const output of outputs) {
- totalChars += output.length
- }
- return Math.round(totalChars / 4)
-}
diff --git a/lib/fetch-wrapper/handler.ts b/lib/fetch-wrapper/handler.ts
deleted file mode 100644
index a4cd693..0000000
--- a/lib/fetch-wrapper/handler.ts
+++ /dev/null
@@ -1,165 +0,0 @@
-import type { FetchHandlerContext, FetchHandlerResult, FormatDescriptor, PrunedIdData } from "./types"
-import { type PluginState, ensureSessionRestored } from "../state"
-import type { Logger } from "../logger"
-import { buildPrunableToolsList, buildEndInjection } from "./prunable-list"
-import { syncToolCache } from "../state/tool-cache"
-import { loadPrompt } from "../core/prompt"
-
-const SYNTHETIC_INSTRUCTION = loadPrompt("synthetic")
-const PRUNED_CONTENT_MESSAGE = '[Output removed to save context - information superseded or no longer needed]'
-
-function getMostRecentActiveSession(allSessions: any): any | undefined {
- const activeSessions = allSessions.data?.filter((s: any) => !s.parentID) || []
- return activeSessions.length > 0 ? activeSessions[0] : undefined
-}
-
-async function fetchSessionMessages(
- client: any,
- sessionId: string
-): Promise {
- try {
- const messagesResponse = await client.session.messages({
- path: { id: sessionId },
- query: { limit: 500 }
- })
- return Array.isArray(messagesResponse.data)
- ? messagesResponse.data
- : Array.isArray(messagesResponse) ? messagesResponse : undefined
- } catch (e) {
- return undefined
- }
-}
-
-async function getAllPrunedIds(
- client: any,
- state: PluginState,
- logger?: Logger
-): Promise {
- const allSessions = await client.session.list()
- const allPrunedIds = new Set()
-
- const currentSession = getMostRecentActiveSession(allSessions)
- if (currentSession) {
- await ensureSessionRestored(state, currentSession.id, logger)
- const prunedIds = state.prunedIds.get(currentSession.id) ?? []
- prunedIds.forEach((id: string) => allPrunedIds.add(id.toLowerCase()))
-
- if (logger && prunedIds.length > 0) {
- logger.debug("fetch", "Loaded pruned IDs for replacement", {
- sessionId: currentSession.id,
- prunedCount: prunedIds.length
- })
- }
- }
-
- return { allSessions, allPrunedIds }
-}
-
-export async function handleFormat(
- body: any,
- ctx: FetchHandlerContext,
- inputUrl: string,
- format: FormatDescriptor
-): Promise {
- const data = format.getDataArray(body)
- if (!data) {
- return { modified: false, body }
- }
-
- let modified = false
-
- // Sync tool parameters from OpenCode's session API (single source of truth)
- // Also tracks new tool results for nudge injection
- const sessionId = ctx.state.lastSeenSessionId
- const protectedSet = new Set(ctx.config.protectedTools)
- if (sessionId) {
- await ensureSessionRestored(ctx.state, sessionId, ctx.logger)
- await syncToolCache(ctx.client, sessionId, ctx.state, ctx.toolTracker, protectedSet, ctx.logger)
- }
-
- if (ctx.config.strategies.onTool.length > 0 && sessionId) {
- const toolIds = Array.from(ctx.state.toolParameters.keys())
- const alreadyPruned = ctx.state.prunedIds.get(sessionId) ?? []
- const alreadyPrunedLower = new Set(alreadyPruned.map(id => id.toLowerCase()))
- const unprunedIds = toolIds.filter(id => !alreadyPrunedLower.has(id.toLowerCase()))
-
- const { list: prunableList, numericIds } = buildPrunableToolsList(
- sessionId,
- unprunedIds,
- ctx.state.toolParameters,
- ctx.config.protectedTools
- )
-
- if (prunableList) {
- const includeNudge = ctx.config.nudge_freq > 0 && ctx.toolTracker.toolResultCount > ctx.config.nudge_freq
- if (format.injectSystemMessage(body, SYNTHETIC_INSTRUCTION)) {
- modified = true
- }
-
- const endInjection = buildEndInjection(prunableList, includeNudge)
-
- if (format.appendUserMessage && format.appendUserMessage(body, endInjection)) {
- const nudgeMsg = includeNudge ? " with nudge" : ""
- ctx.logger.debug("fetch", `Appended prunable tools list${nudgeMsg} as user message (${format.name})`, {
- ids: numericIds,
- nudge: includeNudge,
- toolsSincePrune: ctx.toolTracker.toolResultCount
- })
- modified = true
- }
- }
- }
-
- if (!format.hasToolOutputs(data)) {
- return { modified, body }
- }
-
- const { allSessions, allPrunedIds } = await getAllPrunedIds(ctx.client, ctx.state, ctx.logger)
-
- if (allPrunedIds.size === 0) {
- return { modified, body }
- }
-
- const toolOutputs = format.extractToolOutputs(data, ctx.state)
- let replacedCount = 0
- let prunableCount = 0
-
- for (const output of toolOutputs) {
- // Skip tools not in cache (protected tools are excluded from cache)
- if (!output.toolName) continue
- prunableCount++
-
- if (allPrunedIds.has(output.id)) {
- if (format.replaceToolOutput(data, output.id, PRUNED_CONTENT_MESSAGE, ctx.state)) {
- replacedCount++
- }
- }
- }
-
- if (replacedCount > 0) {
- ctx.logger.info("fetch", `Replaced pruned tool outputs (${format.name})`, {
- replaced: replacedCount,
- total: prunableCount
- })
-
- if (ctx.logger.enabled) {
- const activeSessions = allSessions.data?.filter((s: any) => !s.parentID) || []
- let sessionMessages: any[] | undefined
- if (activeSessions.length > 0) {
- const mostRecentSession = activeSessions[0]
- sessionMessages = await fetchSessionMessages(ctx.client, mostRecentSession.id)
- }
-
- await ctx.logger.saveWrappedContext(
- "global",
- data,
- format.getLogMetadata(data, replacedCount, inputUrl),
- sessionMessages
- )
- }
-
- return { modified: true, body }
- }
-
- return { modified, body }
-}
diff --git a/lib/fetch-wrapper/index.ts b/lib/fetch-wrapper/index.ts
deleted file mode 100644
index 244103b..0000000
--- a/lib/fetch-wrapper/index.ts
+++ /dev/null
@@ -1,134 +0,0 @@
-import type { PluginState } from "../state"
-import type { Logger } from "../logger"
-import type { FetchHandlerContext } from "./types"
-import type { ToolTracker } from "./types"
-import type { PluginConfig } from "../config"
-import { openaiChatFormat, openaiResponsesFormat, geminiFormat, bedrockFormat, anthropicFormat } from "./formats"
-import { handleFormat } from "./handler"
-import { runStrategies } from "../core/strategies"
-import { accumulateGCStats } from "./gc-tracker"
-import { trimToolParametersCache } from "../state/tool-cache"
-
-export type { FetchHandlerContext, FetchHandlerResult } from "./types"
-
-/**
- * Creates a wrapped global fetch that intercepts API calls and performs
- * context pruning on tool outputs that have been marked for removal.
- *
- * Supports five API formats:
- * 1. OpenAI Chat Completions (body.messages with role='tool')
- * 2. Anthropic Messages API (body.system + body.messages with tool_result)
- * 3. Google/Gemini (body.contents with functionResponse parts)
- * 4. OpenAI Responses API (body.input with function_call_output items)
- * 5. AWS Bedrock Converse API (body.system + body.messages with toolResult blocks)
- */
-export function installFetchWrapper(
- state: PluginState,
- logger: Logger,
- client: any,
- config: PluginConfig,
- toolTracker: ToolTracker
-): () => void {
- const originalGlobalFetch = globalThis.fetch
-
- const ctx: FetchHandlerContext = {
- state,
- logger,
- client,
- config,
- toolTracker
- }
-
- globalThis.fetch = async (input: any, init?: any) => {
- if (state.lastSeenSessionId && state.subagentSessions.has(state.lastSeenSessionId)) {
- logger.debug("fetch-wrapper", "Skipping DCP processing for subagent session", {
- sessionId: state.lastSeenSessionId.substring(0, 8)
- })
- return originalGlobalFetch(input, init)
- }
-
- if (init?.body && typeof init.body === 'string') {
- try {
- const body = JSON.parse(init.body)
- const inputUrl = typeof input === 'string' ? input : 'URL object'
- let modified = false
-
- const toolIdsBefore = new Set(state.toolParameters.keys())
-
- // Mutually exclusive format handlers
- // Order matters: More specific formats first to avoid incorrect detection
- // 1. OpenAI Responses API: has body.input (not body.messages)
- // 2. Bedrock: has body.system + body.inferenceConfig + body.messages
- // 3. Anthropic: has body.system + body.messages (no inferenceConfig)
- // 4. OpenAI Chat: has body.messages (no top-level system)
- // 5. Gemini: has body.contents
- if (openaiResponsesFormat.detect(body)) {
- const result = await handleFormat(body, ctx, inputUrl, openaiResponsesFormat)
- if (result.modified) {
- modified = true
- }
- }
- else if (bedrockFormat.detect(body)) {
- const result = await handleFormat(body, ctx, inputUrl, bedrockFormat)
- if (result.modified) {
- modified = true
- }
- }
- else if (anthropicFormat.detect(body)) {
- const result = await handleFormat(body, ctx, inputUrl, anthropicFormat)
- if (result.modified) {
- modified = true
- }
- }
- else if (openaiChatFormat.detect(body)) {
- const result = await handleFormat(body, ctx, inputUrl, openaiChatFormat)
- if (result.modified) {
- modified = true
- }
- }
- else if (geminiFormat.detect(body)) {
- const result = await handleFormat(body, ctx, inputUrl, geminiFormat)
- if (result.modified) {
- modified = true
- }
- }
-
- const sessionId = state.lastSeenSessionId
- const toolIdsAfter = Array.from(state.toolParameters.keys())
- const newToolsCached = toolIdsAfter.filter(id => !toolIdsBefore.has(id)).length > 0
-
- if (sessionId && newToolsCached && state.toolParameters.size > 0) {
- const toolIds = Array.from(state.toolParameters.keys())
- const alreadyPruned = state.prunedIds.get(sessionId) ?? []
- const alreadyPrunedLower = new Set(alreadyPruned.map(id => id.toLowerCase()))
- const unpruned = toolIds.filter(id => !alreadyPrunedLower.has(id.toLowerCase()))
- if (unpruned.length > 1) {
- const result = runStrategies(
- state.toolParameters,
- unpruned,
- config.protectedTools
- )
- if (result.prunedIds.length > 0) {
- const normalizedIds = result.prunedIds.map(id => id.toLowerCase())
- state.prunedIds.set(sessionId, [...new Set([...alreadyPruned, ...normalizedIds])])
- accumulateGCStats(state, sessionId, result.prunedIds, body, logger)
- }
- }
-
- trimToolParametersCache(state)
- }
-
- if (modified) {
- init.body = JSON.stringify(body)
- }
- } catch (e) {
- }
- }
-
- return originalGlobalFetch(input, init)
- }
-
- return () => {
- globalThis.fetch = originalGlobalFetch
- }
-}
diff --git a/lib/fetch-wrapper/prunable-list.ts b/lib/fetch-wrapper/prunable-list.ts
deleted file mode 100644
index 677f8c9..0000000
--- a/lib/fetch-wrapper/prunable-list.ts
+++ /dev/null
@@ -1,60 +0,0 @@
-import { extractParameterKey } from '../ui/display-utils'
-import { getOrCreateNumericId } from '../state/id-mapping'
-import { loadPrompt } from '../core/prompt'
-import type { ToolMetadata } from './types'
-
-const NUDGE_INSTRUCTION = loadPrompt("nudge")
-
-export interface PrunableListResult {
- list: string
- numericIds: number[]
-}
-
-export function buildPrunableToolsList(
- sessionId: string,
- unprunedToolCallIds: string[],
- toolMetadata: Map,
- protectedTools: string[]
-): PrunableListResult {
- const lines: string[] = []
- const numericIds: number[] = []
-
- for (const actualId of unprunedToolCallIds) {
- const metadata = toolMetadata.get(actualId)
- if (!metadata) continue
- if (protectedTools.includes(metadata.tool)) continue
-
- const numericId = getOrCreateNumericId(sessionId, actualId)
- numericIds.push(numericId)
-
- const paramKey = extractParameterKey(metadata)
- const description = paramKey ? `${metadata.tool}, ${paramKey}` : metadata.tool
- lines.push(`${numericId}: ${description}`)
- }
-
- if (lines.length === 0) {
- return { list: '', numericIds: [] }
- }
-
- return {
- list: `\nThe following tools have been invoked and are available for pruning. This list does not mandate immediate action. Consider your current goals and the resources you need before discarding valuable tool outputs. Keep the context free of noise.\n${lines.join('\n')}\n`,
- numericIds
- }
-}
-
-export function buildEndInjection(
- prunableList: string,
- includeNudge: boolean
-): string {
- if (!prunableList) {
- return ''
- }
-
- const parts = [prunableList]
-
- if (includeNudge) {
- parts.push(NUDGE_INSTRUCTION)
- }
-
- return parts.join('\n\n')
-}
diff --git a/lib/fetch-wrapper/tool-tracker.ts b/lib/fetch-wrapper/tool-tracker.ts
deleted file mode 100644
index a195a67..0000000
--- a/lib/fetch-wrapper/tool-tracker.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-export interface ToolTracker {
- seenToolResultIds: Set
- toolResultCount: number // Tools since last prune
- skipNextIdle: boolean
-}
-
-export function createToolTracker(): ToolTracker {
- return { seenToolResultIds: new Set(), toolResultCount: 0, skipNextIdle: false }
-}
-
-export function resetToolTrackerCount(tracker: ToolTracker): void {
- tracker.toolResultCount = 0
-}
-
-export function clearToolTracker(tracker: ToolTracker): void {
- tracker.seenToolResultIds.clear()
- tracker.toolResultCount = 0
- tracker.skipNextIdle = false
-}
diff --git a/lib/fetch-wrapper/types.ts b/lib/fetch-wrapper/types.ts
deleted file mode 100644
index d15b640..0000000
--- a/lib/fetch-wrapper/types.ts
+++ /dev/null
@@ -1,76 +0,0 @@
-import type { PluginState } from "../state"
-import type { Logger } from "../logger"
-import type { PluginConfig } from "../config"
-import type { ToolTracker } from "./tool-tracker"
-export type { ToolTracker } from "./tool-tracker"
-
-export interface ToolOutput {
- id: string
- toolName?: string
-}
-
-export interface ToolMetadata {
- tool: string
- parameters?: any
-}
-
-export interface FormatDescriptor {
- name: string
- detect(body: any): boolean
- getDataArray(body: any): any[] | undefined
- injectSystemMessage(body: any, injection: string): boolean
- appendUserMessage?(body: any, injection: string): boolean
- extractToolOutputs(data: any[], state: PluginState): ToolOutput[]
- replaceToolOutput(data: any[], toolId: string, prunedMessage: string, state: PluginState): boolean
- hasToolOutputs(data: any[]): boolean
- getLogMetadata(data: any[], replacedCount: number, inputUrl: string): Record
-}
-
-export interface FetchHandlerContext {
- state: PluginState
- logger: Logger
- client: any
- config: PluginConfig
- toolTracker: ToolTracker
-}
-
-export interface FetchHandlerResult {
- modified: boolean
- body: any
-}
-
-export interface PrunedIdData {
- allSessions: any
- allPrunedIds: Set
-}
-
-/** The 3 scenarios that trigger explicit LLM pruning */
-export type PruneReason = "completion" | "noise" | "consolidation"
-
-/** Human-readable labels for prune reasons */
-export const PRUNE_REASON_LABELS: Record = {
- completion: "Task Complete",
- noise: "Noise Removal",
- consolidation: "Consolidation"
-}
-
-export interface SessionStats {
- totalToolsPruned: number
- totalTokensSaved: number
- totalGCTokens: number
- totalGCTools: number
-}
-
-export interface GCStats {
- tokensCollected: number
- toolsDeduped: number
-}
-
-export interface PruningResult {
- prunedCount: number
- tokensSaved: number
- llmPrunedIds: string[]
- toolMetadata: Map
- sessionStats: SessionStats
- reason?: PruneReason
-}
diff --git a/lib/hooks.ts b/lib/hooks.ts
index 48ca6cc..72fda69 100644
--- a/lib/hooks.ts
+++ b/lib/hooks.ts
@@ -1,168 +1,73 @@
-import type { PluginState } from "./state"
+import type { SessionState, WithParts } from "./state"
import type { Logger } from "./logger"
-import type { JanitorContext } from "./core/janitor"
-import { runOnIdle } from "./core/janitor"
-import type { PluginConfig, PruningStrategy } from "./config"
-import type { ToolTracker } from "./fetch-wrapper/tool-tracker"
-import { resetToolTrackerCount, clearToolTracker } from "./fetch-wrapper/tool-tracker"
-import { clearAllMappings } from "./state/id-mapping"
+import type { PluginConfig } from "./config"
+import { syncToolCache } from "./state/tool-cache"
+import { deduplicate } from "./strategies"
+import { prune, insertPruneToolContext } from "./messages"
+import { checkSession } from "./state"
+import { runOnIdle } from "./strategies/on-idle"
-export async function isSubagentSession(client: any, sessionID: string): Promise {
- try {
- const result = await client.session.get({ path: { id: sessionID } })
- return !!result.data?.parentID
- } catch (error: any) {
- return false
- }
-}
-function toolStrategiesCoveredByIdle(onIdle: PruningStrategy[], onTool: PruningStrategy[]): boolean {
- return onTool.every(strategy => onIdle.includes(strategy))
-}
-
-export function createEventHandler(
+export function createChatMessageTransformHandler(
client: any,
- janitorCtx: JanitorContext,
+ state: SessionState,
logger: Logger,
- config: PluginConfig,
- toolTracker?: ToolTracker
+ config: PluginConfig
) {
- return async ({ event }: { event: any }) => {
- if (event.type === "session.status" && event.properties.status.type === "idle") {
- if (await isSubagentSession(client, event.properties.sessionID)) return
- if (config.strategies.onIdle.length === 0) return
+ return async (
+ input: {},
+ output: { messages: WithParts[] }
+ ) => {
+ await checkSession(client, state, logger, output.messages)
+
+ if (state.isSubAgent) {
+ return
+ }
- if (toolTracker?.skipNextIdle) {
- toolTracker.skipNextIdle = false
- if (toolStrategiesCoveredByIdle(config.strategies.onIdle, config.strategies.onTool)) {
- return
- }
- }
+ syncToolCache(state, config, logger, output.messages);
- try {
- const result = await runOnIdle(janitorCtx, event.properties.sessionID, config.strategies.onIdle)
+ deduplicate(state, logger, config, output.messages)
- if (result && result.prunedCount > 0 && toolTracker && config.nudge_freq > 0) {
- if (toolStrategiesCoveredByIdle(config.strategies.onIdle, config.strategies.onTool)) {
- resetToolTrackerCount(toolTracker)
- }
- }
- } catch (err: any) {
- logger.error("janitor", "Failed", { error: err.message })
- }
- }
+ prune(state, logger, config, output.messages)
+
+ insertPruneToolContext(state, config, logger, output.messages)
}
}
-/**
- * Creates the chat.params hook for model caching and Google tool call mapping.
- */
-export function createChatParamsHandler(
+export function createEventHandler(
client: any,
- state: PluginState,
+ config: PluginConfig,
+ state: SessionState,
logger: Logger,
- toolTracker?: ToolTracker
+ workingDirectory?: string
) {
- return async (input: any, _output: any) => {
- const sessionId = input.sessionID
- let providerID = (input.provider as any)?.info?.id || input.provider?.id
- const modelID = input.model?.id
-
- if (!providerID && input.message?.model?.providerID) {
- providerID = input.message.model.providerID
+ return async (
+ { event }: { event: any }
+ ) => {
+ if (state.sessionId === null || state.isSubAgent) {
+ return
}
- if (state.lastSeenSessionId && state.lastSeenSessionId !== sessionId) {
- logger.info("chat.params", "Session changed, resetting state", {
- from: state.lastSeenSessionId,
- to: sessionId
- })
- clearAllMappings()
- state.toolParameters.clear()
- if (toolTracker) {
- clearToolTracker(toolTracker)
+ if (event.type === "session.status" && event.properties.status.type === "idle") {
+ if (!config.strategies.onIdle.enabled) {
+ return
}
- }
-
- state.lastSeenSessionId = sessionId
-
- if (!state.checkedSessions.has(sessionId)) {
- state.checkedSessions.add(sessionId)
- const isSubagent = await isSubagentSession(client, sessionId)
- if (isSubagent) {
- state.subagentSessions.add(sessionId)
+ if (state.lastToolPrune) {
+ logger.info("Skipping OnIdle pruning - last tool was prune")
+ return
}
- }
- // Cache model info for the session (used by janitor for model selection)
- if (providerID && modelID) {
- state.model.set(sessionId, {
- providerID: providerID,
- modelID: modelID
- })
- }
-
- // Build position-based mapping for Gemini (which loses tool call IDs in native format)
- if (providerID === 'google' || providerID === 'google-vertex') {
try {
- const messagesResponse = await client.session.messages({
- path: { id: sessionId },
- query: { limit: 500 }
- })
- const messages = messagesResponse.data || messagesResponse
-
- if (Array.isArray(messages)) {
- const toolCallsByName = new Map()
-
- for (const msg of messages) {
- if (msg.parts) {
- for (const part of msg.parts) {
- if (part.type === 'tool' && part.callID && part.tool) {
- const toolName = part.tool.toLowerCase()
- const callId = part.callID.toLowerCase()
-
- if (!toolCallsByName.has(toolName)) {
- toolCallsByName.set(toolName, [])
- }
- toolCallsByName.get(toolName)!.push(callId)
- }
- }
- }
- }
-
- const positionMapping = new Map()
- for (const [toolName, callIds] of toolCallsByName) {
- callIds.forEach((callId, index) => {
- positionMapping.set(`${toolName}:${index}`, callId)
- })
- }
-
- state.googleToolCallMapping.set(sessionId, positionMapping)
- logger.info("chat.params", "Built Google tool call mapping", {
- sessionId: sessionId.substring(0, 8),
- toolCount: positionMapping.size,
- toolParamsCount: state.toolParameters.size
- })
- }
- } catch (error: any) {
- logger.error("chat.params", "Failed to build Google tool call mapping", {
- error: error.message
- })
+ await runOnIdle(
+ client,
+ state,
+ logger,
+ config,
+ workingDirectory
+ )
+ } catch (err: any) {
+ logger.error("OnIdle pruning failed", { error: err.message })
}
}
}
}
-
-/**
- * Finds the current agent from messages by scanning backward for user messages.
- */
-export function findCurrentAgent(messages: any[]): string | undefined {
- for (let i = messages.length - 1; i >= 0; i--) {
- const msg = messages[i]
- const info = msg.info
- if (info?.role === 'user') {
- return info.agent || 'build'
- }
- }
- return undefined
-}
diff --git a/lib/logger.ts b/lib/logger.ts
index d51e888..0081db1 100644
--- a/lib/logger.ts
+++ b/lib/logger.ts
@@ -6,7 +6,6 @@ import { homedir } from "os"
export class Logger {
private logDir: string
public enabled: boolean
- private fileCounter: number = 0
constructor(enabled: boolean) {
this.enabled = enabled
@@ -45,6 +44,29 @@ export class Logger {
return parts.join(" ")
}
+ private getCallerFile(skipFrames: number = 3): string {
+ const originalPrepareStackTrace = Error.prepareStackTrace
+ try {
+ const err = new Error()
+ Error.prepareStackTrace = (_, stack) => stack
+ const stack = err.stack as unknown as NodeJS.CallSite[]
+ Error.prepareStackTrace = originalPrepareStackTrace
+
+ // Skip specified number of frames to get to actual caller
+ for (let i = skipFrames; i < stack.length; i++) {
+ const filename = stack[i]?.getFileName()
+ if (filename && !filename.includes('/logger.')) {
+ // Extract just the filename without path and extension
+ const match = filename.match(/([^/\\]+)\.[tj]s$/)
+ return match ? match[1] : filename
+ }
+ }
+ return 'unknown'
+ } catch {
+ return 'unknown'
+ }
+ }
+
private async write(level: string, component: string, message: string, data?: any) {
if (!this.enabled) return
@@ -67,196 +89,23 @@ export class Logger {
}
}
- info(component: string, message: string, data?: any) {
+ info(message: string, data?: any) {
+ const component = this.getCallerFile(2)
return this.write("INFO", component, message, data)
}
- debug(component: string, message: string, data?: any) {
+ debug(message: string, data?: any) {
+ const component = this.getCallerFile(2)
return this.write("DEBUG", component, message, data)
}
- warn(component: string, message: string, data?: any) {
+ warn(message: string, data?: any) {
+ const component = this.getCallerFile(2)
return this.write("WARN", component, message, data)
}
- error(component: string, message: string, data?: any) {
+ error(message: string, data?: any) {
+ const component = this.getCallerFile(2)
return this.write("ERROR", component, message, data)
}
-
- private parseJanitorPrompt(prompt: string): {
- instructions: string
- availableToolCallIds: string[]
- sessionHistory: any[]
- responseSchema: any
- } | null {
- try {
- const idsMatch = prompt.match(/Available tool call IDs for analysis:\s*([^\n]+)/)
- const availableToolCallIds = idsMatch
- ? idsMatch[1].split(',').map(id => id.trim())
- : []
-
- const historyMatch = prompt.match(/Session history[^\n]*:\s*\n([\s\S]*?)\n\nYou MUST respond/)
- let sessionHistory: any[] = []
-
- if (historyMatch) {
- const historyText = historyMatch[1]
-
- const fixedJson = this.escapeNewlinesInJson(historyText)
- sessionHistory = JSON.parse(fixedJson)
- }
-
- const instructionsMatch = prompt.match(/([\s\S]*?)\n\nIMPORTANT: Available tool call IDs/)
- const instructions = instructionsMatch
- ? instructionsMatch[1].trim()
- : ''
-
- const schemaMatch = prompt.match(/matching this exact schema:\s*\n(\{[\s\S]*?\})\s*$/)
- const responseSchema = schemaMatch
- ? schemaMatch[1]
- : null
-
- return {
- instructions,
- availableToolCallIds,
- sessionHistory,
- responseSchema
- }
- } catch (error) {
- return null
- }
- }
-
- private escapeNewlinesInJson(jsonText: string): string {
- let result = ''
- let inString = false
-
- for (let i = 0; i < jsonText.length; i++) {
- const char = jsonText[i]
- const prevChar = i > 0 ? jsonText[i - 1] : ''
-
- if (char === '"' && prevChar !== '\\') {
- inString = !inString
- result += char
- } else if (char === '\n' && inString) {
- result += '\\n'
- } else {
- result += char
- }
- }
-
- return result
- }
-
- private extractReasoningBlocks(sessionMessages: any[]): any[] {
- const reasoningBlocks: any[] = []
-
- for (const msg of sessionMessages) {
- if (!msg.parts) continue
-
- for (const part of msg.parts) {
- if (part.type === "reasoning") {
- // Calculate encrypted content size for different providers
- let encryptedContentLength = 0
- if (part.metadata?.openai?.reasoningEncryptedContent) {
- encryptedContentLength = part.metadata.openai.reasoningEncryptedContent.length
- } else if (part.metadata?.anthropic?.signature) {
- encryptedContentLength = part.metadata.anthropic.signature.length
- } else if (part.metadata?.google?.thoughtSignature) {
- encryptedContentLength = part.metadata.google.thoughtSignature.length
- }
-
- reasoningBlocks.push({
- messageId: msg.id,
- messageRole: msg.role,
- text: part.text,
- textLength: part.text?.length || 0,
- encryptedContentLength,
- time: part.time,
- hasMetadata: !!part.metadata,
- metadataKeys: part.metadata ? Object.keys(part.metadata) : []
- })
- }
- }
- }
-
- return reasoningBlocks
- }
-
- async saveWrappedContext(sessionID: string, messages: any[], metadata: any, sessionMessages?: any[]) {
- if (!this.enabled) return
-
- try {
- await this.ensureLogDir()
-
- const aiContextDir = join(this.logDir, "ai-context")
- if (!existsSync(aiContextDir)) {
- await mkdir(aiContextDir, { recursive: true })
- }
-
- const timestamp = new Date().toISOString().replace(/:/g, '-').replace(/\./g, '-')
- const counter = (this.fileCounter++).toString().padStart(3, '0')
- const filename = `${timestamp}_${counter}_${sessionID.substring(0, 15)}.json`
- const filepath = join(aiContextDir, filename)
-
- const isJanitorShadow = sessionID === "janitor-shadow" &&
- messages.length === 1 &&
- messages[0]?.role === 'user' &&
- typeof messages[0]?.content === 'string'
-
- let content: any
-
- if (isJanitorShadow) {
- const parsed = this.parseJanitorPrompt(messages[0].content)
-
- if (parsed) {
- content = {
- timestamp: new Date().toISOString(),
- sessionID,
- metadata,
- janitorAnalysis: {
- instructions: parsed.instructions,
- availableToolCallIds: parsed.availableToolCallIds,
- protectedTools: ["task", "todowrite", "todoread"],
- sessionHistory: parsed.sessionHistory,
- responseSchema: parsed.responseSchema
- },
- rawPrompt: messages[0].content
- }
- } else {
- content = {
- timestamp: new Date().toISOString(),
- sessionID,
- metadata,
- messages,
- note: "Failed to parse janitor prompt structure"
- }
- }
- } else {
- // Extract reasoning blocks from session messages if available
- const reasoningBlocks = sessionMessages
- ? this.extractReasoningBlocks(sessionMessages)
- : []
-
- content = {
- timestamp: new Date().toISOString(),
- sessionID,
- metadata,
- messages,
- ...(reasoningBlocks.length > 0 && {
- reasoning: {
- count: reasoningBlocks.length,
- totalTextCharacters: reasoningBlocks.reduce((sum, b) => sum + b.textLength, 0),
- totalEncryptedCharacters: reasoningBlocks.reduce((sum, b) => sum + b.encryptedContentLength, 0),
- blocks: reasoningBlocks
- }
- })
- }
- }
-
- const jsonString = JSON.stringify(content, null, 2)
-
- await writeFile(filepath, jsonString)
- } catch (error) {
- }
- }
}
diff --git a/lib/messages/index.ts b/lib/messages/index.ts
new file mode 100644
index 0000000..e854003
--- /dev/null
+++ b/lib/messages/index.ts
@@ -0,0 +1 @@
+export { prune, insertPruneToolContext } from "./prune"
diff --git a/lib/messages/prune.ts b/lib/messages/prune.ts
new file mode 100644
index 0000000..7361b74
--- /dev/null
+++ b/lib/messages/prune.ts
@@ -0,0 +1,126 @@
+import type { SessionState, WithParts } from "../state"
+import type { Logger } from "../logger"
+import type { PluginConfig } from "../config"
+import { getLastUserMessage, extractParameterKey, buildToolIdList } from "./utils"
+import { loadPrompt } from "../prompt"
+
+const PRUNED_TOOL_OUTPUT_REPLACEMENT = '[Output removed to save context - information superseded or no longer needed]'
+const NUDGE_STRING = loadPrompt("nudge")
+
+const buildPrunableToolsList = (
+ state: SessionState,
+ config: PluginConfig,
+ logger: Logger,
+ messages: WithParts[],
+): string => {
+ const lines: string[] = []
+ const toolIdList: string[] = buildToolIdList(messages)
+
+ state.toolParameters.forEach((toolParameterEntry, toolCallId) => {
+ if (state.prune.toolIds.includes(toolCallId)) {
+ return
+ }
+ if (config.strategies.pruneTool.protectedTools.includes(toolParameterEntry.tool)) {
+ return
+ }
+ if (toolParameterEntry.compacted) {
+ return
+ }
+ const numericId = toolIdList.indexOf(toolCallId)
+ const paramKey = extractParameterKey(toolParameterEntry.tool, toolParameterEntry.parameters)
+ const description = paramKey ? `${toolParameterEntry.tool}, ${paramKey}` : toolParameterEntry.tool
+ lines.push(`${numericId}: ${description}`)
+ logger.debug(`Prunable tool found - ID: ${numericId}, Tool: ${toolParameterEntry.tool}, Call ID: ${toolCallId}`)
+ })
+
+ if (lines.length === 0) {
+ return ""
+ }
+
+ return `\nThe following tools have been invoked and are available for pruning. This list does not mandate immediate action. Consider your current goals and the resources you need before discarding valuable tool outputs. Keep the context free of noise.\n${lines.join('\n')}\n`
+}
+
+export const insertPruneToolContext = (
+ state: SessionState,
+ config: PluginConfig,
+ logger: Logger,
+ messages: WithParts[]
+): void => {
+ if (!config.strategies.pruneTool.enabled) {
+ return
+ }
+
+ const lastUserMessage = getLastUserMessage(messages)
+ if (!lastUserMessage || lastUserMessage.info.role !== 'user') {
+ return
+ }
+
+ const prunableToolsList = buildPrunableToolsList(state, config, logger, messages)
+ if (!prunableToolsList) {
+ return
+ }
+
+ let nudgeString = ""
+ if (state.nudgeCounter >= config.strategies.pruneTool.nudge.frequency) {
+ logger.info("Inserting prune nudge message")
+ nudgeString = "\n" + NUDGE_STRING
+ }
+
+ const userMessage: WithParts = {
+ info: {
+ id: "msg_01234567890123456789012345",
+ sessionID: lastUserMessage.info.sessionID,
+ role: "user",
+ time: { created: Date.now() },
+ agent: lastUserMessage.info.agent || "build",
+ model: {
+ providerID: lastUserMessage.info.model.providerID,
+ modelID: lastUserMessage.info.model.modelID
+ }
+ },
+ parts: [
+ {
+ id: "prt_01234567890123456789012345",
+ sessionID: lastUserMessage.info.sessionID,
+ messageID: "msg_01234567890123456789012345",
+ type: "text",
+ text: prunableToolsList + nudgeString,
+ }
+ ]
+ }
+
+ messages.push(userMessage)
+}
+
+export const prune = (
+ state: SessionState,
+ logger: Logger,
+ config: PluginConfig,
+ messages: WithParts[]
+): void => {
+ pruneToolOutputs(state, logger, messages)
+ // more prune methods coming here
+}
+
+const pruneToolOutputs = (
+ state: SessionState,
+ logger: Logger,
+ messages: WithParts[]
+): void => {
+ for (const msg of messages) {
+ for (const part of msg.parts) {
+ if (part.type !== 'tool') {
+ continue
+ }
+ if (!state.prune.toolIds.includes(part.callID)) {
+ continue
+ }
+ if (part.state.status === 'completed') {
+ part.state.output = PRUNED_TOOL_OUTPUT_REPLACEMENT
+ }
+ // if (part.state.status === 'error') {
+ // part.state.error = PRUNED_TOOL_OUTPUT_REPLACEMENT
+ // }
+ }
+ }
+}
diff --git a/lib/messages/utils.ts b/lib/messages/utils.ts
new file mode 100644
index 0000000..26b2c60
--- /dev/null
+++ b/lib/messages/utils.ts
@@ -0,0 +1,104 @@
+import type { WithParts } from "../state"
+
+/**
+ * Extracts a human-readable key from tool metadata for display purposes.
+ */
+export const extractParameterKey = (tool: string, parameters: any): string => {
+ if (!parameters) return ''
+
+ if (tool === "read" && parameters.filePath) {
+ return parameters.filePath
+ }
+ if (tool === "write" && parameters.filePath) {
+ return parameters.filePath
+ }
+ if (tool === "edit" && parameters.filePath) {
+ return parameters.filePath
+ }
+
+ if (tool === "list") {
+ return parameters.path || '(current directory)'
+ }
+ if (tool === "glob") {
+ if (parameters.pattern) {
+ const pathInfo = parameters.path ? ` in ${parameters.path}` : ""
+ return `"${parameters.pattern}"${pathInfo}`
+ }
+ return '(unknown pattern)'
+ }
+ if (tool === "grep") {
+ if (parameters.pattern) {
+ const pathInfo = parameters.path ? ` in ${parameters.path}` : ""
+ return `"${parameters.pattern}"${pathInfo}`
+ }
+ return '(unknown pattern)'
+ }
+
+ if (tool === "bash") {
+ if (parameters.description) return parameters.description
+ if (parameters.command) {
+ return parameters.command.length > 50
+ ? parameters.command.substring(0, 50) + "..."
+ : parameters.command
+ }
+ }
+
+ if (tool === "webfetch" && parameters.url) {
+ return parameters.url
+ }
+ if (tool === "websearch" && parameters.query) {
+ return `"${parameters.query}"`
+ }
+ if (tool === "codesearch" && parameters.query) {
+ return `"${parameters.query}"`
+ }
+
+ if (tool === "todowrite") {
+ return `${parameters.todos?.length || 0} todos`
+ }
+ if (tool === "todoread") {
+ return "read todo list"
+ }
+
+ if (tool === "task" && parameters.description) {
+ return parameters.description
+ }
+
+ const paramStr = JSON.stringify(parameters)
+ if (paramStr === '{}' || paramStr === '[]' || paramStr === 'null') {
+ return ''
+ }
+ return paramStr.substring(0, 50)
+}
+
+export const getLastUserMessage = (
+ messages: WithParts[]
+): WithParts | null => {
+ for (let i = messages.length - 1; i >= 0; i--) {
+ const msg = messages[i]
+ if (msg.info.role === 'user') {
+ return msg
+ }
+ }
+ return null
+}
+
+export function findCurrentAgent(messages: WithParts[]): string | undefined {
+ const userMsg = getLastUserMessage(messages)
+ if (!userMsg) return undefined
+ return (userMsg.info as any).agent || 'build'
+}
+
+export function buildToolIdList(messages: WithParts[]): string[] {
+ const toolIds: string[] = []
+ for (const msg of messages) {
+ if (msg.parts) {
+ for (const part of msg.parts) {
+ if (part.type === 'tool' && part.callID && part.tool) {
+ toolIds.push(part.callID)
+ }
+ }
+ }
+ }
+ return toolIds
+}
\ No newline at end of file
diff --git a/lib/model-selector.ts b/lib/model-selector.ts
index e0e9895..d1499eb 100644
--- a/lib/model-selector.ts
+++ b/lib/model-selector.ts
@@ -55,7 +55,7 @@ async function importOpencodeAI(logger?: Logger, maxRetries: number = 3, delayMs
lastError = error;
if (error.message?.includes('before initialization')) {
- logger?.debug('model-selector', `Import attempt ${attempt}/${maxRetries} failed, will retry`, {
+ logger?.debug(`Import attempt ${attempt}/${maxRetries} failed, will retry`, {
error: error.message
});
@@ -85,7 +85,7 @@ export async function selectModel(
if (configModel) {
const parts = configModel.split('/');
if (parts.length !== 2) {
- logger?.warn('model-selector', 'Invalid config model format', { configModel });
+ logger?.warn('Invalid config model format', { configModel });
} else {
const [providerID, modelID] = parts;
@@ -98,7 +98,7 @@ export async function selectModel(
reason: 'Using model specified in dcp.jsonc config'
};
} catch (error: any) {
- logger?.warn('model-selector', `Config model failed: ${providerID}/${modelID}`, {
+ logger?.warn(`Config model failed: ${providerID}/${modelID}`, {
error: error.message
});
failedModelInfo = { providerID, modelID };
diff --git a/lib/core/prompt.ts b/lib/prompt.ts
similarity index 93%
rename from lib/core/prompt.ts
rename to lib/prompt.ts
index e7f44d4..76cc94f 100644
--- a/lib/core/prompt.ts
+++ b/lib/prompt.ts
@@ -2,7 +2,7 @@ import { readFileSync } from "fs"
import { join } from "path"
export function loadPrompt(name: string, vars?: Record): string {
- const filePath = join(__dirname, "..", "prompts", `${name}.txt`)
+ const filePath = join(__dirname, "prompts", `${name}.txt`)
let content = readFileSync(filePath, "utf8").trim()
if (vars) {
for (const [key, value] of Object.entries(vars)) {
@@ -122,18 +122,12 @@ export function buildAnalysisPrompt(
unprunedToolCallIds: string[],
messages: any[],
alreadyPrunedIds?: string[],
- protectedToolCallIds?: string[],
- reason?: string
+ protectedToolCallIds?: string[]
): string {
const minimizedMessages = minimizeMessages(messages, alreadyPrunedIds, protectedToolCallIds)
const messagesJson = JSON.stringify(minimizedMessages, null, 2).replace(/\\n/g, '\n')
- const reasonContext = reason
- ? `\nContext: The AI has requested pruning with the following reason: "${reason}"\nUse this context to inform your decisions about what is most relevant to keep.`
- : ''
-
return loadPrompt("pruning", {
- reason_context: reasonContext,
available_tool_call_ids: unprunedToolCallIds.join(", "),
session_history: messagesJson
})
diff --git a/lib/prompts/pruning.txt b/lib/prompts/pruning.txt
index 49e1e82..62045c3 100644
--- a/lib/prompts/pruning.txt
+++ b/lib/prompts/pruning.txt
@@ -1,5 +1,5 @@
You are a conversation analyzer that identifies obsolete tool outputs in a coding session.
-{{reason_context}}
+
Your task: Analyze the session history and identify tool call IDs whose outputs are NO LONGER RELEVANT to the current conversation context.
Guidelines for identifying obsolete tool calls:
diff --git a/lib/prompts/tool.txt b/lib/prompts/tool.txt
index d727a11..2eda4e8 100644
--- a/lib/prompts/tool.txt
+++ b/lib/prompts/tool.txt
@@ -1,7 +1,7 @@
Prunes tool outputs from context to manage conversation size and reduce noise.
## IMPORTANT: The Prunable List
-A `` list is injected into user messages showing available tool outputs you can prune. Each line has the format `ID: tool, parameter` (e.g., `20: read, /path/to/file.ts`). Use these numeric IDs to select which tools to prune.
+A `` list is injected into user messages showing available tool outputs you can prune. Each line has the format `ID: tool, parameter` (e.g., `20: read, /path/to/file.ts`). You MUST only use numeric IDs that appear in this list to select which tools to prune.
## CRITICAL: When and How to Prune
@@ -36,7 +36,7 @@ You must use this tool in three specific scenarios. The rules for distillation (
Assistant: [Reads 'wrong_file.ts']
This file isn't relevant to the auth system. I'll remove it to clear the context.
-[Uses prune with ids: ["noise", 5]]
+[Uses prune with ids: ["noise", "5"]]
@@ -46,11 +46,11 @@ I have analyzed the configuration. Here is the distillation:
- 'db.ts' connects to mongo:27017.
- The other 3 files were defaults.
I have preserved the signals above, so I am now pruning the raw reads.
-[Uses prune with ids: ["consolidation", 10, 11, 12, 13, 14]]
+[Uses prune with ids: ["consolidation", "10", "11", "12", "13", "14"]]
Assistant: [Runs tests, they pass]
The tests passed. The feature is verified.
-[Uses prune with ids: ["completion", 20, 21]]
+[Uses prune with ids: ["completion", "20", "21"]]
diff --git a/lib/pruning-tool.ts b/lib/pruning-tool.ts
deleted file mode 100644
index d88baea..0000000
--- a/lib/pruning-tool.ts
+++ /dev/null
@@ -1,205 +0,0 @@
-import { tool } from "@opencode-ai/plugin"
-import type { PluginState } from "./state"
-import type { PluginConfig } from "./config"
-import type { ToolTracker } from "./fetch-wrapper/tool-tracker"
-import type { ToolMetadata, PruneReason } from "./fetch-wrapper/types"
-import { resetToolTrackerCount } from "./fetch-wrapper/tool-tracker"
-import { findCurrentAgent } from "./hooks"
-import { getActualId } from "./state/id-mapping"
-import { sendUnifiedNotification, type NotificationContext } from "./ui/notification"
-import { formatPruningResultForTool } from "./ui/display-utils"
-import { ensureSessionRestored } from "./state"
-import { saveSessionState } from "./state/persistence"
-import type { Logger } from "./logger"
-import { estimateTokensBatch } from "./tokenizer"
-import type { SessionStats, PruningResult } from "./core/janitor"
-import { loadPrompt } from "./core/prompt"
-
-/** Tool description loaded from prompts/tool.txt */
-const TOOL_DESCRIPTION = loadPrompt("tool")
-
-export interface PruneToolContext {
- client: any
- state: PluginState
- logger: Logger
- config: PluginConfig
- notificationCtx: NotificationContext
- workingDirectory?: string
-}
-
-/**
- * Creates the prune tool definition.
- * Accepts numeric IDs from the list and prunes those tool outputs.
- */
-export function createPruningTool(
- ctx: PruneToolContext,
- toolTracker: ToolTracker
-): ReturnType {
- return tool({
- description: TOOL_DESCRIPTION,
- args: {
- ids: tool.schema.array(
- tool.schema.union([
- tool.schema.enum(["completion", "noise", "consolidation"]),
- tool.schema.number()
- ])
- ).describe(
- "First element is the reason ('completion', 'noise', 'consolidation'), followed by numeric IDs to prune"
- ),
- },
- async execute(args, toolCtx) {
- const { client, state, logger, config, notificationCtx } = ctx
- const sessionId = toolCtx.sessionID
-
- if (!args.ids || args.ids.length === 0) {
- return "No IDs provided. Check the list for available IDs to prune."
- }
-
- // Parse reason from first element, numeric IDs from the rest
- const firstElement = args.ids[0]
- const validReasons = ["completion", "noise", "consolidation"] as const
- let reason: PruneReason | undefined
- let numericIds: number[]
-
- if (typeof firstElement === "string" && validReasons.includes(firstElement as any)) {
- reason = firstElement as PruneReason
- numericIds = args.ids.slice(1).filter((id): id is number => typeof id === "number")
- } else {
- numericIds = args.ids.filter((id): id is number => typeof id === "number")
- }
-
- if (numericIds.length === 0) {
- return "No numeric IDs provided. Format: [reason, id1, id2, ...] where reason is 'completion', 'noise', or 'consolidation'."
- }
-
- await ensureSessionRestored(state, sessionId, logger)
-
- const prunedIds = numericIds
- .map(numId => getActualId(sessionId, numId))
- .filter((id): id is string => id !== undefined)
-
- if (prunedIds.length === 0) {
- return "None of the provided IDs were valid. Check the list for available IDs."
- }
-
- // Fetch messages to calculate tokens and find current agent
- const messagesResponse = await client.session.messages({
- path: { id: sessionId },
- query: { limit: 200 }
- })
- const messages = messagesResponse.data || messagesResponse
-
- const currentAgent = findCurrentAgent(messages)
- const tokensSaved = await calculateTokensSavedFromMessages(messages, prunedIds)
-
- const currentStats = state.stats.get(sessionId) ?? {
- totalToolsPruned: 0,
- totalTokensSaved: 0,
- totalGCTokens: 0,
- totalGCTools: 0
- }
- const sessionStats: SessionStats = {
- ...currentStats,
- totalToolsPruned: currentStats.totalToolsPruned + prunedIds.length,
- totalTokensSaved: currentStats.totalTokensSaved + tokensSaved
- }
- state.stats.set(sessionId, sessionStats)
-
- const alreadyPrunedIds = state.prunedIds.get(sessionId) ?? []
- const allPrunedIds = [...alreadyPrunedIds, ...prunedIds]
- state.prunedIds.set(sessionId, allPrunedIds)
-
- saveSessionState(sessionId, new Set(allPrunedIds), sessionStats, logger)
- .catch(err => logger.error("prune-tool", "Failed to persist state", { error: err.message }))
-
- const toolMetadata = new Map()
- for (const id of prunedIds) {
- const meta = state.toolParameters.get(id.toLowerCase())
- if (meta) {
- toolMetadata.set(id.toLowerCase(), meta)
- } else {
- logger.debug("prune-tool", "No metadata found for ID", {
- id,
- idLower: id.toLowerCase(),
- hasLower: state.toolParameters.has(id.toLowerCase())
- })
- }
- }
-
- await sendUnifiedNotification(notificationCtx, sessionId, {
- aiPrunedCount: prunedIds.length,
- aiTokensSaved: tokensSaved,
- aiPrunedIds: prunedIds,
- toolMetadata,
- gcPending: null,
- sessionStats,
- reason
- }, currentAgent)
-
- toolTracker.skipNextIdle = true
-
- if (config.nudge_freq > 0) {
- resetToolTrackerCount(toolTracker)
- }
-
- const result: PruningResult = {
- prunedCount: prunedIds.length,
- tokensSaved,
- llmPrunedIds: prunedIds,
- toolMetadata,
- sessionStats,
- reason
- }
-
- return formatPruningResultForTool(result, ctx.workingDirectory)
- },
- })
-}
-
-/**
- * Calculates approximate tokens saved by pruning the given tool call IDs.
- * Uses pre-fetched messages to avoid duplicate API calls.
- */
-async function calculateTokensSavedFromMessages(
- messages: any[],
- prunedIds: string[]
-): Promise {
- try {
- const toolOutputs = new Map()
- for (const msg of messages) {
- if (msg.role === 'tool' && msg.tool_call_id) {
- const content = typeof msg.content === 'string'
- ? msg.content
- : JSON.stringify(msg.content)
- toolOutputs.set(msg.tool_call_id.toLowerCase(), content)
- }
- if (msg.role === 'user' && Array.isArray(msg.content)) {
- for (const part of msg.content) {
- if (part.type === 'tool_result' && part.tool_use_id) {
- const content = typeof part.content === 'string'
- ? part.content
- : JSON.stringify(part.content)
- toolOutputs.set(part.tool_use_id.toLowerCase(), content)
- }
- }
- }
- }
-
- const contents: string[] = []
- for (const id of prunedIds) {
- const content = toolOutputs.get(id.toLowerCase())
- if (content) {
- contents.push(content)
- }
- }
-
- if (contents.length === 0) {
- return prunedIds.length * 500
- }
-
- const tokenCounts = await estimateTokensBatch(contents)
- return tokenCounts.reduce((sum, count) => sum + count, 0)
- } catch (error: any) {
- return prunedIds.length * 500
- }
-}
diff --git a/lib/state/id-mapping.ts b/lib/state/id-mapping.ts
deleted file mode 100644
index 0f73eb4..0000000
--- a/lib/state/id-mapping.ts
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Numeric ID mapping system for tool call IDs.
- *
- * Maps simple incrementing numbers (1, 2, 3...) to actual provider tool call IDs
- * (e.g., "call_abc123xyz..."). This allows the session AI to reference tools by
- * simple numbers when using the prune tool.
- *
- * Design decisions:
- * - IDs are monotonically increasing and never reused (avoids race conditions)
- * - Mappings are rebuilt from session messages on restore (single source of truth)
- * - Per-session mappings to isolate sessions from each other
- */
-
-export interface IdMapping {
- numericToActual: Map // 1 → "call_abc123xyz..."
- actualToNumeric: Map // "call_abc123xyz..." → 1
- nextId: number
-}
-
-/** Per-session ID mappings */
-const sessionMappings = new Map()
-
-function getSessionMapping(sessionId: string): IdMapping {
- let mapping = sessionMappings.get(sessionId)
- if (!mapping) {
- mapping = {
- numericToActual: new Map(),
- actualToNumeric: new Map(),
- nextId: 1
- }
- sessionMappings.set(sessionId, mapping)
- }
- return mapping
-}
-
-/**
- * Assigns a numeric ID to a tool call ID if it doesn't already have one.
- * Returns the numeric ID (existing or newly assigned).
- */
-export function getOrCreateNumericId(sessionId: string, actualId: string): number {
- const mapping = getSessionMapping(sessionId)
-
- // Check if already mapped
- const existing = mapping.actualToNumeric.get(actualId)
- if (existing !== undefined) {
- return existing
- }
-
- // Assign new ID
- const numericId = mapping.nextId++
- mapping.numericToActual.set(numericId, actualId)
- mapping.actualToNumeric.set(actualId, numericId)
-
- return numericId
-}
-
-export function getActualId(sessionId: string, numericId: number): string | undefined {
- const mapping = sessionMappings.get(sessionId)
- return mapping?.numericToActual.get(numericId)
-}
-
-export function clearAllMappings(): void {
- sessionMappings.clear()
-}
diff --git a/lib/state/index.ts b/lib/state/index.ts
index a3c2584..a665a74 100644
--- a/lib/state/index.ts
+++ b/lib/state/index.ts
@@ -1,77 +1,3 @@
-import type { SessionStats, GCStats } from "../core/janitor"
-import type { Logger } from "../logger"
-import { loadSessionState } from "./persistence"
-
-export interface PluginState {
- prunedIds: Map
- stats: Map
- gcPending: Map
- toolParameters: Map
- model: Map
- googleToolCallMapping: Map>
- restoredSessions: Set
- checkedSessions: Set
- subagentSessions: Set
- lastSeenSessionId: string | null
-}
-
-export type ToolStatus = "pending" | "running" | "completed" | "error"
-
-export interface ToolParameterEntry {
- tool: string
- parameters: any
- status?: ToolStatus
- error?: string
-}
-
-export interface ModelInfo {
- providerID: string
- modelID: string
-}
-
-export function createPluginState(): PluginState {
- return {
- prunedIds: new Map(),
- stats: new Map(),
- gcPending: new Map(),
- toolParameters: new Map(),
- model: new Map(),
- googleToolCallMapping: new Map(),
- restoredSessions: new Set(),
- checkedSessions: new Set(),
- subagentSessions: new Set(),
- lastSeenSessionId: null,
- }
-}
-
-export async function ensureSessionRestored(
- state: PluginState,
- sessionId: string,
- logger?: Logger
-): Promise {
- if (state.restoredSessions.has(sessionId)) {
- return
- }
-
- state.restoredSessions.add(sessionId)
-
- const persisted = await loadSessionState(sessionId, logger)
- if (persisted) {
- if (!state.prunedIds.has(sessionId)) {
- state.prunedIds.set(sessionId, persisted.prunedIds)
- logger?.info("persist", "Restored prunedIds from disk", {
- sessionId: sessionId.slice(0, 8),
- count: persisted.prunedIds.length,
- })
- }
- if (!state.stats.has(sessionId)) {
- const stats: SessionStats = {
- totalToolsPruned: persisted.stats.totalToolsPruned,
- totalTokensSaved: persisted.stats.totalTokensSaved,
- totalGCTokens: persisted.stats.totalGCTokens ?? 0,
- totalGCTools: persisted.stats.totalGCTools ?? 0
- }
- state.stats.set(sessionId, stats)
- }
- }
-}
+export * from "./persistence"
+export * from "./types"
+export * from "./state"
diff --git a/lib/state/persistence.ts b/lib/state/persistence.ts
index b394ef2..21f0092 100644
--- a/lib/state/persistence.ts
+++ b/lib/state/persistence.ts
@@ -8,12 +8,12 @@ import * as fs from "fs/promises";
import { existsSync } from "fs";
import { homedir } from "os";
import { join } from "path";
-import type { SessionStats } from "../core/janitor";
+import type { SessionState, SessionStats, Prune } from "./types"
import type { Logger } from "../logger";
export interface PersistedSessionState {
sessionName?: string;
- prunedIds: string[];
+ prune: Prune
stats: SessionStats;
lastUpdated: string;
}
@@ -39,34 +39,35 @@ function getSessionFilePath(sessionId: string): string {
}
export async function saveSessionState(
- sessionId: string,
- prunedIds: Set,
- stats: SessionStats,
- logger?: Logger,
+ sessionState: SessionState,
+ logger: Logger,
sessionName?: string
): Promise {
try {
+ if (!sessionState.sessionId) {
+ return;
+ }
+
await ensureStorageDir();
const state: PersistedSessionState = {
- ...(sessionName && { sessionName }),
- prunedIds: Array.from(prunedIds),
- stats,
+ sessionName: sessionName,
+ prune: sessionState.prune,
+ stats: sessionState.stats,
lastUpdated: new Date().toISOString(),
};
- const filePath = getSessionFilePath(sessionId);
+ const filePath = getSessionFilePath(sessionState.sessionId);
const content = JSON.stringify(state, null, 2);
await fs.writeFile(filePath, content, "utf-8");
- logger?.info("persist", "Saved session state to disk", {
- sessionId: sessionId.slice(0, 8),
- prunedIds: prunedIds.size,
- totalTokensSaved: stats.totalTokensSaved,
+ logger.info("Saved session state to disk", {
+ sessionId: sessionState.sessionId,
+ totalTokensSaved: state.stats.totalPruneTokens
});
} catch (error: any) {
- logger?.error("persist", "Failed to save session state", {
- sessionId: sessionId.slice(0, 8),
+ logger.error("Failed to save session state", {
+ sessionId: sessionState.sessionId,
error: error?.message,
});
}
@@ -74,7 +75,7 @@ export async function saveSessionState(
export async function loadSessionState(
sessionId: string,
- logger?: Logger
+ logger: Logger
): Promise {
try {
const filePath = getSessionFilePath(sessionId);
@@ -86,23 +87,26 @@ export async function loadSessionState(
const content = await fs.readFile(filePath, "utf-8");
const state = JSON.parse(content) as PersistedSessionState;
- if (!state || !Array.isArray(state.prunedIds) || !state.stats) {
- logger?.warn("persist", "Invalid session state file, ignoring", {
- sessionId: sessionId.slice(0, 8),
+ if (!state ||
+ !state.prune ||
+ !Array.isArray(state.prune.toolIds) ||
+ !state.stats
+ ) {
+ logger.warn("Invalid session state file, ignoring", {
+ sessionId: sessionId,
});
return null;
}
- logger?.info("persist", "Loaded session state from disk", {
- sessionId: sessionId.slice(0, 8),
- prunedIds: state.prunedIds.length,
- totalTokensSaved: state.stats.totalTokensSaved,
+ logger.info("Loaded session state from disk", {
+ sessionId: sessionId,
+ totalTokensSaved: state.stats.totalPruneTokens
});
return state;
} catch (error: any) {
- logger?.warn("persist", "Failed to load session state", {
- sessionId: sessionId.slice(0, 8),
+ logger.warn("Failed to load session state", {
+ sessionId: sessionId,
error: error?.message,
});
return null;
diff --git a/lib/state/state.ts b/lib/state/state.ts
new file mode 100644
index 0000000..91e3f92
--- /dev/null
+++ b/lib/state/state.ts
@@ -0,0 +1,98 @@
+import type { SessionState, ToolParameterEntry, WithParts } from "./types"
+import type { Logger } from "../logger"
+import { loadSessionState } from "./persistence"
+import { getLastUserMessage } from "../messages/utils"
+import { isSubAgentSession } from "../utils"
+
+export const checkSession = async (
+ client: any,
+ state: SessionState,
+ logger: Logger,
+ messages: WithParts[]
+): Promise => {
+
+ const lastUserMessage = getLastUserMessage(messages)
+ if (!lastUserMessage) {
+ return
+ }
+
+ const lastSessionId = lastUserMessage.info.sessionID
+
+ if (state.sessionId === null || state.sessionId !== lastSessionId) {
+ logger.info(`Session changed: ${state.sessionId} -> ${lastSessionId}`)
+ try {
+ await ensureSessionInitialized(client, state, lastSessionId, logger)
+ } catch (err: any) {
+ logger.error("Failed to initialize session state", { error: err.message })
+ }
+ }
+}
+
+export function createSessionState(): SessionState {
+ return {
+ sessionId: null,
+ isSubAgent: false,
+ prune: {
+ toolIds: []
+ },
+ stats: {
+ pruneTokenCounter: 0,
+ totalPruneTokens: 0,
+ },
+ toolParameters: new Map(),
+ nudgeCounter: 0,
+ lastToolPrune: false
+ }
+}
+
+export function resetSessionState(state: SessionState): void {
+ state.sessionId = null
+ state.isSubAgent = false
+ state.prune = {
+ toolIds: []
+ }
+ state.stats = {
+ pruneTokenCounter: 0,
+ totalPruneTokens: 0,
+ }
+ state.toolParameters.clear()
+ state.nudgeCounter = 0
+ state.lastToolPrune = false
+}
+
+export async function ensureSessionInitialized(
+ client: any,
+ state: SessionState,
+ sessionId: string,
+ logger: Logger
+): Promise {
+ if (state.sessionId === sessionId) {
+ return;
+ }
+
+ logger.info("session ID = " + sessionId)
+ logger.info("Initializing session state", { sessionId: sessionId })
+
+ // Clear previous session data
+ resetSessionState(state)
+ state.sessionId = sessionId
+
+ const isSubAgent = await isSubAgentSession(client, sessionId)
+ state.isSubAgent = isSubAgent
+ logger.info("isSubAgent = " + isSubAgent)
+
+ // Load session data from storage
+ const persisted = await loadSessionState(sessionId, logger)
+ if (persisted === null) {
+ return;
+ }
+
+ // Populate state with loaded data
+ state.prune = {
+ toolIds: persisted.prune.toolIds || []
+ }
+ state.stats = {
+ pruneTokenCounter: persisted.stats?.pruneTokenCounter || 0,
+ totalPruneTokens: persisted.stats?.totalPruneTokens || 0,
+ }
+}
diff --git a/lib/state/tool-cache.ts b/lib/state/tool-cache.ts
index 2970720..a6140c7 100644
--- a/lib/state/tool-cache.ts
+++ b/lib/state/tool-cache.ts
@@ -1,83 +1,58 @@
-import type { PluginState, ToolStatus } from "./index"
+import type { SessionState, ToolStatus, WithParts } from "./index"
import type { Logger } from "../logger"
-import type { ToolTracker } from "../fetch-wrapper/tool-tracker"
+import { PluginConfig } from "../config"
-/** Maximum number of entries to keep in the tool parameters cache */
-const MAX_TOOL_CACHE_SIZE = 500
+const MAX_TOOL_CACHE_SIZE = 1000
/**
* Sync tool parameters from OpenCode's session.messages() API.
- * This is the single source of truth for tool parameters, replacing
- * format-specific parsing from LLM API requests.
*/
export async function syncToolCache(
- client: any,
- sessionId: string,
- state: PluginState,
- tracker?: ToolTracker,
- protectedTools?: Set,
- logger?: Logger
+ state: SessionState,
+ config: PluginConfig,
+ logger: Logger,
+ messages: WithParts[],
): Promise {
try {
- const messagesResponse = await client.session.messages({
- path: { id: sessionId },
- query: { limit: 500 }
- })
- const messages = messagesResponse.data || messagesResponse
-
- if (!Array.isArray(messages)) {
- return
- }
+ logger.info("Syncing tool parameters from OpenCode messages")
- let synced = 0
- // Build lowercase set of pruned IDs for comparison (IDs in state may be mixed case)
- const prunedIdsLower = tracker
- ? new Set((state.prunedIds.get(sessionId) ?? []).map(id => id.toLowerCase()))
- : null
+ state.nudgeCounter = 0
for (const msg of messages) {
- if (!msg.parts) continue
-
for (const part of msg.parts) {
- if (part.type !== "tool" || !part.callID) continue
-
- const id = part.callID.toLowerCase()
+ if (part.type !== "tool" || !part.callID) {
+ continue
+ }
- // Track tool results for nudge injection
- if (tracker && !tracker.seenToolResultIds.has(id)) {
- tracker.seenToolResultIds.add(id)
- // Only count non-protected tools toward nudge threshold
- // Also skip already-pruned tools to avoid re-counting on restart
- if ((!part.tool || !protectedTools?.has(part.tool)) && !prunedIdsLower?.has(id)) {
- tracker.toolResultCount++
- }
+ if (part.tool === "prune") {
+ state.nudgeCounter = 0
+ } else if (!config.strategies.pruneTool.protectedTools.includes(part.tool)) {
+ state.nudgeCounter++
}
+ state.lastToolPrune = part.tool === "prune"
- if (state.toolParameters.has(id)) continue
- if (part.tool && protectedTools?.has(part.tool)) continue
+ if (state.toolParameters.has(part.callID)) {
+ continue
+ }
- const status = part.state?.status as ToolStatus | undefined
- state.toolParameters.set(id, {
- tool: part.tool,
- parameters: part.state?.input ?? {},
- status,
- error: status === "error" ? part.state?.error : undefined,
- })
- synced++
+ state.toolParameters.set(
+ part.callID,
+ {
+ tool: part.tool,
+ parameters: part.state?.input ?? {},
+ status: part.state.status as ToolStatus | undefined,
+ error: part.state.status === "error" ? part.state.error : undefined,
+ compacted: part.state.status === "completed" && !!part.state.time.compacted,
+ }
+ )
}
}
- trimToolParametersCache(state)
+ // logger.info(`nudgeCounter=${state.nudgeCounter}, lastToolPrune=${state.lastToolPrune}`)
- if (logger && synced > 0) {
- logger.debug("tool-cache", "Synced tool parameters from OpenCode", {
- sessionId: sessionId.slice(0, 8),
- synced
- })
- }
+ trimToolParametersCache(state)
} catch (error) {
- logger?.warn("tool-cache", "Failed to sync tool parameters from OpenCode", {
- sessionId: sessionId.slice(0, 8),
+ logger.warn("Failed to sync tool parameters from OpenCode", {
error: error instanceof Error ? error.message : String(error)
})
}
@@ -87,7 +62,7 @@ export async function syncToolCache(
* Trim the tool parameters cache to prevent unbounded memory growth.
* Uses FIFO eviction - removes oldest entries first.
*/
-export function trimToolParametersCache(state: PluginState): void {
+export function trimToolParametersCache(state: SessionState): void {
if (state.toolParameters.size <= MAX_TOOL_CACHE_SIZE) {
return
}
diff --git a/lib/state/types.ts b/lib/state/types.ts
new file mode 100644
index 0000000..e1b92a7
--- /dev/null
+++ b/lib/state/types.ts
@@ -0,0 +1,35 @@
+import { Message, Part } from "@opencode-ai/sdk"
+
+export interface WithParts {
+ info: Message
+ parts: Part[]
+}
+
+export type ToolStatus = "pending" | "running" | "completed" | "error"
+
+export interface ToolParameterEntry {
+ tool: string
+ parameters: any
+ status?: ToolStatus
+ error?: string
+ compacted?: boolean
+}
+
+export interface SessionStats {
+ pruneTokenCounter: number
+ totalPruneTokens: number
+}
+
+export interface Prune {
+ toolIds: string[]
+}
+
+export interface SessionState {
+ sessionId: string | null
+ isSubAgent: boolean
+ prune: Prune
+ stats: SessionStats
+  toolParameters: Map<string, ToolParameterEntry>
+ nudgeCounter: number
+ lastToolPrune: boolean
+}
diff --git a/lib/strategies/deduplication.ts b/lib/strategies/deduplication.ts
new file mode 100644
index 0000000..61cc484
--- /dev/null
+++ b/lib/strategies/deduplication.ts
@@ -0,0 +1,110 @@
+import { PluginConfig } from "../config"
+import { Logger } from "../logger"
+import type { SessionState, WithParts } from "../state"
+import { calculateTokensSaved } from "../utils"
+import { buildToolIdList } from "../messages/utils"
+
+/**
+ * Deduplication strategy - prunes older tool calls that have identical
+ * tool name and parameters, keeping only the most recent occurrence.
+ * Modifies the session state in place to add pruned tool call IDs.
+ */
+export const deduplicate = (
+ state: SessionState,
+ logger: Logger,
+ config: PluginConfig,
+ messages: WithParts[]
+): void => {
+ if (!config.strategies.deduplication.enabled) {
+ return
+ }
+
+ // Build list of all tool call IDs from messages (chronological order)
+ const allToolIds = buildToolIdList(messages)
+ if (allToolIds.length === 0) {
+ return
+ }
+
+ // Filter out IDs already pruned
+ const alreadyPruned = new Set(state.prune.toolIds)
+ const unprunedIds = allToolIds.filter(id => !alreadyPruned.has(id))
+
+ if (unprunedIds.length === 0) {
+ return
+ }
+
+ const protectedTools = config.strategies.deduplication.protectedTools
+
+ // Group by signature (tool name + normalized parameters)
+  const signatureMap = new Map<string, string[]>()
+
+ for (const id of unprunedIds) {
+ const metadata = state.toolParameters.get(id)
+ if (!metadata) {
+ logger.warn(`Missing metadata for tool call ID: ${id}`)
+ continue
+ }
+
+ // Skip protected tools
+ if (protectedTools.includes(metadata.tool)) {
+ continue
+ }
+
+ const signature = createToolSignature(metadata.tool, metadata.parameters)
+ if (!signatureMap.has(signature)) {
+ signatureMap.set(signature, [])
+ }
+ signatureMap.get(signature)!.push(id)
+ }
+
+ // Find duplicates - keep only the most recent (last) in each group
+ const newPruneIds: string[] = []
+
+ for (const [, ids] of signatureMap.entries()) {
+ if (ids.length > 1) {
+ // All except last (most recent) should be pruned
+ const idsToRemove = ids.slice(0, -1)
+ newPruneIds.push(...idsToRemove)
+ }
+ }
+
+ state.stats.totalPruneTokens += calculateTokensSaved(messages, newPruneIds)
+
+ if (newPruneIds.length > 0) {
+ state.prune.toolIds.push(...newPruneIds)
+ logger.debug(`Marked ${newPruneIds.length} duplicate tool calls for pruning`)
+ }
+}
+
+function createToolSignature(tool: string, parameters?: any): string {
+ if (!parameters) {
+ return tool
+ }
+ const normalized = normalizeParameters(parameters)
+ const sorted = sortObjectKeys(normalized)
+ return `${tool}::${JSON.stringify(sorted)}`
+}
+
+function normalizeParameters(params: any): any {
+ if (typeof params !== 'object' || params === null) return params
+ if (Array.isArray(params)) return params
+
+ const normalized: any = {}
+ for (const [key, value] of Object.entries(params)) {
+ if (value !== undefined && value !== null) {
+ normalized[key] = value
+ }
+ }
+ return normalized
+}
+
+function sortObjectKeys(obj: any): any {
+ if (typeof obj !== 'object' || obj === null) return obj
+ if (Array.isArray(obj)) return obj.map(sortObjectKeys)
+
+ const sorted: any = {}
+ for (const key of Object.keys(obj).sort()) {
+ sorted[key] = sortObjectKeys(obj[key])
+ }
+ return sorted
+}
diff --git a/lib/strategies/index.ts b/lib/strategies/index.ts
new file mode 100644
index 0000000..105d9c8
--- /dev/null
+++ b/lib/strategies/index.ts
@@ -0,0 +1,3 @@
+export { deduplicate } from "./deduplication"
+export { runOnIdle } from "./on-idle"
+export { createPruneTool } from "./prune-tool"
diff --git a/lib/strategies/on-idle.ts b/lib/strategies/on-idle.ts
new file mode 100644
index 0000000..49887d3
--- /dev/null
+++ b/lib/strategies/on-idle.ts
@@ -0,0 +1,318 @@
+import { z } from "zod"
+import type { SessionState, WithParts, ToolParameterEntry } from "../state"
+import type { Logger } from "../logger"
+import type { PluginConfig } from "../config"
+import { buildAnalysisPrompt } from "../prompt"
+import { selectModel, extractModelFromSession, ModelInfo } from "../model-selector"
+import { calculateTokensSaved } from "../utils"
+import { findCurrentAgent } from "../messages/utils"
+import { saveSessionState } from "../state/persistence"
+import { sendUnifiedNotification } from "../ui/notification"
+
+export interface OnIdleResult {
+ prunedCount: number
+ tokensSaved: number
+ prunedIds: string[]
+}
+
+/**
+ * Parse messages to extract tool information.
+ */
+function parseMessages(
+ messages: WithParts[],
+  toolParametersCache: Map<string, ToolParameterEntry>
+): {
+ toolCallIds: string[]
+  toolMetadata: Map<string, ToolParameterEntry>
+} {
+ const toolCallIds: string[] = []
+  const toolMetadata = new Map<string, ToolParameterEntry>()
+
+ for (const msg of messages) {
+ if (msg.parts) {
+ for (const part of msg.parts) {
+ if (part.type === "tool" && part.callID) {
+ toolCallIds.push(part.callID)
+
+ const cachedData = toolParametersCache.get(part.callID)
+ const parameters = cachedData?.parameters ?? part.state?.input ?? {}
+
+ toolMetadata.set(part.callID, {
+ tool: part.tool,
+ parameters: parameters,
+ status: part.state?.status,
+ error: part.state?.status === "error" ? part.state.error : undefined
+ })
+ }
+ }
+ }
+ }
+
+ return { toolCallIds, toolMetadata }
+}
+
+/**
+ * Replace pruned tool outputs in messages for LLM analysis.
+ */
+function replacePrunedToolOutputs(messages: WithParts[], prunedIds: string[]): WithParts[] {
+ if (prunedIds.length === 0) return messages
+
+ const prunedIdsSet = new Set(prunedIds)
+
+ return messages.map(msg => {
+ if (!msg.parts) return msg
+
+ return {
+ ...msg,
+ parts: msg.parts.map((part: any) => {
+ if (part.type === 'tool' &&
+ part.callID &&
+ prunedIdsSet.has(part.callID) &&
+ part.state?.output) {
+ return {
+ ...part,
+ state: {
+ ...part.state,
+ output: '[Output removed to save context - information superseded or no longer needed]'
+ }
+ }
+ }
+ return part
+ })
+ }
+ }) as WithParts[]
+}
+
+/**
+ * Run LLM analysis to determine which tool calls can be pruned.
+ */
+async function runLlmAnalysis(
+ client: any,
+ state: SessionState,
+ logger: Logger,
+ config: PluginConfig,
+ messages: WithParts[],
+ unprunedToolCallIds: string[],
+ alreadyPrunedIds: string[],
+  toolMetadata: Map<string, ToolParameterEntry>,
+ workingDirectory?: string
+): Promise<string[]> {
+ const protectedToolCallIds: string[] = []
+ const prunableToolCallIds = unprunedToolCallIds.filter(id => {
+ const metadata = toolMetadata.get(id)
+ if (metadata && config.strategies.onIdle.protectedTools.includes(metadata.tool)) {
+ protectedToolCallIds.push(id)
+ return false
+ }
+ return true
+ })
+
+ if (prunableToolCallIds.length === 0) {
+ return []
+ }
+
+ // Get model info from messages
+ let validModelInfo: ModelInfo | undefined = undefined
+ if (messages.length > 0) {
+ const lastMessage = messages[messages.length - 1]
+ const model = (lastMessage.info as any)?.model
+ if (model?.providerID && model?.modelID) {
+ validModelInfo = {
+ providerID: model.providerID,
+ modelID: model.modelID
+ }
+ }
+ }
+
+ const modelSelection = await selectModel(
+ validModelInfo,
+ logger,
+ config.strategies.onIdle.model,
+ workingDirectory
+ )
+
+ logger.info(`OnIdle Model: ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`, {
+ source: modelSelection.source
+ })
+
+ if (modelSelection.failedModel && config.strategies.onIdle.showModelErrorToasts) {
+ const skipAi = modelSelection.source === 'fallback' && config.strategies.onIdle.strictModelSelection
+ try {
+ await client.tui.showToast({
+ body: {
+ title: skipAi ? "DCP: AI analysis skipped" : "DCP: Model fallback",
+ message: skipAi
+ ? `${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nAI analysis skipped (strictModelSelection enabled)`
+ : `${modelSelection.failedModel.providerID}/${modelSelection.failedModel.modelID} failed\nUsing ${modelSelection.modelInfo.providerID}/${modelSelection.modelInfo.modelID}`,
+ variant: "info",
+ duration: 5000
+ }
+ })
+ } catch {
+ // Ignore toast errors
+ }
+ }
+
+ if (modelSelection.source === 'fallback' && config.strategies.onIdle.strictModelSelection) {
+ logger.info("Skipping AI analysis (fallback model, strictModelSelection enabled)")
+ return []
+ }
+
+ const { generateObject } = await import('ai')
+
+ const sanitizedMessages = replacePrunedToolOutputs(messages, alreadyPrunedIds)
+
+ const analysisPrompt = buildAnalysisPrompt(
+ prunableToolCallIds,
+ sanitizedMessages,
+ alreadyPrunedIds,
+ protectedToolCallIds
+ )
+
+ const result = await generateObject({
+ model: modelSelection.model,
+ schema: z.object({
+ pruned_tool_call_ids: z.array(z.string()),
+ reasoning: z.string(),
+ }),
+ prompt: analysisPrompt
+ })
+
+ const rawLlmPrunedIds = result.object.pruned_tool_call_ids
+ const llmPrunedIds = rawLlmPrunedIds.filter(id =>
+ prunableToolCallIds.includes(id)
+ )
+
+ // Always log LLM output as debug
+ const reasoning = result.object.reasoning.replace(/\n+/g, ' ').replace(/\s+/g, ' ').trim()
+ logger.debug(`OnIdle LLM output`, {
+ pruned_tool_call_ids: rawLlmPrunedIds,
+ reasoning: reasoning
+ })
+
+ return llmPrunedIds
+}
+
+/**
+ * Run the onIdle pruning strategy.
+ * This is called when the session transitions to idle state.
+ */
+export async function runOnIdle(
+ client: any,
+ state: SessionState,
+ logger: Logger,
+ config: PluginConfig,
+ workingDirectory?: string
+): Promise<OnIdleResult | null> {
+ try {
+ if (!state.sessionId) {
+ return null
+ }
+
+ const sessionId = state.sessionId
+
+ // Fetch session info and messages
+ const [sessionInfoResponse, messagesResponse] = await Promise.all([
+ client.session.get({ path: { id: sessionId } }),
+ client.session.messages({ path: { id: sessionId }})
+ ])
+
+ const sessionInfo = sessionInfoResponse.data
+ const messages: WithParts[] = messagesResponse.data || messagesResponse
+
+ if (!messages || messages.length < 3) {
+ return null
+ }
+
+ const currentAgent = findCurrentAgent(messages)
+ const { toolCallIds, toolMetadata } = parseMessages(messages, state.toolParameters)
+
+ const alreadyPrunedIds = state.prune.toolIds
+ const unprunedToolCallIds = toolCallIds.filter(id => !alreadyPrunedIds.includes(id))
+
+ if (unprunedToolCallIds.length === 0) {
+ return null
+ }
+
+ // Count prunable tools (excluding protected)
+ const candidateCount = unprunedToolCallIds.filter(id => {
+ const metadata = toolMetadata.get(id)
+ return !metadata || !config.strategies.onIdle.protectedTools.includes(metadata.tool)
+ }).length
+
+ if (candidateCount === 0) {
+ return null
+ }
+
+ // Run LLM analysis
+ const llmPrunedIds = await runLlmAnalysis(
+ client,
+ state,
+ logger,
+ config,
+ messages,
+ unprunedToolCallIds,
+ alreadyPrunedIds,
+ toolMetadata,
+ workingDirectory
+ )
+
+ const newlyPrunedIds = llmPrunedIds.filter(id => !alreadyPrunedIds.includes(id))
+
+ if (newlyPrunedIds.length === 0) {
+ return null
+ }
+
+ // Log the tool IDs being pruned with their tool names
+ for (const id of newlyPrunedIds) {
+ const metadata = toolMetadata.get(id)
+ const toolName = metadata?.tool || 'unknown'
+ logger.info(`OnIdle pruning tool: ${toolName}`, { callID: id })
+ }
+
+ // Update state
+ const allPrunedIds = [...new Set([...alreadyPrunedIds, ...newlyPrunedIds])]
+ state.prune.toolIds = allPrunedIds
+
+ state.stats.pruneTokenCounter += calculateTokensSaved(messages, newlyPrunedIds)
+
+ // Build tool metadata map for notification
+    const prunedToolMetadata = new Map<string, ToolParameterEntry>()
+ for (const id of newlyPrunedIds) {
+ const metadata = toolMetadata.get(id)
+ if (metadata) {
+ prunedToolMetadata.set(id, metadata)
+ }
+ }
+
+ // Send notification
+ await sendUnifiedNotification(
+ client,
+ logger,
+ config,
+ state,
+ sessionId,
+ newlyPrunedIds,
+ prunedToolMetadata,
+ undefined, // reason
+ currentAgent,
+ workingDirectory || ""
+ )
+
+ state.stats.totalPruneTokens += state.stats.pruneTokenCounter
+ state.stats.pruneTokenCounter = 0
+ state.nudgeCounter = 0
+ state.lastToolPrune = true
+
+ // Persist state
+ const sessionName = sessionInfo?.title
+ saveSessionState(state, logger, sessionName).catch(err => {
+ logger.error("Failed to persist state", { error: err.message })
+ })
+
+ logger.info(`OnIdle: Pruned ${newlyPrunedIds.length}/${candidateCount} tools`)
+ } catch (error: any) {
+ logger.error("OnIdle analysis failed", { error: error.message })
+ return null
+ }
+}
diff --git a/lib/strategies/prune-tool.ts b/lib/strategies/prune-tool.ts
new file mode 100644
index 0000000..c546363
--- /dev/null
+++ b/lib/strategies/prune-tool.ts
@@ -0,0 +1,130 @@
+import { tool } from "@opencode-ai/plugin"
+import type { SessionState, ToolParameterEntry, WithParts } from "../state"
+import type { PluginConfig } from "../config"
+import { findCurrentAgent, buildToolIdList } from "../messages/utils"
+import { calculateTokensSaved } from "../utils"
+import { PruneReason, sendUnifiedNotification } from "../ui/notification"
+import { formatPruningResultForTool } from "../ui/display-utils"
+import { ensureSessionInitialized } from "../state"
+import { saveSessionState } from "../state/persistence"
+import type { Logger } from "../logger"
+import { loadPrompt } from "../prompt"
+
+/** Tool description loaded from prompts/tool.txt */
+const TOOL_DESCRIPTION = loadPrompt("tool")
+
+export interface PruneToolContext {
+ client: any
+ state: SessionState
+ logger: Logger
+ config: PluginConfig
+ workingDirectory: string
+}
+
+/**
+ * Creates the prune tool definition.
+ * Accepts numeric IDs from the list and prunes those tool outputs.
+ */
+export function createPruneTool(
+ ctx: PruneToolContext,
+): ReturnType<typeof tool> {
+ return tool({
+ description: TOOL_DESCRIPTION,
+ args: {
+ ids: tool.schema.array(
+ tool.schema.string()
+ ).describe(
+ "First element is the reason ('completion', 'noise', 'consolidation'), followed by numeric IDs as strings to prune"
+ ),
+ },
+ async execute(args, toolCtx) {
+ const { client, state, logger, config, workingDirectory } = ctx
+ const sessionId = toolCtx.sessionID
+
+ if (!args.ids || args.ids.length === 0) {
+ return "No IDs provided. Check the list for available IDs to prune."
+ }
+
+ // Parse reason from first element, numeric IDs from the rest
+
+ const reason = args.ids[0];
+ const validReasons = ["completion", "noise", "consolidation"] as const
+ if (typeof reason !== "string" || !validReasons.includes(reason as any)) {
+ return "No valid pruning reason found. Use 'completion', 'noise', or 'consolidation' as the first element."
+ }
+
+ const numericToolIds: number[] = args.ids.slice(1)
+ .map(id => parseInt(id, 10))
+ .filter((n): n is number => !isNaN(n))
+ if (numericToolIds.length === 0) {
+ return "No numeric IDs provided. Format: [reason, id1, id2, ...] where reason is 'completion', 'noise', or 'consolidation'."
+ }
+
+ await ensureSessionInitialized(ctx.client, state, sessionId, logger)
+
+ // Fetch messages to calculate tokens and find current agent
+ const messagesResponse = await client.session.messages({
+ path: { id: sessionId }
+ })
+ const messages: WithParts[] = messagesResponse.data || messagesResponse
+
+ const currentAgent: string | undefined = findCurrentAgent(messages)
+ const toolIdList: string[] = buildToolIdList(messages)
+
+ // Validate that all numeric IDs are within bounds
+ if (numericToolIds.some(id => id < 0 || id >= toolIdList.length)) {
+ return "Invalid IDs provided. Only use numeric IDs from the list."
+ }
+
+ // Check for protected tools (model hallucinated an ID not in the prunable list)
+ for (const index of numericToolIds) {
+ const id = toolIdList[index]
+ const metadata = state.toolParameters.get(id)
+ if (metadata && config.strategies.pruneTool.protectedTools.includes(metadata.tool)) {
+ return "Invalid IDs provided. Only use numeric IDs from the list."
+ }
+ }
+
+ const pruneToolIds: string[] = numericToolIds.map(index => toolIdList[index])
+ state.prune.toolIds.push(...pruneToolIds)
+
+      const toolMetadata = new Map<string, ToolParameterEntry>()
+ for (const id of pruneToolIds) {
+ const toolParameters = state.toolParameters.get(id)
+ if (toolParameters) {
+ toolMetadata.set(id, toolParameters)
+ } else {
+ logger.debug("No metadata found for ID", { id })
+ }
+ }
+
+ state.stats.pruneTokenCounter += calculateTokensSaved(messages, pruneToolIds)
+
+ await sendUnifiedNotification(
+ client,
+ logger,
+ config,
+ state,
+ sessionId,
+ pruneToolIds,
+ toolMetadata,
+ reason as PruneReason,
+ currentAgent,
+ workingDirectory
+ )
+ state.stats.totalPruneTokens += state.stats.pruneTokenCounter
+ state.stats.pruneTokenCounter = 0
+ state.nudgeCounter = 0
+
+ saveSessionState(state, logger)
+ .catch(err => logger.error("Failed to persist state", { error: err.message }))
+
+ return formatPruningResultForTool(
+ pruneToolIds,
+ toolMetadata,
+ workingDirectory
+ )
+ },
+ })
+}
+
diff --git a/lib/tokenizer.ts b/lib/tokenizer.ts
deleted file mode 100644
index 711a449..0000000
--- a/lib/tokenizer.ts
+++ /dev/null
@@ -1,15 +0,0 @@
-export async function estimateTokensBatch(texts: string[]): Promise<number[]> {
- try {
- const { encode } = await import('gpt-tokenizer')
- return texts.map(text => encode(text).length)
- } catch {
- return texts.map(text => Math.round(text.length / 4))
- }
-}
-
-export function formatTokenCount(tokens: number): string {
- if (tokens >= 1000) {
- return `${(tokens / 1000).toFixed(1)}K`.replace('.0K', 'K') + ' tokens'
- }
- return tokens.toString() + ' tokens'
-}
diff --git a/lib/ui/display-utils.ts b/lib/ui/display-utils.ts
index 6ba7eb4..deb23a3 100644
--- a/lib/ui/display-utils.ts
+++ b/lib/ui/display-utils.ts
@@ -1,79 +1,5 @@
-import type { ToolMetadata } from "../fetch-wrapper/types"
-import type { PruningResult } from "../core/janitor"
-
-/**
- * Extracts a human-readable key from tool metadata for display purposes.
- * Used by both deduplication and AI analysis to show what was pruned.
- */
-export function extractParameterKey(metadata: { tool: string, parameters?: any }): string {
- if (!metadata.parameters) return ''
-
- const { tool, parameters } = metadata
-
- if (tool === "read" && parameters.filePath) {
- return parameters.filePath
- }
- if (tool === "write" && parameters.filePath) {
- return parameters.filePath
- }
- if (tool === "edit" && parameters.filePath) {
- return parameters.filePath
- }
-
- if (tool === "list") {
- return parameters.path || '(current directory)'
- }
- if (tool === "glob") {
- if (parameters.pattern) {
- const pathInfo = parameters.path ? ` in ${parameters.path}` : ""
- return `"${parameters.pattern}"${pathInfo}`
- }
- return '(unknown pattern)'
- }
- if (tool === "grep") {
- if (parameters.pattern) {
- const pathInfo = parameters.path ? ` in ${parameters.path}` : ""
- return `"${parameters.pattern}"${pathInfo}`
- }
- return '(unknown pattern)'
- }
-
- if (tool === "bash") {
- if (parameters.description) return parameters.description
- if (parameters.command) {
- return parameters.command.length > 50
- ? parameters.command.substring(0, 50) + "..."
- : parameters.command
- }
- }
-
- if (tool === "webfetch" && parameters.url) {
- return parameters.url
- }
- if (tool === "websearch" && parameters.query) {
- return `"${parameters.query}"`
- }
- if (tool === "codesearch" && parameters.query) {
- return `"${parameters.query}"`
- }
-
- if (tool === "todowrite") {
- return `${parameters.todos?.length || 0} todos`
- }
- if (tool === "todoread") {
- return "read todo list"
- }
-
- if (tool === "task" && parameters.description) {
- return parameters.description
- }
-
- const paramStr = JSON.stringify(parameters)
- if (paramStr === '{}' || paramStr === '[]' || paramStr === 'null') {
- return ''
- }
- return paramStr.substring(0, 50)
-}
+import { ToolParameterEntry } from "../state"
+import { extractParameterKey } from "../messages/utils"
export function truncate(str: string, maxLen: number = 60): string {
if (str.length <= maxLen) return str
@@ -109,18 +35,17 @@ function shortenSinglePath(path: string, workingDirectory?: string): string {
* Formats a list of pruned items in the style: "→ tool: parameter"
*/
export function formatPrunedItemsList(
- prunedIds: string[],
-  toolMetadata: Map<string, ToolMetadata>,
+ pruneToolIds: string[],
+  toolMetadata: Map<string, ToolParameterEntry>,
workingDirectory?: string
): string[] {
const lines: string[] = []
- for (const prunedId of prunedIds) {
- const normalizedId = prunedId.toLowerCase()
- const metadata = toolMetadata.get(normalizedId)
+ for (const id of pruneToolIds) {
+ const metadata = toolMetadata.get(id)
if (metadata) {
- const paramKey = extractParameterKey(metadata)
+ const paramKey = extractParameterKey(metadata.tool, metadata.parameters)
if (paramKey) {
// Use 60 char limit to match notification style
const displayKey = truncate(shortenPath(paramKey, workingDirectory), 60)
@@ -131,10 +56,10 @@ export function formatPrunedItemsList(
}
}
- const knownCount = prunedIds.filter(id =>
- toolMetadata.has(id.toLowerCase())
+ const knownCount = pruneToolIds.filter(id =>
+ toolMetadata.has(id)
).length
- const unknownCount = prunedIds.length - knownCount
+ const unknownCount = pruneToolIds.length - knownCount
if (unknownCount > 0) {
lines.push(`→ (${unknownCount} tool${unknownCount > 1 ? 's' : ''} with unknown metadata)`)
@@ -147,16 +72,17 @@ export function formatPrunedItemsList(
* Formats a PruningResult into a human-readable string for the prune tool output.
*/
export function formatPruningResultForTool(
- result: PruningResult,
+ prunedIds: string[],
+  toolMetadata: Map<string, ToolParameterEntry>,
workingDirectory?: string
): string {
const lines: string[] = []
- lines.push(`Context pruning complete. Pruned ${result.prunedCount} tool outputs.`)
+ lines.push(`Context pruning complete. Pruned ${prunedIds.length} tool outputs.`)
lines.push('')
- if (result.llmPrunedIds.length > 0) {
- lines.push(`Semantically pruned (${result.llmPrunedIds.length}):`)
- lines.push(...formatPrunedItemsList(result.llmPrunedIds, result.toolMetadata, workingDirectory))
+ if (prunedIds.length > 0) {
+ lines.push(`Semantically pruned (${prunedIds.length}):`)
+ lines.push(...formatPrunedItemsList(prunedIds, toolMetadata, workingDirectory))
}
return lines.join('\n').trim()
diff --git a/lib/ui/notification.ts b/lib/ui/notification.ts
index a2507ad..06b370e 100644
--- a/lib/ui/notification.ts
+++ b/lib/ui/notification.ts
@@ -1,66 +1,97 @@
import type { Logger } from "../logger"
-import type { SessionStats, GCStats } from "../core/janitor"
-import type { ToolMetadata, PruneReason } from "../fetch-wrapper/types"
-import { PRUNE_REASON_LABELS } from "../fetch-wrapper/types"
-import { formatTokenCount } from "../tokenizer"
+import type { SessionState } from "../state"
+import { formatTokenCount } from "../utils"
import { formatPrunedItemsList } from "./display-utils"
+import { ToolParameterEntry } from "../state"
+import { PluginConfig } from "../config"
+
+export type PruneReason = "completion" | "noise" | "consolidation"
+export const PRUNE_REASON_LABELS: Record<PruneReason, string> = {
+ completion: "Task Complete",
+ noise: "Noise Removal",
+ consolidation: "Consolidation"
+}
-export type PruningSummaryLevel = "off" | "minimal" | "detailed"
-
-export interface NotificationConfig {
- pruningSummary: PruningSummaryLevel
- workingDirectory?: string
+function formatStatsHeader(
+ totalTokensSaved: number,
+ pruneTokenCounter: number
+): string {
+ const totalTokensSavedStr = `~${formatTokenCount(totalTokensSaved + pruneTokenCounter)}`
+ return [
+ `▣ DCP | ${totalTokensSavedStr} saved total`,
+ ].join('\n')
}
-export interface NotificationContext {
- client: any
- logger: Logger
- config: NotificationConfig
+function buildMinimalMessage(
+ state: SessionState,
+ reason: PruneReason | undefined
+): string {
+ const reasonSuffix = reason ? ` [${PRUNE_REASON_LABELS[reason]}]` : ''
+ return formatStatsHeader(
+ state.stats.totalPruneTokens,
+ state.stats.pruneTokenCounter
+ ) + reasonSuffix
}
-export interface NotificationData {
- aiPrunedCount: number
- aiTokensSaved: number
- aiPrunedIds: string[]
-  toolMetadata: Map<string, ToolMetadata>
- gcPending: GCStats | null
- sessionStats: SessionStats | null
- reason?: PruneReason
+function buildDetailedMessage(
+ state: SessionState,
+ reason: PruneReason | undefined,
+ pruneToolIds: string[],
+  toolMetadata: Map<string, ToolParameterEntry>,
+ workingDirectory?: string
+): string {
+ let message = formatStatsHeader(state.stats.totalPruneTokens, state.stats.pruneTokenCounter)
+
+ if (pruneToolIds.length > 0) {
+ const pruneTokenCounterStr = `~${formatTokenCount(state.stats.pruneTokenCounter)}`
+ const reasonLabel = reason ? ` — ${PRUNE_REASON_LABELS[reason]}` : ''
+ message += `\n\n▣ Pruned tools (${pruneTokenCounterStr})${reasonLabel}`
+
+ const itemLines = formatPrunedItemsList(pruneToolIds, toolMetadata, workingDirectory)
+ message += '\n' + itemLines.join('\n')
+ }
+
+ return message.trim()
}
export async function sendUnifiedNotification(
- ctx: NotificationContext,
- sessionID: string,
- data: NotificationData,
- agent?: string
+ client: any,
+ logger: Logger,
+ config: PluginConfig,
+ state: SessionState,
+ sessionId: string,
+ pruneToolIds: string[],
+  toolMetadata: Map<string, ToolParameterEntry>,
+ reason: PruneReason | undefined,
+ agent: string | undefined,
+ workingDirectory: string
): Promise {
- const hasAiPruning = data.aiPrunedCount > 0
- const hasGcActivity = data.gcPending && data.gcPending.toolsDeduped > 0
-
- if (!hasAiPruning && !hasGcActivity) {
+ const hasPruned = pruneToolIds.length > 0
+ if (!hasPruned) {
return false
}
- if (ctx.config.pruningSummary === 'off') {
+ if (config.pruningSummary === 'off') {
return false
}
- const message = ctx.config.pruningSummary === 'minimal'
- ? buildMinimalMessage(data)
- : buildDetailedMessage(data, ctx.config.workingDirectory)
+ const message = config.pruningSummary === 'minimal'
+ ? buildMinimalMessage(state, reason)
+ : buildDetailedMessage(state, reason, pruneToolIds, toolMetadata, workingDirectory)
- await sendIgnoredMessage(ctx, sessionID, message, agent)
+ await sendIgnoredMessage(client, logger, sessionId, message, agent)
return true
}
export async function sendIgnoredMessage(
- ctx: NotificationContext,
+ client: any,
+ logger: Logger,
sessionID: string,
text: string,
agent?: string
): Promise {
try {
- await ctx.client.session.prompt({
+ await client.session.prompt({
path: { id: sessionID },
body: {
noReply: true,
@@ -73,57 +104,7 @@ export async function sendIgnoredMessage(
}
})
} catch (error: any) {
- ctx.logger.error("notification", "Failed to send notification", { error: error.message })
+ logger.error("Failed to send notification", { error: error.message })
}
}
-function buildMinimalMessage(data: NotificationData): string {
- const { justNowTokens, totalTokens } = calculateStats(data)
- const reasonSuffix = data.reason ? ` [${PRUNE_REASON_LABELS[data.reason]}]` : ''
- return formatStatsHeader(totalTokens, justNowTokens) + reasonSuffix
-}
-
-function buildDetailedMessage(data: NotificationData, workingDirectory?: string): string {
- const { justNowTokens, totalTokens } = calculateStats(data)
-
- let message = formatStatsHeader(totalTokens, justNowTokens)
-
- if (data.aiPrunedCount > 0) {
- const justNowTokensStr = `~${formatTokenCount(justNowTokens)}`
- const reasonLabel = data.reason ? ` — ${PRUNE_REASON_LABELS[data.reason]}` : ''
- message += `\n\n▣ Pruned tools (${justNowTokensStr})${reasonLabel}`
-
- const itemLines = formatPrunedItemsList(data.aiPrunedIds, data.toolMetadata, workingDirectory)
- message += '\n' + itemLines.join('\n')
- }
-
- return message.trim()
-}
-
-function calculateStats(data: NotificationData): {
- justNowTokens: number
- totalTokens: number
-} {
- const justNowTokens = data.aiTokensSaved + (data.gcPending?.tokensCollected ?? 0)
-
- const totalTokens = data.sessionStats
- ? data.sessionStats.totalTokensSaved + data.sessionStats.totalGCTokens
- : justNowTokens
-
- return { justNowTokens, totalTokens }
-}
-
-function formatStatsHeader(
- totalTokens: number,
- justNowTokens: number
-): string {
- const totalTokensStr = `~${formatTokenCount(totalTokens)}`
- const justNowTokensStr = `~${formatTokenCount(justNowTokens)}`
-
- const maxTokenLen = Math.max(totalTokensStr.length, justNowTokensStr.length)
- const totalTokensPadded = totalTokensStr.padStart(maxTokenLen)
-
- return [
- `▣ DCP | ${totalTokensPadded} saved total`,
- ].join('\n')
-}
diff --git a/lib/utils.ts b/lib/utils.ts
new file mode 100644
index 0000000..842b964
--- /dev/null
+++ b/lib/utils.ts
@@ -0,0 +1,65 @@
+import { WithParts } from "./state"
+import { encode } from 'gpt-tokenizer'
+
+/**
+ * Estimates token counts for a batch of texts using gpt-tokenizer.
+ */
+function estimateTokensBatch(texts: string[]): number[] {
+ try {
+ return texts.map(text => encode(text).length)
+ } catch {
+ return texts.map(text => Math.round(text.length / 4))
+ }
+}
+
+/**
+ * Calculates approximate tokens saved by pruning the given tool call IDs.
+ * TODO: Make it count message content that are not tool outputs. Currently it ONLY covers tool outputs and errors
+ */
+export const calculateTokensSaved = (
+ messages: WithParts[],
+ pruneToolIds: string[]
+): number => {
+ try {
+ const contents: string[] = []
+ for (const msg of messages) {
+ for (const part of msg.parts) {
+ if (part.type !== 'tool' || !pruneToolIds.includes(part.callID)) {
+ continue
+ }
+ if (part.state.status === "completed") {
+ const content = typeof part.state.output === 'string'
+ ? part.state.output
+ : JSON.stringify(part.state.output)
+ contents.push(content)
+ }
+ if (part.state.status === "error") {
+ const content = typeof part.state.error === 'string'
+ ? part.state.error
+ : JSON.stringify(part.state.error)
+ contents.push(content)
+ }
+ }
+ }
+ const tokenCounts: number[] = estimateTokensBatch(contents)
+ return tokenCounts.reduce((sum, count) => sum + count, 0)
+ } catch (error: any) {
+ return 0
+ }
+}
+
+export function formatTokenCount(tokens: number): string {
+ if (tokens >= 1000) {
+ return `${(tokens / 1000).toFixed(1)}K`.replace('.0K', 'K') + ' tokens'
+ }
+ return tokens.toString() + ' tokens'
+}
+
+export async function isSubAgentSession(client: any, sessionID: string): Promise<boolean> {
+ try {
+ const result = await client.session.get({ path: { id: sessionID } })
+ return !!result.data?.parentID
+ } catch (error: any) {
+ return false
+ }
+}
diff --git a/lib/version-checker.ts b/lib/version-checker.ts
deleted file mode 100644
index 5aed6b1..0000000
--- a/lib/version-checker.ts
+++ /dev/null
@@ -1,84 +0,0 @@
-import { readFileSync } from 'fs'
-import { join, dirname } from 'path'
-import { fileURLToPath } from 'url'
-
-export const PACKAGE_NAME = '@tarquinen/opencode-dcp'
-export const NPM_REGISTRY_URL = `https://registry.npmjs.org/${PACKAGE_NAME}/latest`
-
-const __filename = fileURLToPath(import.meta.url)
-const __dirname = dirname(__filename)
-
-export function getLocalVersion(): string {
- try {
- const pkgPath = join(__dirname, '../../package.json')
- const pkg = JSON.parse(readFileSync(pkgPath, 'utf-8'))
- return pkg.version
- } catch {
- return '0.0.0'
- }
-}
-
-export async function getNpmVersion(): Promise {
- try {
- const controller = new AbortController()
- const timeout = setTimeout(() => controller.abort(), 5000)
-
- const res = await fetch(NPM_REGISTRY_URL, {
- signal: controller.signal,
- headers: { 'Accept': 'application/json' }
- })
- clearTimeout(timeout)
-
- if (!res.ok) return null
- const data = await res.json() as { version?: string }
- return data.version ?? null
- } catch {
- return null
- }
-}
-
-export function isOutdated(local: string, remote: string): boolean {
- const parseVersion = (v: string) => v.split('.').map(n => parseInt(n, 10) || 0)
- const [localParts, remoteParts] = [parseVersion(local), parseVersion(remote)]
-
- for (let i = 0; i < Math.max(localParts.length, remoteParts.length); i++) {
- const l = localParts[i] ?? 0
- const r = remoteParts[i] ?? 0
- if (r > l) return true
- if (l > r) return false
- }
- return false
-}
-
-export async function checkForUpdates(client: any, logger?: { info: (component: string, message: string, data?: any) => void }, showToast: boolean = true): Promise {
- try {
- const local = getLocalVersion()
- const npm = await getNpmVersion()
-
- if (!npm) {
- logger?.info("version", "Version check skipped", { reason: "npm fetch failed" })
- return
- }
-
- if (!isOutdated(local, npm)) {
- logger?.info("version", "Up to date", { local, npm })
- return
- }
-
- logger?.info("version", "Update available", { local, npm })
-
- if (!showToast) {
- return
- }
-
- await client.tui.showToast({
- body: {
- title: "DCP: Update available",
- message: `v${local} → v${npm}\nUse ${PACKAGE_NAME}@latest to auto-update`,
- variant: "info",
- duration: 6000
- }
- })
- } catch {
- }
-}
diff --git a/package-lock.json b/package-lock.json
index a6c3dcd..d56198b 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "@tarquinen/opencode-dcp",
- "version": "0.4.17",
+ "version": "1.0.0-beta.3",
 "lockfileVersion": 3,
 "requires": true,
 "packages": {
 "": {
 "name": "@tarquinen/opencode-dcp",
- "version": "0.4.17",
+ "version": "1.0.0-beta.3",
"license": "MIT",
"dependencies": {
"@ai-sdk/openai-compatible": "^1.0.28",
diff --git a/package.json b/package.json
index ffb7bb1..b4c2d13 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
{
"$schema": "https://json.schemastore.org/package.json",
"name": "@tarquinen/opencode-dcp",
- "version": "0.4.17",
+ "version": "1.0.0-beta.3",
"type": "module",
"description": "OpenCode plugin that optimizes token usage by pruning obsolete tool outputs from conversation context",
"main": "./dist/index.js",
diff --git a/watch-logs.sh b/watch-logs.sh
deleted file mode 100755
index 2b833f1..0000000
--- a/watch-logs.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-# Helper script to watch plugin logs in real time
-
-PLUGIN_DIR="$(cd "$(dirname "$0")" && pwd)"
-LOG_DIR="$PLUGIN_DIR/logs"
-LOG_FILE="$LOG_DIR/$(date +%Y-%m-%d).log"
-
-echo "Dynamic Context Pruning - Log Viewer"
-echo "===================================="
-echo ""
-
-# Check if debug is enabled
-if grep -q "debug: false" "$PLUGIN_DIR/lib/config.ts"; then
- echo "⚠️ WARNING: Debug logging is DISABLED"
- echo " Edit lib/config.ts and set 'debug: true' to enable logging"
- echo ""
- exit 1
-fi
-
-echo "✓ Debug logging is enabled"
-echo ""
-
-# Check if log file exists
-if [ ! -f "$LOG_FILE" ]; then
- echo "ℹ️ Log file not found: $LOG_FILE"
- echo ""
- echo " This means OpenCode hasn't been restarted since the plugin was updated."
- echo ""
- echo " To generate logs:"
- echo " 1. Restart OpenCode to reload the plugin"
- echo " 2. Logs will be created automatically"
- echo ""
- echo " Waiting for log file to appear..."
- echo " (Press Ctrl+C to cancel)"
- echo ""
-
- # Wait for file to be created
- while [ ! -f "$LOG_FILE" ]; do
- sleep 2
- done
-
- echo "✓ Log file created!"
- echo ""
-fi
-
-echo "📺 Watching: $LOG_FILE"
-echo " Press Ctrl+C to stop"
-echo ""
-echo "----------------------------------------"
-echo ""
-
-# Show all logs with pretty printing if jq is available
-if command -v jq &> /dev/null; then
- tail -f "$LOG_FILE" | jq --color-output '.'
-else
- tail -f "$LOG_FILE"
-fi
-